python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/musb/ux500_dma.c
*
* U8500 DMA support code
*
* Copyright (C) 2009 STMicroelectronics
* Copyright (C) 2011 ST-Ericsson SA
* Authors:
* Mian Yousaf Kaukab <[email protected]>
* Praveena Nadahally <[email protected]>
* Rajaram Regupathy <[email protected]>
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pfn.h>
#include <linux/sizes.h>
#include <linux/platform_data/usb-musb-ux500.h>
#include "musb_core.h"
/*
 * dmaengine request-line names. Each channel is shared pairwise between
 * two endpoints (e.g. channel 0 serves hw_ep 1 and 9 -- see the mapping
 * comment in ux500_dma_channel_allocate()), except channel 7 / hw_ep 8.
 */
static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12",
	"iep_5_13", "iep_6_14", "iep_7_15", "iep_8" };
static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12",
	"oep_5_13", "oep_6_14", "oep_7_15", "oep_8" };
/* Per-endpoint DMA channel state wrapping the generic musb dma_channel. */
struct ux500_dma_channel {
	struct dma_channel channel;		/* generic musb DMA channel */
	struct ux500_dma_controller *controller; /* owning controller */
	struct musb_hw_ep *hw_ep;		/* endpoint currently bound */
	struct dma_chan *dma_chan;		/* underlying dmaengine channel */
	unsigned int cur_len;			/* length of transfer in flight */
	dma_cookie_t cookie;			/* cookie of submitted descriptor */
	u8 ch_num;				/* logical channel number (0-7) */
	u8 is_tx;				/* 1 = TX/IN, 0 = RX/OUT */
	u8 is_allocated;			/* claimed by an endpoint */
};
/* Controller instance: one bank of RX channels and one bank of TX channels. */
struct ux500_dma_controller {
	struct dma_controller controller;	/* generic musb DMA controller */
	struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
	struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
	void *private_data;			/* the owning struct musb */
	dma_addr_t phy_base;			/* physical base of MUSB registers */
};
/* Work function invoked from DMA callback to handle rx transfers. */
static void ux500_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;

	dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
		hw_ep->epnum);

	/* Update channel state and report completion under the musb lock. */
	spin_lock_irqsave(&musb->lock, flags);
	ux500_channel->channel.actual_len = ux500_channel->cur_len;
	ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Program the dmaengine channel for one USB transfer and start it.
 *
 * Builds a single-entry scatterlist for the buffer, configures the slave
 * channel to move data between that buffer and the endpoint FIFO, then
 * submits and issues the descriptor.  Completion is signalled through
 * ux500_dma_callback().
 *
 * Returns true on success, false if no descriptor could be prepared.
 */
static bool ux500_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct dma_chan *dma_chan = ux500_channel->dma_chan;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct scatterlist sg;
	/*
	 * Zero-initialize so any dma_slave_config fields we do not set
	 * explicitly are not passed to the dmaengine as stack garbage.
	 */
	struct dma_slave_config slave_conf = {0};
	enum dma_slave_buswidth addr_width;
	struct musb *musb = ux500_channel->controller->private_data;
	dma_addr_t usb_fifo_addr = (musb->io.fifo_offset(hw_ep->epnum) +
					ux500_channel->controller->phy_base);

	dev_dbg(musb->controller,
		"packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		packet_sz, mode, (unsigned long long) dma_addr,
		len, ux500_channel->is_tx);

	ux500_channel->cur_len = len;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
					    offset_in_page(dma_addr));
	sg_dma_address(&sg) = dma_addr;
	sg_dma_len(&sg) = len;

	direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	/* Word accesses when the length allows it, byte accesses otherwise. */
	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
					DMA_SLAVE_BUSWIDTH_4_BYTES;

	slave_conf.direction = direction;
	slave_conf.src_addr = usb_fifo_addr;
	slave_conf.src_addr_width = addr_width;
	slave_conf.src_maxburst = 16;
	slave_conf.dst_addr = usb_fifo_addr;
	slave_conf.dst_addr_width = addr_width;
	slave_conf.dst_maxburst = 16;
	slave_conf.device_fc = false;

	dmaengine_slave_config(dma_chan, &slave_conf);
	dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = ux500_dma_callback;
	dma_desc->callback_param = channel;
	ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

	dma_async_issue_pending(dma_chan);

	return true;
}
/*
 * Claim the fixed DMA channel associated with a hardware endpoint.
 *
 * 8 DMA channels (0 - 7). Each DMA channel can only be allocated to a
 * specific hw_ep pair; for example DMA channel 0 serves hw_ep 1 and 9.
 * Returns NULL when the endpoint has no channel or it is already in use.
 */
static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct musb *musb = controller->private_data;
	struct ux500_dma_channel *uxc;
	u8 ch_num = hw_ep->epnum - 1;

	/* Endpoints 9..16 fold onto the same channels as 1..8. */
	if (ch_num > 7)
		ch_num -= 8;
	if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
		return NULL;

	if (is_tx)
		uxc = &controller->tx_channel[ch_num];
	else
		uxc = &controller->rx_channel[ch_num];

	/* Check if channel is already used. */
	if (uxc->is_allocated)
		return NULL;

	uxc->hw_ep = hw_ep;
	uxc->is_allocated = 1;

	dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
		hw_ep->epnum, is_tx, ch_num);

	return &uxc->channel;
}
/* Return a previously allocated channel to the free, idle state. */
static void ux500_dma_channel_release(struct dma_channel *channel)
{
	struct ux500_dma_channel *uxc = channel->private_data;
	struct musb *musb = uxc->controller->private_data;

	dev_dbg(musb->controller, "channel=%d\n", uxc->ch_num);

	if (!uxc->is_allocated)
		return;

	uxc->is_allocated = 0;
	channel->status = MUSB_DMA_STATUS_FREE;
	channel->actual_len = 0;
}
/*
 * Decide whether a transfer can use DMA: it must be 32-bit aligned,
 * a multiple of four bytes, and at least 512 bytes long.
 */
static int ux500_dma_is_compatible(struct dma_channel *channel,
		u16 maxpacket, void *buf, u32 length)
{
	if (maxpacket & 0x3)
		return false;
	if ((unsigned long int) buf & 0x3)
		return false;
	if (length < 512)
		return false;
	if (length & 0x3)
		return false;
	return true;
}
/*
 * Start a DMA transfer on an allocated channel.  The channel must be in
 * a known, non-busy state; on configuration failure it is returned to
 * the free state and 0 is reported to the caller.
 */
static int ux500_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int started;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	started = ux500_configure_channel(channel, packet_sz, mode,
					  dma_addr, len);
	if (!started)
		channel->status = MUSB_DMA_STATUS_FREE;

	return started;
}
/*
 * Abort an in-flight transfer: first clear the DMA enable bits in the
 * endpoint's CSR so the core stops driving requests, then terminate the
 * dmaengine transfer.  A no-op when the channel is not busy.
 */
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct ux500_dma_controller *controller = ux500_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
	u16 csr;

	dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
		ux500_channel->ch_num, ux500_channel->is_tx);

	if (channel->status == MUSB_DMA_STATUS_BUSY) {
		if (ux500_channel->is_tx) {
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET |
				 MUSB_TXCSR_DMAENAB |
				 MUSB_TXCSR_DMAMODE);
			musb_writew(epio, MUSB_TXCSR, csr);
		} else {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
				 MUSB_RXCSR_DMAENAB |
				 MUSB_RXCSR_DMAMODE);
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* Only kill the dmaengine job after the core is quiesced. */
		dmaengine_terminate_all(ux500_channel->dma_chan);
		channel->status = MUSB_DMA_STATUS_FREE;
	}
	return 0;
}
/*
 * Release every RX and TX channel and its dmaengine channel.
 *
 * This is also called from the failure path of
 * ux500_dma_controller_start(); at that point channels past the failure
 * slot (and possibly the whole TX bank) were never initialised, so their
 * channel.private_data is still NULL from the kzalloc in
 * ux500_dma_controller_create().  Skip those instead of dereferencing
 * NULL inside ux500_dma_channel_release().
 */
static void ux500_dma_controller_stop(struct ux500_dma_controller *controller)
{
	struct ux500_dma_channel *ux500_channel;
	struct dma_channel *channel;
	u8 ch_num;

	for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
		channel = &controller->rx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		if (!ux500_channel)
			continue;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}

	for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
		channel = &controller->tx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		if (!ux500_channel)
			continue;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}
}
/*
 * Acquire one dmaengine channel for every RX and TX slot.
 *
 * The outer loop runs twice: the first pass fills the RX bank, then the
 * loop variables are re-pointed at the TX bank for the second pass.
 * On any allocation failure all channels acquired so far are released.
 */
static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
{
	struct ux500_dma_channel *ux500_channel = NULL;
	struct musb *musb = controller->private_data;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct ux500_musb_board_data *data;
	struct dma_channel *dma_channel = NULL;
	char **chan_names;
	u32 ch_num;
	u8 dir;
	u8 is_tx = 0;
	void **param_array;
	struct ux500_dma_channel *channel_array;
	dma_cap_mask_t mask;

	if (!plat) {
		dev_err(musb->controller, "No platform data\n");
		return -EINVAL;
	}

	data = plat->board_data;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Prepare the loop for RX channels */
	channel_array = controller->rx_channel;
	param_array = data ? data->dma_rx_param_array : NULL;
	chan_names = (char **)iep_chan_names;

	for (dir = 0; dir < 2; dir++) {
		for (ch_num = 0;
		     ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
		     ch_num++) {
			ux500_channel = &channel_array[ch_num];
			ux500_channel->controller = controller;
			ux500_channel->ch_num = ch_num;
			ux500_channel->is_tx = is_tx;

			dma_channel = &(ux500_channel->channel);
			dma_channel->private_data = ux500_channel;
			dma_channel->status = MUSB_DMA_STATUS_FREE;
			dma_channel->max_len = SZ_16M;

			/* Prefer a named channel lookup; fall back to the
			 * board-data filter function if that fails. */
			ux500_channel->dma_chan =
				dma_request_chan(dev, chan_names[ch_num]);

			if (IS_ERR(ux500_channel->dma_chan))
				ux500_channel->dma_chan =
					dma_request_channel(mask,
							    data ?
							    data->dma_filter :
							    NULL,
							    param_array ?
							    param_array[ch_num] :
							    NULL);

			if (!ux500_channel->dma_chan) {
				ERR("Dma pipe allocation error dir=%d ch=%d\n",
				    dir, ch_num);

				/* Release already allocated channels */
				ux500_dma_controller_stop(controller);

				return -EBUSY;
			}

		}

		/* Prepare the loop for TX channels */
		channel_array = controller->tx_channel;
		param_array = data ? data->dma_tx_param_array : NULL;
		chan_names = (char **)oep_chan_names;
		is_tx = 1;
	}

	return 0;
}
/* Tear down the DMA controller: release all channels, then free it. */
void ux500_dma_controller_destroy(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);

	ux500_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(ux500_dma_controller_destroy);
/*
 * Allocate and initialise the ux500 DMA controller wrapper and acquire
 * its channels.  Returns NULL on any failure (allocation, missing MEM
 * resource, or channel acquisition).
 */
struct dma_controller *
ux500_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct ux500_dma_controller *controller;
	struct platform_device *pdev = to_platform_device(musb->controller);
	struct resource *iomem;
	int ret;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	controller->private_data = musb;

	/* Save physical address for DMA controller. */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(musb->controller, "no memory resource defined\n");
		goto plat_get_fail;
	}

	controller->phy_base = (dma_addr_t) iomem->start;

	controller->controller.channel_alloc = ux500_dma_channel_allocate;
	controller->controller.channel_release = ux500_dma_channel_release;
	controller->controller.channel_program = ux500_dma_channel_program;
	controller->controller.channel_abort = ux500_dma_channel_abort;
	controller->controller.is_compatible = ux500_dma_is_compatible;

	ret = ux500_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	return NULL;
}
EXPORT_SYMBOL_GPL(ux500_dma_controller_create);
| linux-master | drivers/usb/musb/ux500_dma.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Ingenic JZ4740 "glue layer"
*
* Copyright (C) 2013, Apelete Seketeli <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
struct jz4740_glue {
struct platform_device *pdev;
struct musb *musb;
struct clk *clk;
struct usb_role_switch *role_sw;
};
/*
 * Shared interrupt handler: gives the Inventra DMA controller first
 * shot at the IRQ, then latches and dispatches the core USB/TX/RX
 * interrupt status under the musb lock.
 */
static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
{
	unsigned long flags;
	irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
	struct musb *musb = __hci;

	if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
		retval_dma = dma_controller_irq(irq, musb->dma_controller);

	spin_lock_irqsave(&musb->lock, flags);

	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);

	/*
	 * The controller is gadget only, the state of the host mode IRQ bits is
	 * undefined. Mask them to make sure that the musb driver core will
	 * never see them set
	 */
	musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
			 MUSB_INTR_RESET | MUSB_INTR_SOF;

	if (musb->int_usb || musb->int_tx || musb->int_rx)
		retval = musb_interrupt(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	if (retval == IRQ_HANDLED || retval_dma == IRQ_HANDLED)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
/* JZ4740 FIFO layout: one 512-byte TX/RX pair plus a 64-byte EP2 TX FIFO. */
static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
};

static const struct musb_hdrc_config jz4740_musb_config = {
	/* Silicon does not implement USB OTG. */
	.multipoint = 0,
	/* Max EPs scanned, driver will decide which EP can be used. */
	.num_eps = 4,
	/* RAMbits needed to configure EPs from table */
	.ram_bits = 9,
	.fifo_cfg = jz4740_musb_fifo_cfg,
	.fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
};
/*
 * Role-switch callback: translate the requested USB role into the
 * matching PHY event and broadcast it on the transceiver's notifier
 * chain.  A missing transceiver or an unknown role is silently ignored.
 */
static int jz4740_musb_role_switch_set(struct usb_role_switch *sw,
				       enum usb_role role)
{
	struct jz4740_glue *glue = usb_role_switch_get_drvdata(sw);
	struct usb_phy *phy = glue->musb->xceiv;
	unsigned long event;

	if (!phy)
		return 0;

	switch (role) {
	case USB_ROLE_NONE:
		event = USB_EVENT_NONE;
		break;
	case USB_ROLE_DEVICE:
		event = USB_EVENT_VBUS;
		break;
	case USB_ROLE_HOST:
		event = USB_EVENT_ID;
		break;
	default:
		return 0;
	}

	atomic_notifier_call_chain(&phy->notifier, event, phy);

	return 0;
}
/*
 * Platform init hook: find a generic PHY (or fall back to a legacy
 * USB-PHY transceiver), power it up, and register the USB role switch.
 * The error labels unwind PHY power/init in reverse order.
 */
static int jz4740_musb_init(struct musb *musb)
{
	struct device *dev = musb->controller->parent;
	struct jz4740_glue *glue = dev_get_drvdata(dev);
	struct usb_role_switch_desc role_sw_desc = {
		.set = jz4740_musb_role_switch_set,
		.driver_data = glue,
		.fwnode = dev_fwnode(dev),
	};
	int err;

	glue->musb = musb;

	/* A generic PHY is optional; -ENODEV just means none is described. */
	if (IS_ENABLED(CONFIG_GENERIC_PHY)) {
		musb->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
		if (IS_ERR(musb->phy)) {
			err = PTR_ERR(musb->phy);
			if (err != -ENODEV) {
				dev_err(dev, "Unable to get PHY\n");
				return err;
			}

			musb->phy = NULL;
		}
	}

	if (musb->phy) {
		err = phy_init(musb->phy);
		if (err) {
			dev_err(dev, "Failed to init PHY\n");
			return err;
		}

		err = phy_power_on(musb->phy);
		if (err) {
			dev_err(dev, "Unable to power on PHY\n");
			goto err_phy_shutdown;
		}
	} else {
		/* Legacy USB-PHY fallback when no generic PHY is present. */
		if (dev->of_node)
			musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
		else
			musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		if (IS_ERR(musb->xceiv)) {
			dev_err(dev, "No transceiver configured\n");
			return PTR_ERR(musb->xceiv);
		}
	}

	glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
	if (IS_ERR(glue->role_sw)) {
		dev_err(dev, "Failed to register USB role switch\n");
		err = PTR_ERR(glue->role_sw);
		goto err_phy_power_down;
	}

	/*
	 * Silicon does not implement ConfigData register.
	 * Set dyn_fifo to avoid reading EP config from hardware.
	 */
	musb->dyn_fifo = true;

	musb->isr = jz4740_musb_interrupt;

	return 0;

err_phy_power_down:
	if (musb->phy)
		phy_power_off(musb->phy);
err_phy_shutdown:
	if (musb->phy)
		phy_exit(musb->phy);
	return err;
}
/* Platform exit hook: undo jz4740_musb_init() in reverse order. */
static int jz4740_musb_exit(struct musb *musb)
{
	struct jz4740_glue *glue = dev_get_drvdata(musb->controller->parent);

	usb_role_switch_unregister(glue->role_sw);
	if (musb->phy) {
		phy_power_off(musb->phy);
		phy_exit(musb->phy);
	}

	return 0;
}
/*
 * Platform ops shared by the JZ4740 and JZ4770 variants.  The DMA hooks
 * are only wired up when the Inventra DMA engine is configured in.
 */
static const struct musb_platform_ops jz4740_musb_ops = {
	.quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
	.fifo_mode = 2,
	.init = jz4740_musb_init,
	.exit = jz4740_musb_exit,
#ifdef CONFIG_USB_INVENTRA_DMA
	.dma_init = musbhs_dma_controller_create_noirq,
	.dma_exit = musbhs_dma_controller_destroy,
#endif
};

static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
	.mode = MUSB_PERIPHERAL,
	.config = &jz4740_musb_config,
	.platform_ops = &jz4740_musb_ops,
};
/* JZ4770 FIFO layout: five 512-byte TX/RX endpoint pairs. */
static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
	{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
	{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
	{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
	{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
};

static struct musb_hdrc_config jz4770_musb_config = {
	.multipoint = 1,
	.num_eps = 11,
	.ram_bits = 11,
	.fifo_cfg = jz4770_musb_fifo_cfg,
	.fifo_cfg_size = ARRAY_SIZE(jz4770_musb_fifo_cfg),
};

static const struct musb_hdrc_platform_data jz4770_musb_pdata = {
	.mode = MUSB_PERIPHERAL, /* TODO: support OTG */
	.config = &jz4770_musb_config,
	.platform_ops = &jz4740_musb_ops,
};
/*
 * Create the "musb-hdrc" child device, enable the UDC clock, and hand
 * the core driver its resources and platform data.  The goto labels
 * release everything acquired so far in reverse order on failure.
 */
static int jz4740_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct musb_hdrc_platform_data *pdata;
	struct platform_device *musb;
	struct jz4740_glue *glue;
	struct clk *clk;
	int ret;

	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	pdata = of_device_get_match_data(dev);
	if (!pdata) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
	if (!musb) {
		dev_err(dev, "failed to allocate musb device\n");
		return -ENOMEM;
	}

	clk = devm_clk_get(dev, "udc");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err_platform_device_put;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		goto err_platform_device_put;
	}

	/* The child does the DMA; give it a 32-bit mask and our OF node. */
	musb->dev.parent = dev;
	musb->dev.dma_mask = &musb->dev.coherent_dma_mask;
	musb->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	device_set_of_node_from_dev(&musb->dev, dev);

	glue->pdev = musb;
	glue->clk = clk;

	platform_set_drvdata(pdev, glue);

	ret = platform_device_add_resources(musb, pdev->resource,
					    pdev->num_resources);
	if (ret) {
		dev_err(dev, "failed to add resources\n");
		goto err_clk_disable;
	}

	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(dev, "failed to add platform_data\n");
		goto err_clk_disable;
	}

	ret = platform_device_add(musb);
	if (ret) {
		dev_err(dev, "failed to register musb device\n");
		goto err_clk_disable;
	}

	return 0;

err_clk_disable:
	clk_disable_unprepare(clk);
err_platform_device_put:
	platform_device_put(musb);
	return ret;
}
/* Undo jz4740_probe(): unregister the child device, then gate the clock. */
static void jz4740_remove(struct platform_device *pdev)
{
	struct jz4740_glue *glue = platform_get_drvdata(pdev);

	platform_device_unregister(glue->pdev);
	clk_disable_unprepare(glue->clk);
}
/* Device-tree match table selecting per-SoC platform data. */
static const struct of_device_id jz4740_musb_of_match[] = {
	{ .compatible = "ingenic,jz4740-musb", .data = &jz4740_musb_pdata },
	{ .compatible = "ingenic,jz4770-musb", .data = &jz4770_musb_pdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);

static struct platform_driver jz4740_driver = {
	.probe = jz4740_probe,
	.remove_new = jz4740_remove,
	.driver = {
		.name = "musb-jz4740",
		.of_match_table = jz4740_musb_of_match,
	},
};

MODULE_DESCRIPTION("JZ4740 MUSB Glue Layer");
MODULE_AUTHOR("Apelete Seketeli <[email protected]>");
MODULE_LICENSE("GPL v2");
module_platform_driver(jz4740_driver);
| linux-master | drivers/usb/musb/jz4740.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG peripheral driver ep0 handling
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include "musb_core.h"
/* ep0 is always musb->endpoints[0].ep_in */
/* Peek at the head of the ep0 request queue (NULL when empty). */
#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
/*
* locking note: we use only the controller lock, for simpler correctness.
* It's always held with IRQs blocked.
*
* It protects the ep0 request queue as well as ep0_state, not just the
* controller and indexed registers. And that lock stays held unless it
* needs to be dropped to allow reentering this driver ... like upcalls to
* the gadget driver, or adjusting endpoint halt status.
*/
/* Map an ep0 state-machine stage to a short name for trace output. */
static char *decode_ep0stage(u8 stage)
{
	static const struct {
		u8	stage;
		char	*name;
	} stage_names[] = {
		{ MUSB_EP0_STAGE_IDLE,		"idle" },
		{ MUSB_EP0_STAGE_SETUP,		"setup" },
		{ MUSB_EP0_STAGE_TX,		"in" },
		{ MUSB_EP0_STAGE_RX,		"out" },
		{ MUSB_EP0_STAGE_ACKWAIT,	"wait" },
		{ MUSB_EP0_STAGE_STATUSIN,	"in/status" },
		{ MUSB_EP0_STAGE_STATUSOUT,	"out/status" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(stage_names); i++)
		if (stage_names[i].stage == stage)
			return stage_names[i].name;

	return "?";
}
/* handle a standard GET_STATUS request
 * Context: caller holds controller lock
 *
 * Returns >0 when handled (the two status bytes have been loaded into
 * the ep0 FIFO), 0 to delegate to the gadget driver, <0 to stall.
 */
static int service_tx_status_request(
	struct musb *musb,
	const struct usb_ctrlrequest *ctrlrequest)
{
	void __iomem *mbase = musb->mregs;
	int handled = 1;
	u8 result[2], epnum = 0;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	result[1] = 0;

	switch (recip) {
	case USB_RECIP_DEVICE:
		/* self-powered / remote-wakeup / OTG feature bits */
		result[0] = musb->g.is_selfpowered << USB_DEVICE_SELF_POWERED;
		result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
		if (musb->g.is_otg) {
			result[0] |= musb->g.b_hnp_enable
				<< USB_DEVICE_B_HNP_ENABLE;
			result[0] |= musb->g.a_alt_hnp_support
				<< USB_DEVICE_A_ALT_HNP_SUPPORT;
			result[0] |= musb->g.a_hnp_support
				<< USB_DEVICE_A_HNP_SUPPORT;
		}
		break;

	case USB_RECIP_INTERFACE:
		result[0] = 0;
		break;

	case USB_RECIP_ENDPOINT: {
		int is_in;
		struct musb_ep *ep;
		u16 tmp;
		void __iomem *regs;

		epnum = (u8) ctrlrequest->wIndex;
		if (!epnum) {
			result[0] = 0;
			break;
		}

		is_in = epnum & USB_DIR_IN;
		epnum &= 0x0f;
		if (epnum >= MUSB_C_NUM_EPS) {
			handled = -EINVAL;
			break;
		}

		if (is_in)
			ep = &musb->endpoints[epnum].ep_in;
		else
			ep = &musb->endpoints[epnum].ep_out;
		regs = musb->endpoints[epnum].regs;

		/* not enabled -> stall */
		if (!ep->desc) {
			handled = -EINVAL;
			break;
		}

		/* read the halt bit from the endpoint's CSR */
		musb_ep_select(mbase, epnum);
		if (is_in)
			tmp = musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_P_SENDSTALL;
		else
			tmp = musb_readw(regs, MUSB_RXCSR)
				& MUSB_RXCSR_P_SENDSTALL;
		musb_ep_select(mbase, 0);

		result[0] = tmp ? 1 : 0;
		} break;

	default:
		/* class, vendor, etc ... delegate */
		handled = 0;
		break;
	}

	/* fill up the fifo; caller updates csr0 */
	if (handled > 0) {
		u16 len = le16_to_cpu(ctrlrequest->wLength);

		if (len > 2)
			len = 2;
		musb_write_fifo(&musb->endpoints[0], len, result);
	}

	return handled;
}
/*
 * handle a control-IN request, the end0 buffer contains the current request
 * that is supposed to be a standard control request. Assumes the fifo to
 * be at least 2 bytes long.
 *
 * @return 0 if the request was NOT HANDLED,
 * < 0 when error
 * > 0 when the request is processed
 *
 * Context: caller holds controller lock
 */
static int
service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
{
	/* Only standard requests are handled here; everything else
	 * (class, vendor, ...) is delegated to the gadget driver. */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
		return 0;

	if (ctrlrequest->bRequest == USB_REQ_GET_STATUS)
		return service_tx_status_request(musb, ctrlrequest);

	/* case USB_REQ_SYNC_FRAME: not handled */
	return 0;
}
/*
 * Complete the given ep0 request back to the gadget driver.
 * Context: caller holds controller lock
 */
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
	/* ep0 requests live on the IN endpoint's queue; the trailing 0 is
	 * presumably the completion status -- see musb_g_giveback(). */
	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
}
/*
 * Tries to start B-device HNP negotiation if enabled via sysfs
 */
static inline void musb_try_b_hnp_enable(struct musb *musb)
{
	void __iomem *mbase = musb->mregs;
	u8 devctl;

	musb_dbg(musb, "HNP: Setting HR");
	/* Set the Host Request bit in DEVCTL to request the host role. */
	devctl = musb_readb(mbase, MUSB_DEVCTL);
	musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}
/*
 * Handle all control requests with no DATA stage, including standard
 * requests such as:
 * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
 *	always delegated to the gadget driver
 * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
 *	always handled here, except for class/vendor/... features
 *
 * Returns >0 when handled, 0 to delegate to the gadget driver,
 * <0 to stall the request.
 *
 * Context: caller holds controller lock
 */
static int
service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT:{
				/* CLEAR_FEATURE(ENDPOINT_HALT) */
				const u8 epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep *musb_ep;
				struct musb_hw_ep *ep;
				struct musb_request *request;
				void __iomem *regs;
				int is_in;
				u16 csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				handled = 1;
				/* Ignore request if endpoint is wedged */
				if (musb_ep->wedged)
					break;

				/* clear the stall and data toggle bits */
				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					csr |= MUSB_TXCSR_CLRDATATOG |
					       MUSB_TXCSR_P_WZC_BITS;
					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
						 MUSB_TXCSR_P_SENTSTALL |
						 MUSB_TXCSR_TXPKTRDY);
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_CLRDATATOG |
					       MUSB_RXCSR_P_WZC_BITS;
					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
						 MUSB_RXCSR_P_SENTSTALL);
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* Maybe start the first request in the queue */
				request = next_request(musb_ep);
				if (!musb_ep->busy && request) {
					musb_dbg(musb, "restarting the request");
					musb_ep_restart(musb, request);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					/* test modes are high-speed only;
					 * wIndex high byte selects the mode */
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

					switch (ctrlrequest->wIndex >> 8) {
					case USB_TEST_J:
						pr_debug("USB_TEST_J\n");
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case USB_TEST_K:
						pr_debug("USB_TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case USB_TEST_SE0_NAK:
						pr_debug("USB_TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case USB_TEST_PACKET:
						pr_debug("USB_TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;
					case 0xc0:
						/* TEST_FORCE_HS */
						pr_debug("TEST_FORCE_HS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HS;
						break;
					case 0xc1:
						/* TEST_FORCE_FS */
						pr_debug("TEST_FORCE_FS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_FS;
						break;
					case 0xc2:
						/* TEST_FIFO_ACCESS */
						pr_debug("TEST_FIFO_ACCESS\n");
						musb->test_mode_nr =
							MUSB_TEST_FIFO_ACCESS;
						break;
					case 0xc3:
						/* TEST_FORCE_HOST */
						pr_debug("TEST_FORCE_HOST\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HOST;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq */
					if (handled > 0)
						musb->test_mode = true;
					break;
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
				case USB_DEVICE_DEBUG_MODE:
					handled = 0;
					break;
				/* the stall label falls into default so a
				 * bad feature selector also stalls */
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT:{
				/* SET_FEATURE(ENDPOINT_HALT): stall the
				 * endpoint and flush its FIFO */
				const u8 epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep *musb_ep;
				struct musb_hw_ep *ep;
				void __iomem *regs;
				int is_in;
				u16 csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_RXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}
/* we have an ep0out data packet
 * Context: caller holds controller lock
 */
static void ep0_rxstate(struct musb *musb)
{
	void __iomem *regs = musb->control_ep->regs;
	struct musb_request *request;
	struct usb_request *req;
	u16 count, csr;

	request = next_ep0_request(musb);
	/* NOTE(review): this relies on 'request' being the first member of
	 * struct musb_request so a NULL queue head yields a NULL req --
	 * confirm against musb_gadget.h. */
	req = &request->request;

	/* read packet and ack; or stall because of gadget driver bug:
	 * should have provided the rx buffer before setup() returned.
	 */
	if (req) {
		void *buf = req->buf + req->actual;
		unsigned len = req->length - req->actual;

		/* read the buffer */
		count = musb_readb(regs, MUSB_COUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		if (count > 0) {
			musb_read_fifo(&musb->endpoints[0], count, buf);
			req->actual += count;
		}
		csr = MUSB_CSR0_P_SVDRXPKTRDY;
		/* a short packet or a full buffer ends the data stage */
		if (count < 64 || req->actual == req->length) {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			csr |= MUSB_CSR0_P_DATAEND;
		} else
			req = NULL;
	} else
		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;

	/* Completion handler may choose to stall, e.g. because the
	 * message just received holds invalid data.
	 */
	if (req) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, req);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}
/*
 * transmitting to the host (IN), this code might be called from IRQ
 * and from kernel thread.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem *regs = musb->control_ep->regs;
	struct musb_request *req = next_ep0_request(musb);
	struct usb_request *request;
	u16 csr = MUSB_CSR0_TXPKTRDY;
	u8 *fifo_src;
	u8 fifo_count;

	if (!req) {
		/* WARN_ON(1); */
		musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
		return;
	}

	request = &req->request;

	/* load the data */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/* update the flags: a short packet, or the last full packet with
	 * no zero-length terminator requested, ends the data stage */
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| (request->actual == request->length
				&& !request->zero)) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}
/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Also flushes any request left over from the previous control transfer
 * and picks the next ep0 state based on the packet's direction/length.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct musb_request *r;
	void __iomem *regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE: earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, &r->request);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors. If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		/* ack RXPKTRDY, then wait for the core to consume it.
		 * NOTE(review): unbounded busy-wait on hardware state. */
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}
/*
 * Hand a SETUP packet to the bound gadget driver's setup() callback.
 * The controller lock is dropped across the callback (the driver may
 * queue requests, which re-takes the lock) and re-acquired afterwards.
 * Returns the driver's result, or -EOPNOTSUPP when no driver is bound.
 */
static int
forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int status = -EOPNOTSUPP;

	if (musb->gadget_driver) {
		spin_unlock(&musb->lock);
		status = musb->gadget_driver->setup(&musb->g, ctrlrequest);
		spin_lock(&musb->lock);
	}
	return status;
}
/*
* Handle peripheral ep0 interrupt
*
* Context: irq handler; we won't re-enter the driver that way.
*/
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16 csr;
	u16 len;
	void __iomem *mbase = musb->mregs;
	void __iomem *regs = musb->endpoints[0].regs;
	irqreturn_t retval = IRQ_NONE;
	musb_ep_select(mbase, 0); /* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);
	musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
			csr, len, decode_ep0stage(musb->ep0_state));
	if (csr & MUSB_CSR0_P_DATAEND) {
		/*
		 * If DATAEND is set we should not call the callback,
		 * hence the status stage is not complete.
		 */
		return IRQ_HANDLED;
	}
	/* I sent a stall.. need to acknowledge it now.. */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		/* re-read: the write above changes CSR0 */
		csr = musb_readw(regs, MUSB_CSR0);
	}
	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
					decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE:  request may need completion */
	}
	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;
	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;
	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */
		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}
		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			musb_dbg(musb, "entering TESTMODE");
			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);
			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		fallthrough;
	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request *req;
			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}
		/*
		 * In case when several interrupts can get coalesced,
		 * check to see if we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states since the corresponding interrupts
		 * tend to happen within too little period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		fallthrough;
	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest setup;
			int handled = 0;
			/* a SETUP packet is always exactly 8 bytes */
			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;
			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8 power;
				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;
			}
			/* musb_read_setup() already chose the next stage;
			 * dispatch on it to service the request locally
			 * where possible */
			switch (musb->ep0_state) {
			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);
				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;
				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;
			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;
			/* sequence #2 (OUT from host), always forward */
			default: /* MUSB_EP0_STAGE_RX */
				break;
			}
			musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
				handled, csr,
				decode_ep0stage(musb->ep0_state));
			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up:  csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;
			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				musb_ep_select(mbase, 0);
stall:
				musb_dbg(musb, "stall (%d)", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;
	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;
	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}
	return retval;
}
/* ep0 cannot be enabled by the gadget driver; it is always active. */
static int
musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	/* always enabled */
	return -EINVAL;
}
/* ep0 cannot be disabled by the gadget driver; it is always active. */
static int musb_g_ep0_disable(struct usb_ep *e)
{
	/* always enabled */
	return -EINVAL;
}
/*
 * Queue a request on ep0.  Only one request may be pending at a time,
 * and queuing is only legal while a control transfer's data (or ack)
 * stage is in progress.  Context: any; takes the controller lock.
 */
static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep *ep;
	struct musb_request *req;
	struct musb *musb;
	int status;
	unsigned long lockflags;
	void __iomem *regs;
	if (!e || !r)
		return -EINVAL;
	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;
	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;
	spin_lock_irqsave(&musb->lock, lockflags);
	/* ep0 carries at most one request at a time */
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}
	/* only accept requests during an active data/ack stage */
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
		status = 0;
		break;
	default:
		musb_dbg(musb, "ep0 request queued in state %d",
				musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}
	/* add request to the list */
	list_add_tail(&req->list, &ep->req_list);
	musb_dbg(musb, "queue to %s (%s), length=%d",
			ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
			req->request.length);
	musb_ep_select(musb->mregs, 0);
	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);
	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			/* zero-length request completes immediately */
			musb_g_ep0_giveback(ep->musb, r);
		}
	/* else for sequence #2 (OUT), caller provides a buffer
	 * before the next packet arrives.  deferred responses
	 * (after SETUP is acked) are racey.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}
cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
/* Dequeueing an in-flight ep0 request is intentionally unsupported. */
static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* we just won't support this */
	return -EINVAL;
}
/*
 * Issue a protocol stall on ep0.  Only halting (value != 0) is
 * supported, only while no request is queued, and only in stages where
 * a stall makes protocol sense.  Context: any; takes the controller lock.
 */
static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep *ep;
	struct musb *musb;
	void __iomem *base, *regs;
	unsigned long flags;
	int status;
	u16 csr;
	if (!e || !value)
		return -EINVAL;
	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;
	spin_lock_irqsave(&musb->lock, flags);
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}
	musb_ep_select(base, 0);
	/* start from the pending ack bits so they aren't lost */
	csr = musb->ackpend;
	switch (musb->ep0_state) {
	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
		csr = musb_readw(regs, MUSB_CSR0);
		fallthrough;
	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */
		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		musb->ackpend = 0;
		break;
	default:
		musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
		status = -EINVAL;
	}
cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Endpoint-zero operations.  ep0 is always enabled, so enable/disable/
 * dequeue return -EINVAL; set_halt supports halting only (see
 * musb_g_ep0_halt).  Request alloc/free are shared with the other
 * endpoints.
 */
const struct usb_ep_ops musb_g_ep0_ops = {
	.enable = musb_g_ep0_enable,
	.disable = musb_g_ep0_disable,
	.alloc_request = musb_alloc_request,
	.free_request = musb_free_request,
	.queue = musb_g_ep0_queue,
	.dequeue = musb_g_ep0_dequeue,
	.set_halt = musb_g_ep0_halt,
};
| linux-master | drivers/usb/musb/musb_gadget_ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver peripheral support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "musb_core.h"
#include "musb_trace.h"
/* ----------------------------------------------------------------------- */
/* True when this request's buffer currently has a DMA mapping
 * (mapped by this driver, or pre-mapped by the gadget driver). */
#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))
/* Maps the buffer to dma */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;
	/* default: request not usable for DMA */
	request->map_state = UN_MAPPED;
	if (!is_dma_capable() || !musb_ep->dma)
		return;
	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;
	if (request->request.dma == DMA_ADDR_INVALID) {
		/* no pre-existing mapping: map it here, and remember
		 * that we own the mapping (MUSB_MAPPED) */
		dma_addr_t dma_addr;
		int ret;
		dma_addr = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;	/* mapping failed; fall back to PIO */
		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
		/* gadget driver mapped it already: just sync for device */
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}
/* Unmap the buffer from dma and maps it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;
	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;
	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		/* we created the mapping in map_dma_buffer(): tear it down
		 * and invalidate the dma address */
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED: owner is the gadget driver, only sync */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 *
 * Drops the controller lock around the gadget driver's completion
 * callback and re-acquires it afterwards; ep->busy is set across the
 * callback so nuke()/re-entrant completion is suppressed.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;
	req = to_musb_request(request);
	list_del(&req->list);
	/* don't overwrite a status already set (e.g. -EOVERFLOW) */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;
	ep->busy = 1;
	spin_unlock(&musb->lock);
	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);
	trace_musb_req_gb(req);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
/* ----------------------------------------------------------------------- */
/*
* Abort requests queued to an endpoint using the status. Synchronous.
* caller locked controller and blocked irqs, and selected this ep.
*/
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
	ep->busy = 1;
	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;
		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			/* flush twice to cover double-buffered FIFOs */
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}
		value = c->channel_abort(ep->dma);
		musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}
	/* complete every queued request with the given status */
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
/* ----------------------------------------------------------------------- */
/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
* This assumes the separate CPPI engine is responding to DMA requests
* from the usb core ... sequenced a bit differently from mentor dma.
*/
/*
 * Largest amount of data we may load into this TX endpoint's FIFO at
 * once: the full hardware FIFO when bulk splitting applies, otherwise
 * one USB packet.
 */
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	return can_bulk_split(musb, ep->type)
			? ep->hw_ep->max_packet_sz_tx
			: ep->packet_sz;
}
/*
* An endpoint is transmitting data. This can be called either from
* the IRQ routine or from ep.queue() to kickstart a request on an
* endpoint.
*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;
	musb_ep = req->ep;
	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}
	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "dma pending...");
		return;
	}
	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);
	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));
	if (csr & MUSB_TXCSR_TXPKTRDY) {
		musb_dbg(musb, "%s old packet still ready , txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}
	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}
	musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);
#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;
		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);
		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
		/* MUSB_TXCSR_P_ISO is still set correctly */
		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
			/* mode 0 = single packet per DMA; mode 1 = whole
			 * request, IRQ only at the end */
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
					    can_bulk_split(musb,
							   musb_ep->type))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}
		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
			       MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
				~MUSB_TXCSR_P_UNDERRUN) | csr);
			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);
			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				/* DMA channel refused the transfer: drop the
				 * channel and fall back to PIO for good */
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: prequest->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif
	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}
	/* host may already have the data when this message shows... */
	musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
/*
* FIFO state update (e.g. data ready).
* Called from IRQ, with controller locked.
*/
/*
 * TX endpoint interrupt service: acknowledge stalls/underruns, account
 * completed DMA, send a terminating ZLP when requested, give back
 * finished requests and kick the next transfer.
 *
 * Context: controller locked, IRQs blocked (IRQ path).
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	/*
	 * Don't form &req->request while req may be NULL: taking a member
	 * address off a NULL pointer is undefined behaviour, and the
	 * underrun debug print below would otherwise log a bogus pointer
	 * when the queue is empty.
	 */
	request = req ? &req->request : NULL;
	csr = musb_readw(epio, MUSB_TXCSR);
	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
	dma = is_dma_capable() ? musb_ep->dma : NULL;
	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		/* ack the stall; nothing more to do this interrupt */
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}
	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
		return;
	}
	if (req) {
		trace_musb_req_tx(req);
		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			/* DMA just finished: turn it off and account the
			 * bytes it moved */
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
				epnum, csr, musb_ep->dma->actual_len, request);
		}
		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length)) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}
		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				musb_dbg(musb, "%s idle now",
					musb_ep->end_point.name);
				return;
			}
		}
		/* start (or continue) the next transfer */
		txstate(musb, req);
	}
}
/* ------------------------------------------------------------ */
/*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;
	/* shared-FIFO endpoints keep their state in the IN half */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;
	fifo_count = musb_ep->packet_sz;
	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}
	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "DMA pending...");
		return;
	}
	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, RXCSR %04x",
		    musb_ep->end_point.name, csr);
		return;
	}
	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;
		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {
			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}
	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);
		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */
		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;
		if (request->actual < request->length) {
			if (!is_buffer_mapped(req))
				goto buffer_aint_mapped;
			if (musb_dma_inventra(musb)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;
				c = musb->dma_controller;
				channel = musb_ep->dma;
	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of is signified either by a short packet,
	 * or filling the last byte of the buffer.  (Sending extra data in
	 * that last pckate should trigger an overflow fault.)  But in mode 1,
	 * we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */
				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}
				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);
				if (use_dma)
					return;
			}
			if ((musb_dma_ux500(musb)) &&
				(request->actual < request->length)) {
				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;
				c = musb->dma_controller;
				channel = musb_ep->dma;
				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size =	min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);
				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}
				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))
					return;
			}
			len = request->length - request->actual;
			musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);
			fifo_count = min_t(unsigned, len, fifo_count);
			if (tusb_dma_omap(musb)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;
				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			unmap_dma_buffer(req, musb);
			/*
			 * Clear DMAENAB and AUTOCLEAR for the
			 * PIO mode transfer
			 */
			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
			musb_writew(epio, MUSB_RXCSR, csr);
buffer_aint_mapped:
			/* PIO path: drain the FIFO into the request buffer */
			fifo_count = min_t(unsigned int,
					request->length - request->actual,
					(unsigned int)fifo_count);
			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;
			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */
			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
/*
* Data ready for a request; called from IRQ
*/
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	/* shared-FIFO endpoints keep their state in the IN half */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;
	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	if (!req)
		return;
	trace_musb_req_rx(req);
	request = &req->request;
	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;
	musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);
	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		/* ack the stall; nothing more to do this interrupt */
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}
	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
	}
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		musb_dbg(musb, "%s busy, csr %04x",
			musb_ep->end_point.name, csr);
		return;
	}
	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		/* DMA completed: disable it and account the bytes moved */
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);
		request->actual += musb_ep->dma->actual_len;
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 **/
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);
		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}
/* ------------------------------------------------------------ */
/*
 * usb_ep_ops.enable: configure a hardware endpoint for the given descriptor.
 *
 * Validates direction and maxpacket against the hardware FIFO sizes,
 * programs TXMAXP/RXMAXP and the CSR registers, unmasks the endpoint
 * interrupt, and tries to allocate a DMA channel (I/O still works if
 * that fails).  Returns 0 on success, -EBUSY if already enabled, or
 * -EINVAL on a bad descriptor/direction/size.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct musb_ep *musb_ep;
	struct musb_hw_ep *hw_ep;
	void __iomem *regs;
	struct musb *musb;
	void __iomem *mbase;
	u8 epnum;
	u16 csr;
	unsigned tmp;
	int status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	/* already enabled? */
	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp_mult(desc) - 1;
	if (tmp) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			musb_dbg(musb, "no support for high bandwidth ISO");
			goto fail;
		}
		musb_ep->hb_mult = tmp;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = usb_endpoint_maxp(desc);
	/* total bytes per microframe = maxpacket * (mult + 1) */
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		/* a shared FIFO endpoint switches to IN mode here */
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (can_bulk_split(musb, musb_ep->type))
			musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
						musb_ep->packet_sz) - 1;
		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		/* a shared FIFO endpoint switches to OUT mode here */
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller *c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			musb_ep_xfertype_string(musb_ep->type),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_delayed_work(&musb->irq_work, 0);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint flushing all requests queued.
 *
 * usb_ep_ops.disable: masks the endpoint interrupt, zeroes the MAXP
 * register, and gives back every queued request with -ESHUTDOWN via
 * nuke().  Always returns 0.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long flags;
	struct musb *musb;
	u8 epnum;
	struct musb_ep *musb_ep;
	void __iomem *epio;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	/* mark disabled so musb_gadget_queue() rejects new requests */
	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	schedule_delayed_work(&musb->irq_work, 0);

	spin_unlock_irqrestore(&(musb->lock), flags);

	musb_dbg(musb, "%s", musb_ep->end_point.name);

	return 0;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 *
 * Returns the embedded struct usb_request, or NULL on allocation failure.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep *mep = to_musb_ep(ep);
	struct musb_request *mreq = kzalloc(sizeof(*mreq), gfp_flags);

	if (!mreq)
		return NULL;

	/* no DMA mapping yet; record which endpoint owns this request */
	mreq->ep = mep;
	mreq->epnum = mep->current_epnum;
	mreq->request.dma = DMA_ADDR_INVALID;

	trace_musb_req_alloc(mreq);

	return &mreq->request;
}
/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *mreq = to_musb_request(req);

	trace_musb_req_free(mreq);
	kfree(mreq);
}
/* NOTE(review): nothing in the visible code ever adds to "buffers" or
 * instantiates struct free_record — these look like dead leftovers from
 * an older buffer-management scheme; candidate for removal.
 */
static LIST_HEAD(buffers);

struct free_record {
	struct list_head list;
	struct device *dev;
	unsigned bytes;
	dma_addr_t dma;
};
/*
 * Kick the hardware state machine for the head request on an endpoint.
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	trace_musb_req_start(req);
	musb_ep_select(musb->mregs, req->epnum);

	/* dispatch on transfer direction */
	if (!req->tx)
		rxstate(musb, req);
	else
		txstate(musb, req);
}
/* Resume-work thunk: unwrap the void* payload and restart the endpoint. */
static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
	musb_ep_restart(musb, (struct musb_request *)data);

	return 0;
}
/*
 * usb_ep_ops.queue: submit a request to an endpoint.
 *
 * Takes a PM runtime reference for the duration, maps the buffer for
 * DMA if applicable, appends the request to the endpoint list, and
 * schedules I/O (via resume work) when the request lands at the head
 * of an idle queue.  Returns 0 or a negative errno.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;
	struct musb *musb;
	int status;
	unsigned long lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	/* -EINPROGRESS means an async resume is already underway; OK */
	status = pm_runtime_get(musb->controller);
	if ((status != -EINPROGRESS) && status < 0) {
		dev_err(musb->controller,
			"pm runtime get failed in %s\n",
			__func__);
		pm_runtime_put_noidle(musb->controller);

		return status;
	}
	status = 0;

	trace_musb_req_enq(request);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		musb_dbg(musb, "req %p queued to %s while ep %s",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
		status = musb_queue_resume_work(musb,
						musb_ep_restart_resume_work,
						request);
		if (status < 0) {
			dev_err(musb->controller, "%s resume work: %i\n",
				__func__, status);
			list_del(&request->list);
		}
	}

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
	return status;
}
/*
 * usb_ep_ops.dequeue: remove a queued request, aborting a DMA transfer
 * if the hardware already owns it.  The request is given back with
 * -ECONNRESET.  Returns 0, or a negative errno if the request is not
 * queued on this endpoint (or the DMA abort failed).
 *
 * Fix: validate @ep and @request *before* dereferencing pointers derived
 * from them; the original read musb_ep->musb ahead of the NULL check,
 * which would oops on a NULL @ep.
 */
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep *musb_ep;
	struct musb_request *req;
	struct musb_request *r;
	unsigned long flags;
	int status = 0;
	struct musb *musb;

	if (!ep || !request)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	req = to_musb_request(request);
	if (req->ep != musb_ep)
		return -EINVAL;

	musb = musb_ep->musb;

	trace_musb_req_deq(req);

	spin_lock_irqsave(&musb->lock, flags);

	/* make sure the request really sits on this endpoint's list */
	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_err(musb->controller, "request %p not queued to %s\n",
				request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller *c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
* Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
* data but will queue requests.
*
* exported to ep0 code
*/
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
u8 epnum = musb_ep->current_epnum;
struct musb *musb = musb_ep->musb;
void __iomem *epio = musb->endpoints[epnum].regs;
void __iomem *mbase;
unsigned long flags;
u16 csr;
struct musb_request *request;
int status = 0;
if (!ep)
return -EINVAL;
mbase = musb->mregs;
spin_lock_irqsave(&musb->lock, flags);
if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
status = -EINVAL;
goto done;
}
musb_ep_select(mbase, epnum);
request = next_request(musb_ep);
if (value) {
if (request) {
musb_dbg(musb, "request in progress, cannot halt %s",
ep->name);
status = -EAGAIN;
goto done;
}
/* Cannot portably stall with non-empty FIFO */
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
musb_dbg(musb, "FIFO busy, cannot halt %s",
ep->name);
status = -EAGAIN;
goto done;
}
}
} else
musb_ep->wedged = 0;
/* set/clear the stall and toggle bits */
musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
csr |= MUSB_TXCSR_P_WZC_BITS
| MUSB_TXCSR_CLRDATATOG;
if (value)
csr |= MUSB_TXCSR_P_SENDSTALL;
else
csr &= ~(MUSB_TXCSR_P_SENDSTALL
| MUSB_TXCSR_P_SENTSTALL);
csr &= ~MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
} else {
csr = musb_readw(epio, MUSB_RXCSR);
csr |= MUSB_RXCSR_P_WZC_BITS
| MUSB_RXCSR_FLUSHFIFO
| MUSB_RXCSR_CLRDATATOG;
if (value)
csr |= MUSB_RXCSR_P_SENDSTALL;
else
csr &= ~(MUSB_RXCSR_P_SENDSTALL
| MUSB_RXCSR_P_SENTSTALL);
musb_writew(epio, MUSB_RXCSR, csr);
}
/* maybe start the first request in the queue */
if (!musb_ep->busy && !value && request) {
musb_dbg(musb, "restarting the request");
musb_ep_restart(musb, request);
}
done:
spin_unlock_irqrestore(&musb->lock, flags);
return status;
}
/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep *musb_ep;

	if (!ep)
		return -EINVAL;

	/* mark wedged first so CLEAR_FEATURE(HALT) is ignored later */
	musb_ep = to_musb_ep(ep);
	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
/*
 * usb_ep_ops.fifo_status: report bytes pending in an OUT endpoint FIFO.
 *
 * Only valid for enabled OUT endpoints; returns the RXCOUNT register
 * value, or -EINVAL for IN/disabled endpoints.
 */
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	void __iomem *epio = musb_ep->hw_ep->regs;
	int retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb *musb = musb_ep->musb;
		int epnum = musb_ep->current_epnum;
		void __iomem *mbase = musb->mregs;
		unsigned long flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}
/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO.
 *
 * Temporarily masks the endpoint's TX interrupt while toggling the
 * FLUSHFIFO bit so a completion IRQ can't race the flush.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb *musb = musb_ep->musb;
	u8 epnum = musb_ep->current_epnum;
	void __iomem *epio = musb->endpoints[epnum].regs;
	void __iomem *mbase;
	unsigned long flags;
	u16 csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* write twice: second write flushes the other half of a
		 * double-buffered FIFO */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
/* Endpoint operations exported to the gadget core for ep1..epN
 * (ep0 uses musb_g_ep0_ops instead; see init_peripheral_ep()). */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
/* ----------------------------------------------------------------------- */
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
struct musb *musb = gadget_to_musb(gadget);
return (int)musb_readw(musb->mregs, MUSB_FRAME);
}
/*
 * usb_gadget_ops.wakeup: signal remote wakeup (or start SRP from B_IDLE).
 *
 * In B_PERIPHERAL with remote wakeup armed and the bus suspended, pulses
 * the RESUME bit for ~2 ms.  In B_IDLE it starts an SRP session instead.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb *musb = gadget_to_musb(gadget);
	void __iomem *mregs = musb->mregs;
	unsigned long flags;
	int status = -EINVAL;
	u8 power, devctl;
	int retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb_get_state(musb)) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);

		/* bounded busy-wait for SESSION to assert ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* ... then for the SRP pulse to finish (SESSION drops) */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		if (musb->xceiv) {
			/* otg_start_srp() may sleep; drop the lock around it */
			spin_unlock_irqrestore(&musb->lock, flags);
			otg_start_srp(musb->xceiv->otg);
			spin_lock_irqsave(&musb->lock, flags);
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		musb_dbg(musb, "Unhandled wake: %s",
			 musb_otg_state_string(musb));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	musb_dbg(musb, "issue wakeup");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/* usb_gadget_ops.set_selfpowered: cache the flag for GET_STATUS replies. */
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	/* normalize any non-zero value to 1 */
	gadget->is_selfpowered = is_selfpowered ? 1 : 0;

	return 0;
}
/* Drive (or release) the D+ pullup via the SOFTCONN bit in POWER. */
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power = musb_readb(musb->mregs, MUSB_POWER);

	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	musb_dbg(musb, "gadget D+ pullup %s", is_on ? "on" : "off");

	musb_writeb(musb->mregs, MUSB_POWER, power);
}
#if 0
/* NOTE(review): dead code kept for reference; it also references an
 * undeclared 'musb' local, so it would not compile if re-enabled. */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	musb_dbg(musb, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif
/* usb_gadget_ops.vbus_draw: hand the current-draw budget to the PHY. */
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb *musb = gadget_to_musb(gadget);

	return usb_phy_set_power(musb->xceiv, mA);
}
/*
 * Deferred-work handler for pullup changes: applying SOFTCONN needs the
 * controller resumed, and pm_runtime_get_sync() may sleep, so it cannot
 * run in musb_gadget_pullup()'s atomic context.
 */
static void musb_gadget_work(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, gadget_work.work);
	pm_runtime_get_sync(musb->controller);
	spin_lock_irqsave(&musb->lock, flags);
	musb_pullup(musb, musb->softconnect);
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}
/*
 * usb_gadget_ops.pullup: record the desired soft-connect state and defer
 * the actual register write to musb_gadget_work() (needs runtime PM).
 */
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb *musb = gadget_to_musb(gadget);
	unsigned long flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (musb->softconnect != is_on) {
		musb->softconnect = is_on;
		schedule_delayed_work(&musb->gadget_work, 0);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}
/* forward declarations for the UDC start/stop hooks defined below */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);

/* Controller-level operations exported to the gadget/UDC core. */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
};
/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */

/*
 * Initialize one gadget-visible endpoint: name, maxpacket limit,
 * capability flags, ops table, and (for ep1..epN) linkage into the
 * gadget's endpoint list.  ep0 gets its own ops and becomes g.ep0.
 */
static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	/* ep0 and shared-FIFO eps are bidirectional: no "in"/"out" suffix */
	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
		ep->end_point.caps.type_control = true;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
		else
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
		ep->end_point.caps.type_iso = true;
		ep->end_point.caps.type_bulk = true;
		ep->end_point.caps.type_int = true;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}

	if (!epnum || hw_ep->is_shared_fifo) {
		ep->end_point.caps.dir_in = true;
		ep->end_point.caps.dir_out = true;
	} else if (is_in)
		ep->end_point.caps.dir_in = true;
	else
		ep->end_point.caps.dir_out = true;
}
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 *
 * Fix: drop the local 'count' variable — it was incremented for every
 * endpoint but never read (dead code).
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8 epnum;
	struct musb_hw_ep *hw_ep;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			/* a shared FIFO serves both directions via ep_in */
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
		} else {
			if (hw_ep->max_packet_sz_tx)
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
			if (hw_ep->max_packet_sz_rx)
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
		}
	}
}
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 *
 * Registers the musb instance as a UDC with the gadget core.
 * Returns 0 or the error from usb_add_gadget_udc().
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	MUSB_DEV_MODE(musb);
	musb_set_state(musb, OTG_STATE_B_IDLE);

	/* this "gadget" abstracts/virtualizes the controller */
	musb->g.name = musb_driver_name;
	/* don't support otg protocols */
	musb->g.is_otg = 0;
	INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}
/* Tear down the gadget side; a host-only instance has nothing to undo. */
void musb_gadget_cleanup(struct musb *musb)
{
	if (musb->port_mode != MUSB_HOST) {
		cancel_delayed_work_sync(&musb->gadget_work);
		usb_del_gadget_udc(&musb->g);
	}
}
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;
	int retval = 0;

	/* this UDC only supports high-speed-capable gadget drivers */
	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	pm_runtime_get_sync(musb->controller);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	/* put PHY/transceiver into peripheral mode */
	if (musb->xceiv)
		otg_set_peripheral(musb->xceiv->otg, &musb->g);
	else
		phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE);

	musb_set_state(musb, OTG_STATE_B_IDLE);
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_start(musb);

	/* REVISIT:  funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID)
		musb_platform_set_vbus(musb, 1);

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

err:
	return retval;
}
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * Stops the controller, detaches the PHY/transceiver from peripheral
 * mode, and clears the driver binding.  Always returns 0.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;

	pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	/* stop drawing VBUS current while detached */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb_set_state(musb, OTG_STATE_UNDEFINED);
	musb_stop(musb);

	if (musb->xceiv)
		otg_set_peripheral(musb->xceiv->otg, NULL);
	else
		phy_set_mode(musb->phy, PHY_MODE_INVALID);

	musb->is_active = 0;
	musb->gadget_driver = NULL;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	/*
	 * FIXME we need to be able to register another
	 * gadget driver here and have everything work;
	 * that currently misbehaves.
	 */

	/* Force check of devctl register for PM runtime */
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;
}
/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

/*
 * Bus resume seen while in peripheral mode: clear the suspended flag
 * and forward the event to the gadget driver's resume() callback.
 * Context: caller holds musb->lock (dropped around the callback).
 */
void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb_get_state(musb)) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			/* the callback may sleep; drop the lock around it */
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				musb_otg_state_string(musb));
	}
}
/* called when SOF packets stop for 3+ msec
 *
 * Bus suspend in peripheral mode: updates the OTG state and forwards
 * the event to the gadget driver's suspend() callback.
 * Context: caller holds musb->lock (dropped around the callback).
 */
void musb_g_suspend(struct musb *musb)
{
	u8 devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);

	switch (musb_get_state(musb)) {
	case OTG_STATE_B_IDLE:
		/* VBUS above session-valid: we are now a live peripheral */
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			/* the callback may sleep; drop the lock around it */
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)",
				musb_otg_state_string(musb));
	}
}
/* Called during SRP; forwards to the gadget-ops wakeup handler. */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
/* called when VBUS drops below session threshold, and in other cases
 *
 * Peripheral-mode disconnect: clears host-request, zeroes the VBUS
 * current budget, notifies the gadget driver, and walks the OTG state
 * machine back to an idle state.
 * Context: caller holds musb->lock (dropped around the callback).
 */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem *mregs = musb->mregs;
	u8 devctl = musb_readb(mregs, MUSB_DEVCTL);

	musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		/* the callback may sleep; drop the lock around it */
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb_get_state(musb)) {
	default:
		musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
			musb_otg_state_string(musb));
		musb_set_state(musb, OTG_STATE_A_IDLE);
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb_set_state(musb, OTG_STATE_B_IDLE);
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
/*
 * USB bus reset seen in peripheral mode: report the reset to the gadget
 * core, latch the negotiated speed from POWER.HSMODE, and reinitialize
 * the device-side state (address 0, ep0 SETUP stage, OTG flags).
 * Context: caller holds musb->lock; it is dropped around the reset
 * callback (hence the sparse annotations).
 */
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem *mbase = musb->mregs;
	u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8 power;

	musb_dbg(musb, "<== %s driver '%s'",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report reset, if we didn't already (flushing EP state) */
	if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&musb->lock);
		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
		spin_lock(&musb->lock);
	}

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;
	musb->g.quirk_zlp_not_supp = 1;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (!musb->g.is_otg) {
		/* USB device controllers that are not OTG compatible
		 * may not have DEVCTL register in silicon.
		 * In that case, do not rely on devctl for setting
		 * peripheral mode.
		 */
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
		musb->g.is_a_peripheral = 0;
	} else if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
		musb->g.is_a_peripheral = 0;
	} else {
		musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g, 8);
}
| linux-master | drivers/usb/musb/musb_gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver virtual root hub support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
#include "musb_core.h"
/*
 * Deferred completion of a root-port resume: clears the RESUME bit in
 * POWER after the required signalling time (scheduled from
 * musb_port_suspend()), updates port1_status, and notifies the hub core.
 */
void musb_host_finish_resume(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;
	u8 power;

	musb = container_of(work, struct musb, finish_resume_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	power = musb_readb(musb->mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_dbg(musb, "root port resume stopped, power %02x", power);
	musb_writeb(musb->mregs, MUSB_POWER, power);

	/*
	 * ISSUE:  DaVinci (RTL 1.300) disconnects after
	 * resume of high speed peripherals (but not full
	 * speed ones).
	 */
	musb->is_active = 1;
	/* suspend done: clear SUSPEND/RESUME, flag the C_SUSPEND change */
	musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME);
	musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
	usb_hcd_poll_rh_status(musb->hcd);
	/* NOTE: it might really be A_WAIT_BCON ... */
	musb_set_state(musb, OTG_STATE_A_HOST);

	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Suspend or resume the (single) root port.
 *
 * @do_suspend: true to suspend the port, false to start resume
 * signalling (the RESUME bit is cleared later by
 * musb_host_finish_resume() after USB_RESUME_TIMEOUT).
 * Returns 0, or -EBUSY if a resume is already in progress.
 */
int musb_port_suspend(struct musb *musb, bool do_suspend)
{
	u8 power;
	void __iomem *mbase = musb->mregs;

	if (!is_host_active(musb))
		return 0;

	/* NOTE:  this doesn't necessarily put PHY into low power mode,
	 * turning off its clock; that's a function of PHY integration and
	 * MUSB_POWER_ENSUSPEND.  PHY may need a clock (sigh) to detect
	 * SE0 changing to connect (J) or wakeup (K) states.
	 */
	power = musb_readb(mbase, MUSB_POWER);
	if (do_suspend) {
		int retries = 10000;

		if (power & MUSB_POWER_RESUME)
			return -EBUSY;

		if (!(power & MUSB_POWER_SUSPENDM)) {
			power |= MUSB_POWER_SUSPENDM;
			musb_writeb(mbase, MUSB_POWER, power);

			/* Needed for OPT A tests */
			power = musb_readb(mbase, MUSB_POWER);
			/* bounded busy-wait for SUSPENDM to self-clear */
			while (power & MUSB_POWER_SUSPENDM) {
				power = musb_readb(mbase, MUSB_POWER);
				if (retries-- < 1)
					break;
			}
		}

		musb_dbg(musb, "Root port suspended, power %02x", power);

		musb->port1_status |= USB_PORT_STAT_SUSPEND;
		switch (musb_get_state(musb)) {
		case OTG_STATE_A_HOST:
			musb_set_state(musb, OTG_STATE_A_SUSPEND);
			musb->is_active = musb->xceiv &&
				musb->xceiv->otg->host->b_hnp_enable;
			if (musb->is_active)
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
						OTG_TIME_A_AIDL_BDIS));
			musb_platform_try_idle(musb, 0);
			break;
		case OTG_STATE_B_HOST:
			musb_set_state(musb, OTG_STATE_B_WAIT_ACON);
			musb->is_active = musb->xceiv &&
				musb->xceiv->otg->host->b_hnp_enable;
			musb_platform_try_idle(musb, 0);
			break;
		default:
			musb_dbg(musb, "bogus rh suspend? %s",
				musb_otg_state_string(musb));
		}
	} else if (power & MUSB_POWER_SUSPENDM) {
		/* begin resume signalling; finish_resume_work ends it */
		power &= ~MUSB_POWER_SUSPENDM;
		power |= MUSB_POWER_RESUME;
		musb_writeb(mbase, MUSB_POWER, power);

		musb_dbg(musb, "Root port resuming, power %02x", power);

		musb->port1_status |= MUSB_PORT_STAT_RESUME;
		schedule_delayed_work(&musb->finish_resume_work,
				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}
	return 0;
}
/*
 * Assert or deassert USB reset on the root port.
 *
 * @musb:	controller instance
 * @do_reset:	true to start driving reset, false to end it
 *
 * Deassertion is scheduled via musb->deassert_reset_work; see the
 * NOTE below about the caller's timing responsibility.
 */
void musb_port_reset(struct musb *musb, bool do_reset)
{
	u8		power;
	void __iomem	*mbase = musb->mregs;

	/* Returning from HNP as a B-device: nothing to reset */
	if (musb_get_state(musb) == OTG_STATE_B_IDLE) {
		musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
		musb->port1_status &= ~USB_PORT_STAT_RESET;
		return;
	}

	if (!is_host_active(musb))
		return;

	/* NOTE: caller guarantees it will turn off the reset when
	 * the appropriate amount of time has passed
	 */
	power = musb_readb(mbase, MUSB_POWER);
	if (do_reset) {
		/*
		 * If RESUME is set, we must make sure it stays minimum 20 ms.
		 * Then we must clear RESUME and wait a bit to let musb start
		 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
		 * fail with "Error! Did not receive an SOF before suspend
		 * detected".
		 */
		if (power & MUSB_POWER_RESUME) {
			/* remaining time until rh_timer expires */
			long remain = (unsigned long) musb->rh_timer - jiffies;

			if (musb->rh_timer > 0 && remain > 0) {
				/* take into account the minimum delay after resume */
				schedule_delayed_work(
					&musb->deassert_reset_work, remain);
				return;
			}

			musb_writeb(mbase, MUSB_POWER,
				power & ~MUSB_POWER_RESUME);

			/* Give the core 1 ms to clear MUSB_POWER_RESUME */
			schedule_delayed_work(&musb->deassert_reset_work,
					      msecs_to_jiffies(1));
			return;
		}

		/* keep only the upper nibble of POWER, then drive RESET */
		power &= 0xf0;
		musb_writeb(mbase, MUSB_POWER,
				power | MUSB_POWER_RESET);

		musb->port1_status |= USB_PORT_STAT_RESET;
		musb->port1_status &= ~USB_PORT_STAT_ENABLE;
		/* deassert is scheduled 50 ms from now */
		schedule_delayed_work(&musb->deassert_reset_work,
				      msecs_to_jiffies(50));
	} else {
		musb_dbg(musb, "root port reset stopped");

		/* platform hooks bracket the actual reset deassertion */
		musb_platform_pre_root_reset_end(musb);
		musb_writeb(mbase, MUSB_POWER,
				power & ~MUSB_POWER_RESET);
		musb_platform_post_root_reset_end(musb);

		/* the core reports the negotiated speed in POWER.HSMODE */
		power = musb_readb(mbase, MUSB_POWER);
		if (power & MUSB_POWER_HSMODE) {
			musb_dbg(musb, "high-speed device connected");
			musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
		}

		musb->port1_status &= ~USB_PORT_STAT_RESET;
		musb->port1_status |= USB_PORT_STAT_ENABLE
					| (USB_PORT_STAT_C_RESET << 16)
					| (USB_PORT_STAT_C_ENABLE << 16);
		usb_hcd_poll_rh_status(musb->hcd);

		musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	}
}
/*
 * Handle a disconnect on the root port: report the connection-change
 * bit to the hub layer and advance the OTG state machine.
 */
void musb_root_disconnect(struct musb *musb)
{
	/* port powered, with a pending connection-change notification */
	musb->port1_status = USB_PORT_STAT_POWER
			| (USB_PORT_STAT_C_CONNECTION << 16);

	usb_hcd_poll_rh_status(musb->hcd);
	musb->is_active = 0;

	switch (musb_get_state(musb)) {
	case OTG_STATE_A_SUSPEND:
		/* with HNP enabled, switch roles: become A-peripheral */
		if (musb->xceiv && musb->xceiv->otg->host->b_hnp_enable) {
			musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
			musb->g.is_a_peripheral = 1;
			break;
		}
		fallthrough;
	case OTG_STATE_A_HOST:
		musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
		musb->is_active = 0;
		break;
	case OTG_STATE_A_WAIT_VFALL:
		musb_set_state(musb, OTG_STATE_B_IDLE);
		break;
	default:
		musb_dbg(musb, "host disconnect (%s)",
			musb_otg_state_string(musb));
	}
}
EXPORT_SYMBOL_GPL(musb_root_disconnect);
/*---------------------------------------------------------------------*/
/* Caller may or may not hold musb->lock */
/*
 * Root-hub status poll: report one change byte, bit 1 set when the
 * single root port has any pending change bits.
 */
int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct musb *musb = hcd_to_musb(hcd);

	/* called in_irq() via usb_hcd_poll_rh_status(); the change bits
	 * live in the upper 16 bits of port1_status
	 */
	if (!(musb->port1_status & 0xffff0000))
		return 0;

	*buf = 0x02;	/* port 1 changed */
	return 1;
}
/*
 * Decide whether it is safe to start a session.  Host-only builds may
 * start immediately; OTG builds must wait until a gadget driver has
 * been loaded, because starting a session as a device with no gadget
 * bound would crash.
 */
static int musb_has_gadget(struct musb *musb)
{
#ifndef CONFIG_USB_MUSB_HOST
	return musb->port_mode == MUSB_HOST;
#else
	return 1;
#endif
}
/*
 * Root-hub control request dispatcher for the single-port MUSB hub.
 *
 * @hcd:	HCD whose (virtual) root hub is addressed
 * @typeReq:	combined request code (Clear/Set/Get, hub or port scope)
 * @wValue:	feature selector or descriptor type
 * @wIndex:	port number (must be 1) — for USB_PORT_FEAT_TEST the test
 *		selector is carried in the high byte
 * @buf:	output buffer for descriptors / status words
 * @wLength:	buffer length (unused; these requests have fixed sizes)
 *
 * Returns 0 on success, -EPIPE ("protocol stall") for anything
 * unsupported, -ESHUTDOWN when hardware is not accessible.
 */
int musb_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u32		temp;
	int		retval = 0;
	unsigned long	flags;
	bool		start_musb = false;

	spin_lock_irqsave(&musb->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ESHUTDOWN;
	}

	/* hub features:  always zero, setting is a NOP
	 * port features: reported, sometimes updated when host is active
	 * no indicators
	 */
	switch (typeReq) {
	case ClearHubFeature:
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		/* only port 1 exists */
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, false);
			break;
		case USB_PORT_FEAT_POWER:
			if (!hcd->self.is_b_host)
				musb_platform_set_vbus(musb, 0);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_SUSPEND:
			break;
		default:
			goto error;
		}
		musb_dbg(musb, "clear feature %d", wValue);
		/* feature selectors double as port1_status bit numbers */
		musb->port1_status &= ~(1 << wValue);
		break;
	case GetHubDescriptor:
		{
		struct usb_hub_descriptor *desc = (void *)buf;

		desc->bDescLength = 9;
		desc->bDescriptorType = USB_DT_HUB;
		desc->bNbrPorts = 1;
		desc->wHubCharacteristics = cpu_to_le16(
			HUB_CHAR_INDV_PORT_LPSM /* per-port power switching */
			| HUB_CHAR_NO_OCPM	/* no overcurrent reporting */
			);
		desc->bPwrOn2PwrGood = 5;	/* msec/2 */
		desc->bHubContrCurrent = 0;

		/* workaround bogus struct definition */
		desc->u.hs.DeviceRemovable[0] = 0x02;	/* port 1 */
		desc->u.hs.DeviceRemovable[1] = 0xff;
		}
		break;
	case GetHubStatus:
		/* no hub-level status bits to report */
		temp = 0;
		*(__le32 *) buf = cpu_to_le32(temp);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;

		/* MUSB_PORT_STAT_RESUME is driver-internal; mask it out */
		put_unaligned(cpu_to_le32(musb->port1_status
					& ~MUSB_PORT_STAT_RESUME),
				(__le32 *) buf);

		/* port change status is more interesting */
		musb_dbg(musb, "port status %08x", musb->port1_status);
		break;
	case SetPortFeature:
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_POWER:
			/* NOTE: this controller has a strange state machine
			 * that involves "requesting sessions" according to
			 * magic side effects from incompletely-described
			 * rules about startup...
			 *
			 * This call is what really starts the host mode; be
			 * very careful about side effects if you reorder any
			 * initialization logic, e.g. for OTG, or change any
			 * logic relating to VBUS power-up.
			 */
			if (!hcd->self.is_b_host && musb_has_gadget(musb))
				start_musb = true;
			break;
		case USB_PORT_FEAT_RESET:
			musb_port_reset(musb, true);
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, true);
			break;
		case USB_PORT_FEAT_TEST:
			/* test modes only apply before a session is active */
			if (unlikely(is_host_active(musb)))
				goto error;

			/* test selector lives in the high byte of wIndex */
			wIndex >>= 8;
			switch (wIndex) {
			case USB_TEST_J:
				pr_debug("USB_TEST_J\n");
				temp = MUSB_TEST_J;
				break;
			case USB_TEST_K:
				pr_debug("USB_TEST_K\n");
				temp = MUSB_TEST_K;
				break;
			case USB_TEST_SE0_NAK:
				pr_debug("USB_TEST_SE0_NAK\n");
				temp = MUSB_TEST_SE0_NAK;
				break;
			case USB_TEST_PACKET:
				pr_debug("USB_TEST_PACKET\n");
				temp = MUSB_TEST_PACKET;
				musb_load_testpacket(musb);
				break;
			case USB_TEST_FORCE_ENABLE:
				pr_debug("USB_TEST_FORCE_ENABLE\n");
				temp = MUSB_TEST_FORCE_HOST
					| MUSB_TEST_FORCE_HS;

				musb_writeb(musb->mregs, MUSB_DEVCTL,
						MUSB_DEVCTL_SESSION);
				break;
			case 6:
				pr_debug("TEST_FIFO_ACCESS\n");
				temp = MUSB_TEST_FIFO_ACCESS;
				break;
			default:
				goto error;
			}
			musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
			break;
		default:
			goto error;
		}
		musb_dbg(musb, "set feature %d", wValue);
		musb->port1_status |= 1 << wValue;
		break;

	default:
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	/* started outside the spinlock: musb_start() may sleep/IRQ-enable */
	if (start_musb)
		musb_start(musb);

	return retval;
}
/* linux-master | drivers/usb/musb/musb_virthub.c */
// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include "musb_core.h"
#include "tusb6010.h"
/* Recover the per-channel bookkeeping from a generic dma_channel */
#define to_chdat(c)		((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ		5	/* REVISIT: Really 6, but req5 not OK */

/* One dmaengine channel together with the TUSB dmareq line it serves */
struct tusb_dma_data {
	s8			dmareq;		/* dmareq line index; -1 if unassigned */
	struct dma_chan		*chan;
};

/* Per-endpoint DMA transfer state */
struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;		/* 1 = TX endpoint, 0 = RX */
	struct musb_hw_ep	*hw_ep;

	struct tusb_dma_data	*dma_data;

	struct tusb_omap_dma	*tusb_dma;

	dma_addr_t		dma_addr;

	u32			len;			/* total requested length */
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;		/* 32-byte-aligned portion done by DMA */
	u32			completed_len;
};

/* Controller-wide state: dma_controller glue plus the dmareq pool */
struct tusb_omap_dma {
	struct dma_controller		controller;
	void __iomem			*tbase;

	struct tusb_dma_data		dma_pool[MAX_DMAREQ];
	unsigned			multichannel:1;	/* set for TUSB rev >= 3.0 */
};
/*
* Allocate dmareq0 to the current channel unless it's already taken
*/
/*
 * Claim the shared dmareq0 line for this endpoint.  Fails with -EAGAIN
 * when another endpoint is already mapped to it.
 */
static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 map = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if (map != 0) {
		dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
			chdat->epnum, map & 0xf);
		return -EAGAIN;
	}

	map = chdat->epnum;
	if (chdat->tx)
		map |= 1 << 4;	/* direction bit: TX */

	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, map);
	return 0;
}
/*
 * Release the shared dmareq0 line.  Logs an error and bails out if the
 * mapping register names a different endpoint (mismatched claim/release).
 */
static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	/* only the endpoint that owns the mapping may clear it */
	if ((reg & 0xf) != chdat->epnum) {
		/* pr_err replaces raw printk(KERN_ERR ...) per current style */
		pr_err("ep%i trying to release dmareq0 for ep%i\n",
		       chdat->epnum, reg & 0xf);
		return;
	}
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
}
/*
* See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
* musb_gadget.c.
*/
/*
 * dmaengine completion callback.  Accounts for the bytes moved by DMA,
 * finishes any sub-32-byte tail with PIO, and notifies the MUSB core.
 *
 * @data: the struct dma_channel set as callback_param when the transfer
 *        was programmed in tusb_omap_dma_program().
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct device		*dev = musb->controller;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");

	/* how much the hardware says is still outstanding */
	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	/* bytes not covered by the 32-byte-aligned DMA portion */
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Program a DMA transfer for the given channel.
 *
 * @channel:	MUSB-level DMA channel to use
 * @packet_sz:	USB max packet size for the endpoint
 * @rndis_mode:	unused by this implementation
 * @dma_addr:	bus address of the transfer buffer
 * @len:	total transfer length in bytes
 *
 * Returns true (as int) if the transfer was programmed and started;
 * false if DMA cannot be used for this transfer (alignment/size limits,
 * busy hardware), in which case the caller falls back to PIO.
 *
 * Only the 32-byte-aligned portion of @len is handed to the dmaengine;
 * the 1-31 byte tail is finished with PIO in tusb_omap_dma_cb().
 */
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
	struct musb			*musb = chdat->musb;
	struct device			*dev = musb->controller;
	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
	void __iomem			*mbase = musb->mregs;
	void __iomem			*ep_conf = hw_ep->conf;
	dma_addr_t			fifo_addr = hw_ep->fifo_sync;
	u32				dma_remaining;
	u16				csr;
	u32				psize;
	struct tusb_dma_data		*dma_data;
	struct dma_async_tx_descriptor	*dma_desc;
	struct dma_slave_config		dma_cfg;
	enum dma_transfer_direction	dma_dir;
	u32				port_window;
	int				ret;

	/* odd addresses and short/oversized lengths are PIO-only */
	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

	/* DMA handles only whole 32-byte chunks; the tail goes via PIO */
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		/* all endpoints share dmareq0 on pre-3.0 silicon */
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;
		/* 16-bit mode must use the async FIFO address */
		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);

	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					chdat->transfer_len, dma_dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Start DMA transfer */
	dma_async_issue_pending(dma_data->chan);

	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}
static int tusb_omap_dma_abort(struct dma_channel *channel)
{
struct tusb_omap_dma_ch *chdat = to_chdat(channel);
if (chdat->dma_data)
dmaengine_terminate_all(chdat->dma_data->chan);
channel->status = MUSB_DMA_STATUS_FREE;
return 0;
}
/*
 * Find a free dmareq line in TUSB_DMA_EP_MAP (five bits per line: a
 * 4-bit endpoint number plus a TX direction bit) and map this endpoint
 * to it.  Returns -EAGAIN when every line is in use.
 */
static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 map = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	int slot;

	/* a zero endpoint field marks a free slot */
	for (slot = 0; slot < MAX_DMAREQ; slot++)
		if (((map >> (slot * 5)) & 0xf) == 0)
			break;

	if (slot == MAX_DMAREQ)
		return -EAGAIN;

	map |= chdat->epnum << (slot * 5);
	if (chdat->tx)
		map |= (1 << 4) << (slot * 5);
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, map);

	chdat->dma_data = &chdat->tusb_dma->dma_pool[slot];

	return 0;
}
/* Clear this channel's five-bit slot in TUSB_DMA_EP_MAP, if it has one. */
static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 map;
	int dmareq;

	if (!chdat || !chdat->dma_data)
		return;

	dmareq = chdat->dma_data->dmareq;
	if (dmareq < 0)
		return;

	map = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	map &= ~(0x1f << (dmareq * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, map);

	chdat->dma_data = NULL;
}
/* Pool of MUSB-level channel descriptors, one per possible dmareq line */
static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
/*
 * dma_controller::channel_alloc hook: hand out a free channel from the
 * static pool and bind it to @hw_ep.  Returns NULL when no DMA channel
 * can be used (ep0, pool exhausted, or no dmareq line available).
 */
static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	struct tusb_omap_dma	*tusb_dma;
	struct musb		*musb;
	struct dma_channel	*channel = NULL;
	struct tusb_omap_dma_ch	*chdat = NULL;
	struct tusb_dma_data	*dma_data = NULL;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->controller.musb;

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	/* first pool entry still in UNKNOWN state is free for use */
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	chdat->musb = tusb_dma->controller.musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;
	if (tx)
		chdat->tx = 1;
	else
		chdat->tx = 0;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (!chdat->dma_data) {
		if (tusb_dma->multichannel) {
			/* rev >= 3.0: each endpoint gets its own dmareq */
			ret = tusb_omap_dma_allocate_dmareq(chdat);
			if (ret != 0)
				goto free_dmareq;
		} else {
			chdat->dma_data = &tusb_dma->dma_pool[0];
		}
	}

	dma_data = chdat->dma_data;

	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		tusb_dma->multichannel ? "shared" : "dedicated",
		dma_data->dmareq);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	/* put the pool entry back into the unallocated state */
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}
/*
 * dma_controller::channel_release hook: stop any pending dmaengine
 * activity, unmap the dmareq line, and return the channel to the pool.
 */
static void tusb_omap_dma_release(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct musb		*musb = chdat->musb;

	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);

	/* UNKNOWN marks the pool entry as reusable by channel_alloc */
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	dmaengine_terminate_sync(chdat->dma_data->chan);
	tusb_omap_dma_free_dmareq(chdat);
	/* dropped the old trailing "channel = NULL;": it only cleared the
	 * local parameter copy and had no effect (dead store)
	 */
}
void tusb_dma_controller_destroy(struct dma_controller *c)
{
struct tusb_omap_dma *tusb_dma;
int i;
tusb_dma = container_of(c, struct tusb_omap_dma, controller);
for (i = 0; i < MAX_DMAREQ; i++) {
struct dma_channel *ch = dma_channel_pool[i];
if (ch) {
kfree(ch->private_data);
kfree(ch);
}
/* Free up the DMA channels */
if (tusb_dma && tusb_dma->dma_pool[i].chan)
dma_release_channel(tusb_dma->dma_pool[i].chan);
}
kfree(tusb_dma);
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);
static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
{
struct musb *musb = tusb_dma->controller.musb;
int i;
int ret = 0;
for (i = 0; i < MAX_DMAREQ; i++) {
struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
/*
* Request DMA channels:
* - one channel in case of non multichannel mode
* - MAX_DMAREQ number of channels in multichannel mode
*/
if (i == 0 || tusb_dma->multichannel) {
char ch_name[8];
sprintf(ch_name, "dmareq%d", i);
dma_data->chan = dma_request_chan(musb->controller,
ch_name);
if (IS_ERR(dma_data->chan)) {
dev_err(musb->controller,
"Failed to request %s\n", ch_name);
ret = PTR_ERR(dma_data->chan);
goto dma_error;
}
dma_data->dmareq = i;
} else {
dma_data->dmareq = -1;
}
}
return 0;
dma_error:
for (; i >= 0; i--) {
struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
if (dma_data->dmareq >= 0)
dma_release_channel(dma_data->chan);
}
return ret;
}
/*
 * Create and initialise the TUSB DMA controller: program the chip's
 * DMA request configuration, allocate the channel descriptor pool, and
 * request the backing dmaengine channels.  Returns NULL on any failure.
 */
struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base)
{
	void __iomem	*tbase = musb->ctrl_base;
	struct tusb_omap_dma	*tusb_dma;
	int			i;

	/* REVISIT: Get dmareq lines used from board-*.c */

	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);

	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
	if (!tusb_dma)
		goto out;

	tusb_dma->controller.musb = musb;
	tusb_dma->tbase = musb->ctrl_base;

	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
	tusb_dma->controller.channel_release = tusb_omap_dma_release;
	tusb_dma->controller.channel_program = tusb_omap_dma_program;
	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;

	/* rev 3.0+ silicon supports one dmareq line per endpoint */
	if (musb->tusb_revision >= TUSB_REV_30)
		tusb_dma->multichannel = 1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel	*ch;
		struct tusb_omap_dma_ch	*chdat;

		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
		if (!ch)
			goto cleanup;

		dma_channel_pool[i] = ch;

		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
		if (!chdat)
			goto cleanup;

		ch->status = MUSB_DMA_STATUS_UNKNOWN;
		ch->private_data = chdat;
	}

	if (tusb_omap_allocate_dma_pool(tusb_dma))
		goto cleanup;

	return &tusb_dma->controller;

cleanup:
	/* NOTE(review): this calls the generic musb_dma_controller_destroy,
	 * not tusb_dma_controller_destroy which also frees the channel
	 * pool entries allocated above — confirm whether that is intended
	 */
	musb_dma_controller_destroy(&tusb_dma->controller);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_create);
/* linux-master | drivers/usb/musb/tusb6010_omap.c */
// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <[email protected]>
*
* Notes:
* - Driver assumes that interface to external host (main CPU) is
* configured for NOR FLASH interface instead of VLYNQ serial
* interface.
*/
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
/* Glue-layer state tying the TUSB6010 platform device to the MUSB core */
struct tusb6010_glue {
	struct device		*dev;
	struct platform_device	*musb;
	struct platform_device	*phy;
	struct gpio_desc	*enable;	/* presumably the chip-enable line — confirm against DT binding */
	struct gpio_desc	*intpin;	/* presumably the interrupt line — confirm against DT binding */
};

static void tusb_musb_set_vbus(struct musb *musb, int is_on);

/* Major/minor nibbles of the TUSB revision registers */
#define TUSB_REV_MAJOR(reg_val)		((reg_val >> 4) & 0xf)
#define TUSB_REV_MINOR(reg_val)		(reg_val & 0xf)
/*
* Checks the revision. We need to use the DMA register as 3.0 does not
* have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
*/
/*
 * Read the chip revision from the DMA revision register; for major
 * revision 3, the die ID distinguishes 3.0 from 3.1 and bumps the
 * minor digit accordingly.
 */
static u8 tusb_get_revision(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;
	u8		rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;

	if (TUSB_REV_MAJOR(rev) == 3) {
		u32 die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
				TUSB_DIDR1_HI));

		if (die_id >= TUSB_DIDR1_HI_REV_31)
			rev |= 1;
	}

	return rev;
}
/*
 * Log all the per-block revision registers plus the die ID and the
 * effective revision cached in musb->tusb_revision.
 */
static void tusb_print_revision(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;
	u8		rev;

	rev = musb->tusb_revision;

	pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
		"prcm",
		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
		TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
		"int",
		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
		TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
		"gpio",
		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
		TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
		"dma",
		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
		TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
		"dieid",
		TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
		"rev",
		TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
}
/* PHY test-mode bits toggled by the WBUS wake-up quirk below */
#define WBUS_QUIRK_MASK	(TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
				| TUSB_PHY_OTG_CTRL_TESTM0)

/*
 * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
 * Disables power detection in PHY for the duration of idle.
 *
 * The pre-quirk register values are kept in static variables so the
 * disable path can restore them; consequently enable/disable calls
 * must come in pairs.
 */
static void tusb_wbus_quirk(struct musb *musb, int enabled)
{
	void __iomem	*tbase = musb->ctrl_base;
	static u32	phy_otg_ctrl, phy_otg_ena;
	u32		tmp;

	if (enabled) {
		/* save current state, then force the TESTM bits on */
		phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
		phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
				| phy_otg_ena | WBUS_QUIRK_MASK;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
		tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
		tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
		dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
	} else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
					& TUSB_PHY_OTG_CTRL_TESTM2) {
		/* quirk is active: restore the saved register values */
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
		dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
		phy_otg_ctrl = 0;
		phy_otg_ena = 0;
	}
}
/* Each endpoint FIFO is a 0x20-byte window; FIFOs start at 0x200. */
static u32 tusb_fifo_offset(u8 epnum)
{
	return 0x200 + (epnum << 5);
}
/*
 * Flat endpoint register layout: the offset does not depend on the
 * endpoint number here (endpoint selection is done separately via the
 * MUSB_INDEX register, see tusb_ep_select()).
 */
static u32 tusb_ep_offset(u8 epnum, u16 offset)
{
	return 0x10 + offset;
}
/* TUSB mapping: "flat" plus ep0 special cases */
/* Select the active endpoint by writing its number to MUSB_INDEX. */
static void tusb_ep_select(void __iomem *mbase, u8 epnum)
{
	musb_writeb(mbase, MUSB_INDEX, epnum);
}
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
*/
/*
 * Byte read emulated via an aligned 16-bit access (the TUSB6010 bus
 * interface has no 8-bit cycles).
 */
static u8 tusb_readb(void __iomem *addr, u32 offset)
{
	u16 word = __raw_readw(addr + (offset & ~1));

	/* odd offsets live in the high byte of the aligned word */
	return (offset & 1) ? (word >> 8) : (word & 0xff);
}
/*
 * Byte write emulated as a read-modify-write of the containing 16-bit
 * word (no 8-bit bus cycles on TUSB6010).
 */
static void tusb_writeb(void __iomem *addr, u32 offset, u8 data)
{
	void __iomem	*aligned = addr + (offset & ~1);
	u16		word = __raw_readw(aligned);

	if (offset & 1)
		word = (data << 8) | (word & 0xff);
	else
		word = (word & 0xff00) | data;

	__raw_writew(word, aligned);
}
/*
* TUSB 6010 may use a parallel bus that doesn't support byte ops;
* so both loading and unloading FIFOs need explicit byte counts.
*/
/*
 * Push a buffer of arbitrary alignment/length into the FIFO using only
 * 32-bit writes; the final partial word is zero-padded.
 */
static inline void
tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
{
	u32 val;

	/* full 32-bit words first (only when more than one write is needed) */
	if (len > 4) {
		u16 words = len >> 2;

		while (words--) {
			memcpy(&val, buf, 4);
			musb_writel(fifo, 0, val);
			buf += 4;
		}
		len %= 4;
	}

	/* trailing 1-4 bytes go out as one zero-padded word */
	if (len > 0) {
		val = 0;
		memcpy(&val, buf, len);
		musb_writel(fifo, 0, val);
	}
}
/*
 * Pull a buffer of arbitrary alignment/length out of the FIFO using
 * only 32-bit reads; extra bytes in the final word are discarded.
 */
static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
						void *buf, u16 len)
{
	u32 val;

	/* full 32-bit words first (only when more than one read is needed) */
	if (len > 4) {
		u16 words = len >> 2;

		while (words--) {
			val = musb_readl(fifo, 0);
			memcpy(buf, &val, 4);
			buf += 4;
		}
		len %= 4;
	}

	/* trailing 1-4 bytes come from one last FIFO word */
	if (len > 0) {
		val = musb_readl(fifo, 0);
		memcpy(buf, &val, len);
	}
}
/*
 * Load @len bytes from @buf into the endpoint's TX FIFO.  The transfer
 * size is programmed into the endpoint config register first, then the
 * copy path is chosen by the buffer's alignment (32-bit fast path,
 * 16-bit pairs, or the unaligned byte-wise helper for the remainder).
 */
static void tusb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
{
	struct musb *musb = hw_ep->musb;
	void __iomem	*ep_conf = hw_ep->conf;
	void __iomem	*fifo = hw_ep->fifo;
	u8		epnum = hw_ep->epnum;

	prefetch(buf);

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', epnum, fifo, len, buf);

	/* tell the chip how many bytes this FIFO transaction will move */
	if (epnum)
		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(len));
	else
		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
			TUSB_EP0_CONFIG_XFR_SIZE(len));

	if (likely((0x01 & (unsigned long) buf) == 0)) {

		/* Best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) buf) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, buf, len >> 2);
				buf += (len & ~0x03);
				len &= 0x03;
			}
		} else {
			/* 16-bit aligned: combine halfword pairs manually */
			if (len >= 2) {
				u32 val;
				int i;

				/* Cannot use writesw, fifo is 32-bit */
				for (i = 0; i < (len >> 2); i++) {
					val = (u32)(*(u16 *)buf);
					buf += 2;
					val |= (*(u16 *)buf) << 16;
					buf += 2;
					musb_writel(fifo, 0, val);
				}
				len &= 0x03;
			}
		}
	}

	/* odd-aligned buffers and any 1-3 byte tail */
	if (len > 0)
		tusb_fifo_write_unaligned(fifo, buf, len);
}
/*
 * Drain @len bytes from the endpoint's RX FIFO into @buf.  Mirrors
 * tusb_write_fifo(): program the transfer size, then pick the copy
 * path by buffer alignment.
 */
static void tusb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
{
	struct musb *musb = hw_ep->musb;
	void __iomem	*ep_conf = hw_ep->conf;
	void __iomem	*fifo = hw_ep->fifo;
	u8		epnum = hw_ep->epnum;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', epnum, fifo, len, buf);

	/* tell the chip how many bytes this FIFO transaction will move */
	if (epnum)
		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(len));
	else
		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));

	if (likely((0x01 & (unsigned long) buf) == 0)) {

		/* Best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) buf) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, buf, len >> 2);
				buf += (len & ~0x03);
				len &= 0x03;
			}
		} else {
			/* 16-bit aligned: split each word into halfwords */
			if (len >= 2) {
				u32 val;
				int i;

				/* Cannot use readsw, fifo is 32-bit */
				for (i = 0; i < (len >> 2); i++) {
					val = musb_readl(fifo, 0);
					*(u16 *)buf = (u16)(val & 0xffff);
					buf += 2;
					*(u16 *)buf = (u16)(val >> 16);
					buf += 2;
				}
				len &= 0x03;
			}
		}
	}

	/* odd-aligned buffers and any 1-3 byte tail */
	if (len > 0)
		tusb_fifo_read_unaligned(fifo, buf, len);
}
/* Single controller instance, needed by tusb_draw_power() via the PHY hook */
static struct musb *the_musb;
/* This is used by gadget drivers, and OTG transceiver logic, allowing
* at most mA current to be drawn from VBUS during a Default-B session
* (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
* mode), or low power Default-B sessions, something else supplies power.
* Caller must take care of locking.
*/
/* This is used by gadget drivers, and OTG transceiver logic, allowing
 * at most mA current to be drawn from VBUS during a Default-B session
 * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
 * mode), or low power Default-B sessions, something else supplies power.
 * Caller must take care of locking.
 *
 * Switches the 1.5V/3.3V VBUS supply rails on or off via
 * TUSB_PRCM_MNGMT and tracks the choice in musb->is_bus_powered.
 * Always returns 0.
 */
static int tusb_draw_power(struct usb_phy *x, unsigned mA)
{
	struct musb	*musb = the_musb;
	void __iomem	*tbase = musb->ctrl_base;
	u32		reg;

	/* tps65030 seems to consume max 100mA, with maybe 60mA available
	 * (measured on one board) for things other than tps and tusb.
	 *
	 * Boards sharing the CPU clock with CLKIN will need to prevent
	 * certain idle sleep states while the USB link is active.
	 *
	 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
	 * The actual current usage would be very board-specific. For now,
	 * it's simpler to just use an aggregate (also board-specific).
	 */
	if (x->otg->default_a || mA < (musb->min_power << 1))
		mA = 0;

	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
	if (mA) {
		musb->is_bus_powered = 1;
		reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
	} else {
		musb->is_bus_powered = 0;
		reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
	}
	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);

	dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA);
	return 0;
}
/* workaround for issue 13: change clock during chip idle
* (to be fixed in rev3 silicon) ... symptoms include disconnect
* or looping suspend/resume cycles
*/
/* workaround for issue 13: change clock during chip idle
 * (to be fixed in rev3 silicon) ... symptoms include disconnect
 * or looping suspend/resume cycles
 */
static void tusb_set_clock_source(struct musb *musb, unsigned mode)
{
	void __iomem	*tbase = musb->ctrl_base;
	u32		conf = musb_readl(tbase, TUSB_PRCM_CONF);

	conf &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);

	/* 0 = refclk (clkin, XI)
	 * 1 = PHY 60 MHz (internal PLL)
	 * 2 = not supported
	 * 3 = what?
	 */
	if (mode > 0)
		conf |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);

	musb_writel(tbase, TUSB_PRCM_CONF, conf);

	/* FIXME tusb6010_platform_retime(mode == 0); */
}
/*
 * Idle TUSB6010 until next wake-up event; NOR access always wakes.
 * Other code ensures that we idle unless we're connected _and_ the
 * USB link is not suspended ... and tells us the relevant wakeup
 * events. SW_EN for voltage is handled separately.
 */
static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 reg;
	/* rev 3.0 silicon needs the WBUS quirk applied before idling */
	if ((wakeup_enables & TUSB_PRCM_WBUS)
			&& (musb->tusb_revision == TUSB_REV_30))
		tusb_wbus_quirk(musb, 1);
	/* switch back to the external reference clock while idle (issue 13) */
	tusb_set_clock_source(musb, 0);
	/* NOR chip-select always wakes the chip */
	wakeup_enables |= TUSB_PRCM_WNORCS;
	/* mask register is active-low: zero bits enable a wake source */
	musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
	/* REVISIT writeup of WID implies that if WID set and ID is grounded,
	 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
	 * Presumably that's mostly to save power, hence WID is immaterial ...
	 */
	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
	/* issue 4: when driving vbus, use hipower (vbus_det) comparator */
	if (is_host_active(musb)) {
		reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
		reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
	} else {
		reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
		reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
	}
	reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
	dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables);
}
/*
 * Updates cable VBUS status. Caller must take care of locking.
 *
 * Returns 1 when VBUS is valid, 0 otherwise.
 */
static int tusb_musb_vbus_status(struct musb *musb)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 otg_stat, prcm_mngmt;

	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);

	/* Temporarily enable VBUS detection if it was disabled for
	 * suspend mode. Unless it's enabled otg_stat and devctl will
	 * not show correct VBUS state.
	 */
	if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
		musb_writel(tbase, TUSB_PRCM_MNGMT,
			    prcm_mngmt | TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN);
		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
		/* restore the suspended detector configuration */
		musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
	}

	return (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) ? 1 : 0;
}
/* Idle timer callback: drop VBUS when nothing connected long enough, then
 * (if fully inactive) program wake sources and let the chip idle. */
static void musb_do_idle(struct timer_list *t)
{
	struct musb *musb = from_timer(musb, t, dev_timer);
	unsigned long flags;
	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_WAIT_BCON:
		if ((musb->a_wait_bcon != 0)
			&& (musb->idle_timeout == 0
				|| time_after(jiffies, musb->idle_timeout))) {
			dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
					usb_otg_state_string(musb->xceiv->otg->state));
		}
		fallthrough;
	case OTG_STATE_A_IDLE:
		tusb_musb_set_vbus(musb, 0);
		break;
	default:
		break;
	}
	if (!musb->is_active) {
		u32 wakeups;
		/* wait until hub_wq handles port change status */
		if (is_host_active(musb) && (musb->port1_status >> 16))
			goto done;
		if (!musb->gadget_driver) {
			/* no gadget bound: nothing to wake for */
			wakeups = 0;
		} else {
			wakeups = TUSB_PRCM_WHOSTDISCON
				| TUSB_PRCM_WBUS
				| TUSB_PRCM_WVBUS;
			wakeups |= TUSB_PRCM_WID;
		}
		tusb_allow_idle(musb, wakeups);
	}
done:
	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Maybe put TUSB6010 into idle mode depending on USB link status,
 * like "disconnected" or "suspended". We'll be woken out of it by
 * connect, resume, or disconnect.
 *
 * Needs to be called as the last function everywhere where there is
 * register access to TUSB6010 because of NOR flash wake-up.
 * Caller should own controller spinlock.
 *
 * Delay because peripheral enables D+ pullup 3msec after SE0, and
 * we don't want to treat that full speed J as a wakeup event.
 * ... peripherals must draw only suspend current after 10 msec.
 */
static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
{
	unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
	static unsigned long last_timer;
	if (timeout == 0)
		timeout = default_timeout;
	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || ((musb->a_wait_bcon == 0)
			&& (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
		dev_dbg(musb->controller, "%s active, deleting timer\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		del_timer(&musb->dev_timer);
		last_timer = jiffies;
		return;
	}
	/* a shorter request never preempts a pending longer one */
	if (time_after(last_timer, timeout)) {
		if (!timer_pending(&musb->dev_timer))
			last_timer = timeout;
		else {
			dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
			return;
		}
	}
	last_timer = timeout;
	dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
		usb_otg_state_string(musb->xceiv->otg->state),
		(unsigned long)jiffies_to_msecs(timeout - jiffies));
	mod_timer(&musb->dev_timer, timeout);
}
/* ticks of 60 MHz clock */
#define DEVCLOCK 60000000
/* Encode an OTG timeout in msecs as a TUSB_DEV_OTG_TIMER value; 0 disables */
#define OTG_TIMER_MS(msecs) ((msecs) \
	? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
		| TUSB_DEV_OTG_TIMER_ENABLE) \
	: 0)
/* Drive or drop VBUS and update the OTG state machine accordingly.
 * Caller must take care of locking. */
static void tusb_musb_set_vbus(struct musb *musb, int is_on)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 conf, prcm, timer;
	u8 devctl;
	struct usb_otg *otg = musb->xceiv->otg;
	/* HDRC controls CPEN, but beware current surges during device
	 * connect. They can trigger transient overcurrent conditions
	 * that must be ignored.
	 */
	prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
	conf = musb_readl(tbase, TUSB_DEV_CONF);
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	if (is_on) {
		/* arm OTG timer for VBUS-rise timeout, start host session */
		timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
		otg->default_a = 1;
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
		devctl |= MUSB_DEVCTL_SESSION;
		conf |= TUSB_DEV_CONF_USB_HOST_MODE;
		MUSB_HST_MODE(musb);
	} else {
		u32 otg_stat;
		timer = 0;
		/* If ID pin is grounded, we want to be a_idle */
		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
		if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_WAIT_VRISE:
			case OTG_STATE_A_WAIT_BCON:
				musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
				break;
			case OTG_STATE_A_WAIT_VFALL:
				musb->xceiv->otg->state = OTG_STATE_A_IDLE;
				break;
			default:
				musb->xceiv->otg->state = OTG_STATE_A_IDLE;
			}
			musb->is_active = 0;
			otg->default_a = 1;
			MUSB_HST_MODE(musb);
		} else {
			/* ID floating: fall back to B-device idle */
			musb->is_active = 0;
			otg->default_a = 0;
			musb->xceiv->otg->state = OTG_STATE_B_IDLE;
			MUSB_DEV_MODE(musb);
		}
		devctl &= ~MUSB_DEVCTL_SESSION;
		conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
	}
	/* VBUS-sourced regulators off in both cases */
	prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
	musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
	musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
	musb_writel(tbase, TUSB_DEV_CONF, conf);
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
	dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
		usb_otg_state_string(musb->xceiv->otg->state),
		musb_readb(musb->mregs, MUSB_DEVCTL),
		musb_readl(tbase, TUSB_DEV_OTG_STAT),
		conf, prcm);
}
/*
 * Sets the mode to OTG, peripheral or host by changing the ID detection.
 * Caller must take care of locking.
 *
 * Note that if a mini-A cable is plugged in the ID line will stay down as
 * the weak ID pull-up is not able to pull the ID up.
 *
 * Returns 0 on success, -EINVAL for an unknown @musb_mode.
 */
static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
	phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
	dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
	switch (musb_mode) {
	case MUSB_HOST: /* Disable PHY ID detect, ground ID */
		phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf |= TUSB_DEV_CONF_ID_SEL;
		dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
		break;
	case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
		break;
	case MUSB_OTG: /* Use PHY ID detection */
		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
		break;
	default:
		dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode);
		return -EINVAL;
	}
	/* PHY OTG control registers need the write-protect key set */
	musb_writel(tbase, TUSB_PHY_OTG_CTRL,
			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
	musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	if ((musb_mode == MUSB_PERIPHERAL) &&
		!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
			INFO("Cannot be peripheral with mini-A cable "
			"otg_stat: %08x\n", otg_stat);
	return 0;
}
/* Handle OTG-related interrupt sources (ID change, VBUS change, OTG timer).
 * Returns a jiffies value to use as the next idle timeout, or 0 for the
 * default. Called from the IRQ handler with the musb lock held. */
static inline unsigned long
tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
{
	u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	unsigned long idle_timeout = 0;
	struct usb_otg *otg = musb->xceiv->otg;
	/* ID pin */
	if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
		int default_a;
		/* ID grounded means Default-A (host side) */
		default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
		dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B');
		otg->default_a = default_a;
		tusb_musb_set_vbus(musb, default_a);
		/* Don't allow idling immediately */
		if (default_a)
			idle_timeout = jiffies + (HZ * 3);
	}
	/* VBUS state change */
	if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
		/* B-dev state machine: no vbus ~= disconnect */
		if (!otg->default_a) {
			/* ? musb_root_disconnect(musb); */
			musb->port1_status &=
				~(USB_PORT_STAT_CONNECTION
				| USB_PORT_STAT_ENABLE
				| USB_PORT_STAT_LOW_SPEED
				| USB_PORT_STAT_HIGH_SPEED
				| USB_PORT_STAT_TEST
				);
			if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
				dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
				if (musb->xceiv->otg->state != OTG_STATE_B_IDLE) {
					/* INTR_DISCONNECT can hide... */
					musb->xceiv->otg->state = OTG_STATE_B_IDLE;
					musb->int_usb |= MUSB_INTR_DISCONNECT;
				}
				musb->is_active = 0;
			}
			dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
				usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
			idle_timeout = jiffies + (1 * HZ);
			schedule_delayed_work(&musb->irq_work, 0);
		} else /* A-dev state machine */ {
			dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
				usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_IDLE:
				dev_dbg(musb->controller, "Got SRP, turning on VBUS\n");
				musb_platform_set_vbus(musb, 1);
				/* CONNECT can wake if a_wait_bcon is set */
				if (musb->a_wait_bcon != 0)
					musb->is_active = 0;
				else
					musb->is_active = 1;
				/*
				 * OPT FS A TD.4.6 needs few seconds for
				 * A_WAIT_VRISE
				 */
				idle_timeout = jiffies + (2 * HZ);
				break;
			case OTG_STATE_A_WAIT_VRISE:
				/* ignore; A-session-valid < VBUS_VALID/2,
				 * we monitor this with the timer
				 */
				break;
			case OTG_STATE_A_WAIT_VFALL:
				/* REVISIT this irq triggers during short
				 * spikes caused by enumeration ...
				 */
				if (musb->vbuserr_retry) {
					musb->vbuserr_retry--;
					tusb_musb_set_vbus(musb, 1);
				} else {
					musb->vbuserr_retry
						= VBUSERR_RETRY_COUNT;
					tusb_musb_set_vbus(musb, 0);
				}
				break;
			default:
				break;
			}
		}
	}
	/* OTG timer expiration */
	if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
		u8 devctl;
		dev_dbg(musb->controller, "%s timer, %03x\n",
			usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_WAIT_VRISE:
			/* VBUS has probably been valid for a while now,
			 * but may well have bounced out of range a bit
			 */
			devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
			if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= MUSB_DEVCTL_VBUS) {
					dev_dbg(musb->controller, "devctl %02x\n", devctl);
					break;
				}
				musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
				musb->is_active = 0;
				idle_timeout = jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon);
			} else {
				/* REVISIT report overcurrent to hub? */
				ERR("vbus too slow, devctl %02x\n", devctl);
				tusb_musb_set_vbus(musb, 0);
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				idle_timeout = jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon);
			break;
		case OTG_STATE_A_SUSPEND:
			break;
		case OTG_STATE_B_WAIT_ACON:
			break;
		default:
			break;
		}
	}
	schedule_delayed_work(&musb->irq_work, 0);
	return idle_timeout;
}
/* Top-level TUSB6010 interrupt handler: masks the chip interrupts, decodes
 * wake-up / OTG / DMA / endpoint sources, dispatches to musb_interrupt(),
 * then acknowledges and restores the mask. Always returns IRQ_HANDLED. */
static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
{
	struct musb *musb = __hci;
	void __iomem *tbase = musb->ctrl_base;
	unsigned long flags, idle_timeout = 0;
	u32 int_mask, int_src;
	spin_lock_irqsave(&musb->lock, flags);
	/* Mask all interrupts to allow using both edge and level GPIO irq */
	int_mask = musb_readl(tbase, TUSB_INT_MASK);
	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
	int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
	dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src);
	musb->int_usb = (u8) int_src;
	/* Acknowledge wake-up source interrupts */
	if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
		u32 reg;
		u32 i;
		if (musb->tusb_revision == TUSB_REV_30)
			tusb_wbus_quirk(musb, 0);
		/* there are issues re-locking the PLL on wakeup ... */
		/* work around issue 8: poll scratch pad until NOR responds */
		for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
			musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
			musb_writel(tbase, TUSB_SCRATCH_PAD, i);
			reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
			if (reg == i)
				break;
			dev_dbg(musb->controller, "TUSB NOR not ready\n");
		}
		/* work around issue 13 (2nd half): back to PHY 60 MHz clock */
		tusb_set_clock_source(musb, 1);
		reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
		musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
		if (reg & ~TUSB_PRCM_WNORCS) {
			musb->is_active = 1;
			schedule_delayed_work(&musb->irq_work, 0);
		}
		dev_dbg(musb->controller, "wake %sactive %02x\n",
			musb->is_active ? "" : "in", reg);
		/* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
	}
	if (int_src & TUSB_INT_SRC_USB_IP_CONN)
		del_timer(&musb->dev_timer);
	/* OTG state change reports (annoyingly) not issued by Mentor core */
	if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
				| TUSB_INT_SRC_OTG_TIMEOUT
				| TUSB_INT_SRC_ID_STATUS_CHNG))
		idle_timeout = tusb_otg_ints(musb, int_src, tbase);
	/*
	 * Just clear the DMA interrupt if it comes as the completion for both
	 * TX and RX is handled by the DMA callback in tusb6010_omap
	 */
	if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
		u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
		dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
		musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
	}
	/* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
		u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
		musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
		/* RX endpoints start at bit 1 of int_rx (EP0 is in int_tx) */
		musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
		musb->int_tx = (musb_src & 0xffff);
	} else {
		musb->int_rx = 0;
		musb->int_tx = 0;
	}
	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
		musb_interrupt(musb);
	/* Acknowledge TUSB interrupts. Clear only non-reserved bits */
	musb_writel(tbase, TUSB_INT_SRC_CLEAR,
		int_src & ~TUSB_INT_MASK_RESERVED_BITS);
	tusb_musb_try_idle(musb, idle_timeout);
	musb_writel(tbase, TUSB_INT_MASK, int_mask);
	spin_unlock_irqrestore(&musb->lock, flags);
	return IRQ_HANDLED;
}
/* Tracks whether DMA was left disabled across enable/disable cycles;
 * used only for the "dma not reactivated" diagnostics below. */
static int dma_off;
/*
 * Enables TUSB6010. Caller must take care of locking.
 * REVISIT:
 * - Check what is unnecessary in MGC_HdrcStart()
 */
static void tusb_musb_enable(struct musb *musb)
{
	void __iomem *tbase = musb->ctrl_base;
	/* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
	 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
	musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
	/* Setup TUSB interrupt, disable DMA and GPIO interrupts */
	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
	/* Clear all subsystem interrups */
	musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
	musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
	/* Acknowledge pending interrupt(s) */
	musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
	/* Only 0 clock cycles for minimum interrupt de-assertion time and
	 * interrupt polarity active low seems to work reliably here */
	musb_writel(tbase, TUSB_INT_CTRL_CONF,
			TUSB_INT_CTRL_CONF_INT_RELCYC(0));
	irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
	/* maybe force into the Default-A OTG state machine */
	if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
			& TUSB_DEV_OTG_STAT_ID_STATUS))
		musb_writel(tbase, TUSB_INT_SRC_SET,
				TUSB_INT_SRC_ID_STATUS_CHNG);
	if (is_dma_capable() && dma_off)
		printk(KERN_WARNING "%s %s: dma not reactivated\n",
				__FILE__, __func__);
	else
		dma_off = 1;
}
/*
 * Disables TUSB6010. Caller must take care of locking.
 */
static void tusb_musb_disable(struct musb *musb)
{
	void __iomem *tbase = musb->ctrl_base;
	/* FIXME stop DMA, IRQs, timers, ... */
	/* disable all IRQs */
	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
	del_timer(&musb->dev_timer);
	if (is_dma_capable() && !dma_off) {
		printk(KERN_WARNING "%s %s: dma still active\n",
			__FILE__, __func__);
		dma_off = 1;
	}
}
/*
 * Sets up TUSB6010 CPU interface specific signals and registers
 * Note: Settings optimized for OMAP24xx
 */
static void tusb_setup_cpu_interface(struct musb *musb)
{
	void __iomem *tbase = musb->ctrl_base;
	/*
	 * Disable GPIO[5:0] pullups (used as output DMA requests)
	 * Don't disable GPIO[7:6] as they are needed for wake-up.
	 */
	musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
	/* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
	musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
	/* Turn GPIO[5:0] to DMAREQ[5:0] signals */
	musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
	/* Burst size 16x16 bits, all six DMA requests enabled, DMA request
	 * de-assertion time 2 system clocks p 62 */
	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
		TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
		TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
	/* Set 0 wait count for synchronous burst access */
	musb_writel(tbase, TUSB_WAIT_COUNT, 1);
}
/* Power up and initialize the TUSB6010: enable the companion regulators,
 * wait for the chip to come out of reset, verify its identity/revision,
 * then program clocking, power management and the CPU interface.
 * Returns 0 on success, -ENODEV (or the poll-timeout error) on failure;
 * on failure the chip is powered back down. */
static int tusb_musb_start(struct musb *musb)
{
	struct tusb6010_glue *glue = dev_get_drvdata(musb->controller->parent);
	void __iomem *tbase = musb->ctrl_base;
	unsigned long flags;
	u32 reg;
	int ret;
	/*
	 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
	 * 1.5 V voltage regulators of PM companion chip. Companion chip will then
	 * provide then PGOOD signal to TUSB6010 which will release it from reset.
	 */
	gpiod_set_value(glue->enable, 1);
	/* Wait for 100ms until TUSB6010 pulls INT pin down */
	ret = read_poll_timeout(gpiod_get_value, reg, !reg, 5000, 100000, true,
				glue->intpin);
	if (ret) {
		pr_err("tusb: Powerup response failed\n");
		return ret;
	}
	spin_lock_irqsave(&musb->lock, flags);
	/* reading the fixed test-reset value proves the chip is alive */
	if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
		TUSB_PROD_TEST_RESET_VAL) {
		printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
		goto err;
	}
	musb->tusb_revision = tusb_get_revision(musb);
	tusb_print_revision(musb);
	if (musb->tusb_revision < 2) {
		printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
				musb->tusb_revision);
		goto err;
	}
	/* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
	 * NOR FLASH interface is used */
	musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
	/* Select PHY free running 60MHz as a system clock */
	tusb_set_clock_source(musb, 1);
	/* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
	 * power saving, enable VBus detect and session end comparators,
	 * enable IDpullup, enable VBus charging */
	musb_writel(tbase, TUSB_PRCM_MNGMT,
		TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
		TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
		TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
		TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
		TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
	tusb_setup_cpu_interface(musb);
	/* simplify: always sense/pullup ID pins, as if in OTG mode */
	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
	musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&musb->lock, flags);
	/* power the chip back down on failure */
	gpiod_set_value(glue->enable, 0);
	msleep(10);
	return -ENODEV;
}
/* Platform init hook: grab the USB2 phy, map the async/sync memory
 * windows, bring the chip up via tusb_musb_start(), and install the ISR,
 * phy power callback and idle timer. On any failure the sync mapping and
 * phy reference are released. Returns 0 or a negative errno. */
static int tusb_musb_init(struct musb *musb)
{
	struct platform_device *pdev;
	struct resource *mem;
	void __iomem *sync = NULL;
	int ret;
	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(musb->xceiv))
		return -EPROBE_DEFER;
	pdev = to_platform_device(musb->controller);
	/* dma address for async dma */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		pr_debug("no async dma resource?\n");
		ret = -ENODEV;
		goto done;
	}
	musb->async = mem->start;
	/* dma address for sync dma */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!mem) {
		pr_debug("no sync dma resource?\n");
		ret = -ENODEV;
		goto done;
	}
	musb->sync = mem->start;
	sync = ioremap(mem->start, resource_size(mem));
	if (!sync) {
		pr_debug("ioremap for sync failed\n");
		ret = -ENOMEM;
		goto done;
	}
	musb->sync_va = sync;
	/* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
	 * FIFOs at 0x600, TUSB at 0x800
	 */
	musb->mregs += TUSB_BASE_OFFSET;
	ret = tusb_musb_start(musb);
	if (ret) {
		printk(KERN_ERR "Could not start tusb6010 (%d)\n",
				ret);
		goto done;
	}
	musb->isr = tusb_musb_interrupt;
	musb->xceiv->set_power = tusb_draw_power;
	the_musb = musb;
	timer_setup(&musb->dev_timer, musb_do_idle, 0);
done:
	/* shared error cleanup: unmap and drop the phy reference */
	if (ret < 0) {
		if (sync)
			iounmap(sync);
		usb_put_phy(musb->xceiv);
	}
	return ret;
}
/* Platform exit hook: stop the idle timer, power the chip down, and
 * release the sync mapping and phy reference taken in tusb_musb_init(). */
static int tusb_musb_exit(struct musb *musb)
{
	struct tusb6010_glue *glue = dev_get_drvdata(musb->controller->parent);
	del_timer_sync(&musb->dev_timer);
	the_musb = NULL;
	gpiod_set_value(glue->enable, 0);
	msleep(10);
	iounmap(musb->sync_va);
	usb_put_phy(musb->xceiv);
	return 0;
}
/* musb core callbacks for the TUSB6010 glue */
static const struct musb_platform_ops tusb_ops = {
	.quirks		= MUSB_DMA_TUSB_OMAP | MUSB_IN_TUSB |
			  MUSB_G_NO_SKB_RESERVE,
	.init		= tusb_musb_init,
	.exit		= tusb_musb_exit,
	.ep_offset	= tusb_ep_offset,
	.ep_select	= tusb_ep_select,
	.fifo_offset	= tusb_fifo_offset,
	.readb		= tusb_readb,
	.writeb		= tusb_writeb,
	.read_fifo	= tusb_read_fifo,
	.write_fifo	= tusb_write_fifo,
#ifdef CONFIG_USB_TUSB_OMAP_DMA
	.dma_init	= tusb_dma_controller_create,
	.dma_exit	= tusb_dma_controller_destroy,
#endif
	.enable		= tusb_musb_enable,
	.disable	= tusb_musb_disable,
	.set_mode	= tusb_musb_set_mode,
	.try_idle	= tusb_musb_try_idle,
	.vbus_status	= tusb_musb_vbus_status,
	.set_vbus	= tusb_musb_set_vbus,
};
/* Template for the musb-hdrc child device registered by tusb_probe() */
static const struct platform_device_info tusb_dev_info = {
	.name		= "musb-hdrc",
	.id		= PLATFORM_DEVID_AUTO,
	.dma_mask	= DMA_BIT_MASK(32),
};
/* Probe the TUSB6010 glue: obtain the enable/INT GPIOs, register a NOP
 * phy, and create the musb-hdrc child device with the parent's memory
 * resources plus the INT-GPIO interrupt. Returns 0 or a negative errno. */
static int tusb_probe(struct platform_device *pdev)
{
	struct resource musb_resources[3];
	struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct platform_device *musb;
	struct tusb6010_glue *glue;
	struct platform_device_info pinfo;
	int ret;
	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;
	glue->dev = &pdev->dev;
	glue->enable = devm_gpiod_get(glue->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(glue->enable))
		return dev_err_probe(glue->dev, PTR_ERR(glue->enable),
				     "could not obtain power on/off GPIO\n");
	glue->intpin = devm_gpiod_get(glue->dev, "int", GPIOD_IN);
	if (IS_ERR(glue->intpin))
		return dev_err_probe(glue->dev, PTR_ERR(glue->intpin),
				     "could not obtain INT GPIO\n");
	pdata->platform_ops = &tusb_ops;
	/* Keep the phy device so tusb_remove() can unregister it.
	 * Previously the return value was dropped, leaving glue->phy NULL
	 * and leaking the phy platform device on driver unbind. */
	glue->phy = usb_phy_generic_register();
	if (IS_ERR(glue->phy))
		return dev_err_probe(glue->dev, PTR_ERR(glue->phy),
				     "could not register usb_phy\n");
	platform_set_drvdata(pdev, glue);
	/* child gets the parent's two memory windows plus the GPIO irq */
	memset(musb_resources, 0, sizeof(musb_resources));
	musb_resources[0].name = pdev->resource[0].name;
	musb_resources[0].start = pdev->resource[0].start;
	musb_resources[0].end = pdev->resource[0].end;
	musb_resources[0].flags = pdev->resource[0].flags;
	musb_resources[1].name = pdev->resource[1].name;
	musb_resources[1].start = pdev->resource[1].start;
	musb_resources[1].end = pdev->resource[1].end;
	musb_resources[1].flags = pdev->resource[1].flags;
	musb_resources[2] = DEFINE_RES_IRQ_NAMED(gpiod_to_irq(glue->intpin), "mc");
	pinfo = tusb_dev_info;
	pinfo.parent = &pdev->dev;
	pinfo.res = musb_resources;
	pinfo.num_res = ARRAY_SIZE(musb_resources);
	pinfo.data = pdata;
	pinfo.size_data = sizeof(*pdata);
	glue->musb = musb = platform_device_register_full(&pinfo);
	if (IS_ERR(musb)) {
		ret = PTR_ERR(musb);
		dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
		/* undo the phy registration done above */
		usb_phy_generic_unregister(glue->phy);
		return ret;
	}
	return 0;
}
/* Unbind: tear down the musb-hdrc child and the NOP phy. */
static void tusb_remove(struct platform_device *pdev)
{
	struct tusb6010_glue *glue = platform_get_drvdata(pdev);
	platform_device_unregister(glue->musb);
	usb_phy_generic_unregister(glue->phy);
}
static struct platform_driver tusb_driver = {
	.probe		= tusb_probe,
	.remove_new	= tusb_remove,
	.driver		= {
		.name	= "musb-tusb",
	},
};
MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
module_platform_driver(tusb_driver);
| linux-master | drivers/usb/musb/tusb6010.c |
// SPDX-License-Identifier: GPL-2.0
/*
* musb_trace.c - MUSB Controller Trace Support
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Bin Liu <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "musb_trace.h"
/* printf-style debug helper: packages the varargs into a va_format and
 * emits them through the musb_log tracepoint. */
void musb_dbg(struct musb *musb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	trace_musb_log(musb, &vaf);
	va_end(args);
}
| linux-master | drivers/usb/musb/musb_trace.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 ST-Ericsson AB
* Mian Yousaf Kaukab <[email protected]>
*
* Based on omap2430.c
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/musb-ux500.h>
#include "musb_core.h"
/* Fixed controller configuration for the ux500 MUSB instance */
static const struct musb_hdrc_config ux500_musb_hdrc_config = {
	.multipoint	= true,
	.dyn_fifo	= true,
	.num_eps	= 16,
	.ram_bits	= 16,
};
/* Per-device glue state tying the parent device, the musb-hdrc child
 * and the controller clock together */
struct ux500_glue {
	struct device		*dev;
	struct platform_device	*musb;
	struct clk		*clk;
};
/* Fetch the struct musb * stored as drvdata of the musb-hdrc child */
#define glue_to_musb(g)	platform_get_drvdata(g->musb)
/* Drive (is_on != 0) or drop VBUS by toggling the MUSB session bit,
 * updating the OTG state machine to match. Sleeps up to ~1s waiting for
 * the core to become A-device, and ~200ms for VBUS discharge on turn-off,
 * so this must not be called from atomic context. */
static void ux500_musb_set_vbus(struct musb *musb, int is_on)
{
	u8 devctl;
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	/* HDRC controls CPEN, but beware current surges during device
	 * connect. They can trigger transient overcurrent conditions
	 * that must be ignored.
	 */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	if (is_on) {
		if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
			/* start the session */
			devctl |= MUSB_DEVCTL_SESSION;
			musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
			/*
			 * Wait for the musb to set as A device to enable the
			 * VBUS
			 */
			while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
				if (time_after(jiffies, timeout)) {
					/* fix: terminate the log line with \n
					 * per kernel printk convention */
					dev_err(musb->controller,
						"configured as A device timeout\n");
					break;
				}
			}
		} else {
			musb->is_active = 1;
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
			devctl |= MUSB_DEVCTL_SESSION;
			MUSB_HST_MODE(musb);
		}
	} else {
		musb->is_active = 0;
		/* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and jumping
		 * right to B_IDLE...
		 */
		devctl &= ~MUSB_DEVCTL_SESSION;
		MUSB_DEV_MODE(musb);
	}
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
	/*
	 * Devctl values will be updated after vbus goes below
	 * session_valid. The time taken depends on the capacitance
	 * on VBUS line. The max discharge time can be up to 1 sec
	 * as per the spec. Typically on our platform, it is 200ms
	 */
	if (!is_on)
		mdelay(200);
	dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
		usb_otg_state_string(musb->xceiv->otg->state),
		musb_readb(musb->mregs, MUSB_DEVCTL));
}
/* Notifier from the ux500 phy: translate ID/VBUS cable events into
 * VBUS on/off and OTG state updates. Returns NOTIFY_OK for handled
 * events, NOTIFY_DONE otherwise. */
static int musb_otg_notifications(struct notifier_block *nb,
		unsigned long event, void *unused)
{
	struct musb *musb = container_of(nb, struct musb, nb);
	dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n",
			event, usb_otg_state_string(musb->xceiv->otg->state));
	switch (event) {
	case UX500_MUSB_ID:
		dev_dbg(musb->controller, "ID GND\n");
		/* ID grounded: we are the A-device, drive VBUS */
		ux500_musb_set_vbus(musb, 1);
		break;
	case UX500_MUSB_VBUS:
		dev_dbg(musb->controller, "VBUS Connect\n");
		break;
	case UX500_MUSB_NONE:
		dev_dbg(musb->controller, "VBUS Disconnect\n");
		if (is_host_active(musb))
			ux500_musb_set_vbus(musb, 0);
		else
			musb->xceiv->otg->state = OTG_STATE_B_IDLE;
		break;
	default:
		dev_dbg(musb->controller, "ID float\n");
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
/* IRQ handler: latch the three MUSB interrupt status registers and let
 * the core dispatch if any source is pending. */
static irqreturn_t ux500_musb_interrupt(int irq, void *__hci)
{
	unsigned long flags;
	irqreturn_t retval = IRQ_NONE;
	struct musb *musb = __hci;
	spin_lock_irqsave(&musb->lock, flags);
	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
	if (musb->int_usb || musb->int_tx || musb->int_rx)
		retval = musb_interrupt(musb);
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
/* Platform init hook: take a reference on the USB2 phy, register the
 * OTG cable-event notifier and install the ISR.
 * Returns 0, -EPROBE_DEFER when no phy is available yet, or the
 * notifier-registration error. */
static int ux500_musb_init(struct musb *musb)
{
	int status;
	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(musb->xceiv)) {
		pr_err("HS USB OTG: no transceiver configured\n");
		return -EPROBE_DEFER;
	}
	musb->nb.notifier_call = musb_otg_notifications;
	status = usb_register_notifier(musb->xceiv, &musb->nb);
	if (status < 0) {
		dev_dbg(musb->controller, "notification register failed\n");
		/* fix: drop the phy reference taken above; previously this
		 * path leaked it (no matching usb_put_phy) */
		usb_put_phy(musb->xceiv);
		return status;
	}
	musb->isr = ux500_musb_interrupt;
	return 0;
}
/* Platform exit hook: undo ux500_musb_init() — unregister the notifier
 * and drop the phy reference. */
static int ux500_musb_exit(struct musb *musb)
{
	usb_unregister_notifier(musb->xceiv, &musb->nb);
	usb_put_phy(musb->xceiv);
	return 0;
}
/* musb core callbacks for the ux500 glue */
static const struct musb_platform_ops ux500_ops = {
	.quirks		= MUSB_DMA_UX500 | MUSB_INDEXED_EP,
#ifdef CONFIG_USB_UX500_DMA
	.dma_init	= ux500_dma_controller_create,
	.dma_exit	= ux500_dma_controller_destroy,
#endif
	.init		= ux500_musb_init,
	.exit		= ux500_musb_exit,
	.fifo_mode	= 5,
	.set_vbus	= ux500_musb_set_vbus,
};
/* Build platform data from the device tree: allocate it devm-managed and
 * map the "dr_mode" property onto the MUSB mode enum.
 * Returns NULL on allocation failure or when "dr_mode" is absent. */
static struct musb_hdrc_platform_data *
ux500_of_probe(struct platform_device *pdev, struct device_node *np)
{
	struct musb_hdrc_platform_data *pdata;
	const char *mode;
	int len;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	mode = of_get_property(np, "dr_mode", &len);
	if (!mode) {
		dev_err(&pdev->dev, "No 'dr_mode' property found\n");
		return NULL;
	}

	/* the three recognized values are mutually exclusive */
	if (len > 0) {
		if (!strcmp(mode, "host"))
			pdata->mode = MUSB_HOST;
		else if (!strcmp(mode, "otg"))
			pdata->mode = MUSB_OTG;
		else if (!strcmp(mode, "peripheral"))
			pdata->mode = MUSB_PERIPHERAL;
	}

	return pdata;
}
/* Probe the ux500 glue: resolve platform data (board file or DT), enable
 * the controller clock, and create the musb-hdrc child device carrying
 * the parent's resources. Uses goto-based cleanup: err2 undoes the clock,
 * err1 drops the allocated child, err0 just returns. */
static int ux500_probe(struct platform_device *pdev)
{
	struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct platform_device *musb;
	struct ux500_glue *glue;
	struct clk *clk;
	int ret = -ENOMEM;
	if (!pdata) {
		if (np) {
			pdata = ux500_of_probe(pdev, np);
			if (!pdata)
				goto err0;
			pdev->dev.platform_data = pdata;
		} else {
			dev_err(&pdev->dev, "no pdata or device tree found\n");
			goto err0;
		}
	}
	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		goto err0;
	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
	if (!musb) {
		dev_err(&pdev->dev, "failed to allocate musb device\n");
		goto err0;
	}
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err1;
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto err1;
	}
	/* child inherits DMA settings and OF node from the glue device */
	musb->dev.parent = &pdev->dev;
	musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
	device_set_of_node_from_dev(&musb->dev, &pdev->dev);
	glue->dev = &pdev->dev;
	glue->musb = musb;
	glue->clk = clk;
	pdata->platform_ops = &ux500_ops;
	pdata->config = &ux500_musb_hdrc_config;
	platform_set_drvdata(pdev, glue);
	ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
	if (ret) {
		dev_err(&pdev->dev, "failed to add resources\n");
		goto err2;
	}
	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform_data\n");
		goto err2;
	}
	ret = platform_device_add(musb);
	if (ret) {
		dev_err(&pdev->dev, "failed to register musb device\n");
		goto err2;
	}
	return 0;
err2:
	clk_disable_unprepare(clk);
err1:
	platform_device_put(musb);
err0:
	return ret;
}
/* Tear down the child MUSB core device, then gate its clock. */
static void ux500_remove(struct platform_device *pdev)
{
	struct ux500_glue *glue;

	glue = platform_get_drvdata(pdev);
	platform_device_unregister(glue->musb);
	clk_disable_unprepare(glue->clk);
}
#ifdef CONFIG_PM_SLEEP
/* PM sleep hook: suspend the PHY (if the core is up) and gate the clock. */
static int ux500_suspend(struct device *dev)
{
	struct ux500_glue *glue = dev_get_drvdata(dev);
	struct musb *musb;

	musb = glue_to_musb(glue);
	if (musb)
		usb_phy_set_suspend(musb->xceiv, 1);

	clk_disable_unprepare(glue->clk);
	return 0;
}
/* PM resume hook: ungate the clock, then wake the PHY if the core is up. */
static int ux500_resume(struct device *dev)
{
	struct ux500_glue *glue = dev_get_drvdata(dev);
	struct musb *musb;
	int ret;

	ret = clk_prepare_enable(glue->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

	musb = glue_to_musb(glue);
	if (musb)
		usb_phy_set_suspend(musb->xceiv, 0);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(ux500_pm_ops, ux500_suspend, ux500_resume);
static const struct of_device_id ux500_match[] = {
{ .compatible = "stericsson,db8500-musb", },
{}
};
MODULE_DEVICE_TABLE(of, ux500_match);
static struct platform_driver ux500_driver = {
.probe = ux500_probe,
.remove_new = ux500_remove,
.driver = {
.name = "musb-ux500",
.pm = &ux500_pm_ops,
.of_match_table = ux500_match,
},
};
MODULE_DESCRIPTION("UX500 MUSB Glue Layer");
MODULE_AUTHOR("Mian Yousaf Kaukab <[email protected]>");
MODULE_LICENSE("GPL v2");
module_platform_driver(ux500_driver);
| linux-master | drivers/usb/musb/ux500.c |
// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands
* Copyright (C) 2004, David Woodhouse
*
* Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
******************************************************************************/
#include <asm/page.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/workqueue.h>
#include "usbatm.h"
#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <[email protected]>"
#define DRIVER_DESC "Alcatel SpeedTouch USB driver"
static const char speedtch_driver_name[] = "speedtch";
#define CTRL_TIMEOUT 2000 /* milliseconds */
#define DATA_TIMEOUT 2000 /* milliseconds */
#define OFFSET_7 0 /* size 1 */
#define OFFSET_b 1 /* size 8 */
#define OFFSET_d 9 /* size 4 */
#define OFFSET_e 13 /* size 1 */
#define OFFSET_f 14 /* size 1 */
#define SIZE_7 1
#define SIZE_b 8
#define SIZE_d 4
#define SIZE_e 1
#define SIZE_f 1
#define MIN_POLL_DELAY 5000 /* milliseconds */
#define MAX_POLL_DELAY 60000 /* milliseconds */
#define RESUBMIT_DELAY 1000 /* milliseconds */
#define DEFAULT_BULK_ALTSETTING 1
#define DEFAULT_ISOC_ALTSETTING 3
#define DEFAULT_DL_512_FIRST 0
#define DEFAULT_ENABLE_ISOC 0
#define DEFAULT_SW_BUFFERING 0
static unsigned int altsetting = 0; /* zero means: use the default */
static bool dl_512_first = DEFAULT_DL_512_FIRST;
static bool enable_isoc = DEFAULT_ENABLE_ISOC;
static bool sw_buffering = DEFAULT_SW_BUFFERING;
#define DEFAULT_B_MAX_DSL 8128
#define DEFAULT_MODEM_MODE 11
#define MODEM_OPTION_LENGTH 16
static const unsigned char DEFAULT_MODEM_OPTION[MODEM_OPTION_LENGTH] = {
0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static unsigned int BMaxDSL = DEFAULT_B_MAX_DSL;
static unsigned char ModemMode = DEFAULT_MODEM_MODE;
static unsigned char ModemOption[MODEM_OPTION_LENGTH];
static unsigned int num_ModemOption;
module_param(altsetting, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(altsetting,
"Alternative setting for data interface (bulk_default: "
__MODULE_STRING(DEFAULT_BULK_ALTSETTING) "; isoc_default: "
__MODULE_STRING(DEFAULT_ISOC_ALTSETTING) ")");
module_param(dl_512_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dl_512_first,
"Read 512 bytes before sending firmware (default: "
__MODULE_STRING(DEFAULT_DL_512_FIRST) ")");
module_param(enable_isoc, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_isoc,
"Use isochronous transfers if available (default: "
__MODULE_STRING(DEFAULT_ENABLE_ISOC) ")");
module_param(sw_buffering, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sw_buffering,
"Enable software buffering (default: "
__MODULE_STRING(DEFAULT_SW_BUFFERING) ")");
module_param(BMaxDSL, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(BMaxDSL,
"default: " __MODULE_STRING(DEFAULT_B_MAX_DSL));
module_param(ModemMode, byte, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ModemMode,
"default: " __MODULE_STRING(DEFAULT_MODEM_MODE));
module_param_array(ModemOption, byte, &num_ModemOption, S_IRUGO);
MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");
#define INTERFACE_DATA 1
#define ENDPOINT_INT 0x81
#define ENDPOINT_BULK_DATA 0x07
#define ENDPOINT_ISOC_DATA 0x07
#define ENDPOINT_FIRMWARE 0x05
/*
 * Per-instance snapshot of the module parameters, taken at bind time so
 * later writes to the (S_IWUSR) parameters cannot affect a live modem.
 * The values are sent to the device in speedtch_test_sequence().
 */
struct speedtch_params {
	unsigned int altsetting;	/* data interface altsetting in use */
	unsigned int BMaxDSL;		/* 16-bit value sent in URBext3 */
	unsigned char ModemMode;	/* mode byte sent in URBext1 */
	unsigned char ModemOption[MODEM_OPTION_LENGTH];	/* block sent in URBext2 */
};
/* Driver state attached to one SpeedTouch modem (usbatm->driver_data). */
struct speedtch_instance_data {
	struct usbatm_data *usbatm;	/* owning usbatm core instance */
	struct speedtch_params params; /* set in probe, constant afterwards */
	struct timer_list status_check_timer;	/* periodic line-status poll */
	struct work_struct status_check_work;	/* does the actual status reads */
	unsigned char last_status;	/* last line state seen (0xff = none yet) */
	int poll_delay; /* milliseconds */
	struct timer_list resubmit_timer;	/* retries failed int-URB submission */
	struct urb *int_urb;	/* interrupt endpoint URB; NULL signals shutdown */
	unsigned char int_data[16];	/* buffer for interrupt packets */
	unsigned char scratch_buffer[16];	/* shared buffer for control reads */
};
/***************
** firmware **
***************/
/* Enable or disable the modem's software buffering via vendor request 0x32. */
static void speedtch_set_swbuff(struct speedtch_instance_data *instance, int state)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	const char *verb = state ? "En" : "Dis";
	int ret;

	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x32, 0x40, state ? 0x01 : 0x00, 0x00,
			      NULL, 0, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm,
			 "%sabling SW buffering: usb_control_msg returned %d\n",
			 verb, ret);
	else
		usb_dbg(usbatm, "speedtch_set_swbuff: %sbled SW buffering\n", verb);
}
/*
 * Replay the initialisation control-transfer sequence captured from the
 * vendor driver (URB numbers refer to that capture).  The exact requests
 * and payloads are magic; failures are logged but not treated as fatal.
 */
static void speedtch_test_sequence(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;
	/* URB 147 */
	buf[0] = 0x1c;
	buf[1] = 0x50;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x0b, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB147: %d\n", __func__, ret);
	/* URB 148 */
	buf[0] = 0x32;
	buf[1] = 0x00;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x02, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB148: %d\n", __func__, ret);
	/* URB 149 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x03, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB149: %d\n", __func__, ret);
	/* URB 150 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x04, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB150: %d\n", __func__, ret);
	/* Extra initialisation in recent drivers - gives higher speeds */
	/* URBext1: send the snapshotted ModemMode byte */
	buf[0] = instance->params.ModemMode;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x11, 0x00, buf, 1, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext1: %d\n", __func__, ret);
	/* URBext2: send the 16-byte ModemOption block */
	/* This seems to be the one which actually triggers the higher sync
	   rate -- it does require the new firmware too, although it works OK
	   with older firmware */
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x14, 0x00,
			      instance->params.ModemOption,
			      MODEM_OPTION_LENGTH, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext2: %d\n", __func__, ret);
	/* URBext3: send BMaxDSL as a little-endian 16-bit value */
	buf[0] = instance->params.BMaxDSL & 0xff;
	buf[1] = instance->params.BMaxDSL >> 8;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x12, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext3: %d\n", __func__, ret);
}
/*
 * speedtch_upload_firmware - push the two firmware stages into the modem
 * @instance: driver instance (supplies the usbatm and usb devices)
 * @fw1: stage 1 firmware image
 * @fw2: stage 2 firmware image
 *
 * Streams both images to ENDPOINT_FIRMWARE in page-sized chunks, with the
 * intermediate reads the modem expects between stages, then selects the
 * data altsetting and runs the magic initialisation sequence.
 *
 * Returns 0 on success or a negative errno.
 */
static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
				    const struct firmware *fw1,
				    const struct firmware *fw2)
{
	unsigned char *buffer;
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	int actual_length;
	int ret = 0;
	int offset;

	usb_dbg(usbatm, "%s entered\n", __func__);

	/* one page of scratch space for staging each bulk transfer */
	buffer = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__);
		goto out;
	}

	if (!usb_ifnum_to_if(usb_dev, 2)) {
		ret = -ENODEV;
		usb_dbg(usbatm, "%s: interface not found!\n", __func__);
		goto out_free;
	}

	/* URB 7 */
	if (dl_512_first) {	/* some modems need a read before writing the firmware */
		ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, 0x200, &actual_length, 2000);
		if (ret < 0 && ret != -ETIMEDOUT)
			usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret);
		else
			/* report the byte count, not the return code:
			 * usb_bulk_msg() returns 0 on success */
			usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, actual_length);
	}

	/* URB 8 : both leds are static green */
	for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
		memcpy(buffer, fw1->data + offset, thislen);
		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);
		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
	}
	/* log once for the whole image (it claims the full size), matching
	 * the BLOCK3 handling below; previously this fired per chunk */
	usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size);

	/* USB led blinking green, ADSL led off */
	/* URB 11 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);
	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}
	usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length);

	/* URBs 12 to 139 - USB led blinking green, ADSL led off */
	for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
		memcpy(buffer, fw2->data + offset, thislen);
		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);
		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
	}
	usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size);

	/* USB led static green, ADSL led static red */
	/* URB 142 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);
	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}

	/* success */
	usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length);

	/* Delay to allow firmware to start up. We can do this here
	   because we're in our own kernel thread anyway. */
	msleep_interruptible(1000);

	if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
		usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret);
		goto out_free;
	}

	/* Enable software buffering, if requested */
	if (sw_buffering)
		speedtch_set_swbuff(instance, 1);

	/* Magic spell; don't ask us what this does */
	speedtch_test_sequence(instance);

	ret = 0;

out_free:
	free_page((unsigned long)buffer);
out:
	return ret;
}
/*
 * Locate the stage-@phase firmware image for this modem revision, trying
 * progressively less specific file names:
 *   speedtch-<phase>.bin.<major>.<minor>, speedtch-<phase>.bin.<major>,
 *   then plain speedtch-<phase>.bin.
 * On success *fw_p holds the image (caller must release_firmware() it)
 * and 0 is returned; -ENOENT if no candidate exists.
 */
static int speedtch_find_firmware(struct usbatm_data *usbatm, struct usb_interface *intf,
				  int phase, const struct firmware **fw_p)
{
	struct device *dev = &intf->dev;
	/* hardware revision is encoded in the device descriptor's bcdDevice */
	const u16 bcdDevice = le16_to_cpu(interface_to_usbdev(intf)->descriptor.bcdDevice);
	const u8 major_revision = bcdDevice >> 8;
	const u8 minor_revision = bcdDevice & 0xff;
	char buf[24];	/* large enough for the longest name built below */
	sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
	usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
	if (request_firmware(fw_p, buf, dev)) {
		sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
		usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
		if (request_firmware(fw_p, buf, dev)) {
			sprintf(buf, "speedtch-%d.bin", phase);
			usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
			if (request_firmware(fw_p, buf, dev)) {
				usb_err(usbatm, "%s: no stage %d firmware found!\n", __func__, phase);
				return -ENOENT;
			}
		}
	}
	usb_info(usbatm, "found stage %d firmware %s\n", phase, buf);
	return 0;
}
/*
 * usbatm heavy_init callback: fetch both firmware stages and upload them
 * to the modem.  Returns 0 on success or a negative errno.
 */
static int speedtch_heavy_init(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	struct speedtch_instance_data *instance = usbatm->driver_data;
	const struct firmware *fw1, *fw2;
	int ret;

	ret = speedtch_find_firmware(usbatm, intf, 1, &fw1);
	if (ret < 0)
		return ret;

	ret = speedtch_find_firmware(usbatm, intf, 2, &fw2);
	if (ret < 0) {
		release_firmware(fw1);
		return ret;
	}

	ret = speedtch_upload_firmware(instance, fw1, fw2);
	if (ret < 0)
		usb_err(usbatm, "%s: firmware upload failed (%d)!\n", __func__, ret);

	release_firmware(fw2);
	release_firmware(fw1);
	return ret;
}
/**********
** ATM **
**********/
/*
 * Read the modem's status words into instance->scratch_buffer.
 * Five vendor control reads (messages 7, B, D, E and F) fill disjoint
 * slices of the buffer, laid out by the OFFSET_*/SIZE_* constants above.
 * Returns 0 on success, or the first failing usb_control_msg() error.
 */
static int speedtch_read_status(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;
	memset(buf, 0, 16);
	/* MSG 7: line state byte (consumed by speedtch_check_status()) */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG 7 failed\n", __func__);
		return ret;
	}
	/* MSG B: two 32-bit words holding down/up speed */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG B failed\n", __func__);
		return ret;
	}
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG D failed\n", __func__);
		return ret;
	}
	/* note: messages E and F use request 0x01, not 0x12 */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG E failed\n", __func__);
		return ret;
	}
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG F failed\n", __func__);
		return ret;
	}
	return 0;
}
/*
 * Ask the modem to (re)start ADSL line synchronisation.
 * Returns the usb_control_msg() result: bytes read on success, else < 0.
 */
static int speedtch_start_synchro(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;

	atm_dbg(usbatm, "%s entered\n", __func__);

	buf[0] = 0;
	buf[1] = 0;

	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x04, 0x00,
			      buf, 2, CTRL_TIMEOUT);
	if (ret < 0) {
		atm_warn(usbatm, "failed to start ADSL synchronisation: %d\n", ret);
		return ret;
	}

	atm_dbg(usbatm, "%s: modem prodded. %d bytes returned: %02x %02x\n",
		__func__, ret, buf[0], buf[1]);
	return ret;
}
/*
 * Work item: read the modem status and propagate line-state changes to
 * the ATM layer.  poll_delay adapts: doubled (capped at MAX_POLL_DELAY)
 * on a failed read, halved (floored at MIN_POLL_DELAY) on success.
 */
static void speedtch_check_status(struct work_struct *work)
{
	struct speedtch_instance_data *instance =
		container_of(work, struct speedtch_instance_data,
			     status_check_work);
	struct usbatm_data *usbatm = instance->usbatm;
	struct atm_dev *atm_dev = usbatm->atm_dev;
	unsigned char *buf = instance->scratch_buffer;
	int down_speed, up_speed, ret;
	unsigned char status;
#ifdef VERBOSE_DEBUG
	atm_dbg(usbatm, "%s entered\n", __func__);
#endif
	ret = speedtch_read_status(instance);
	if (ret < 0) {
		atm_warn(usbatm, "error %d fetching device status\n", ret);
		/* back off exponentially on failure */
		instance->poll_delay = min(2 * instance->poll_delay, MAX_POLL_DELAY);
		return;
	}
	instance->poll_delay = max(instance->poll_delay / 2, MIN_POLL_DELAY);
	status = buf[OFFSET_7];
	/* act on changes; status 0 (line down) is always re-handled so the
	 * resync request below is repeated */
	if ((status != instance->last_status) || !status) {
		atm_dbg(usbatm, "%s: line state 0x%02x\n", __func__, status);
		switch (status) {
		case 0:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
			if (instance->last_status)
				atm_info(usbatm, "ADSL line is down\n");
			/* It may never resync again unless we ask it to... */
			ret = speedtch_start_synchro(instance);
			break;
		case 0x08:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
			atm_info(usbatm, "ADSL line is blocked?\n");
			break;
		case 0x10:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
			atm_info(usbatm, "ADSL line is synchronising\n");
			break;
		case 0x20:
			/* line up: MSG B holds two little-endian 32-bit rates */
			down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8)
				| (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24);
			up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8)
				| (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24);
			/* rates appear shifted into the high 16 bits on some
			 * firmware; undo that when the low halves are zero */
			if (!(down_speed & 0x0000ffff) && !(up_speed & 0x0000ffff)) {
				down_speed >>= 16;
				up_speed >>= 16;
			}
			/* kbit/s -> ATM cells/s (424 bits per 53-byte cell) */
			atm_dev->link_rate = down_speed * 1000 / 424;
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND);
			atm_info(usbatm,
				 "ADSL line is up (%d kb/s down | %d kb/s up)\n",
				 down_speed, up_speed);
			break;
		default:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
			atm_info(usbatm, "unknown line state %02x\n", status);
			break;
		}
		instance->last_status = status;
	}
}
/* Timer callback: queue a status check and re-arm unless polling gave up. */
static void speedtch_status_poll(struct timer_list *t)
{
	struct speedtch_instance_data *instance = from_timer(instance, t,
							     status_check_timer);

	schedule_work(&instance->status_check_work);

	/* The following check is racy, but the race is harmless */
	if (instance->poll_delay >= MAX_POLL_DELAY) {
		atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
		return;
	}

	mod_timer(&instance->status_check_timer,
		  jiffies + msecs_to_jiffies(instance->poll_delay));
}
/* Timer callback: retry submission of the interrupt URB after a failure. */
static void speedtch_resubmit_int(struct timer_list *t)
{
	struct speedtch_instance_data *instance = from_timer(instance, t,
							     resubmit_timer);
	struct urb *int_urb = instance->int_urb;
	int ret;

	atm_dbg(instance->usbatm, "%s entered\n", __func__);

	if (!int_urb)	/* NULL signals shutdown - do not reschedule */
		return;

	ret = usb_submit_urb(int_urb, GFP_ATOMIC);
	if (ret) {
		atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
		mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
		return;
	}

	schedule_work(&instance->status_check_work);
}
/*
 * Completion handler for the interrupt endpoint URB.
 * Recognises the 6-byte "line up"/"line down" magic packets, then
 * resubmits the URB; on any failure it falls back to the resubmit timer.
 * instance->int_urb == NULL means speedtch_atm_stop() is shutting us
 * down, in which case nothing is rescheduled.
 */
static void speedtch_handle_int(struct urb *int_urb)
{
	struct speedtch_instance_data *instance = int_urb->context;
	struct usbatm_data *usbatm = instance->usbatm;
	unsigned int count = int_urb->actual_length;
	int status = int_urb->status;
	int ret;
	/* The magic interrupt for "up state" */
	static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
	/* The magic interrupt for "down state" */
	static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
	atm_dbg(usbatm, "%s entered\n", __func__);
	if (status < 0) {
		atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status);
		goto fail;
	}
	if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
		/* line is up: stop the periodic poll */
		del_timer(&instance->status_check_timer);
		atm_info(usbatm, "DSL line goes up\n");
	} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
		atm_info(usbatm, "DSL line goes down\n");
	} else {
		int i;
		atm_dbg(usbatm, "%s: unknown interrupt packet of length %d:", __func__, count);
		for (i = 0; i < count; i++)
			printk(" %02x", instance->int_data[i]);
		printk("\n");
		goto fail;
	}
	/* re-read int_urb: it may have been cleared by atm_stop meanwhile */
	int_urb = instance->int_urb;
	if (int_urb) {
		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
		schedule_work(&instance->status_check_work);
		if (ret < 0) {
			atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
			goto fail;
		}
	}
	return;
fail:
	/* retry later via the resubmit timer (unless shutting down) */
	int_urb = instance->int_urb;
	if (int_urb)
		mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
}
/*
 * usbatm atm_start callback: set the ATM ESI from the USB serial number
 * (which encodes the MAC address), prod the modem to start synchronising,
 * submit the interrupt URB and arm the status-polling timer.
 * Always returns 0.
 */
static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
	struct usb_device *usb_dev = usbatm->usb_dev;
	struct speedtch_instance_data *instance = usbatm->driver_data;
	int i, ret;
	unsigned char mac_str[13];	/* 12 hex digits + NUL */
	atm_dbg(usbatm, "%s entered\n", __func__);
	/* Set MAC address, it is stored in the serial number */
	memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
	if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
		for (i = 0; i < 6; i++)
			atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
				hex_to_bin(mac_str[i * 2 + 1]);
	}
	/* Start modem synchronisation; the result is deliberately ignored -
	 * the status poll below retries synchronisation if the line is down */
	ret = speedtch_start_synchro(instance);
	/* Set up interrupt endpoint */
	if (instance->int_urb) {
		ret = usb_submit_urb(instance->int_urb, GFP_KERNEL);
		if (ret < 0) {
			/* Doesn't matter; we'll poll anyway */
			atm_dbg(usbatm, "%s: submission of interrupt URB failed (%d)!\n", __func__, ret);
			usb_free_urb(instance->int_urb);
			instance->int_urb = NULL;
		}
	}
	/* Start status polling */
	mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000));
	return 0;
}
/*
 * usbatm atm_stop callback: stop status polling, then shut down the
 * interrupt URB / resubmit-timer pair.  The kill/del steps are done twice
 * because each side can re-arm the other until int_urb is seen as NULL.
 */
static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
	struct speedtch_instance_data *instance = usbatm->driver_data;
	struct urb *int_urb = instance->int_urb;
	atm_dbg(usbatm, "%s entered\n", __func__);
	del_timer_sync(&instance->status_check_timer);
	/*
	 * Since resubmit_timer and int_urb can schedule themselves and
	 * each other, shutting them down correctly takes some care
	 */
	instance->int_urb = NULL; /* signal shutdown */
	mb();	/* make the NULL visible before killing the callbacks */
	usb_kill_urb(int_urb);
	del_timer_sync(&instance->resubmit_timer);
	/*
	 * At this point, speedtch_handle_int and speedtch_resubmit_int
	 * can run or be running, but instance->int_urb == NULL means that
	 * they will not reschedule
	 */
	usb_kill_urb(int_urb);
	del_timer_sync(&instance->resubmit_timer);
	usb_free_urb(int_urb);
	flush_work(&instance->status_check_work);
}
/* usb pre_reset hook: nothing to quiesce before a reset. */
static int speedtch_pre_reset(struct usb_interface *intf)
{
	return 0;
}
/* usb post_reset hook: no state to restore after a reset. */
static int speedtch_post_reset(struct usb_interface *intf)
{
	return 0;
}
/**********
** USB **
**********/
static const struct usb_device_id speedtch_usb_ids[] = {
{USB_DEVICE(0x06b9, 0x4061)},
{}
};
MODULE_DEVICE_TABLE(usb, speedtch_usb_ids);
static int speedtch_usb_probe(struct usb_interface *, const struct usb_device_id *);
static struct usb_driver speedtch_usb_driver = {
.name = speedtch_driver_name,
.probe = speedtch_usb_probe,
.disconnect = usbatm_usb_disconnect,
.pre_reset = speedtch_pre_reset,
.post_reset = speedtch_post_reset,
.id_table = speedtch_usb_ids
};
/*
 * Release the first @num_interfaces interfaces of @usb_dev, undoing the
 * usb_driver_claim_interface() calls made in speedtch_bind().
 */
static void speedtch_release_interfaces(struct usb_device *usb_dev,
					int num_interfaces)
{
	int i;

	for (i = 0; i < num_interfaces; i++) {
		struct usb_interface *cur_intf = usb_ifnum_to_if(usb_dev, i);

		if (!cur_intf)
			continue;

		usb_set_intfdata(cur_intf, NULL);
		usb_driver_release_interface(&speedtch_usb_driver, cur_intf);
	}
}
/*
 * usbatm bind callback: validate the device, claim all of its interfaces,
 * snapshot the module parameters, choose a data altsetting (isochronous
 * if requested and available, otherwise bulk), set up the interrupt URB
 * plus status work/timers, and decide whether heavy init (firmware
 * upload) is required.  Returns 0 on success or a negative errno.
 */
static int speedtch_bind(struct usbatm_data *usbatm,
			 struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usb_interface *cur_intf, *data_intf;
	struct speedtch_instance_data *instance;
	int ifnum = intf->altsetting->desc.bInterfaceNumber;
	int num_interfaces = usb_dev->actconfig->desc.bNumInterfaces;
	int i, ret;
	int use_isoc;
	usb_dbg(usbatm, "%s entered\n", __func__);
	/* sanity checks */
	if (usb_dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
		usb_err(usbatm, "%s: wrong device class %d\n", __func__, usb_dev->descriptor.bDeviceClass);
		return -ENODEV;
	}
	data_intf = usb_ifnum_to_if(usb_dev, INTERFACE_DATA);
	if (!data_intf) {
		usb_err(usbatm, "%s: data interface not found!\n", __func__);
		return -ENODEV;
	}
	/* claim all interfaces (the one we were probed with is already ours) */
	for (i = 0; i < num_interfaces; i++) {
		cur_intf = usb_ifnum_to_if(usb_dev, i);
		if ((i != ifnum) && cur_intf) {
			ret = usb_driver_claim_interface(&speedtch_usb_driver, cur_intf, usbatm);
			if (ret < 0) {
				usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, i, ret);
				/* release only the ones claimed so far */
				speedtch_release_interfaces(usb_dev, i);
				return ret;
			}
		}
	}
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		ret = -ENOMEM;
		goto fail_release;
	}
	instance->usbatm = usbatm;
	/* module parameters may change at any moment, so take a snapshot */
	instance->params.altsetting = altsetting;
	instance->params.BMaxDSL = BMaxDSL;
	instance->params.ModemMode = ModemMode;
	/* defaults first, then overlay however many bytes the user supplied */
	memcpy(instance->params.ModemOption, DEFAULT_MODEM_OPTION, MODEM_OPTION_LENGTH);
	memcpy(instance->params.ModemOption, ModemOption, num_ModemOption);
	use_isoc = enable_isoc;
	/* try the explicitly requested altsetting first */
	if (instance->params.altsetting)
		if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
			usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, instance->params.altsetting, ret);
			instance->params.altsetting = 0; /* fall back to default */
		}
	/* no (working) explicit altsetting: try the isochronous one */
	if (!instance->params.altsetting && use_isoc)
		if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_ISOC_ALTSETTING)) < 0) {
			usb_dbg(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_ISOC_ALTSETTING, ret);
			use_isoc = 0; /* fall back to bulk */
		}
	/* verify the current altsetting really has an isoc-in data endpoint */
	if (use_isoc) {
		const struct usb_host_interface *desc = data_intf->cur_altsetting;
		const __u8 target_address = USB_DIR_IN | usbatm->driver->isoc_in;
		use_isoc = 0; /* fall back to bulk if endpoint not found */
		for (i = 0; i < desc->desc.bNumEndpoints; i++) {
			const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
			if ((endpoint_desc->bEndpointAddress == target_address)) {
				use_isoc =
					usb_endpoint_xfer_isoc(endpoint_desc);
				break;
			}
		}
		if (!use_isoc)
			usb_info(usbatm, "isochronous transfer not supported - using bulk\n");
	}
	if (!use_isoc && !instance->params.altsetting)
		if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_BULK_ALTSETTING)) < 0) {
			usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_BULK_ALTSETTING, ret);
			goto fail_free;
		}
	/* record which altsetting we ended up with */
	if (!instance->params.altsetting)
		instance->params.altsetting = use_isoc ? DEFAULT_ISOC_ALTSETTING : DEFAULT_BULK_ALTSETTING;
	usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
	INIT_WORK(&instance->status_check_work, speedtch_check_status);
	timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
	instance->last_status = 0xff;	/* force the first state report */
	instance->poll_delay = MIN_POLL_DELAY;
	timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
	instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (instance->int_urb)
		usb_fill_int_urb(instance->int_urb, usb_dev,
				 usb_rcvintpipe(usb_dev, ENDPOINT_INT),
				 instance->int_data, sizeof(instance->int_data),
				 speedtch_handle_int, instance, 16);
	else
		usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__);
	/* check whether the modem already seems to be alive */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x07, 0x00,
			      instance->scratch_buffer + OFFSET_7, SIZE_7, 500);
	/* a successful status read means firmware is already running */
	usbatm->flags |= (ret == SIZE_7 ? UDSL_SKIP_HEAVY_INIT : 0);
	usb_dbg(usbatm, "%s: firmware %s loaded\n", __func__, usbatm->flags & UDSL_SKIP_HEAVY_INIT ? "already" : "not");
	/* reset the device so the firmware upload starts from a known state */
	if (!(usbatm->flags & UDSL_SKIP_HEAVY_INIT))
		if ((ret = usb_reset_device(usb_dev)) < 0) {
			usb_err(usbatm, "%s: device reset failed (%d)!\n", __func__, ret);
			goto fail_free;
		}
	usbatm->driver_data = instance;
	return 0;
fail_free:
	usb_free_urb(instance->int_urb);
	kfree(instance);
fail_release:
	speedtch_release_interfaces(usb_dev, num_interfaces);
	return ret;
}
/* Undo speedtch_bind(): drop all claimed interfaces and free instance data. */
static void speedtch_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	struct speedtch_instance_data *instance = usbatm->driver_data;
	struct usb_device *usb_dev = interface_to_usbdev(intf);

	usb_dbg(usbatm, "%s entered\n", __func__);

	speedtch_release_interfaces(usb_dev, usb_dev->actconfig->desc.bNumInterfaces);
	usb_free_urb(instance->int_urb);
	kfree(instance);
}
/***********
** init **
***********/
static struct usbatm_driver speedtch_usbatm_driver = {
.driver_name = speedtch_driver_name,
.bind = speedtch_bind,
.heavy_init = speedtch_heavy_init,
.unbind = speedtch_unbind,
.atm_start = speedtch_atm_start,
.atm_stop = speedtch_atm_stop,
.bulk_in = ENDPOINT_BULK_DATA,
.bulk_out = ENDPOINT_BULK_DATA,
.isoc_in = ENDPOINT_ISOC_DATA
};
static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver);
}
module_usb_driver(speedtch_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/atm/speedtch.c |
// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* cxacru.c - driver for USB ADSL modems based on
* Conexant AccessRunner chipset
*
* Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
* Copyright (C) 2009 Simon Arlott
******************************************************************************/
/*
* Credit is due for Josep Comas, who created the original patch to speedtch.c
* to support the different padding used by the AccessRunner (now generalized
* into usbatm), and the userspace firmware loading utility.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include "usbatm.h"
#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
static const char cxacru_driver_name[] = "cxacru";
#define CXACRU_EP_CMD 0x01 /* Bulk/interrupt in/out */
#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
/* Addresses */
#define PLLFCLK_ADDR 0x00350068
#define PLLBCLK_ADDR 0x0035006c
#define SDRAMEN_ADDR 0x00350010
#define FW_ADDR 0x00801000
#define BR_ADDR 0x00180600
#define SIG_ADDR 0x00180500
#define BR_STACK_ADDR 0x00187f10
/* Values */
#define SDRAM_ENA 0x1
#define CMD_TIMEOUT 2000 /* msecs */
#define POLL_INTERVAL 1 /* secs */
/* commands for interaction with the modem through the control channel before
* firmware is loaded */
enum cxacru_fw_request {
FW_CMD_ERR,
FW_GET_VER,
FW_READ_MEM,
FW_WRITE_MEM,
FW_RMW_MEM,
FW_CHECKSUM_MEM,
FW_GOTO_MEM,
};
/* commands for interaction with the modem through the control channel once
* firmware is loaded */
enum cxacru_cm_request {
CM_REQUEST_UNDEFINED = 0x80,
CM_REQUEST_TEST,
CM_REQUEST_CHIP_GET_MAC_ADDRESS,
CM_REQUEST_CHIP_GET_DP_VERSIONS,
CM_REQUEST_CHIP_ADSL_LINE_START,
CM_REQUEST_CHIP_ADSL_LINE_STOP,
CM_REQUEST_CHIP_ADSL_LINE_GET_STATUS,
CM_REQUEST_CHIP_ADSL_LINE_GET_SPEED,
CM_REQUEST_CARD_INFO_GET,
CM_REQUEST_CARD_DATA_GET,
CM_REQUEST_CARD_DATA_SET,
CM_REQUEST_COMMAND_HW_IO,
CM_REQUEST_INTERFACE_HW_IO,
CM_REQUEST_CARD_SERIAL_DATA_PATH_GET,
CM_REQUEST_CARD_SERIAL_DATA_PATH_SET,
CM_REQUEST_CARD_CONTROLLER_VERSION_GET,
CM_REQUEST_CARD_GET_STATUS,
CM_REQUEST_CARD_GET_MAC_ADDRESS,
CM_REQUEST_CARD_GET_DATA_LINK_STATUS,
CM_REQUEST_MAX,
};
/* commands for interaction with the flash memory
*
* read: response is the contents of the first 60 bytes of flash memory
* write: request contains the 60 bytes of data to write to flash memory
* response is the contents of the first 60 bytes of flash memory
*
* layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
* SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
* P: le16 USB Product ID
* V: le16 USB Vendor ID
* M: be48 MAC Address
* S: le16 ASCII Serial Number
*/
enum cxacru_cm_flash {
CM_FLASH_READ = 0xa1,
CM_FLASH_WRITE = 0xa2
};
/* reply codes to the commands above */
enum cxacru_cm_status {
CM_STATUS_UNDEFINED,
CM_STATUS_SUCCESS,
CM_STATUS_ERROR,
CM_STATUS_UNSUPPORTED,
CM_STATUS_UNIMPLEMENTED,
CM_STATUS_PARAMETER_ERROR,
CM_STATUS_DBG_LOOPBACK,
CM_STATUS_MAX,
};
/* indices into CARD_INFO_GET return array */
enum cxacru_info_idx {
CXINF_DOWNSTREAM_RATE,
CXINF_UPSTREAM_RATE,
CXINF_LINK_STATUS,
CXINF_LINE_STATUS,
CXINF_MAC_ADDRESS_HIGH,
CXINF_MAC_ADDRESS_LOW,
CXINF_UPSTREAM_SNR_MARGIN,
CXINF_DOWNSTREAM_SNR_MARGIN,
CXINF_UPSTREAM_ATTENUATION,
CXINF_DOWNSTREAM_ATTENUATION,
CXINF_TRANSMITTER_POWER,
CXINF_UPSTREAM_BITS_PER_FRAME,
CXINF_DOWNSTREAM_BITS_PER_FRAME,
CXINF_STARTUP_ATTEMPTS,
CXINF_UPSTREAM_CRC_ERRORS,
CXINF_DOWNSTREAM_CRC_ERRORS,
CXINF_UPSTREAM_FEC_ERRORS,
CXINF_DOWNSTREAM_FEC_ERRORS,
CXINF_UPSTREAM_HEC_ERRORS,
CXINF_DOWNSTREAM_HEC_ERRORS,
CXINF_LINE_STARTABLE,
CXINF_MODULATION,
CXINF_ADSL_HEADEND,
CXINF_ADSL_HEADEND_ENVIRONMENT,
CXINF_CONTROLLER_VERSION,
/* dunno what the missing two mean */
CXINF_MAX = 0x1c,
};
/* state machine for the periodic status poll; transitions are always made
 * under poll_state_serialize */
enum cxacru_poll_state {
	CXPOLL_STOPPING,	/* stop requested, poll work will stop itself */
	CXPOLL_STOPPED,		/* no poll work scheduled */
	CXPOLL_POLLING,		/* poll work runs periodically */
	CXPOLL_SHUTDOWN		/* unbind in progress; polling may never restart */
};
/* per-device parameters, selected via usb_device_id.driver_info */
struct cxacru_modem_type {
	u32 pll_f_clk;		/* written to PLLFCLK_ADDR before firmware load */
	u32 pll_b_clk;		/* written to PLLBCLK_ADDR before firmware load */
	int boot_rom_patch;	/* nonzero: also load cxacru-bp.bin and boot via BR_ADDR */
};
struct cxacru_data {
	struct usbatm_data *usbatm;			/* owning usbatm instance */
	const struct cxacru_modem_type *modem_type;	/* PLL values / boot ROM patch flag */
	int line_status;		/* last CXINF_LINE_STATUS seen by polling, -1 initially */
	struct mutex adsl_state_serialize;	/* serializes ADSL line start/stop */
	int adsl_status;		/* last CXINF_LINE_STARTABLE seen by polling, -1 initially */
	struct delayed_work poll_work;	/* periodic status poll (cxacru_poll_status) */
	u32 card_info[CXINF_MAX];	/* latest CARD_INFO_GET snapshot, exported via sysfs */
	struct mutex poll_state_serialize;	/* protects poll_state transitions */
	enum cxacru_poll_state poll_state;
	/* control handles */
	struct mutex cm_serialize;	/* one cxacru_cm() transaction at a time */
	u8 *rcv_buf;			/* one page, receive side of the command endpoint */
	u8 *snd_buf;			/* one page, send side of the command endpoint */
	struct urb *rcv_urb;
	struct urb *snd_urb;
	struct completion rcv_done;	/* signalled when rcv_urb completes */
	struct completion snd_done;	/* signalled when snd_urb completes */
};
static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
u8 *wdata, int wsize, u8 *rdata, int rsize);
static void cxacru_poll_status(struct work_struct *work);
/* Card info exported through sysfs */
/* read-only attribute with a hand-written show function (mac_address) */
#define CXACRU__ATTR_INIT(_name) \
static DEVICE_ATTR_RO(_name)
/* read-write command attribute (adsl_state) */
#define CXACRU_CMD_INIT(_name) \
static DEVICE_ATTR_RW(_name)
/* write-only attribute (adsl_config) */
#define CXACRU_SET_INIT(_name) \
static DEVICE_ATTR_WO(_name)
/* generate a show function that formats card_info[_value] with
 * cxacru_sysfs_showattr_<_type>() and declare the read-only attribute */
#define CXACRU_ATTR_INIT(_value, _type, _name) \
static ssize_t _name##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct cxacru_data *instance = to_usbatm_driver_data(\
			to_usb_interface(dev)); \
\
	if (instance == NULL) \
		return -ENODEV; \
\
	return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
} \
CXACRU__ATTR_INIT(_name)
/* CREATE/REMOVE expansions map every attribute onto
 * CXACRU_DEVICE_CREATE_FILE()/CXACRU_DEVICE_REMOVE_FILE() invocations */
#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
/* Format an unsigned card_info value as decimal plus newline. */
static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
{
	ssize_t written = sprintf(buf, "%u\n", value);

	return written;
}
/* Format a signed 8-bit card_info value as decimal plus newline. */
static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf)
{
	ssize_t written = sprintf(buf, "%d\n", value);

	return written;
}
/*
 * Format a value given in hundredths of a dB as a signed decimal
 * fraction, e.g. 123 -> "1.23", -5 -> "-0.05".
 */
static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
{
	if (value < 0) {
		value = -value;
		return snprintf(buf, PAGE_SIZE, "-%u.%02u\n",
						value / 100, value % 100);
	}
	return snprintf(buf, PAGE_SIZE, "%u.%02u\n",
					value / 100, value % 100);
}
/* Format 0/1 as "no"/"yes"; any other value falls back to raw decimal. */
static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
{
	static char *str[] = { "no", "yes" };

	if (value < ARRAY_SIZE(str))
		return sprintf(buf, "%s\n", str[value]);
	return sprintf(buf, "%u\n", value);
}
/* Translate CXINF_LINK_STATUS into text; unknown codes print as decimal. */
static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
{
	static char *str[] = { NULL, "not connected", "connected", "lost" };

	if (value < ARRAY_SIZE(str) && str[value] != NULL)
		return sprintf(buf, "%s\n", str[value]);
	return sprintf(buf, "%u\n", value);
}
/* Translate CXINF_LINE_STATUS into text; unknown codes print as decimal. */
static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
{
	static char *str[] = { "down", "attempting to activate",
		"training", "channel analysis", "exchange", "up",
		"waiting", "initialising"
	};

	if (value < ARRAY_SIZE(str))
		return sprintf(buf, "%s\n", str[value]);
	return sprintf(buf, "%u\n", value);
}
/* Translate CXINF_MODULATION into text; unknown codes print as decimal. */
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
	static char *str[] = {
			"",
			"ANSI T1.413",
			"ITU-T G.992.1 (G.DMT)",
			"ITU-T G.992.2 (G.LITE)"
	};

	if (value < ARRAY_SIZE(str))
		return sprintf(buf, "%s\n", str[value]);
	return sprintf(buf, "%u\n", value);
}
/*
* This could use MAC_ADDRESS_HIGH and MAC_ADDRESS_LOW, but since
* this data is already in atm_dev there's no point.
*
* MAC_ADDRESS_HIGH = 0x????5544
* MAC_ADDRESS_LOW = 0x33221100
* Where 00-55 are bytes 0-5 of the MAC.
*/
/* sysfs show: modem MAC address, taken from the ATM device's ESI */
static ssize_t mac_address_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));

	/* the ATM device may not exist yet (or anymore) */
	if (instance == NULL || instance->usbatm->atm_dev == NULL)
		return -ENODEV;

	return sprintf(buf, "%pM\n", instance->usbatm->atm_dev->esi);
}
/* sysfs show: CXINF_LINE_STARTABLE mapped as 0 -> "running", 1 -> "stopped";
 * other values are printed as raw decimal */
static ssize_t adsl_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	static char *str[] = { "running", "stopped" };
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	u32 value;

	if (instance == NULL)
		return -ENODEV;

	value = instance->card_info[CXINF_LINE_STARTABLE];
	if (unlikely(value >= ARRAY_SIZE(str)))
		return sprintf(buf, "%u\n", value);
	return sprintf(buf, "%s\n", str[value]);
}
/*
 * sysfs store: accepts "start", "stop", "restart" or "poll".
 * start/stop issue the corresponding CM_REQUEST_CHIP_ADSL_LINE_* command;
 * restart does stop, waits, then start.  start/poll (re)enable the status
 * poll worker, stop requests that it stop.  Requires CAP_NET_ADMIN.
 */
static ssize_t adsl_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	int ret;
	/* desired poll_state change; -1 means leave polling alone */
	int poll = -1;
	char str_cmd[8];
	int len = strlen(buf);

	if (!capable(CAP_NET_ADMIN))
		return -EACCES;

	/* extract the first whitespace-delimited word (max 7 chars) */
	ret = sscanf(buf, "%7s", str_cmd);
	if (ret != 1)
		return -EINVAL;
	ret = 0;

	if (instance == NULL)
		return -ENODEV;

	if (mutex_lock_interruptible(&instance->adsl_state_serialize))
		return -ERESTARTSYS;

	if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
		ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
		if (ret < 0) {
			atm_err(instance->usbatm, "change adsl state:"
				" CHIP_ADSL_LINE_STOP returned %d\n", ret);

			ret = -EIO;
		} else {
			ret = len;
			poll = CXPOLL_STOPPED;
		}
	}

	/* Line status is only updated every second
	 * and the device appears to only react to
	 * START/STOP every second too. Wait 1.5s to
	 * be sure that restart will have an effect. */
	if (!strcmp(str_cmd, "restart"))
		msleep(1500);

	if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
		ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
		if (ret < 0) {
			atm_err(instance->usbatm, "change adsl state:"
				" CHIP_ADSL_LINE_START returned %d\n", ret);

			ret = -EIO;
		} else {
			ret = len;
			poll = CXPOLL_POLLING;
		}
	}

	if (!strcmp(str_cmd, "poll")) {
		ret = len;
		poll = CXPOLL_POLLING;
	}

	/* ret untouched by any branch above means the command was unrecognised */
	if (ret == 0) {
		ret = -EINVAL;
		poll = -1;
	}

	if (poll == CXPOLL_POLLING) {
		mutex_lock(&instance->poll_state_serialize);
		switch (instance->poll_state) {
		case CXPOLL_STOPPED:
			/* start polling */
			instance->poll_state = CXPOLL_POLLING;
			break;

		case CXPOLL_STOPPING:
			/* abort stop request */
			instance->poll_state = CXPOLL_POLLING;
			fallthrough;
		case CXPOLL_POLLING:
		case CXPOLL_SHUTDOWN:
			/* don't start polling */
			poll = -1;
		}
		mutex_unlock(&instance->poll_state_serialize);
	} else if (poll == CXPOLL_STOPPED) {
		mutex_lock(&instance->poll_state_serialize);
		/* request stop */
		if (instance->poll_state == CXPOLL_POLLING)
			instance->poll_state = CXPOLL_STOPPING;
		mutex_unlock(&instance->poll_state_serialize);
	}

	mutex_unlock(&instance->adsl_state_serialize);

	/* run one poll immediately; it reschedules itself while polling */
	if (poll == CXPOLL_POLLING)
		cxacru_poll_status(&instance->poll_work.work);

	return ret;
}
/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */

/*
 * sysfs store: parse "index=value" hex pairs (index <= 0x7f) and send them
 * to the modem with CM_REQUEST_CARD_DATA_SET, batched CMD_MAX_CONFIG pairs
 * at a time.  Requires CAP_NET_ADMIN.
 */
static ssize_t adsl_config_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	int len = strlen(buf);
	int ret, pos, num;
	/* data[0] = pair count, then (index, value) little-endian pairs */
	__le32 data[CMD_PACKET_SIZE / 4];

	if (!capable(CAP_NET_ADMIN))
		return -EACCES;

	if (instance == NULL)
		return -ENODEV;

	pos = 0;
	num = 0;
	while (pos < len) {
		int tmp;
		u32 index;
		u32 value;

		ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
		if (ret < 2)
			return -EINVAL;
		if (index > 0x7f)
			return -EINVAL;
		/* %n result must stay within the remaining input */
		if (tmp < 0 || tmp > len - pos)
			return -EINVAL;
		pos += tmp;

		/* skip trailing newline */
		if (buf[pos] == '\n' && pos == len-1)
			pos++;

		data[num * 2 + 1] = cpu_to_le32(index);
		data[num * 2 + 2] = cpu_to_le32(value);
		num++;

		/* send config values when data buffer is full
		 * or no more data
		 */
		if (pos >= len || num >= CMD_MAX_CONFIG) {
			char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */

			data[0] = cpu_to_le32(num);
			ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
				(u8 *) data, 4 + num * 8, NULL, 0);
			if (ret < 0) {
				atm_err(instance->usbatm,
					"set card data returned %d\n", ret);
				return -EIO;
			}

			for (tmp = 0; tmp < num; tmp++)
				snprintf(log + tmp*12, 13, " %02x=%08x",
					le32_to_cpu(data[tmp * 2 + 1]),
					le32_to_cpu(data[tmp * 2 + 2]));
			atm_info(instance->usbatm, "config%s\n", log);
			num = 0;
		}
	}

	return len;
}
/*
* All device attributes are included in CXACRU_ALL_FILES
* so that the same list can be used multiple times:
* INIT (define the device attributes)
* CREATE (create all the device files)
* REMOVE (remove all the device files)
*
* With the last two being defined as needed in the functions
* they are used in before calling CXACRU_ALL_FILES()
*/
#define CXACRU_ALL_FILES(_action) \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_RATE,           u32,  downstream_rate); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_RATE,             u32,  upstream_rate); \
CXACRU_ATTR_##_action(CXINF_LINK_STATUS,               LINK, link_status); \
CXACRU_ATTR_##_action(CXINF_LINE_STATUS,               LINE, line_status); \
CXACRU__ATTR_##_action(                                      mac_address); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_SNR_MARGIN,       dB,   upstream_snr_margin); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_SNR_MARGIN,     dB,   downstream_snr_margin); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_ATTENUATION,      dB,   upstream_attenuation); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_ATTENUATION,    dB,   downstream_attenuation); \
CXACRU_ATTR_##_action(CXINF_TRANSMITTER_POWER,         s8,   transmitter_power); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_BITS_PER_FRAME,   u32,  upstream_bits_per_frame); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_BITS_PER_FRAME, u32,  downstream_bits_per_frame); \
CXACRU_ATTR_##_action(CXINF_STARTUP_ATTEMPTS,          u32,  startup_attempts); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_CRC_ERRORS,       u32,  upstream_crc_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_CRC_ERRORS,     u32,  downstream_crc_errors); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_FEC_ERRORS,       u32,  upstream_fec_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_FEC_ERRORS,     u32,  downstream_fec_errors); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_HEC_ERRORS,       u32,  upstream_hec_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_HEC_ERRORS,     u32,  downstream_hec_errors); \
CXACRU_ATTR_##_action(CXINF_LINE_STARTABLE,            bool, line_startable); \
CXACRU_ATTR_##_action(CXINF_MODULATION,                MODU, modulation); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND,              u32,  adsl_headend); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT,  u32,  adsl_headend_environment); \
CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION,        u32,  adsl_controller_version); \
CXACRU_CMD_##_action(                                        adsl_state); \
CXACRU_SET_##_action(                                        adsl_config);
/* expand the show/store functions and DEVICE_ATTR declarations */
CXACRU_ALL_FILES(INIT);
/* sysfs attribute group for the USB interface; entries are listed in the
 * reverse of the CXACRU_ALL_FILES() order above */
static struct attribute *cxacru_attrs[] = {
	&dev_attr_adsl_config.attr,
	&dev_attr_adsl_state.attr,
	&dev_attr_adsl_controller_version.attr,
	&dev_attr_adsl_headend_environment.attr,
	&dev_attr_adsl_headend.attr,
	&dev_attr_modulation.attr,
	&dev_attr_line_startable.attr,
	&dev_attr_downstream_hec_errors.attr,
	&dev_attr_upstream_hec_errors.attr,
	&dev_attr_downstream_fec_errors.attr,
	&dev_attr_upstream_fec_errors.attr,
	&dev_attr_downstream_crc_errors.attr,
	&dev_attr_upstream_crc_errors.attr,
	&dev_attr_startup_attempts.attr,
	&dev_attr_downstream_bits_per_frame.attr,
	&dev_attr_upstream_bits_per_frame.attr,
	&dev_attr_transmitter_power.attr,
	&dev_attr_downstream_attenuation.attr,
	&dev_attr_upstream_attenuation.attr,
	&dev_attr_downstream_snr_margin.attr,
	&dev_attr_upstream_snr_margin.attr,
	&dev_attr_mac_address.attr,
	&dev_attr_line_status.attr,
	&dev_attr_link_status.attr,
	&dev_attr_upstream_rate.attr,
	&dev_attr_downstream_rate.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cxacru);
/* the following three functions are stolen from drivers/usb/core/message.c */
/* URB completion handler: wake whoever is waiting on the completion
 * stashed in urb->context. */
static void cxacru_blocking_completion(struct urb *urb)
{
	struct completion *done = urb->context;

	complete(done);
}
/* on-stack timeout helper: the timer unlinks the URB when it fires */
struct cxacru_timer {
	struct timer_list timer;
	struct urb *urb;	/* URB to unlink on timeout */
};
/* timer callback: abort the in-flight URB so the blocked waiter wakes up */
static void cxacru_timeout_kill(struct timer_list *t)
{
	struct cxacru_timer *timer = from_timer(timer, t, timer);

	usb_unlink_urb(timer->urb);
}
/*
 * Wait for an already-submitted URB to complete, guarded by a CMD_TIMEOUT
 * timer that unlinks it on expiry (the unlink still completes the URB, so
 * the wait always finishes).  Returns urb->status; stores the transferred
 * byte count in *actual_length when non-NULL.
 */
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
				 int *actual_length)
{
	struct cxacru_timer timer = {
		.urb = urb,
	};

	timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
	mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
	wait_for_completion(done);
	del_timer_sync(&timer.timer);
	destroy_timer_on_stack(&timer.timer);

	if (actual_length)
		*actual_length = urb->actual_length;
	return urb->status; /* must read status after completion */
}
/*
 * Execute one control message transaction on the command endpoint.
 *
 * Wire format: both directions use CMD_PACKET_SIZE packets with a 4-byte
 * header (byte 0 = command code, byte 1 = cxacru_cm_status in replies)
 * followed by up to CMD_PACKET_SIZE - 4 payload bytes ("stride").
 *
 * @cm:    command code sent in every request packet
 * @wdata/@wsize: payload to send (may be NULL/0)
 * @rdata/@rsize: buffer for the reply payload (may be NULL/0)
 *
 * Returns the number of payload bytes gathered into @rdata (rounded up to
 * a multiple of stride) or a negative error.  Serialized by cm_serialize.
 */
static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
		     u8 *wdata, int wsize, u8 *rdata, int rsize)
{
	int ret, actlen;
	int offb, offd;
	const int stride = CMD_PACKET_SIZE - 4;
	u8 *wbuf = instance->snd_buf;
	u8 *rbuf = instance->rcv_buf;
	/* number of whole packets needed to carry wsize/rsize payload bytes */
	int wbuflen = ((wsize - 1) / stride + 1) * CMD_PACKET_SIZE;
	int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE;

	/* snd_buf/rcv_buf are single pages; refuse anything larger */
	if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n",
				wbuflen, rbuflen);
		ret = -ENOMEM;
		goto err;
	}

	mutex_lock(&instance->cm_serialize);

	/* submit reading urb before the writing one */
	init_completion(&instance->rcv_done);
	ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	memset(wbuf, 0, wbuflen);
	/* handle wsize == 0 */
	wbuf[0] = cm;
	/* split the payload across packets, repeating the command byte */
	for (offb = offd = 0; offd < wsize; offd += stride, offb += CMD_PACKET_SIZE) {
		wbuf[offb] = cm;
		memcpy(wbuf + offb + 4, wdata + offd, min_t(int, stride, wsize - offd));
	}

	instance->snd_urb->transfer_buffer_length = wbuflen;
	init_completion(&instance->snd_done);
	ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}

	/* the reply must be a non-empty whole number of packets */
	if (actlen % CMD_PACKET_SIZE || !actlen) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n",
				cm, actlen);
		ret = -EIO;
		goto fail;
	}

	/* check the return status and copy the data to the output buffer, if needed */
	for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) {
		if (rbuf[offb] != cm) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n",
					rbuf[offb], cm);
			ret = -EIO;
			goto fail;
		}
		if (rbuf[offb + 1] != CM_STATUS_SUCCESS) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "response to cm %#x failed: %#x\n",
					cm, rbuf[offb + 1]);
			ret = -EIO;
			goto fail;
		}
		if (offd >= rsize)
			break;
		memcpy(rdata + offd, rbuf + offb + 4, min_t(int, stride, rsize - offd));
		offd += stride;
	}

	ret = offd;
	usb_dbg(instance->usbatm, "cm %#x\n", cm);
fail:
	mutex_unlock(&instance->cm_serialize);
err:
	return ret;
}
/*
 * Fetch a sparse (index, value) array from the modem.
 *
 * The reply payload is a sequence of groups: a le32 count followed by that
 * many le32 (index, value) pairs.  Each index selects a slot in @data
 * (length @size); out-of-range indices and implausible counts are rejected.
 * Returns 0 on success or a negative error.
 */
static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_request cm,
			       u32 *data, int size)
{
	int ret, len;
	__le32 *buf;
	int offb;
	unsigned int offd;
	/* pairs that fit in one packet payload, minus room for the count */
	const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
	int buflen =  ((size - 1) / stride + 1 + size * 2) * 4;

	buf = kmalloc(buflen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = cxacru_cm(instance, cm, NULL, 0, (u8 *) buf, buflen);
	if (ret < 0)
		goto cleanup;

	/* len > 0 && len % 4 == 0 guaranteed by cxacru_cm() */
	len = ret / 4;
	for (offb = 0; offb < len; ) {
		int l = le32_to_cpu(buf[offb++]);

		/* the group must fit in the remaining reply words */
		if (l < 0 || l > stride || l > (len - offb) / 2) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
					cm, l);
			ret = -EIO;
			goto cleanup;
		}
		while (l--) {
			offd = le32_to_cpu(buf[offb++]);
			if (offd >= size) {
				if (printk_ratelimit())
					usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
						offd, cm);
				ret = -EIO;
				goto cleanup;
			}
			data[offd] = le32_to_cpu(buf[offb++]);
		}
	}

	ret = 0;

cleanup:
	kfree(buf);
	return ret;
}
/*
 * Probe whether the modem firmware is up by issuing a status request.
 * Returns 0 when the card answers, or the negative cxacru_cm() error
 * (typically meaning the firmware is not loaded yet).
 */
static int cxacru_card_status(struct cxacru_data *instance)
{
	int ret;

	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
	if (ret >= 0)
		return 0;

	/* firmware not loaded */
	usb_dbg(instance->usbatm, "cxacru_adsl_start: CARD_GET_STATUS returned %d\n", ret);
	return ret;
}
/*
 * usbatm atm_start hook: read the MAC address into the ATM device, start
 * the ADSL line and kick off status polling (unless polling is already
 * running or shutting down).  Returns 0 unless the MAC read fails; a
 * failed LINE_START is only logged.
 */
static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
		struct atm_dev *atm_dev)
{
	struct cxacru_data *instance = usbatm_instance->driver_data;
	struct usb_interface *intf = usbatm_instance->usb_intf;
	int ret;
	int start_polling = 1;

	dev_dbg(&intf->dev, "%s\n", __func__);

	/* Read MAC address */
	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_MAC_ADDRESS, NULL, 0,
			atm_dev->esi, sizeof(atm_dev->esi));
	if (ret < 0) {
		atm_err(usbatm_instance, "cxacru_atm_start: CARD_GET_MAC_ADDRESS returned %d\n", ret);
		return ret;
	}

	/* start ADSL */
	mutex_lock(&instance->adsl_state_serialize);
	ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
	if (ret < 0)
		atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret);

	/* Start status polling */
	mutex_lock(&instance->poll_state_serialize);
	switch (instance->poll_state) {
	case CXPOLL_STOPPED:
		/* start polling */
		instance->poll_state = CXPOLL_POLLING;
		break;

	case CXPOLL_STOPPING:
		/* abort stop request */
		instance->poll_state = CXPOLL_POLLING;
		fallthrough;
	case CXPOLL_POLLING:
	case CXPOLL_SHUTDOWN:
		/* don't start polling */
		start_polling = 0;
	}
	mutex_unlock(&instance->poll_state_serialize);
	mutex_unlock(&instance->adsl_state_serialize);

	/* run the first poll synchronously; it reschedules itself */
	if (start_polling)
		cxacru_poll_status(&instance->poll_work.work);
	return 0;
}
/*
 * Periodic status worker: fetch CARD_INFO_GET into card_info[], log ADSL
 * state changes, map line status onto ATM PHY signal changes, then
 * reschedule itself every POLL_INTERVAL seconds unless polling has been
 * stopped.  On a poll error (other than -ESHUTDOWN) polling is disabled
 * until re-enabled through the adsl_state sysfs attribute.
 */
static void cxacru_poll_status(struct work_struct *work)
{
	struct cxacru_data *instance =
		container_of(work, struct cxacru_data, poll_work.work);
	u32 buf[CXINF_MAX] = {};
	struct usbatm_data *usbatm = instance->usbatm;
	struct atm_dev *atm_dev = usbatm->atm_dev;
	int keep_polling = 1;
	int ret;

	ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX);
	if (ret < 0) {
		if (ret != -ESHUTDOWN)
			atm_warn(usbatm, "poll status: error %d\n", ret);

		/* stop polling unless unbind already owns the state machine */
		mutex_lock(&instance->poll_state_serialize);
		if (instance->poll_state != CXPOLL_SHUTDOWN) {
			instance->poll_state = CXPOLL_STOPPED;

			if (ret != -ESHUTDOWN)
				atm_warn(usbatm, "polling disabled, set adsl_state"
						" to 'start' or 'poll' to resume\n");
		}
		mutex_unlock(&instance->poll_state_serialize);
		goto reschedule;
	}

	memcpy(instance->card_info, buf, sizeof(instance->card_info));

	/* log only on transitions of the startable flag */
	if (instance->adsl_status != buf[CXINF_LINE_STARTABLE]) {
		instance->adsl_status = buf[CXINF_LINE_STARTABLE];

		switch (instance->adsl_status) {
		case 0:
			atm_info(usbatm, "ADSL state: running\n");
			break;

		case 1:
			atm_info(usbatm, "ADSL state: stopped\n");
			break;

		default:
			atm_info(usbatm, "Unknown adsl status %02x\n", instance->adsl_status);
			break;
		}
	}

	if (instance->line_status == buf[CXINF_LINE_STATUS])
		goto reschedule;

	instance->line_status = buf[CXINF_LINE_STATUS];
	switch (instance->line_status) {
	case 0:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: down\n");
		break;

	case 1:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: attempting to activate\n");
		break;

	case 2:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: training\n");
		break;

	case 3:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: channel analysis\n");
		break;

	case 4:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: exchange\n");
		break;

	case 5:
		/* convert kbit/s to 53-byte cells/s (424 bits per cell) */
		atm_dev->link_rate = buf[CXINF_DOWNSTREAM_RATE] * 1000 / 424;
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND);

		atm_info(usbatm, "ADSL line: up (%d kb/s down | %d kb/s up)\n",
		     buf[CXINF_DOWNSTREAM_RATE], buf[CXINF_UPSTREAM_RATE]);
		break;

	case 6:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: waiting\n");
		break;

	case 7:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: initializing\n");
		break;

	default:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
		atm_info(usbatm, "Unknown line state %02x\n", instance->line_status);
		break;
	}
reschedule:

	mutex_lock(&instance->poll_state_serialize);
	/* a pending stop request completes once the line is stopped and down */
	if (instance->poll_state == CXPOLL_STOPPING &&
				instance->adsl_status == 1 && /* stopped */
				instance->line_status == 0) /* down */
		instance->poll_state = CXPOLL_STOPPED;

	if (instance->poll_state == CXPOLL_STOPPED)
		keep_polling = 0;
	mutex_unlock(&instance->poll_state_serialize);

	if (keep_polling)
		schedule_delayed_work(&instance->poll_work,
				round_jiffies_relative(POLL_INTERVAL*HZ));
}
/*
 * Send a firmware command stream over the command bulk endpoint.
 *
 * Each CMD_PACKET_SIZE packet carries an 8-byte header (fw code, payload
 * length, code1, code2, le32 target address) and up to CMD_PACKET_SIZE - 8
 * payload bytes from @data.  Packets are batched into one page and flushed
 * with usb_bulk_msg() when the page fills or the data runs out.  A zero
 * @size still sends one (payload-less) packet, used for e.g. FW_GOTO_MEM.
 */
static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
		     u8 code1, u8 code2, u32 addr, const u8 *data, int size)
{
	int ret;
	u8 *buf;
	int offd, offb;
	const int stride = CMD_PACKET_SIZE - 8;

	buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	offb = offd = 0;
	do {
		int l = min_t(int, stride, size - offd);

		buf[offb++] = fw;
		buf[offb++] = l;
		buf[offb++] = code1;
		buf[offb++] = code2;
		put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb));
		offb += 4;
		addr += l;
		if (l)
			memcpy(buf + offb, data + offd, l);
		/* zero-pad the packet out to its full payload size */
		if (l < stride)
			memset(buf + offb + l, 0, stride - l);
		offb += stride;
		offd += stride;
		if ((offb >= PAGE_SIZE) || (offd >= size)) {
			ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
					   buf, offb, NULL, CMD_TIMEOUT);
			if (ret < 0) {
				dev_dbg(&usb_dev->dev, "sending fw %#x failed\n", fw);
				goto cleanup;
			}
			offb = 0;
		}
	} while (offd < size);
	dev_dbg(&usb_dev->dev, "sent fw %#x\n", fw);

	ret = 0;
cleanup:
	free_page((unsigned long) buf);
	return ret;
}
/*
 * Bring up the modem: program the PLL clocks, enable SDRAM, upload the
 * firmware image (and the boot ROM patch when the modem type requires
 * one), store the USB VID/PID signature, transfer control to the loaded
 * code and finally verify the modem answers a status request.  Errors are
 * logged and abort the sequence; the caller checks the card status again.
 *
 * @bp is only dereferenced when modem_type->boot_rom_patch is set.
 */
static void cxacru_upload_firmware(struct cxacru_data *instance,
				   const struct firmware *fw,
				   const struct firmware *bp)
{
	int ret;
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	/* descriptor fields are already little-endian on the wire */
	__le16 signature[] = { usb_dev->descriptor.idVendor,
			       usb_dev->descriptor.idProduct };
	__le32 val;

	usb_dbg(usbatm, "%s\n", __func__);

	/* FirmwarePllFClkValue */
	val = cpu_to_le32(instance->modem_type->pll_f_clk);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret);
		return;
	}

	/* FirmwarePllBClkValue */
	val = cpu_to_le32(instance->modem_type->pll_b_clk);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret);
		return;
	}

	/* Enable SDRAM */
	val = cpu_to_le32(SDRAM_ENA);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "Enable SDRAM failed: %d\n", ret);
		return;
	}

	/* Firmware */
	usb_info(usbatm, "loading firmware\n");
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
	if (ret) {
		usb_err(usbatm, "Firmware upload failed: %d\n", ret);
		return;
	}

	/* Boot ROM patch */
	if (instance->modem_type->boot_rom_patch) {
		usb_info(usbatm, "loading boot ROM patch\n");
		ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
		if (ret) {
			usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
			return;
		}
	}

	/* Signature */
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4);
	if (ret) {
		usb_err(usbatm, "Signature storing failed: %d\n", ret);
		return;
	}

	usb_info(usbatm, "starting device\n");
	/* patched modems boot through the patch address, others jump straight
	 * to the firmware load address */
	if (instance->modem_type->boot_rom_patch) {
		val = cpu_to_le32(BR_ADDR);
		ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
	} else {
		ret = cxacru_fw(usb_dev, FW_GOTO_MEM, 0x0, 0x0, FW_ADDR, NULL, 0);
	}
	if (ret) {
		usb_err(usbatm, "Passing control to firmware failed: %d\n", ret);
		return;
	}

	/* Delay to allow firmware to start up. */
	msleep_interruptible(1000);

	/* clear any stalls left over from the boot sequence */
	usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD));
	usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD));
	usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_DATA));
	usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_DATA));

	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
	if (ret < 0) {
		usb_err(usbatm, "modem failed to initialize: %d\n", ret);
		return;
	}
}
/*
 * Request the firmware image "cxacru-<phase>.bin" for the given phase
 * ("fw" or "bp").  On success *fw_p holds the image (caller releases it
 * with release_firmware()); returns -ENOENT when the image is missing.
 */
static int cxacru_find_firmware(struct cxacru_data *instance,
				char *phase, const struct firmware **fw_p)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct device *dev = &usbatm->usb_intf->dev;
	char buf[16];

	/* bound the write: current phases fit, but don't rely on it */
	snprintf(buf, sizeof(buf), "cxacru-%s.bin", phase);
	usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf);

	if (request_firmware(fw_p, buf, dev)) {
		usb_dbg(usbatm, "no stage %s firmware found\n", phase);
		return -ENOENT;
	}

	usb_info(usbatm, "found firmware %s\n", buf);

	return 0;
}
static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
struct usb_interface *usb_intf)
{
const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
int ret = cxacru_find_firmware(instance, "fw", &fw);
if (ret) {
usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
return ret;
}
if (instance->modem_type->boot_rom_patch) {
ret = cxacru_find_firmware(instance, "bp", &bp);
if (ret) {
usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n");
release_firmware(fw);
return ret;
}
}
cxacru_upload_firmware(instance, fw, bp);
if (instance->modem_type->boot_rom_patch)
release_firmware(bp);
release_firmware(fw);
ret = cxacru_card_status(instance);
if (ret)
usb_dbg(usbatm_instance, "modem initialisation failed\n");
else
usb_dbg(usbatm_instance, "done setting up the modem\n");
return ret;
}
/*
 * usbatm bind hook: allocate the per-device instance, its one-page command
 * buffers and the send/receive URBs, choosing interrupt or bulk transfers
 * based on the command endpoint's descriptor.  Skips heavy init when the
 * card already answers a status request.
 */
static int cxacru_bind(struct usbatm_data *usbatm_instance,
		       struct usb_interface *intf, const struct usb_device_id *id)
{
	struct cxacru_data *instance;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
	int ret;

	/* instance init */
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->usbatm = usbatm_instance;
	instance->modem_type = (struct cxacru_modem_type *) id->driver_info;

	mutex_init(&instance->poll_state_serialize);
	instance->poll_state = CXPOLL_STOPPED;
	/* -1 forces the first poll to log the initial line/adsl state */
	instance->line_status = -1;
	instance->adsl_status = -1;

	mutex_init(&instance->adsl_state_serialize);

	instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->rcv_buf) {
		usb_dbg(usbatm_instance, "cxacru_bind: no memory for rcv_buf\n");
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->snd_buf) {
		usb_dbg(usbatm_instance, "cxacru_bind: no memory for snd_buf\n");
		ret = -ENOMEM;
		goto fail;
	}
	instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->rcv_urb) {
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->snd_urb) {
		ret = -ENOMEM;
		goto fail;
	}

	/* the command endpoint must exist before we touch its descriptor */
	if (!cmd_ep) {
		usb_dbg(usbatm_instance, "cxacru_bind: no command endpoint\n");
		ret = -ENODEV;
		goto fail;
	}

	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_INT) {
		usb_fill_int_urb(instance->rcv_urb,
			usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done, 1);
		usb_fill_int_urb(instance->snd_urb,
			usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done, 4);
	} else {
		usb_fill_bulk_urb(instance->rcv_urb,
			usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done);
		usb_fill_bulk_urb(instance->snd_urb,
			usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done);
	}

	mutex_init(&instance->cm_serialize);

	INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);

	usbatm_instance->driver_data = instance;

	usbatm_instance->flags = (cxacru_card_status(instance) ? 0 : UDSL_SKIP_HEAVY_INIT);

	return 0;

 fail:
	/* free_page(0) and usb_free_urb(NULL) are safe no-ops */
	free_page((unsigned long) instance->snd_buf);
	free_page((unsigned long) instance->rcv_buf);
	usb_free_urb(instance->snd_urb);
	usb_free_urb(instance->rcv_urb);
	kfree(instance);

	return ret;
}
/*
 * usbatm unbind hook: move the poll state machine to CXPOLL_SHUTDOWN so
 * polling can never restart, cancel any scheduled poll work, kill and free
 * the command URBs and buffers, then free the instance.
 */
static void cxacru_unbind(struct usbatm_data *usbatm_instance,
		struct usb_interface *intf)
{
	struct cxacru_data *instance = usbatm_instance->driver_data;
	int is_polling = 1;

	usb_dbg(usbatm_instance, "cxacru_unbind entered\n");

	if (!instance) {
		usb_dbg(usbatm_instance, "cxacru_unbind: NULL instance!\n");
		return;
	}

	mutex_lock(&instance->poll_state_serialize);
	/* unbind must never run twice for the same instance */
	BUG_ON(instance->poll_state == CXPOLL_SHUTDOWN);

	/* ensure that status polling continues unless
	 * it has already stopped */
	if (instance->poll_state == CXPOLL_STOPPED)
		is_polling = 0;

	/* stop polling from being stopped or started */
	instance->poll_state = CXPOLL_SHUTDOWN;
	mutex_unlock(&instance->poll_state_serialize);

	if (is_polling)
		cancel_delayed_work_sync(&instance->poll_work);

	usb_kill_urb(instance->snd_urb);
	usb_kill_urb(instance->rcv_urb);
	usb_free_urb(instance->snd_urb);
	usb_free_urb(instance->rcv_urb);

	free_page((unsigned long) instance->snd_buf);
	free_page((unsigned long) instance->rcv_buf);

	kfree(instance);

	usbatm_instance->driver_data = NULL;
}
/* PLL values are written raw to PLLFCLK_ADDR/PLLBCLK_ADDR during
 * firmware upload; "cafe" devices additionally need the boot ROM patch */
static const struct cxacru_modem_type cxacru_cafe = {
	.pll_f_clk = 0x02d874df,
	.pll_b_clk = 0x0196a51a,
	.boot_rom_patch = 1,
};

static const struct cxacru_modem_type cxacru_cb00 = {
	.pll_f_clk = 0x5,
	.pll_b_clk = 0x3,
	.boot_rom_patch = 0,
};
/* supported VID/PID pairs; driver_info selects the cxacru_modem_type
 * (PLL clock values and whether a boot ROM patch is required) */
static const struct usb_device_id cxacru_usb_ids[] = {
	{ /* V = Conexant			P = ADSL modem (Euphrates project)	*/
		USB_DEVICE(0x0572, 0xcafe),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Conexant			P = ADSL modem (Hasbani project)	*/
		USB_DEVICE(0x0572, 0xcb00),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem				*/
		USB_DEVICE(0x0572, 0xcb01),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem (Well PTI-800) */
		USB_DEVICE(0x0572, 0xcb02),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem				*/
		USB_DEVICE(0x0572, 0xcb06),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem (ZTE ZXDSL 852)		*/
		USB_DEVICE(0x0572, 0xcb07),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Olitec				P = ADSL modem version 2		*/
		USB_DEVICE(0x08e3, 0x0100),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Olitec				P = ADSL modem version 3		*/
		USB_DEVICE(0x08e3, 0x0102),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Trust/Amigo Technology Co.	P = AMX-CA86U				*/
		USB_DEVICE(0x0eb0, 0x3457),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Zoom				P = 5510				*/
		USB_DEVICE(0x1803, 0x5510),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Draytek			P = Vigor 318				*/
		USB_DEVICE(0x0675, 0x0200),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Zyxel				P = 630-C1 aka OMNI ADSL USB (Annex A)	*/
		USB_DEVICE(0x0586, 0x330a),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Zyxel				P = 630-C3 aka OMNI ADSL USB (Annex B)	*/
		USB_DEVICE(0x0586, 0x330b),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Aethra				P = Starmodem UM1020			*/
		USB_DEVICE(0x0659, 0x0020),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Aztech Systems			P = ? AKA Pirelli AUA-010		*/
		USB_DEVICE(0x0509, 0x0812),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Netopia			P = Cayman 3341(Annex A)/3351(Annex B)	*/
		USB_DEVICE(0x100d, 0xcb01),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Netopia			P = Cayman 3342(Annex A)/3352(Annex B)	*/
		USB_DEVICE(0x100d, 0x3342),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{}
};

MODULE_DEVICE_TABLE(usb, cxacru_usb_ids);
/* usbatm driver description: data flows over the CXACRU_EP_DATA bulk
 * endpoint in both directions */
static struct usbatm_driver cxacru_driver = {
	.driver_name	= cxacru_driver_name,
	.bind		= cxacru_bind,
	.heavy_init	= cxacru_heavy_init,
	.unbind		= cxacru_unbind,
	.atm_start	= cxacru_atm_start,
	.bulk_in	= CXACRU_EP_DATA,
	.bulk_out	= CXACRU_EP_DATA,
	.rx_padding	= 3,
	.tx_padding	= 11,
};
/* probe wrapper: reject cx82310_eth Ethernet devices that share VID/PIDs
 * with the ADSL modems, then hand over to the generic usbatm probe */
static int cxacru_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	char buf[15];

	/* Avoid ADSL routers (cx82310_eth).
	 * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD".
	 */
	if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC
			&& usb_string(usb_dev, usb_dev->descriptor.iProduct,
				      buf, sizeof(buf)) > 0) {
		if (!strcmp(buf, "USB NET CARD")) {
			dev_info(&intf->dev, "ignoring cx82310_eth device\n");
			return -ENODEV;
		}
	}

	return usbatm_usb_probe(intf, id, &cxacru_driver);
}
/*
 * USB interface driver glue: cxacru-specific probe filter, generic
 * usbatm disconnect, and the device-id table above.
 */
static struct usb_driver cxacru_usb_driver = {
	.name		= cxacru_driver_name,
	.probe		= cxacru_usb_probe,
	.disconnect	= usbatm_usb_disconnect,
	.id_table	= cxacru_usb_ids,
	.dev_groups	= cxacru_groups,	/* sysfs attribute groups */
};
module_usb_driver(cxacru_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/atm/cxacru.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
/*
* Copyright (c) 2003, 2004
* Damien Bergamini <[email protected]>. All rights reserved.
*
* Copyright (c) 2005-2007 Matthieu Castet <[email protected]>
* Copyright (c) 2005-2007 Stanislaw Gruszka <[email protected]>
*
* HISTORY : some part of the code was base on ueagle 1.3 BSD driver,
* Damien Bergamini agree to put his code under a DUAL GPL/BSD license.
*
* The rest of the code was rewritten from scratch.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include "usbatm.h"
#define EAGLEUSBVERSION "ueagle 1.4"
/*
* Debug macros
*/
#define uea_dbg(usb_dev, format, args...) \
do { \
if (debug >= 1) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm dbg] %s: " format, \
__func__, ##args); \
} while (0)
#define uea_vdbg(usb_dev, format, args...) \
do { \
if (debug >= 2) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm vdbg] " format, ##args); \
} while (0)
#define uea_enters(usb_dev) \
uea_vdbg(usb_dev, "entering %s\n" , __func__)
#define uea_leaves(usb_dev) \
uea_vdbg(usb_dev, "leaving %s\n" , __func__)
#define uea_err(usb_dev, format, args...) \
dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)
#define uea_warn(usb_dev, format, args...) \
dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)
#define uea_info(usb_dev, format, args...) \
dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)
struct intr_pkt;
/* cmv's from firmware */
struct uea_cmvs_v1 {
u32 address;
u16 offset;
u32 data;
} __packed;
struct uea_cmvs_v2 {
u32 group;
u32 address;
u32 offset;
u32 data;
} __packed;
/* information about currently processed cmv */
struct cmv_dsc_e1 {
u8 function;
u16 idx;
u32 address;
u16 offset;
};
struct cmv_dsc_e4 {
u16 function;
u16 offset;
u16 address;
u16 group;
};
union cmv_dsc {
struct cmv_dsc_e1 e1;
struct cmv_dsc_e4 e4;
};
/* Per-modem driver state ("softc" in the BSD tradition this driver came from). */
struct uea_softc {
	struct usb_device *usb_dev;
	struct usbatm_data *usbatm;

	int modem_index;		/* index into the per-modem module parameter arrays */
	unsigned int driver_info;	/* chip version + PSTFIRM/annex flags from usb_device_id */
	int annex;			/* ANNEXA (POTS) or ANNEXB (ISDN) */
#define ANNEXA 0
#define ANNEXB 1

	int booting;
	int reset;

	wait_queue_head_t sync_q;	/* CMV-ack and sync waiters sleep here */

	struct task_struct *kthread;
	u32 data;			/* payload of the last CMV reply */
	u32 data1;			/* second word of a wide (size > 2) e4 reply */

	int cmv_ack;			/* set by wake_up_cmv_ack(), cleared by wait_cmv_ack() */
	union cmv_dsc cmv_dsc;		/* descriptor of the CMV currently in flight */

	struct work_struct task;	/* runs uea_load_page_e1()/uea_load_page_e4() */
	u16 pageno;			/* DSP page the work handler should load */
	u16 ovl;			/* overlay for that page (e1 only) */

	const struct firmware *dsp_firm;	/* cached DSP image; released once operational */
	struct urb *urb_int;

	/* per-chip-generation (e1 vs e4) operations */
	void (*dispatch_cmv)(struct uea_softc *, struct intr_pkt *);
	void (*schedule_load_page)(struct uea_softc *, struct intr_pkt *);
	int (*stat)(struct uea_softc *);
	int (*send_cmvs)(struct uea_softc *);

	/* keep in sync with eaglectl */
	struct uea_stats {
		struct {
			u32 state;
			u32 flags;
			u32 mflags;	/* sticky accumulation of flags */
			u32 vidcpe;
			u32 vidco;
			u32 dsrate;	/* downstream rate, kbit/s */
			u32 usrate;	/* upstream rate, kbit/s */
			u32 dsunc;
			u32 usunc;
			u32 dscorr;
			u32 uscorr;
			u32 txflow;
			u32 rxflow;
			u32 usattenuation;
			u32 dsattenuation;
			u32 dsmargin;
			u32 usmargin;
			u32 firmid;
		} phy;
	} stats;
};
/*
* Elsa IDs
*/
#define ELSA_VID 0x05CC
#define ELSA_PID_PSTFIRM 0x3350
#define ELSA_PID_PREFIRM 0x3351
#define ELSA_PID_A_PREFIRM 0x3352
#define ELSA_PID_A_PSTFIRM 0x3353
#define ELSA_PID_B_PREFIRM 0x3362
#define ELSA_PID_B_PSTFIRM 0x3363
/*
* Devolo IDs : pots if (pid & 0x10)
*/
#define DEVOLO_VID 0x1039
#define DEVOLO_EAGLE_I_A_PID_PSTFIRM 0x2110
#define DEVOLO_EAGLE_I_A_PID_PREFIRM 0x2111
#define DEVOLO_EAGLE_I_B_PID_PSTFIRM 0x2100
#define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101
#define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130
#define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131
#define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120
#define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121
/*
* Reference design USB IDs
*/
#define ANALOG_VID 0x1110
#define ADI930_PID_PREFIRM 0x9001
#define ADI930_PID_PSTFIRM 0x9000
#define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */
#define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */
#define EAGLE_IIC_PID_PREFIRM 0x9024 /* Eagle IIC */
#define EAGLE_IIC_PID_PSTFIRM 0x9023 /* Eagle IIC */
#define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */
#define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */
#define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */
#define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */
#define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */
#define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */
/*
* USR USB IDs
*/
#define USR_VID 0x0BAF
#define MILLER_A_PID_PREFIRM 0x00F2
#define MILLER_A_PID_PSTFIRM 0x00F1
#define MILLER_B_PID_PREFIRM 0x00FA
#define MILLER_B_PID_PSTFIRM 0x00F9
#define HEINEKEN_A_PID_PREFIRM 0x00F6
#define HEINEKEN_A_PID_PSTFIRM 0x00F5
#define HEINEKEN_B_PID_PREFIRM 0x00F8
#define HEINEKEN_B_PID_PSTFIRM 0x00F7
#define PREFIRM 0
#define PSTFIRM (1<<7)
#define AUTO_ANNEX_A (1<<8)
#define AUTO_ANNEX_B (1<<9)
enum {
ADI930 = 0,
EAGLE_I,
EAGLE_II,
EAGLE_III,
EAGLE_IV
};
/* macros for both struct usb_device_id and struct uea_softc */
#define UEA_IS_PREFIRM(x) \
(!((x)->driver_info & PSTFIRM))
#define UEA_CHIP_VERSION(x) \
((x)->driver_info & 0xf)
#define IS_ISDN(x) \
((x)->annex & ANNEXB)
#define INS_TO_USBDEV(ins) (ins->usb_dev)
#define GET_STATUS(data) \
((data >> 8) & 0xf)
#define IS_OPERATIONAL(sc) \
((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \
(GET_STATUS(sc->stats.phy.state) == 2) : \
(sc->stats.phy.state == 7))
/*
* Set of macros to handle unaligned data in the firmware blob.
* The FW_GET_BYTE() macro is provided only for consistency.
*/
#define FW_GET_BYTE(p) (*((__u8 *) (p)))
#define FW_DIR "ueagle-atm/"
#define EAGLE_FIRMWARE FW_DIR "eagle.fw"
#define ADI930_FIRMWARE FW_DIR "adi930.fw"
#define EAGLE_I_FIRMWARE FW_DIR "eagleI.fw"
#define EAGLE_II_FIRMWARE FW_DIR "eagleII.fw"
#define EAGLE_III_FIRMWARE FW_DIR "eagleIII.fw"
#define EAGLE_IV_FIRMWARE FW_DIR "eagleIV.fw"
#define DSP4I_FIRMWARE FW_DIR "DSP4i.bin"
#define DSP4P_FIRMWARE FW_DIR "DSP4p.bin"
#define DSP9I_FIRMWARE FW_DIR "DSP9i.bin"
#define DSP9P_FIRMWARE FW_DIR "DSP9p.bin"
#define DSPEI_FIRMWARE FW_DIR "DSPei.bin"
#define DSPEP_FIRMWARE FW_DIR "DSPep.bin"
#define FPGA930_FIRMWARE FW_DIR "930-fpga.bin"
#define CMV4P_FIRMWARE FW_DIR "CMV4p.bin"
#define CMV4PV2_FIRMWARE FW_DIR "CMV4p.bin.v2"
#define CMV4I_FIRMWARE FW_DIR "CMV4i.bin"
#define CMV4IV2_FIRMWARE FW_DIR "CMV4i.bin.v2"
#define CMV9P_FIRMWARE FW_DIR "CMV9p.bin"
#define CMV9PV2_FIRMWARE FW_DIR "CMV9p.bin.v2"
#define CMV9I_FIRMWARE FW_DIR "CMV9i.bin"
#define CMV9IV2_FIRMWARE FW_DIR "CMV9i.bin.v2"
#define CMVEP_FIRMWARE FW_DIR "CMVep.bin"
#define CMVEPV2_FIRMWARE FW_DIR "CMVep.bin.v2"
#define CMVEI_FIRMWARE FW_DIR "CMVei.bin"
#define CMVEIV2_FIRMWARE FW_DIR "CMVei.bin.v2"
#define UEA_FW_NAME_MAX 30
#define NB_MODEM 4
#define BULK_TIMEOUT 300
#define CTRL_TIMEOUT 1000
#define ACK_TIMEOUT msecs_to_jiffies(3000)
#define UEA_INTR_IFACE_NO 0
#define UEA_US_IFACE_NO 1
#define UEA_DS_IFACE_NO 2
#define FASTEST_ISO_INTF 8
#define UEA_BULK_DATA_PIPE 0x02
#define UEA_IDMA_PIPE 0x04
#define UEA_INTR_PIPE 0x04
#define UEA_ISO_DATA_PIPE 0x08
#define UEA_E1_SET_BLOCK 0x0001
#define UEA_E4_SET_BLOCK 0x002c
#define UEA_SET_MODE 0x0003
#define UEA_SET_2183_DATA 0x0004
#define UEA_SET_TIMEOUT 0x0011
#define UEA_LOOPBACK_OFF 0x0002
#define UEA_LOOPBACK_ON 0x0003
#define UEA_BOOT_IDMA 0x0006
#define UEA_START_RESET 0x0007
#define UEA_END_RESET 0x0008
#define UEA_SWAP_MAILBOX (0x3fcd | 0x4000)
#define UEA_MPTX_START (0x3fce | 0x4000)
#define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000)
#define UEA_MPRX_MAILBOX (0x3fdf | 0x4000)
/* block information in eagle4 dsp firmware */
struct block_index {
__le32 PageOffset;
__le32 NotLastBlock;
__le32 dummy;
__le32 PageSize;
__le32 PageAddress;
__le16 dummy1;
__le16 PageNumber;
} __packed;
#define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000)
#define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4)
#define E4_L1_STRING_HEADER 0x10
#define E4_MAX_PAGE_NUMBER 0x58
#define E4_NO_SWAPPAGE_HEADERS 0x31
/* l1_code is eagle4 dsp firmware format */
struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
u8 code[];
} __packed;
/* structures describing a block within a DSP page */
struct block_info_e1 {
__le16 wHdr;
__le16 wAddress;
__le16 wSize;
__le16 wOvlOffset;
__le16 wOvl; /* overlay */
__le16 wLast;
} __packed;
#define E1_BLOCK_INFO_SIZE 12
struct block_info_e4 {
__be16 wHdr;
__u8 bBootPage;
__u8 bPageNumber;
__be32 dwSize;
__be32 dwAddress;
__be16 wReserved;
} __packed;
#define E4_BLOCK_INFO_SIZE 14
#define UEA_BIHDR 0xabcd
#define UEA_RESERVED 0xffff
/* constants describing cmv type */
#define E1_PREAMBLE 0x535c
#define E1_MODEMTOHOST 0x01
#define E1_HOSTTOMODEM 0x10
#define E1_MEMACCESS 0x1
#define E1_ADSLDIRECTIVE 0x7
#define E1_FUNCTION_TYPE(f) ((f) >> 4)
#define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f)
#define E4_MEMACCESS 0
#define E4_ADSLDIRECTIVE 0xf
#define E4_FUNCTION_TYPE(f) ((f) >> 8)
#define E4_FUNCTION_SIZE(f) ((f) & 0x0f)
#define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f)
/* for MEMACCESS */
#define E1_REQUESTREAD 0x0
#define E1_REQUESTWRITE 0x1
#define E1_REPLYREAD 0x2
#define E1_REPLYWRITE 0x3
#define E4_REQUESTREAD 0x0
#define E4_REQUESTWRITE 0x4
#define E4_REPLYREAD (E4_REQUESTREAD | 1)
#define E4_REPLYWRITE (E4_REQUESTWRITE | 1)
/* for ADSLDIRECTIVE */
#define E1_KERNELREADY 0x0
#define E1_MODEMREADY 0x1
#define E4_KERNELREADY 0x0
#define E4_MODEMREADY 0x1
#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
((st) & 0xf) << 4 | ((s) & 0xf))
#define E1_MAKESA(a, b, c, d) \
(((c) & 0xff) << 24 | \
((d) & 0xff) << 16 | \
((a) & 0xff) << 8 | \
((b) & 0xff))
#define E1_GETSA1(a) ((a >> 8) & 0xff)
#define E1_GETSA2(a) (a & 0xff)
#define E1_GETSA3(a) ((a >> 24) & 0xff)
#define E1_GETSA4(a) ((a >> 16) & 0xff)
#define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L')
#define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G')
#define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O')
#define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N')
#define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E')
#define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T')
#define E4_SA_CNTL 1
#define E4_SA_STAT 2
#define E4_SA_INFO 3
#define E4_SA_TEST 4
#define E4_SA_OPTN 5
#define E4_SA_RATE 6
#define E4_SA_DIAG 7
#define E4_SA_CNFG 8
/* structures representing a CMV (Configuration and Management Variable) */
struct cmv_e1 {
__le16 wPreamble;
__u8 bDirection;
__u8 bFunction;
__le16 wIndex;
__le32 dwSymbolicAddress;
__le16 wOffsetAddress;
__le32 dwData;
} __packed;
struct cmv_e4 {
__be16 wGroup;
__be16 wFunction;
__be16 wOffset;
__be16 wAddress;
__be32 dwData[6];
} __packed;
/* structures representing swap information */
struct swap_info_e1 {
__u8 bSwapPageNo;
__u8 bOvl; /* overlay */
} __packed;
struct swap_info_e4 {
__u8 bSwapPageNo;
} __packed;
/* structures representing interrupt data */
#define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo
#define e1_bOvl u.e1.s1.swapinfo.bOvl
#define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo
#define INT_LOADSWAPPAGE 0x0001
#define INT_INCOMINGCMV 0x0002
union intr_data_e1 {
struct {
struct swap_info_e1 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e1 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
union intr_data_e4 {
struct {
struct swap_info_e4 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e4 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
struct intr_pkt {
__u8 bType;
__u8 bNotification;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
__le16 wInterrupt;
union {
union intr_data_e1 e1;
union intr_data_e4 e4;
} u;
} __packed;
#define E1_INTR_PKT_SIZE 28
#define E4_INTR_PKT_SIZE 64
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
static const char * const chip_name[] = {
"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
static int modem_index;
static unsigned int debug;
static unsigned int altsetting[NB_MODEM] = {
[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
static bool sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)");
module_param_array(altsetting, uint, NULL, 0644);
MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, "
"1=isoc slowest, ... , 8=isoc fastest (default)");
module_param_array(sync_wait, bool, NULL, 0644);
MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM");
module_param_array(cmv_file, charp, NULL, 0644);
MODULE_PARM_DESC(cmv_file,
"file name with configuration and management variables");
module_param_array(annex, uint, NULL, 0644);
MODULE_PARM_DESC(annex,
"manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
#define uea_wait(sc, cond, timeo) \
({ \
int _r = wait_event_interruptible_timeout(sc->sync_q, \
(cond) || kthread_should_stop(), timeo); \
if (kthread_should_stop()) \
_r = -ENODEV; \
_r; \
})
#define UPDATE_ATM_STAT(type, val) \
do { \
if (sc->usbatm->atm_dev) \
sc->usbatm->atm_dev->type = val; \
} while (0)
#define UPDATE_ATM_SIGNAL(val) \
do { \
if (sc->usbatm->atm_dev) \
atm_dev_signal_change(sc->usbatm->atm_dev, val); \
} while (0)
/* Firmware loading */
#define LOAD_INTERNAL 0xA0
#define F8051_USBCS 0x7f92
/*
 * uea_send_modem_cmd - Send a command for pre-firmware devices.
 *
 * Copies @buff into a freshly allocated kernel buffer (usb_control_msg
 * must not be handed caller/stack memory) and issues a LOAD_INTERNAL
 * vendor request targeting @addr.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on a short
 * transfer, or the negative error from usb_control_msg().
 */
static int uea_send_modem_cmd(struct usb_device *usb,
			      u16 addr, u16 size, const u8 *buff)
{
	u8 *dma_buf;
	int ret;

	dma_buf = kmemdup(buff, size, GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), LOAD_INTERNAL,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      addr, 0, dma_buf, size, CTRL_TIMEOUT);
	kfree(dma_buf);

	if (ret < 0)
		return ret;
	return (ret == size) ? 0 : -EIO;
}
/*
 * Completion handler for request_firmware_nowait(): verify the image's
 * CRC and push it to the pre-firmware device over the control pipe.
 *
 * Image layout: a 4-byte little-endian CRC (computed with the crc32_be
 * polynomial over everything after it), then a sequence of records of
 * the form { u8 len, __le16 addr, u8 data[len] }.  The 8051 is held in
 * reset (F8051_USBCS = 1) for the upload and released (= 0) at the end.
 * All exits fall through to releasing the firmware blob.
 */
static void uea_upload_pre_firmware(const struct firmware *fw_entry,
							void *context)
{
	struct usb_device *usb = context;
	const u8 *pfw;
	u8 value;
	u32 crc = 0;
	int ret, size;

	uea_enters(usb);
	if (!fw_entry) {
		uea_err(usb, "firmware is not available\n");
		goto err;
	}

	pfw = fw_entry->data;
	size = fw_entry->size;
	if (size < 4)	/* must at least hold the CRC */
		goto err_fw_corrupted;

	crc = get_unaligned_le32(pfw);
	pfw += 4;
	size -= 4;
	if (crc32_be(0, pfw, size) != crc)
		goto err_fw_corrupted;

	/*
	 * Start to upload firmware : send reset
	 */
	value = 1;
	ret = uea_send_modem_cmd(usb, F8051_USBCS, sizeof(value), &value);
	if (ret < 0) {
		uea_err(usb, "modem reset failed with error %d\n", ret);
		goto err;
	}

	/* each record needs at least its 3-byte header (len + addr) */
	while (size > 3) {
		u8 len = FW_GET_BYTE(pfw);
		u16 add = get_unaligned_le16(pfw + 1);

		size -= len + 3;
		if (size < 0)	/* record payload ran past end of image */
			goto err_fw_corrupted;

		ret = uea_send_modem_cmd(usb, add, len, pfw + 3);
		if (ret < 0) {
			uea_err(usb, "uploading firmware data failed "
					"with error %d\n", ret);
			goto err;
		}
		pfw += len + 3;
	}

	if (size != 0)	/* trailing garbage that is not a full record */
		goto err_fw_corrupted;

	/*
	 * Tell the modem we finish : de-assert reset
	 */
	value = 0;
	ret = uea_send_modem_cmd(usb, F8051_USBCS, 1, &value);
	if (ret < 0)
		uea_err(usb, "modem de-assert failed with error %d\n", ret);
	else
		uea_info(usb, "firmware uploaded\n");

	goto err;

err_fw_corrupted:
	uea_err(usb, "firmware is corrupted\n");
err:
	release_firmware(fw_entry);
	uea_leaves(usb);
}
/*
* uea_load_firmware - Load usb firmware for pre-firmware devices.
*/
static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
{
int ret;
char *fw_name = EAGLE_FIRMWARE;
uea_enters(usb);
uea_info(usb, "pre-firmware device, uploading firmware\n");
switch (ver) {
case ADI930:
fw_name = ADI930_FIRMWARE;
break;
case EAGLE_I:
fw_name = EAGLE_I_FIRMWARE;
break;
case EAGLE_II:
fw_name = EAGLE_II_FIRMWARE;
break;
case EAGLE_III:
fw_name = EAGLE_III_FIRMWARE;
break;
case EAGLE_IV:
fw_name = EAGLE_IV_FIRMWARE;
break;
}
ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
GFP_KERNEL, usb,
uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
uea_info(usb, "loading firmware %s\n", fw_name);
uea_leaves(usb);
return ret;
}
/* modem management : dsp firmware, send/read CMV, monitoring statistic
*/
/*
 * Make sure that the DSP code provided is safe to use.
 *
 * Walks the e1 image layout without trusting any length field:
 * byte 0 is the page count, followed by one 32-bit little-endian file
 * offset per page (0 = page absent).  Each page begins with a block
 * count byte, then that many blocks of { 2-byte address, 2-byte LE
 * size, payload }.  Returns 0 when every offset/size stays within the
 * len bytes at dsp, 1 as soon as anything points outside the buffer.
 */
static int check_dsp_e1(const u8 *dsp, unsigned int len)
{
	u8 pagecount, blockcount;
	u16 blocksize;
	u32 pageoffset;
	unsigned int i, j, p, pp;	/* p/pp are byte cursors into dsp */

	pagecount = FW_GET_BYTE(dsp);
	p = 1;

	/* enough space for page offsets? */
	if (p + 4 * pagecount > len)
		return 1;

	for (i = 0; i < pagecount; i++) {

		pageoffset = get_unaligned_le32(dsp + p);
		p += 4;

		if (pageoffset == 0)	/* absent page, nothing to check */
			continue;

		/* enough space for blockcount? */
		if (pageoffset >= len)
			return 1;

		pp = pageoffset;
		blockcount = FW_GET_BYTE(dsp + pp);
		pp += 1;

		for (j = 0; j < blockcount; j++) {

			/* enough space for block header? */
			if (pp + 4 > len)
				return 1;

			pp += 2;	/* skip blockaddr */
			blocksize = get_unaligned_le16(dsp + pp);
			pp += 2;

			/* enough space for block data? */
			if (pp + blocksize > len)
				return 1;

			pp += blocksize;
		}
	}

	return 0;
}
/*
 * Validate the layout of an Eagle-IV (e4) DSP image (struct l1_code).
 *
 * Checks the annex signature, then follows every page's chain of block
 * headers, verifying that each header lies inside the buffer, belongs
 * to the expected page, and that its payload (offset + size) fits.
 * Finally requires the fixed header plus all page payloads to account
 * for exactly len bytes.  Returns 0 if valid, 1 otherwise.
 *
 * NOTE(review): strcmp() assumes string_header is NUL-terminated inside
 * its E4_L1_STRING_HEADER bytes — true for the two accepted signatures
 * (15 chars + NUL), since a non-matching unterminated header simply
 * fails both comparisons within the 16-byte field.
 */
static int check_dsp_e4(const u8 *dsp, int len)
{
	int i;
	struct l1_code *p = (struct l1_code *) dsp;
	unsigned int sum = p->code - dsp;	/* bytes consumed by the fixed header */

	if (len < sum)
		return 1;

	if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 &&
	    strcmp("STRATIPHY ANEXB", p->string_header) != 0)
		return 1;

	for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) {
		struct block_index *blockidx;
		u8 blockno = p->page_number_to_block_index[i];

		if (blockno >= E4_NO_SWAPPAGE_HEADERS)	/* page absent */
			continue;
		do {
			u64 l;	/* 64-bit so offset + size cannot overflow */

			/* chained block indices must stay in range too */
			if (blockno >= E4_NO_SWAPPAGE_HEADERS)
				return 1;

			blockidx = &p->page_header[blockno++];
			if ((u8 *)(blockidx + 1) - dsp >= len)
				return 1;

			if (le16_to_cpu(blockidx->PageNumber) != i)
				return 1;

			l = E4_PAGE_BYTES(blockidx->PageSize);
			sum += l;
			l += le32_to_cpu(blockidx->PageOffset);
			if (l > len)
				return 1;

			/* zero is zero regardless of endianness */
		} while (blockidx->NotLastBlock);
	}

	return (sum == len) ? 0 : 1;
}
/*
 * Send @size bytes of @data to the modem's IDMA bulk pipe.
 *
 * The payload is duplicated into kernel memory first (usb_bulk_msg
 * needs a DMA-able buffer).  Returns 0 on a complete transfer, -ENOMEM
 * on allocation failure, -EIO on a short write, or the usb_bulk_msg()
 * error.
 */
static int uea_idma_write(struct uea_softc *sc, const void *data, u32 size)
{
	u8 *dma_buf;
	int done, ret;

	dma_buf = kmemdup(data, size, GFP_KERNEL);
	if (!dma_buf) {
		uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
		return -ENOMEM;
	}

	ret = usb_bulk_msg(sc->usb_dev,
			   usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
			   dma_buf, size, &done, BULK_TIMEOUT);
	kfree(dma_buf);

	if (ret < 0)
		return ret;
	if (size != done) {
		uea_err(INS_TO_USBDEV(sc), "size != bytes_read %d %d\n", size,
			done);
		return -EIO;
	}

	return 0;
}
/*
 * Fetch and sanity-check the DSP firmware matching the chip generation
 * and annex (ISDN vs POTS).  On success sc->dsp_firm holds the
 * validated image; on failure it is left NULL and a negative error is
 * returned (-EILSEQ for a corrupted image).
 */
static int request_dsp(struct uea_softc *sc)
{
	const char *dsp_name;
	int ret;

	switch (UEA_CHIP_VERSION(sc)) {
	case EAGLE_IV:
		dsp_name = IS_ISDN(sc) ? DSP4I_FIRMWARE : DSP4P_FIRMWARE;
		break;
	case ADI930:
		dsp_name = IS_ISDN(sc) ? DSP9I_FIRMWARE : DSP9P_FIRMWARE;
		break;
	default:
		dsp_name = IS_ISDN(sc) ? DSPEI_FIRMWARE : DSPEP_FIRMWARE;
		break;
	}

	ret = request_firmware(&sc->dsp_firm, dsp_name, &sc->usb_dev->dev);
	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
		       "requesting firmware %s failed with error %d\n",
		       dsp_name, ret);
		return ret;
	}

	/* reject images whose internal layout points outside the blob */
	ret = (UEA_CHIP_VERSION(sc) == EAGLE_IV) ?
		check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size) :
		check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size);
	if (ret) {
		uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
		       dsp_name);
		release_firmware(sc->dsp_firm);
		sc->dsp_firm = NULL;
		return -EILSEQ;
	}

	return 0;
}
/*
 * The uea_load_page_e1() function must be called within a process context
 *
 * Work handler: send the DSP page requested by the modem (sc->pageno /
 * sc->ovl, filled in by the interrupt dispatch path) block by block
 * over the IDMA pipe, each block preceded by a block_info_e1 header.
 * The image layout was validated by check_dsp_e1() at load time.
 */
static void uea_load_page_e1(struct work_struct *work)
{
	struct uea_softc *sc = container_of(work, struct uea_softc, task);
	u16 pageno = sc->pageno;
	u16 ovl = sc->ovl;
	struct block_info_e1 bi;

	const u8 *p;
	u8 pagecount, blockcount;
	u16 blockaddr, blocksize;
	u32 pageoffset;
	int i;

	/* reload firmware when reboot start and it's loaded already */
	if (ovl == 0 && pageno == 0) {
		release_firmware(sc->dsp_firm);
		sc->dsp_firm = NULL;
	}

	if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
		return;

	/* byte 0: page count, then one 32-bit LE offset per page */
	p = sc->dsp_firm->data;
	pagecount = FW_GET_BYTE(p);
	p += 1;

	if (pageno >= pagecount)
		goto bad1;

	p += 4 * pageno;
	pageoffset = get_unaligned_le32(p);
	if (pageoffset == 0)	/* page absent from this image */
		goto bad1;

	p = sc->dsp_firm->data + pageoffset;
	blockcount = FW_GET_BYTE(p);
	p += 1;

	uea_dbg(INS_TO_USBDEV(sc),
	       "sending %u blocks for DSP page %u\n", blockcount, pageno);

	bi.wHdr = cpu_to_le16(UEA_BIHDR);
	bi.wOvl = cpu_to_le16(ovl);
	/* high bit flags the offset as an overlay reference — TODO confirm */
	bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);

	for (i = 0; i < blockcount; i++) {
		blockaddr = get_unaligned_le16(p);
		p += 2;

		blocksize = get_unaligned_le16(p);
		p += 2;

		bi.wSize = cpu_to_le16(blocksize);
		bi.wAddress = cpu_to_le16(blockaddr);
		bi.wLast = cpu_to_le16((i == blockcount - 1) ? 1 : 0);

		/* send block info through the IDMA pipe */
		if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE))
			goto bad2;

		/* send block data through the IDMA pipe */
		if (uea_idma_write(sc, p, blocksize))
			goto bad2;

		p += blocksize;
	}

	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", i);
	return;
bad1:
	uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
}
/*
 * Send one e4 DSP page — all of its chained blocks — over the IDMA
 * pipe, each block preceded by a big-endian block_info_e4 header.
 * @boot is forwarded as bBootPage in every header.  The chain walk
 * stops after the block whose NotLastBlock field is zero; bounds were
 * validated by check_dsp_e4() at load time.
 */
static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
{
	struct block_info_e4 bi;
	struct block_index *blockidx;
	struct l1_code *p = (struct l1_code *) sc->dsp_firm->data;
	u8 blockno = p->page_number_to_block_index[pageno];

	bi.wHdr = cpu_to_be16(UEA_BIHDR);
	bi.bBootPage = boot;
	bi.bPageNumber = pageno;
	bi.wReserved = cpu_to_be16(UEA_RESERVED);

	do {
		const u8 *blockoffset;
		unsigned int blocksize;

		blockidx = &p->page_header[blockno];
		blocksize = E4_PAGE_BYTES(blockidx->PageSize);
		blockoffset = sc->dsp_firm->data + le32_to_cpu(
							blockidx->PageOffset);

		/* note: firmware fields are LE, the wire header is BE */
		bi.dwSize = cpu_to_be32(blocksize);
		bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));

		uea_dbg(INS_TO_USBDEV(sc),
		       "sending block %u for DSP page "
					"%u size %u address %x\n",
		       blockno, pageno, blocksize,
		       le32_to_cpu(blockidx->PageAddress));

		/* send block info through the IDMA pipe */
		if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
			goto bad;

		/* send block data through the IDMA pipe */
		if (uea_idma_write(sc, blockoffset, blocksize))
			goto bad;

		blockno++;
	} while (blockidx->NotLastBlock);

	return;
bad:
	uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno);
	return;
}
/*
 * Work handler: send one Eagle-IV DSP page to the modem.
 *
 * Page 0 is special: it triggers a firmware reload, sends every boot
 * page (pages whose PageSize has the boot bit set), and finishes with a
 * "start" block-info record (bPageNumber = 0xff).  Any other page is
 * forwarded directly to __uea_load_page_e4().  page_header[0].PageNumber
 * doubles as the total page count of the image.
 */
static void uea_load_page_e4(struct work_struct *work)
{
	struct uea_softc *sc = container_of(work, struct uea_softc, task);
	u8 pageno = sc->pageno;
	int i;
	struct block_info_e4 bi;
	struct l1_code *p;

	uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno);

	/* reload firmware when reboot start and it's loaded already */
	if (pageno == 0) {
		release_firmware(sc->dsp_firm);
		sc->dsp_firm = NULL;
	}

	if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
		return;

	p = (struct l1_code *) sc->dsp_firm->data;
	if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
		uea_err(INS_TO_USBDEV(sc), "invalid DSP "
						"page %u requested\n", pageno);
		return;
	}

	if (pageno != 0) {
		__uea_load_page_e4(sc, pageno, 0);
		return;
	}

	/*
	 * Fix: convert the __le16 page count before printing; passing the
	 * raw on-disk value to %u printed a byte-swapped number on
	 * big-endian hosts (the loop below already converts correctly).
	 */
	uea_dbg(INS_TO_USBDEV(sc),
	       "sending Main DSP page %u\n",
	       le16_to_cpu(p->page_header[0].PageNumber));

	for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) {
		if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize))
			__uea_load_page_e4(sc, i, 1);
	}

	uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n");

	bi.wHdr = cpu_to_be16(UEA_BIHDR);
	bi.bBootPage = 0;
	bi.bPageNumber = 0xff;	/* "start" marker, not a real page */
	bi.wReserved = cpu_to_be16(UEA_RESERVED);
	bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
	bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress));

	/* send block info through the IDMA pipe */
	if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
		uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n");
}
/*
 * Mark the in-flight CMV as acknowledged and wake wait_cmv_ack().
 * The flag must be set before the wake-up so the waiter's condition
 * holds; the BUG_ON catches an ack arriving while a previous one is
 * still unconsumed.
 */
static inline void wake_up_cmv_ack(struct uea_softc *sc)
{
	BUG_ON(sc->cmv_ack);
	sc->cmv_ack = 1;
	wake_up(&sc->sync_q);
}
/*
 * Sleep until the pending CMV is acknowledged (or the kthread is asked
 * to stop).  Returns 0 on ack, -ETIMEDOUT after ACK_TIMEOUT, -ENODEV
 * on kthread stop, or another negative error from the interruptible
 * wait.  The ack flag is always consumed (cleared).
 */
static inline int wait_cmv_ack(struct uea_softc *sc)
{
	int ret = uea_wait(sc, sc->cmv_ack, ACK_TIMEOUT);

	sc->cmv_ack = 0;

	uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n",
		jiffies_to_msecs(ret));

	if (ret < 0)
		return ret;
	return ret ? 0 : -ETIMEDOUT;
}
#define UCDC_SEND_ENCAPSULATED_COMMAND 0x00
/*
 * Issue a vendor "send encapsulated command" control transfer carrying
 * @size bytes of @data (duplicated into DMA-able memory first).
 * Returns 0 on a complete transfer, -ENOMEM on allocation failure,
 * -EIO on a short write, or the usb_control_msg() error.
 */
static int uea_request(struct uea_softc *sc,
		u16 value, u16 index, u16 size, const void *data)
{
	u8 *dma_buf = kmemdup(data, size, GFP_KERNEL);
	int sent;

	if (!dma_buf) {
		uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
		return -ENOMEM;
	}

	sent = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
			       UCDC_SEND_ENCAPSULATED_COMMAND,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       value, index, dma_buf, size, CTRL_TIMEOUT);
	kfree(dma_buf);

	if (sent < 0) {
		uea_err(INS_TO_USBDEV(sc), "usb_control_msg error %d\n", sent);
		return sent;
	}

	if (sent != size) {
		uea_err(INS_TO_USBDEV(sc),
			"usb_control_msg send only %d bytes (instead of %d)\n",
			sent, size);
		return -EIO;
	}

	return 0;
}
/*
 * Send one e1 CMV to the modem and wait for its acknowledgement.
 * The expected-reply descriptor (sc->cmv_dsc.e1) is recorded before
 * the request goes out so the interrupt dispatch path can match the
 * incoming reply; do not reorder these stores after uea_request().
 */
static int uea_cmv_e1(struct uea_softc *sc,
		u8 function, u32 address, u16 offset, u32 data)
{
	struct cmv_e1 cmv;
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
			"offset : 0x%04x, data : 0x%08x\n",
			E1_FUNCTION_TYPE(function),
			E1_FUNCTION_SUBTYPE(function),
			E1_GETSA1(address), E1_GETSA2(address),
			E1_GETSA3(address),
			E1_GETSA4(address), offset, data);

	/* we send a request, but we expect a reply */
	sc->cmv_dsc.e1.function = function | 0x2;	/* REQUEST* -> REPLY* subtype */
	sc->cmv_dsc.e1.idx++;
	sc->cmv_dsc.e1.address = address;
	sc->cmv_dsc.e1.offset = offset;

	cmv.wPreamble = cpu_to_le16(E1_PREAMBLE);
	cmv.bDirection = E1_HOSTTOMODEM;
	cmv.bFunction = function;
	cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
	put_unaligned_le32(address, &cmv.dwSymbolicAddress);
	cmv.wOffsetAddress = cpu_to_le16(offset);
	/* the data word goes out with its two 16-bit halves swapped */
	put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);

	ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START,
			sizeof(cmv), &cmv);
	if (ret < 0)
		return ret;
	ret = wait_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}
/*
 * Send one e4 CMV to the modem and wait for its acknowledgement.
 * As with the e1 variant, the expected-reply descriptor is recorded
 * before the request is sent so the reply can be matched; keep the
 * descriptor stores ahead of uea_request().  e4 CMV fields are
 * big-endian on the wire.
 */
static int uea_cmv_e4(struct uea_softc *sc,
		u16 function, u16 group, u16 address, u16 offset, u32 data)
{
	struct cmv_e4 cmv;
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	memset(&cmv, 0, sizeof(cmv));

	uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, "
		 "Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n",
		 E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function),
		 group, address, offset, data);

	/* we send a request, but we expect a reply */
	sc->cmv_dsc.e4.function = function | (0x1 << 4);	/* REQUEST* -> REPLY* */
	sc->cmv_dsc.e4.offset = offset;
	sc->cmv_dsc.e4.address = address;
	sc->cmv_dsc.e4.group = group;

	cmv.wFunction = cpu_to_be16(function);
	cmv.wGroup = cpu_to_be16(group);
	cmv.wAddress = cpu_to_be16(address);
	cmv.wOffset = cpu_to_be16(offset);
	cmv.dwData[0] = cpu_to_be32(data);

	ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START,
			sizeof(cmv), &cmv);
	if (ret < 0)
		return ret;
	ret = wait_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}
/*
 * Read one e1 CMV; on success the reply payload (sc->data, filled by
 * the dispatch path) is copied to *data.  Returns the uea_cmv_e1()
 * status, logging on failure.
 */
static inline int uea_read_cmv_e1(struct uea_softc *sc,
		u32 address, u16 offset, u32 *data)
{
	int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD),
			address, offset, 0);

	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
			"reading cmv failed with error %d\n", ret);
		return ret;
	}

	*data = sc->data;
	return ret;
}
/*
 * Read an e4 CMV of @size 16-bit words; on success the reply payload is
 * copied to data[0] (and data[1] when size > 2 — caller must provide
 * room).  Returns the uea_cmv_e4() status, logging on failure.
 */
static inline int uea_read_cmv_e4(struct uea_softc *sc,
		u8 size, u16 group, u16 address, u16 offset, u32 *data)
{
	int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
							E4_REQUESTREAD, size),
			group, address, offset, 0);

	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
			"reading cmv failed with error %d\n", ret);
		return ret;
	}

	data[0] = sc->data;
	/* size is in 16-bit word quantities */
	if (size > 2)
		data[1] = sc->data1;
	return ret;
}
/*
 * Write @data to an e1 CMV.  Returns the uea_cmv_e1() status, logging
 * on failure.
 */
static inline int uea_write_cmv_e1(struct uea_softc *sc,
		u32 address, u16 offset, u32 data)
{
	u8 fn = E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE);
	int ret;

	ret = uea_cmv_e1(sc, fn, address, offset, data);
	if (ret < 0)
		uea_err(INS_TO_USBDEV(sc),
			"writing cmv failed with error %d\n", ret);

	return ret;
}
/*
 * Write @data to an e4 CMV of @size 16-bit words.  Returns the
 * uea_cmv_e4() status, logging on failure.
 */
static inline int uea_write_cmv_e4(struct uea_softc *sc,
		u8 size, u16 group, u16 address, u16 offset, u32 data)
{
	u16 fn = E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size);
	int ret;

	ret = uea_cmv_e4(sc, fn, group, address, offset, data);
	if (ret < 0)
		uea_err(INS_TO_USBDEV(sc),
			"writing cmv failed with error %d\n", ret);

	return ret;
}
/*
 * Adjust the modem's internal bulk-mode timeout when the downstream
 * rate changes.  In bulk mode the modem has problems at high rates;
 * changing this internal timing helps, though the value itself is
 * mysterious (0 at/below 1 Mbit/s, 1 above — the original ADI 1 Mbit/s
 * timing from the Windows driver).  Skipped for the ADI930 (which
 * rejects the request with -EPIPE), in isochronous mode, and when the
 * rate is unchanged.
 */
static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate)
{
	u16 timeout;
	int ret;

	if (UEA_CHIP_VERSION(sc) == ADI930)
		return;
	if (altsetting[sc->modem_index] > 0)
		return;
	if (sc->stats.phy.dsrate == dsrate)
		return;

	timeout = (dsrate <= 1024*1024) ? 0 : 1;
	ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
	uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n",
		 timeout, ret < 0 ? " failed" : "");
}
/*
 * Monitor the modem and update the stat
 * return 0 if everything is ok
 * return < 0 if an error occurs (-EAGAIN reboot needed)
 *
 * Polls the e1 modem state via CMV reads; once operational, refreshes
 * rates, attenuation, margin, flow and error counters.  Each read is a
 * blocking CMV round-trip, so any failure aborts the whole pass.
 */
static int uea_stat_e1(struct uea_softc *sc)
{
	u32 data;
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	data = sc->stats.phy.state;	/* previous state, to detect the 0->operational edge */
	ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state);
	if (ret < 0)
		return ret;

	switch (GET_STATUS(sc->stats.phy.state)) {
	case 0: /* not yet synchronized */
		uea_dbg(INS_TO_USBDEV(sc),
		       "modem not yet synchronized\n");
		return 0;
	case 1: /* initialization */
		uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
		return 0;
	case 2: /* operational */
		uea_vdbg(INS_TO_USBDEV(sc), "modem operational\n");
		break;
	case 3: /* fail ... */
		uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
					" (may be try other cmv/dsp)\n");
		return -EAGAIN;
	case 4 ... 6: /* test state */
		uea_warn(INS_TO_USBDEV(sc),
				"modem in test mode - not supported\n");
		return -EAGAIN;
	case 7: /* fast-retain ... */
		uea_info(INS_TO_USBDEV(sc), "modem in fast-retain mode\n");
		return 0;
	default:
		uea_err(INS_TO_USBDEV(sc), "modem invalid SW mode %d\n",
			GET_STATUS(sc->stats.phy.state));
		return -EAGAIN;
	}

	/* just transitioned to operational: disable loopback, drop the DSP */
	if (GET_STATUS(data) != 2) {
		uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
		uea_info(INS_TO_USBDEV(sc), "modem operational\n");

		/* release the dsp firmware as it is not needed until
		 * the next failure
		 */
		release_firmware(sc->dsp_firm);
		sc->dsp_firm = NULL;
	}

	/* always update it as atm layer could not be init when we switch to
	 * operational state
	 */
	UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);

	/* wake up processes waiting for synchronization */
	wake_up(&sc->sync_q);

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags);
	if (ret < 0)
		return ret;
	sc->stats.phy.mflags |= sc->stats.phy.flags;

	/* in case of a flags ( for example delineation LOSS (& 0x10)),
	 * we check the status again in order to detect the failure earlier
	 */
	if (sc->stats.phy.flags) {
		uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
		       sc->stats.phy.flags);
		return 0;
	}

	/* RATE word: high half = downstream, low half = upstream, in 32 kbit/s units */
	ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data);
	if (ret < 0)
		return ret;

	uea_set_bulk_timeout(sc, (data >> 16) * 32);
	sc->stats.phy.dsrate = (data >> 16) * 32;
	sc->stats.phy.usrate = (data & 0xffff) * 32;
	/* kbit/s -> cells/s: 424 bits per ATM cell */
	UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);

	/* attenuation reported in half-dB units */
	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data);
	if (ret < 0)
		return ret;
	sc->stats.phy.dsattenuation = (data & 0xff) / 2;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data);
	if (ret < 0)
		return ret;
	sc->stats.phy.usattenuation = (data & 0xff) / 2;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, &sc->stats.phy.dsunc);
	if (ret < 0)
		return ret;

	/* only for atu-c */
	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr);
	if (ret < 0)
		return ret;

	/* only for atu-c */
	ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco);
	if (ret < 0)
		return ret;

	ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * Poll the Eagle IV modem status (installed as the sc->stat callback).
 *
 * Reads the PHY state CMV, drives the synchronization state machine and,
 * once the link is operational, refreshes the rate/attenuation/margin
 * statistics exported through sysfs.
 *
 * Returns 0 on success (including "not synchronized yet"), -EAGAIN to ask
 * the monitor thread for a modem reboot, or a negative error from the CMV
 * transport.
 */
static int uea_stat_e4(struct uea_softc *sc)
{
	u32 data;
	u32 tmp_arr[2];
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	/* remember the previous state to detect the -> operational edge */
	data = sc->stats.phy.state;

	/* XXX only need to be done before operationnal... */
	ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state);
	if (ret < 0)
		return ret;

	switch (sc->stats.phy.state) {
	case 0x0:	/* not yet synchronized */
	case 0x1:
	case 0x3:
	case 0x4:
		uea_dbg(INS_TO_USBDEV(sc), "modem not yet "
						"synchronized\n");
		return 0;
	case 0x5:	/* initialization */
	case 0x6:
	case 0x9:
	case 0xa:
		uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
		return 0;
	case 0x2:	/* fail ... */
		uea_info(INS_TO_USBDEV(sc), "modem synchronization "
				"failed (may be try other cmv/dsp)\n");
		return -EAGAIN;
	case 0x7:	/* operational */
		break;
	default:
		uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n",
						sc->stats.phy.state);
		return 0;
	}

	/* we just became operational (previous state was not 7) */
	if (data != 7) {
		uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
		uea_info(INS_TO_USBDEV(sc), "modem operational\n");

		/* release the dsp firmware as it is not needed until
		 * the next failure
		 */
		release_firmware(sc->dsp_firm);
		sc->dsp_firm = NULL;
	}

	/* always update it as atm layer could not be init when we switch to
	 * operational state
	 */
	UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);

	/* wake up processes waiting for synchronization */
	wake_up(&sc->sync_q);

	/* TODO improve this state machine :
	 * we need some CMV info : what they do and their unit
	 * we should find the equivalent of eagle3- CMV
	 */

	/* check flags */
	ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags);
	if (ret < 0)
		return ret;
	sc->stats.phy.mflags |= sc->stats.phy.flags;

	/* in case of a flags ( for example delineation LOSS (& 0x10)),
	 * we check the status again in order to detect the failure earlier
	 */
	if (sc->stats.phy.flags) {
		uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
			sc->stats.phy.flags);
		if (sc->stats.phy.flags & 1) /* delineation LOSS */
			return -EAGAIN;
		if (sc->stats.phy.flags & 0x4000) /* Reset Flag */
			return -EAGAIN;
		return 0;
	}

	/* rate data may be in upper or lower half of 64 bit word, strange */
	ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr);
	if (ret < 0)
		return ret;
	data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
	sc->stats.phy.usrate = data / 1000;

	ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr);
	if (ret < 0)
		return ret;
	data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
	/* scale the bulk URB timeout with the downstream rate */
	uea_set_bulk_timeout(sc, data / 1000);
	sc->stats.phy.dsrate = data / 1000;
	UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);

	ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data);
	if (ret < 0)
		return ret;
	sc->stats.phy.dsattenuation = data / 10;

	ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data);
	if (ret < 0)
		return ret;
	sc->stats.phy.usattenuation = data / 10;

	ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data);
	if (ret < 0)
		return ret;
	sc->stats.phy.dsmargin = data / 2;

	ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data);
	if (ret < 0)
		return ret;
	/* NOTE(review): dsmargin above divides by 2 but usmargin by 10 —
	 * presumably intentional per-CMV scaling; confirm against CMV docs */
	sc->stats.phy.usmargin = data / 10;

	return 0;
}
/*
 * Build the CMV firmware file name for this modem into @cmv_name.
 *
 * If the user supplied a name through the cmv_file module parameter it is
 * used verbatim; otherwise "CMV<chip><line>.bin" is derived from the chip
 * version and line type.  @ver == 2 appends the ".v2" suffix.
 */
static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
{
	char default_name[] = "CMVxy.bin";
	char *name;

	/* cmv_file is a writable module parameter; hold the param lock */
	kernel_param_lock(THIS_MODULE);

	name = cmv_file[sc->modem_index];
	if (name == NULL) {
		/* no override: derive the name from chip and line type */
		switch (UEA_CHIP_VERSION(sc)) {
		case ADI930:
			default_name[3] = '9';
			break;
		case EAGLE_IV:
			default_name[3] = '4';
			break;
		default:
			default_name[3] = 'e';
			break;
		}
		default_name[4] = IS_ISDN(sc) ? 'i' : 'p';
		name = default_name;
	}

	strcpy(cmv_name, FW_DIR);
	strlcat(cmv_name, name, UEA_FW_NAME_MAX);
	if (ver == 2)
		strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);

	kernel_param_unlock(THIS_MODULE);
}
/*
 * Load a version-1 CMV file: one count byte followed by <count>
 * struct uea_cmvs_v1 entries.
 *
 * On success, *cmvs points inside the firmware blob (owned by *fw, which
 * the caller must release) and the entry count is returned.  On error the
 * firmware is released here and a negative errno is returned.
 */
static int request_cmvs_old(struct uea_softc *sc,
		 void **cmvs, const struct firmware **fw)
{
	int ret, size;
	u8 *data;
	char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */

	cmvs_file_name(sc, cmv_name, 1);
	ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
		       "requesting firmware %s failed with error %d\n",
		       cmv_name, ret);
		return ret;
	}

	data = (u8 *) (*fw)->data;
	size = (*fw)->size;
	/* need at least the count byte ... */
	if (size < 1)
		goto err_fw_corrupted;

	/* ... and the blob size must match the count exactly */
	if (size != *data * sizeof(struct uea_cmvs_v1) + 1)
		goto err_fw_corrupted;

	*cmvs = (void *)(data + 1);
	return *data;

err_fw_corrupted:
	uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
	release_firmware(*fw);
	return -EILSEQ;
}
/*
 * Load a version-2 CMV file: "cmv2" magic, little-endian CRC32, count
 * byte, then <count> struct uea_cmvs_v2 entries.
 *
 * If the v2 file is missing or corrupted and the caller accepts v1
 * (*ver == 1 on entry), fall back to request_cmvs_old().  On success *ver
 * is set to the version actually loaded, *cmvs points into the blob
 * (owned by *fw) and the entry count is returned; on error the firmware
 * is released and a negative errno is returned.
 */
static int request_cmvs(struct uea_softc *sc,
		 void **cmvs, const struct firmware **fw, int *ver)
{
	int ret, size;
	u32 crc;
	u8 *data;
	char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */

	cmvs_file_name(sc, cmv_name, 2);
	ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
	if (ret < 0) {
		/* if caller can handle old version, try to provide it */
		if (*ver == 1) {
			uea_warn(INS_TO_USBDEV(sc), "requesting "
							"firmware %s failed, "
				"try to get older cmvs\n", cmv_name);
			return request_cmvs_old(sc, cmvs, fw);
		}
		uea_err(INS_TO_USBDEV(sc),
		       "requesting firmware %s failed with error %d\n",
		       cmv_name, ret);
		return ret;
	}

	size = (*fw)->size;
	data = (u8 *) (*fw)->data;
	/* check the "cmv2" magic */
	if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
		if (*ver == 1) {
			uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted,"
				" try to get older cmvs\n", cmv_name);
			release_firmware(*fw);
			return request_cmvs_old(sc, cmvs, fw);
		}
		goto err_fw_corrupted;
	}

	*ver = 2;

	data += 4;
	size -= 4;
	/* need the 4-byte CRC plus at least the count byte */
	if (size < 5)
		goto err_fw_corrupted;

	crc = get_unaligned_le32(data);
	data += 4;
	size -= 4;
	/* the CRC covers everything after the stored checksum */
	if (crc32_be(0, data, size) != crc)
		goto err_fw_corrupted;

	/* remaining size must match the count byte exactly */
	if (size != *data * sizeof(struct uea_cmvs_v2) + 1)
		goto err_fw_corrupted;

	*cmvs = (void *) (data + 1);
	return *data;

err_fw_corrupted:
	uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
	release_firmware(*fw);
	return -EILSEQ;
}
static int uea_send_cmvs_e1(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 1; /* we can handle v1 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 1) {
struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr;
uea_warn(INS_TO_USBDEV(sc), "use deprecated cmvs version, "
"please update your firmware\n");
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v1[i].address),
get_unaligned_le16(&cmvs_v1[i].offset),
get_unaligned_le32(&cmvs_v1[i].data));
if (ret < 0)
goto out;
}
} else if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v2[i].address),
(u16) get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
static int uea_send_cmvs_e4(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 2; /* we can only handle v2 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
/* XXX don't read the 3th byte as it is always 6 */
ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e4(sc, 1,
get_unaligned_le32(&cmvs_v2[i].group),
get_unaligned_le32(&cmvs_v2[i].address),
get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
/* Start boot post firmware modem:
 * - send reset commands through usb control pipe
 * - start workqueue for DSP loading
 * - send CMV options to modem
 *
 * Returns 0 on success or a negative errno; the caller (monitor thread)
 * retries on failure.
 */
static int uea_start_reset(struct uea_softc *sc)
{
	u16 zero = 0;	/* ;-) */
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	uea_info(INS_TO_USBDEV(sc), "(re)booting started\n");

	/* mask interrupt */
	sc->booting = 1;
	/* We need to set this here because, a ack timeout could have occurred,
	 * but before we start the reboot, the ack occurs and set this to 1.
	 * So we will failed to wait Ready CMV.
	 */
	sc->cmv_ack = 0;
	UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);

	/* reset statistics */
	memset(&sc->stats, 0, sizeof(struct uea_stats));

	/* tell the modem that we want to boot in IDMA mode */
	uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
	uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL);

	/* enter reset mode */
	uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);

	/* original driver use 200ms, but windows driver use 100ms */
	ret = uea_wait(sc, 0, msecs_to_jiffies(100));
	if (ret < 0)
		return ret;

	/* leave reset mode */
	uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);

	if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
		/* clear tx and rx mailboxes */
		uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
		uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
		uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
	}

	ret = uea_wait(sc, 0, msecs_to_jiffies(1000));
	if (ret < 0)
		return ret;

	/* arm the CMV ack matcher for the MODEMREADY directive */
	if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
		sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
							E4_MODEMREADY, 1);
	else
		sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
							E1_MODEMREADY);

	/* demask interrupt */
	sc->booting = 0;

	/* start loading DSP */
	sc->pageno = 0;
	sc->ovl = 0;
	schedule_work(&sc->task);

	/* wait for modem ready CMV */
	ret = wait_cmv_ack(sc);
	if (ret < 0)
		return ret;

	uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n");

	/* chip-specific CMV upload (uea_send_cmvs_e1/e4) */
	ret = sc->send_cmvs(sc);
	if (ret < 0)
		return ret;

	sc->reset = 0;
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}
/*
 * Modem monitor thread.
 *
 * Reboots the modem after an error or an explicit reset request, then
 * polls its status through the chip-specific sc->stat callback.  When the
 * modem asked for an immediate reboot (-EAGAIN) the 1 s pause is skipped.
 */
static int uea_kthread(void *data)
{
	struct uea_softc *sc = data;
	int status = -EAGAIN;

	set_freezable();
	uea_enters(INS_TO_USBDEV(sc));
	while (!kthread_should_stop()) {
		if (status < 0 || sc->reset)
			status = uea_start_reset(sc);
		if (!status)
			status = sc->stat(sc);
		if (status != -EAGAIN)
			uea_wait(sc, 0, msecs_to_jiffies(1000));
		try_to_freeze();
	}
	uea_leaves(INS_TO_USBDEV(sc));
	return status;
}
/* Load second usb firmware for ADI930 chip.
 *
 * Streams the FPGA bitstream to the device in 64-byte control transfers,
 * signals end-of-transfer, then de-asserts the modem reset line.
 * Returns 0 on success or a negative errno.
 */
static int load_XILINX_firmware(struct uea_softc *sc)
{
	const struct firmware *fw_entry;
	int ret, size, u, ln;
	const u8 *pfw;
	u8 value;
	char *fw_name = FPGA930_FIRMWARE;

	uea_enters(INS_TO_USBDEV(sc));

	ret = request_firmware(&fw_entry, fw_name, &sc->usb_dev->dev);
	if (ret) {
		uea_err(INS_TO_USBDEV(sc), "firmware %s is not available\n",
		       fw_name);
		goto err0;
	}

	pfw = fw_entry->data;
	size = fw_entry->size;
	/* the FPGA930 bitstream has a fixed size; anything else is corrupt */
	if (size != 0x577B) {
		uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
		       fw_name);
		ret = -EILSEQ;
		goto err1;
	}

	/* upload in chunks of at most 64 bytes */
	for (u = 0; u < size; u += ln) {
		ln = min(size - u, 64);
		ret = uea_request(sc, 0xe, 0, ln, pfw + u);
		if (ret < 0) {
			uea_err(INS_TO_USBDEV(sc),
			       "elsa download data failed (%d)\n", ret);
			goto err1;
		}
	}

	/* finish to send the fpga */
	ret = uea_request(sc, 0xe, 1, 0, NULL);
	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
				"elsa download data failed (%d)\n", ret);
		goto err1;
	}

	/* Tell the modem we finish : de-assert reset */
	value = 0;
	ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
	if (ret < 0)
		uea_err(sc->usb_dev, "elsa de-assert failed with error"
				" %d\n", ret);

err1:
	release_firmware(fw_entry);
err0:
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}
/* The modem send us an ack. First with check if it right.
 *
 * Validates an incoming E1 CMV against the descriptor of the request we
 * are waiting for (sc->cmv_dsc.e1); on a match, stores the payload in
 * sc->data and wakes the waiter.  Runs from the interrupt URB callback.
 */
static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
	struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
		goto bad1;

	if (cmv->bDirection != E1_MODEMTOHOST)
		goto bad1;

	/* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to
	 * the first MEMACCESS cmv. Ignore it...
	 */
	if (cmv->bFunction != dsc->function) {
		if (UEA_CHIP_VERSION(sc) == ADI930
				&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
			/* patch the bogus reply so the checks below pass */
			cmv->wIndex = cpu_to_le16(dsc->idx);
			put_unaligned_le32(dsc->address,
						&cmv->dwSymbolicAddress);
			cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
		} else
			goto bad2;
	}

	if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
							E1_MODEMREADY)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
	    get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
	    le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
		goto bad2;

	sc->data = get_unaligned_le32(&cmv->dwData);
	/* the device delivers the two 16-bit halves swapped */
	sc->data = sc->data << 16 | sc->data >> 16;

	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E1_FUNCTION_TYPE(cmv->bFunction),
			E1_FUNCTION_SUBTYPE(cmv->bFunction));
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad1:
	uea_err(INS_TO_USBDEV(sc), "invalid cmv received, "
			"wPreamble %d, bDirection %d\n",
			le16_to_cpu(cmv->wPreamble), cmv->bDirection);
	uea_leaves(INS_TO_USBDEV(sc));
}
/* The modem send us an ack. First with check if it right.
 *
 * Validates an incoming E4 CMV against the descriptor of the request we
 * are waiting for (sc->cmv_dsc.e4); on a match, stores the two payload
 * words in sc->data/sc->data1 and wakes the waiter.  Runs from the
 * interrupt URB callback.
 *
 * Fix: the "unexpected cmv" message decoded the raw big-endian wFunction
 * field, printing garbage function/subfunction numbers on little-endian
 * hosts.  Convert with be16_to_cpu() first, as every other use of
 * wFunction in this function already does.
 */
static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
	struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
		be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
		be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
		be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));

	if (be16_to_cpu(cmv->wFunction) != dsc->function)
		goto bad2;

	if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
						E4_MODEMREADY, 1)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
	    be16_to_cpu(cmv->wGroup) != dsc->group ||
	    be16_to_cpu(cmv->wAddress) != dsc->address)
		goto bad2;

	sc->data = be32_to_cpu(cmv->dwData[0]);
	sc->data1 = be32_to_cpu(cmv->dwData[1]);
	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E4_FUNCTION_TYPE(be16_to_cpu(cmv->wFunction)),
			E4_FUNCTION_SUBTYPE(be16_to_cpu(cmv->wFunction)));
	uea_leaves(INS_TO_USBDEV(sc));
	return;
}
static void uea_schedule_load_page_e1(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
schedule_work(&sc->task);
}
/* Record the DSP page an E4 modem asked for and queue the upload work. */
static void uea_schedule_load_page_e4(struct uea_softc *sc,
						struct intr_pkt *intr)
{
	sc->pageno = intr->e4_bSwapPageNo;
	schedule_work(&sc->task);
}
/*
 * interrupt handler
 *
 * URB completion callback for the interrupt endpoint.  Dispatches
 * device-to-host notifications (DSP page-swap requests and incoming CMV
 * acks) through the chip-specific callbacks, then resubmits the URB.
 * A failed URB is not resubmitted (device presumably going away).
 * Runs in interrupt context.
 */
static void uea_intr(struct urb *urb)
{
	struct uea_softc *sc = urb->context;
	struct intr_pkt *intr = urb->transfer_buffer;
	int status = urb->status;

	uea_enters(INS_TO_USBDEV(sc));

	if (unlikely(status < 0)) {
		uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
		       status);
		return;
	}

	/* device-to-host interrupt */
	if (intr->bType != 0x08 || sc->booting) {
		uea_err(INS_TO_USBDEV(sc), "wrong interrupt\n");
		goto resubmit;
	}

	switch (le16_to_cpu(intr->wInterrupt)) {
	case INT_LOADSWAPPAGE:
		sc->schedule_load_page(sc, intr);
		break;

	case INT_INCOMINGCMV:
		sc->dispatch_cmv(sc, intr);
		break;

	default:
		uea_err(INS_TO_USBDEV(sc), "unknown interrupt %u\n",
		       le16_to_cpu(intr->wInterrupt));
	}

resubmit:
	usb_submit_urb(sc->urb_int, GFP_ATOMIC);
}
/*
 * Start the modem : init the data and start kernel thread.
 *
 * Sets up the chip-specific callbacks, loads the ADI930 FPGA firmware
 * when needed, allocates and submits the interrupt URB and creates (but
 * does not start) the monitor thread.  Returns 0 on success or a negative
 * errno, unwinding in reverse order on failure.
 */
static int uea_boot(struct uea_softc *sc, struct usb_interface *intf)
{
	struct intr_pkt *intr;
	int ret = -ENOMEM;
	int size;

	uea_enters(INS_TO_USBDEV(sc));

	/* pick the E1 or E4 variant of every chip-specific hook */
	if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
		size = E4_INTR_PKT_SIZE;
		sc->dispatch_cmv = uea_dispatch_cmv_e4;
		sc->schedule_load_page = uea_schedule_load_page_e4;
		sc->stat = uea_stat_e4;
		sc->send_cmvs = uea_send_cmvs_e4;
		INIT_WORK(&sc->task, uea_load_page_e4);
	} else {
		size = E1_INTR_PKT_SIZE;
		sc->dispatch_cmv = uea_dispatch_cmv_e1;
		sc->schedule_load_page = uea_schedule_load_page_e1;
		sc->stat = uea_stat_e1;
		sc->send_cmvs = uea_send_cmvs_e1;
		INIT_WORK(&sc->task, uea_load_page_e1);
	}

	init_waitqueue_head(&sc->sync_q);

	if (UEA_CHIP_VERSION(sc) == ADI930)
		load_XILINX_firmware(sc);

	/* guard against a malformed device without an interrupt endpoint */
	if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
		ret = -ENODEV;
		goto err0;
	}

	intr = kmalloc(size, GFP_KERNEL);
	if (!intr)
		goto err0;

	sc->urb_int = usb_alloc_urb(0, GFP_KERNEL);
	if (!sc->urb_int)
		goto err1;

	usb_fill_int_urb(sc->urb_int, sc->usb_dev,
			 usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
			 intr, size, uea_intr, sc,
			 intf->cur_altsetting->endpoint[0].desc.bInterval);

	ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
		       "urb submission failed with error %d\n", ret);
		goto err1;
	}

	/* Create worker thread, but don't start it here. Start it after
	 * all usbatm generic initialization is done.
	 */
	sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
	if (IS_ERR(sc->kthread)) {
		uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
		ret = PTR_ERR(sc->kthread);
		goto err2;
	}

	uea_leaves(INS_TO_USBDEV(sc));
	return 0;

err2:
	usb_kill_urb(sc->urb_int);
err1:
	usb_free_urb(sc->urb_int);
	sc->urb_int = NULL;
	kfree(intr);
err0:
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}
/*
 * Stop the modem : kill kernel thread and free data.
 *
 * Teardown order matters: stop the monitor thread first (so nothing
 * reschedules work), put the modem in loopback, kill the interrupt URB
 * before freeing its buffer, then flush the page-load work item once no
 * one can schedule it any more.
 */
static void uea_stop(struct uea_softc *sc)
{
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	ret = kthread_stop(sc->kthread);
	uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret);

	uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);

	usb_kill_urb(sc->urb_int);
	kfree(sc->urb_int->transfer_buffer);
	usb_free_urb(sc->urb_int);

	/* flush the work item, when no one can schedule it */
	flush_work(&sc->task);

	release_firmware(sc->dsp_firm);
	uea_leaves(INS_TO_USBDEV(sc));
}
/* sysfs interface */
/* Map a sysfs device back to its uea_softc, or NULL if not bound yet. */
static struct uea_softc *dev_to_uea(struct device *dev)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct usbatm_data *usbatm;

	if (!intf)
		return NULL;

	usbatm = usb_get_intfdata(intf);
	return usbatm ? usbatm->driver_data : NULL;
}
/* sysfs read: raw PHY state word as 8 hex digits. */
static ssize_t stat_status_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct uea_softc *sc;
	int ret = -ENODEV;

	mutex_lock(&uea_mutex);
	sc = dev_to_uea(dev);
	if (sc)
		ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state);
	mutex_unlock(&uea_mutex);
	return ret;
}
/* sysfs write: any write requests a modem reboot; the content is ignored. */
static ssize_t stat_status_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct uea_softc *sc;
	int ret = -ENODEV;

	mutex_lock(&uea_mutex);
	sc = dev_to_uea(dev);
	if (sc) {
		sc->reset = 1;
		ret = count;
	}
	mutex_unlock(&uea_mutex);
	return ret;
}

static DEVICE_ATTR_RW(stat_status);
/*
 * sysfs read: human-readable modem state.
 *
 * Eagle IV reports raw state codes that are folded here into the same
 * five buckets the E1 GET_STATUS() macro produces, then mapped to text.
 */
static ssize_t stat_human_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret = -ENODEV;
	int modem_state;
	struct uea_softc *sc;

	mutex_lock(&uea_mutex);
	sc = dev_to_uea(dev);
	if (!sc)
		goto out;

	if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
		/* same state codes as in uea_stat_e4() */
		switch (sc->stats.phy.state) {
		case 0x0:	/* not yet synchronized */
		case 0x1:
		case 0x3:
		case 0x4:
			modem_state = 0;
			break;
		case 0x5:	/* initialization */
		case 0x6:
		case 0x9:
		case 0xa:
			modem_state = 1;
			break;
		case 0x7:	/* operational */
			modem_state = 2;
			break;
		case 0x2:	/* fail ... */
			modem_state = 3;
			break;
		default:	/* unknown */
			modem_state = 4;
			break;
		}
	} else
		modem_state = GET_STATUS(sc->stats.phy.state);

	switch (modem_state) {
	case 0:
		ret = sprintf(buf, "Modem is booting\n");
		break;
	case 1:
		ret = sprintf(buf, "Modem is initializing\n");
		break;
	case 2:
		ret = sprintf(buf, "Modem is operational\n");
		break;
	case 3:
		ret = sprintf(buf, "Modem synchronization failed\n");
		break;
	default:
		ret = sprintf(buf, "Modem state is unknown\n");
		break;
	}
out:
	mutex_unlock(&uea_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stat_human_status);
/*
 * sysfs read: delineation status derived from the PHY flag word.
 * The flag bits differ between Eagle IV and the older chips.
 */
static ssize_t stat_delin_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct uea_softc *sc;
	char *state = "GOOD";
	int ret = -ENODEV;

	mutex_lock(&uea_mutex);
	sc = dev_to_uea(dev);
	if (!sc)
		goto unlock;

	if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
		if (sc->stats.phy.flags & 0x4000)
			state = "RESET";
		else if (sc->stats.phy.flags & 0x0001)
			state = "LOSS";
	} else {
		if (sc->stats.phy.flags & 0x0C00)
			state = "ERROR";
		else if (sc->stats.phy.flags & 0x0030)
			state = "LOSS";
	}

	ret = sprintf(buf, "%s\n", state);
unlock:
	mutex_unlock(&uea_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stat_delin);
/*
 * UEA_ATTR(name, reset) - generate a read-only sysfs attribute
 * "stat_<name>" that prints sc->stats.phy.<name> as 8 hex digits.
 * When @reset is non-zero the counter is cleared after each read.
 */
#define UEA_ATTR(name, reset)					\
								\
static ssize_t stat_##name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	int ret = -ENODEV;					\
	struct uea_softc *sc;					\
								\
	mutex_lock(&uea_mutex);					\
	sc = dev_to_uea(dev);					\
	if (!sc)						\
		goto out;					\
	ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name);	\
	if (reset)						\
		sc->stats.phy.name = 0;				\
out:								\
	mutex_unlock(&uea_mutex);				\
	return ret;						\
}								\
								\
static DEVICE_ATTR_RO(stat_##name)
/* PHY statistics exported through sysfs; only mflags is read-and-clear */
UEA_ATTR(mflags, 1);
UEA_ATTR(vidcpe, 0);
UEA_ATTR(usrate, 0);
UEA_ATTR(dsrate, 0);
UEA_ATTR(usattenuation, 0);
UEA_ATTR(dsattenuation, 0);
UEA_ATTR(usmargin, 0);
UEA_ATTR(dsmargin, 0);
UEA_ATTR(txflow, 0);
UEA_ATTR(rxflow, 0);
UEA_ATTR(uscorr, 0);
UEA_ATTR(dscorr, 0);
UEA_ATTR(usunc, 0);
UEA_ATTR(dsunc, 0);
UEA_ATTR(firmid, 0);
/* Retrieve the device End System Identifier (MAC).
 *
 * The ESI is stored as 12 hex digits in the USB serial-number string
 * descriptor.  Returns 0 on success, 1 on failure (missing/short string
 * or non-hex characters).
 *
 * Fix: hex_to_bin() returns -1 for invalid characters; the old code
 * silently folded that into the ESI bytes.  Validate each nibble and
 * fail instead (callers already treat any non-zero return as failure).
 */
static int uea_getesi(struct uea_softc *sc, u_char *esi)
{
	unsigned char mac_str[2 * ETH_ALEN + 1];
	int i;

	if (usb_string
	    (sc->usb_dev, sc->usb_dev->descriptor.iSerialNumber, mac_str,
	     sizeof(mac_str)) != 2 * ETH_ALEN)
		return 1;

	for (i = 0; i < ETH_ALEN; i++) {
		int hi = hex_to_bin(mac_str[2 * i]);
		int lo = hex_to_bin(mac_str[2 * i + 1]);

		if (hi < 0 || lo < 0)
			return 1;
		esi[i] = hi * 16 + lo;
	}

	return 0;
}
/* ATM stuff */

/* usbatm atm_start hook: fill in the ATM device ESI (MAC address). */
static int uea_atm_open(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
	struct uea_softc *sc = usbatm->driver_data;

	return uea_getesi(sc, atm_dev->esi);
}
/*
 * usbatm heavy_init hook: block until the modem reports an operational
 * line.  NOTE(review): the wait_event_interruptible() result is ignored,
 * so a signal makes this return success early — presumably intentional
 * best-effort behaviour; confirm before changing.
 */
static int uea_heavy(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	struct uea_softc *sc = usbatm->driver_data;

	wait_event_interruptible(sc->sync_q, IS_OPERATIONAL(sc));

	return 0;

}
static int claim_interface(struct usb_device *usb_dev,
struct usbatm_data *usbatm, int ifnum)
{
int ret;
struct usb_interface *intf = usb_ifnum_to_if(usb_dev, ifnum);
if (!intf) {
uea_err(usb_dev, "interface %d not found\n", ifnum);
return -ENODEV;
}
ret = usb_driver_claim_interface(&uea_driver, intf, usbatm);
if (ret != 0)
uea_err(usb_dev, "can't claim interface %d, error %d\n", ifnum,
ret);
return ret;
}
/* All sysfs attributes of the driver, registered via dev_groups */
static struct attribute *uea_attrs[] = {
	&dev_attr_stat_status.attr,
	&dev_attr_stat_mflags.attr,
	&dev_attr_stat_human_status.attr,
	&dev_attr_stat_delin.attr,
	&dev_attr_stat_vidcpe.attr,
	&dev_attr_stat_usrate.attr,
	&dev_attr_stat_dsrate.attr,
	&dev_attr_stat_usattenuation.attr,
	&dev_attr_stat_dsattenuation.attr,
	&dev_attr_stat_usmargin.attr,
	&dev_attr_stat_dsmargin.attr,
	&dev_attr_stat_txflow.attr,
	&dev_attr_stat_rxflow.attr,
	&dev_attr_stat_uscorr.attr,
	&dev_attr_stat_dscorr.attr,
	&dev_attr_stat_usunc.attr,
	&dev_attr_stat_dsunc.attr,
	&dev_attr_stat_firmid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(uea);
/*
 * usbatm bind hook: claim the data interfaces, allocate and initialise
 * the softc (modem index, annex, alternate setting for iso mode) and
 * boot the modem.  Only interface 0 (firmware/monitoring) triggers the
 * bind; the others are claimed from here.
 */
static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
		   const struct usb_device_id *id)
{
	struct usb_device *usb = interface_to_usbdev(intf);
	struct uea_softc *sc;
	int ret, ifnum = intf->altsetting->desc.bInterfaceNumber;
	unsigned int alt;

	uea_enters(usb);

	/* interface 0 is for firmware/monitoring */
	if (ifnum != UEA_INTR_IFACE_NO)
		return -ENODEV;

	usbatm->flags = (sync_wait[modem_index] ? 0 : UDSL_SKIP_HEAVY_INIT);

	/* interface 1 is for outbound traffic */
	ret = claim_interface(usb, usbatm, UEA_US_IFACE_NO);
	if (ret < 0)
		return ret;

	/* ADI930 has only 2 interfaces and inbound traffic is on interface 1 */
	if (UEA_CHIP_VERSION(id) != ADI930) {
		/* interface 2 is for inbound traffic */
		ret = claim_interface(usb, usbatm, UEA_DS_IFACE_NO);
		if (ret < 0)
			return ret;
	}

	sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	sc->usb_dev = usb;
	usbatm->driver_data = sc;
	sc->usbatm = usbatm;
	/* each probed modem consumes one slot of the module parameter arrays */
	sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0;
	sc->driver_info = id->driver_info;

	/* first try to use module parameter */
	if (annex[sc->modem_index] == 1)
		sc->annex = ANNEXA;
	else if (annex[sc->modem_index] == 2)
		sc->annex = ANNEXB;
	/* try to autodetect annex */
	else if (sc->driver_info & AUTO_ANNEX_A)
		sc->annex = ANNEXA;
	else if (sc->driver_info & AUTO_ANNEX_B)
		sc->annex = ANNEXB;
	else
		/* last resort: bit 7 of bcdDevice selects annex B */
		sc->annex = (le16_to_cpu
		(sc->usb_dev->descriptor.bcdDevice) & 0x80) ? ANNEXB : ANNEXA;

	alt = altsetting[sc->modem_index];
	/* ADI930 don't support iso */
	if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
		if (alt <= 8 &&
			usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
			uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
			uea_info(usb, "using iso mode\n");
			usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
		} else {
			uea_err(usb, "setting alternate %u failed for "
					"2 interface, using bulk mode\n", alt);
		}
	}

	ret = uea_boot(sc, intf);
	if (ret < 0)
		goto error;

	return 0;

error:
	kfree(sc);
	return ret;
}
/* usbatm unbind hook: stop the monitor thread and free the softc. */
static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	struct uea_softc *sc = usbatm->driver_data;

	uea_stop(sc);
	kfree(sc);
}
/* Hooks handed to the generic usbatm core */
static struct usbatm_driver uea_usbatm_driver = {
	.driver_name = "ueagle-atm",
	.bind = uea_bind,
	.atm_start = uea_atm_open,
	.unbind = uea_unbind,
	.heavy_init = uea_heavy,
	.bulk_in = UEA_BULK_DATA_PIPE,
	.bulk_out = UEA_BULK_DATA_PIPE,
	.isoc_in = UEA_ISO_DATA_PIPE,
};
/*
 * USB probe: pre-firmware devices just get their firmware uploaded and
 * re-enumerate; post-firmware devices go through the usbatm core, after
 * which the monitor thread created in uea_boot() is finally started.
 */
static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *usb = interface_to_usbdev(intf);
	int ret;

	uea_enters(usb);
	uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n",
	       le16_to_cpu(usb->descriptor.idVendor),
	       le16_to_cpu(usb->descriptor.idProduct),
	       le16_to_cpu(usb->descriptor.bcdDevice),
	       chip_name[UEA_CHIP_VERSION(id)]);

	usb_reset_device(usb);

	if (UEA_IS_PREFIRM(id))
		return uea_load_firmware(usb, UEA_CHIP_VERSION(id));

	ret = usbatm_usb_probe(intf, id, &uea_usbatm_driver);
	if (ret == 0) {
		struct usbatm_data *usbatm = usb_get_intfdata(intf);
		struct uea_softc *sc = usbatm->driver_data;

		/* Ensure carrier is initialized to off as early as possible */
		UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);

		/* Only start the worker thread when all init is done */
		wake_up_process(sc->kthread);
	}

	return ret;
}
/*
 * USB disconnect: forward to the usbatm core only for the monitoring
 * interface of a post-firmware device (pre-firmware devices expose a
 * single interface and never bound to usbatm).
 */
static void uea_disconnect(struct usb_interface *intf)
{
	struct usb_device *usb = interface_to_usbdev(intf);
	int ifnum = intf->altsetting->desc.bInterfaceNumber;
	uea_enters(usb);

	/* ADI930 has 2 interfaces and eagle 3 interfaces.
	 * Pre-firmware device has one interface
	 */
	if (usb->config->desc.bNumInterfaces != 1 && ifnum == 0) {
		/* uea_mutex serializes against the sysfs show/store handlers */
		mutex_lock(&uea_mutex);
		usbatm_usb_disconnect(intf);
		mutex_unlock(&uea_mutex);
		uea_info(usb, "ADSL device removed\n");
	}

	uea_leaves(usb);
}
/*
 * List of supported VID/PID.
 *
 * Each device appears twice: once in its pre-firmware state (PREFIRM,
 * only the USB firmware loader runs) and once after re-enumeration
 * (PSTFIRM, full ATM driver).  AUTO_ANNEX_* encodes the line annex when
 * the product ID implies it.
 */
static const struct usb_device_id uea_ids[] = {
	/* Analog Devices reference designs */
	{USB_DEVICE(ANALOG_VID,	ADI930_PID_PREFIRM),
		.driver_info = ADI930 | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	ADI930_PID_PSTFIRM),
		.driver_info = ADI930 | PSTFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_I_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_I_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_II_PID_PREFIRM),
		.driver_info = EAGLE_II | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_II_PID_PSTFIRM),
		.driver_info = EAGLE_II | PSTFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_IIC_PID_PREFIRM),
		.driver_info = EAGLE_II | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_IIC_PID_PSTFIRM),
		.driver_info = EAGLE_II | PSTFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_III_PID_PREFIRM),
		.driver_info = EAGLE_III | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_III_PID_PSTFIRM),
		.driver_info = EAGLE_III | PSTFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_IV_PID_PREFIRM),
		.driver_info = EAGLE_IV | PREFIRM},
	{USB_DEVICE(ANALOG_VID,	EAGLE_IV_PID_PSTFIRM),
		.driver_info = EAGLE_IV | PSTFIRM},
	/* Devolo modems */
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_I_A_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_I_A_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_I_B_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_I_B_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_II_A_PID_PREFIRM),
		.driver_info = EAGLE_II | PREFIRM},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_II_A_PID_PSTFIRM),
		.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_II_B_PID_PREFIRM),
		.driver_info = EAGLE_II | PREFIRM},
	{USB_DEVICE(DEVOLO_VID,	DEVOLO_EAGLE_II_B_PID_PSTFIRM),
		.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
	/* ELSA modems (ADI930 based) */
	{USB_DEVICE(ELSA_VID,	ELSA_PID_PREFIRM),
		.driver_info = ADI930 | PREFIRM},
	{USB_DEVICE(ELSA_VID,	ELSA_PID_PSTFIRM),
		.driver_info = ADI930 | PSTFIRM},
	{USB_DEVICE(ELSA_VID,	ELSA_PID_A_PREFIRM),
		.driver_info = ADI930 | PREFIRM},
	{USB_DEVICE(ELSA_VID,	ELSA_PID_A_PSTFIRM),
		.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
	{USB_DEVICE(ELSA_VID,	ELSA_PID_B_PREFIRM),
		.driver_info = ADI930 | PREFIRM},
	{USB_DEVICE(ELSA_VID,	ELSA_PID_B_PSTFIRM),
		.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
	/* US Robotics modems (Eagle I based) */
	{USB_DEVICE(USR_VID,	MILLER_A_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(USR_VID,	MILLER_A_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM  | AUTO_ANNEX_A},
	{USB_DEVICE(USR_VID,	MILLER_B_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(USR_VID,	MILLER_B_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM  | AUTO_ANNEX_B},
	{USB_DEVICE(USR_VID,	HEINEKEN_A_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(USR_VID,	HEINEKEN_A_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
	{USB_DEVICE(USR_VID,	HEINEKEN_B_PID_PREFIRM),
		.driver_info = EAGLE_I | PREFIRM},
	{USB_DEVICE(USR_VID,	HEINEKEN_B_PID_PSTFIRM),
		.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
	{}
};
/*
 * USB driver descriptor
 */
static struct usb_driver uea_driver = {
	.name = "ueagle-atm",
	.id_table = uea_ids,
	.probe = uea_probe,
	.disconnect = uea_disconnect,
	.dev_groups = uea_groups,	/* sysfs statistics attributes */
};
MODULE_DEVICE_TABLE(usb, uea_ids);
module_usb_driver(uea_driver);
MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(EAGLE_FIRMWARE);
MODULE_FIRMWARE(ADI930_FIRMWARE);
MODULE_FIRMWARE(EAGLE_I_FIRMWARE);
MODULE_FIRMWARE(EAGLE_II_FIRMWARE);
MODULE_FIRMWARE(EAGLE_III_FIRMWARE);
MODULE_FIRMWARE(EAGLE_IV_FIRMWARE);
MODULE_FIRMWARE(DSP4I_FIRMWARE);
MODULE_FIRMWARE(DSP4P_FIRMWARE);
MODULE_FIRMWARE(DSP9I_FIRMWARE);
MODULE_FIRMWARE(DSP9P_FIRMWARE);
MODULE_FIRMWARE(DSPEI_FIRMWARE);
MODULE_FIRMWARE(DSPEP_FIRMWARE);
MODULE_FIRMWARE(FPGA930_FIRMWARE);
MODULE_FIRMWARE(CMV4P_FIRMWARE);
MODULE_FIRMWARE(CMV4PV2_FIRMWARE);
MODULE_FIRMWARE(CMV4I_FIRMWARE);
MODULE_FIRMWARE(CMV4IV2_FIRMWARE);
MODULE_FIRMWARE(CMV9P_FIRMWARE);
MODULE_FIRMWARE(CMV9PV2_FIRMWARE);
MODULE_FIRMWARE(CMV9I_FIRMWARE);
MODULE_FIRMWARE(CMV9IV2_FIRMWARE);
MODULE_FIRMWARE(CMVEP_FIRMWARE);
MODULE_FIRMWARE(CMVEPV2_FIRMWARE);
MODULE_FIRMWARE(CMVEI_FIRMWARE);
MODULE_FIRMWARE(CMVEIV2_FIRMWARE);
/* ==== end of drivers/usb/atm/ueagle-atm.c (linux-master); next file: drivers/usb/atm/usbatm.c ==== */
// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* usbatm.c - Generic USB xDSL driver core
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
* Copyright (C) 2004, David Woodhouse, Roman Kagan
******************************************************************************/
/*
* Written by Johan Verrept, Duncan Sands ([email protected]) and David Woodhouse
*
* 1.7+: - See the check-in logs
*
* 1.6: - No longer opens a connection if the firmware is not loaded
* - Added support for the speedtouch 330
* - Removed the limit on the number of devices
* - Module now autoloads on device plugin
* - Merged relevant parts of sarlib
* - Replaced the kernel thread with a tasklet
* - New packet transmission code
* - Changed proc file contents
* - Fixed all known SMP races
* - Many fixes and cleanups
* - Various fixes by Oliver Neukum ([email protected])
*
* 1.5A: - Version for inclusion in 2.5 series kernel
* - Modifications by Richard Purdie ([email protected])
* - made compatible with kernel 2.5.6 onwards by changing
* usbatm_usb_send_data_context->urb to a pointer and adding code
* to alloc and free it
* - remove_wait_queue() added to usbatm_atm_processqueue_thread()
*
* 1.5: - fixed memory leak when atmsar_decode_aal5 returned NULL.
* (reported by [email protected])
*
* 1.4: - changed the spin_lock() under interrupt to spin_lock_irqsave()
* - unlink all active send urbs of a vcc that is being closed.
*
* 1.3.1: - added the version number
*
* 1.3: - Added multiple send urb support
* - fixed memory leak and vcc->tx_inuse starvation bug
* when not enough memory left in vcc.
*
* 1.2: - Fixed race condition in usbatm_usb_send_data()
* 1.1: - Turned off packet debugging
*
*/
#include "usbatm.h"
#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ratelimit.h>
/*
 * With VERBOSE_DEBUG defined, PACKETDEBUG() hex-dumps packet contents
 * (via usbatm_print_packet(), defined at the bottom of this file) and
 * vdbg() forwards to dev_dbg(); otherwise both compile away to nothing.
 */
#ifdef VERBOSE_DEBUG
static int usbatm_print_packet(struct usbatm_data *instance, const unsigned char *data, int len);
#define PACKETDEBUG(arg...)	usbatm_print_packet(arg)
#define vdbg(arg...)		dev_dbg(arg)
#else
#define PACKETDEBUG(arg...)
#define vdbg(arg...)
#endif
#define DRIVER_AUTHOR	"Johan Verrept, Duncan Sands <[email protected]>"
#define DRIVER_DESC	"Generic USB ATM/DSL I/O"

static const char usbatm_driver_name[] = "usbatm";

/* Hard limits and defaults for the urb/buffer module parameters below. */
#define UDSL_MAX_RCV_URBS		16
#define UDSL_MAX_SND_URBS		16
#define UDSL_MAX_BUF_SIZE		65536
#define UDSL_DEFAULT_RCV_URBS		4
#define UDSL_DEFAULT_SND_URBS		4
#define UDSL_DEFAULT_RCV_BUF_SIZE	3392	/* 64 * ATM_CELL_SIZE */
#define UDSL_DEFAULT_SND_BUF_SIZE	3392	/* 64 * ATM_CELL_SIZE */

/* 5-byte ATM cell header (53-byte cell minus 48-byte payload) */
#define ATM_CELL_HEADER	(ATM_CELL_SIZE - ATM_CELL_PAYLOAD)

#define THROTTLE_MSECS	100	/* delay to recover processing after urb submission fails */
/* Tunable urb counts and buffer sizes; validated in usbatm_usb_init(). */
static unsigned int num_rcv_urbs = UDSL_DEFAULT_RCV_URBS;
static unsigned int num_snd_urbs = UDSL_DEFAULT_SND_URBS;
static unsigned int rcv_buf_bytes = UDSL_DEFAULT_RCV_BUF_SIZE;
static unsigned int snd_buf_bytes = UDSL_DEFAULT_SND_BUF_SIZE;

module_param(num_rcv_urbs, uint, S_IRUGO);
MODULE_PARM_DESC(num_rcv_urbs,
		 "Number of urbs used for reception (range: 0-"
		 __MODULE_STRING(UDSL_MAX_RCV_URBS) ", default: "
		 __MODULE_STRING(UDSL_DEFAULT_RCV_URBS) ")");

module_param(num_snd_urbs, uint, S_IRUGO);
MODULE_PARM_DESC(num_snd_urbs,
		 "Number of urbs used for transmission (range: 0-"
		 __MODULE_STRING(UDSL_MAX_SND_URBS) ", default: "
		 __MODULE_STRING(UDSL_DEFAULT_SND_URBS) ")");

module_param(rcv_buf_bytes, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_buf_bytes,
		 "Size of the buffers used for reception, in bytes (range: 1-"
		 __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: "
		 __MODULE_STRING(UDSL_DEFAULT_RCV_BUF_SIZE) ")");

module_param(snd_buf_bytes, uint, S_IRUGO);
MODULE_PARM_DESC(snd_buf_bytes,
		 "Size of the buffers used for transmission, in bytes (range: 1-"
		 __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: "
		 __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")");
/* receive */
struct usbatm_vcc_data {
	/* vpi/vci lookup */
	struct list_head list;	/* entry on usbatm_data vcc_list */
	short vpi;
	int vci;
	struct atm_vcc *vcc;

	/* raw cell reassembly: received cell payloads accumulate here
	 * until an end-of-PDU cell completes the AAL5 frame */
	struct sk_buff *sarb;
};

/* send */
struct usbatm_control {
	struct atm_skb_data atm;
	u32 len;	/* bytes of this PDU not yet written into a tx buffer */
	u32 crc;	/* running AAL5 CRC over what has been written so far */
};

/* per-skb control block kept in skb->cb (size checked in usbatm_usb_init) */
#define UDSL_SKB(x)		((struct usbatm_control *)(x)->cb)
/* ATM */
static void usbatm_atm_dev_close(struct atm_dev *atm_dev);
static int usbatm_atm_open(struct atm_vcc *vcc);
static void usbatm_atm_close(struct atm_vcc *vcc);
static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg);
static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page);

/* Operations handed to the ATM layer via atm_dev_register(). */
static const struct atmdev_ops usbatm_atm_devops = {
	.dev_close	= usbatm_atm_dev_close,
	.open		= usbatm_atm_open,
	.close		= usbatm_atm_close,
	.ioctl		= usbatm_atm_ioctl,
	.send		= usbatm_atm_send,
	.proc_read	= usbatm_atm_proc_read,
	.owner		= THIS_MODULE,
};
/***********
** misc **
***********/
/*
 * usbatm_pdu_length - size of the AAL5 PDU carrying @length payload bytes:
 * payload plus 8-byte trailer, rounded up to a whole number of 48-byte
 * cell payloads.
 */
static inline unsigned int usbatm_pdu_length(unsigned int length)
{
	unsigned int cells;

	cells = (length + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD;
	return cells * ATM_CELL_PAYLOAD;
}
/* Release @skb via the vcc's pop callback when one is set, else free it. */
static inline void usbatm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	if (!vcc->pop) {
		dev_kfree_skb_any(skb);
		return;
	}
	vcc->pop(vcc, skb);
}
/***********
** urbs **
************/
/*
 * Detach and return the first spare urb on @channel's list, or NULL if
 * none is available.  The channel lock serializes against the completion
 * handler, which refills the list from interrupt context.
 */
static struct urb *usbatm_pop_urb(struct usbatm_channel *channel)
{
	struct urb *spare = NULL;

	spin_lock_irq(&channel->lock);
	if (!list_empty(&channel->list)) {
		spare = list_entry(channel->list.next, struct urb, urb_list);
		list_del(&spare->urb_list);
	}
	spin_unlock_irq(&channel->lock);

	return spare;
}
/*
 * Submit @urb on its channel.  On failure the urb is returned to the
 * channel's spare list with status forced to -EAGAIN, which tells
 * usbatm_tx_process() that the buffer still holds valid unsent data,
 * and the channel's delay timer is armed so processing resumes later.
 * Returns the usb_submit_urb() result.
 */
static int usbatm_submit_urb(struct urb *urb)
{
	struct usbatm_channel *channel = urb->context;
	int ret;

	/* vdbg("%s: submitting urb 0x%p, size %u",
	     __func__, urb, urb->transfer_buffer_length); */

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		if (printk_ratelimit())
			atm_warn(channel->usbatm, "%s: urb 0x%p submission failed (%d)!\n",
				 __func__, urb, ret);

		/* consider all errors transient and return the buffer back to the queue */
		urb->status = -EAGAIN;
		spin_lock_irq(&channel->lock);

		/* must add to the front when sending; doesn't matter when receiving */
		list_add(&urb->urb_list, &channel->list);

		spin_unlock_irq(&channel->lock);

		/* make sure the channel doesn't stall */
		mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS));
	}

	return ret;
}
/*
 * Completion handler shared by rx and tx urbs: return the urb to its
 * channel's list, then either schedule the channel tasklet (success, or
 * an -EILSEQ the subdriver asked to ignore) or throttle via the delay
 * timer.  -ESHUTDOWN means the device is going away: the urb has already
 * been queued above, so just return without rescheduling anything.
 */
static void usbatm_complete(struct urb *urb)
{
	struct usbatm_channel *channel = urb->context;
	unsigned long flags;
	int status = urb->status;

	/* vdbg("%s: urb 0x%p, status %d, actual_length %d",
	     __func__, urb, status, urb->actual_length); */

	/* Can be invoked from task context, protect against interrupts */
	spin_lock_irqsave(&channel->lock, flags);

	/* must add to the back when receiving; doesn't matter when sending */
	list_add_tail(&urb->urb_list, &channel->list);

	spin_unlock_irqrestore(&channel->lock, flags);

	if (unlikely(status) &&
	    (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) ||
	     status != -EILSEQ)) {
		if (status == -ESHUTDOWN)
			return;

		if (printk_ratelimit())
			atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n",
				 __func__, urb, status);
		/* throttle processing in case of an error */
		mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS));
	} else
		tasklet_schedule(&channel->tasklet);
}
/*************
** decode **
*************/
/* Look up the open vcc matching @vpi/@vci; NULL when none is open. */
static inline struct usbatm_vcc_data *usbatm_find_vcc(struct usbatm_data *instance,
						      short vpi, int vci)
{
	struct usbatm_vcc_data *entry;

	list_for_each_entry(entry, &instance->vcc_list, list)
		if ((entry->vpi == vpi) && (entry->vci == vci))
			return entry;

	return NULL;
}
/*
 * Process one received 53-byte ATM cell at @source: parse the 5-byte
 * header, append the 48-byte payload to the owning vcc's SAR buffer,
 * and when the cell carries the AAL5 end-of-PDU flag (low PTI bit),
 * validate the trailer length and CRC and push the completed PDU up
 * the ATM stack.  A one-entry vcc cache avoids a list walk per cell.
 */
static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char *source)
{
	struct atm_vcc *vcc;
	struct sk_buff *sarb;
	/* VPI: 8 bits straddling bytes 0-1; VCI: 16 bits across bytes 1-3 */
	short vpi = ((source[0] & 0x0f) << 4) | (source[1] >> 4);
	int vci = ((source[1] & 0x0f) << 12) | (source[2] << 4) | (source[3] >> 4);
	u8 pti = ((source[3] & 0xe) >> 1);

	if ((vci != instance->cached_vci) || (vpi != instance->cached_vpi)) {
		instance->cached_vpi = vpi;
		instance->cached_vci = vci;

		instance->cached_vcc = usbatm_find_vcc(instance, vpi, vci);

		if (!instance->cached_vcc)
			atm_rldbg(instance, "%s: unknown vpi/vci (%hd/%d)!\n", __func__, vpi, vci);
	}

	if (!instance->cached_vcc)
		return;		/* cell for a connection we don't have open */

	vcc = instance->cached_vcc->vcc;

	/* OAM F5 end-to-end */
	if (pti == ATM_PTI_E2EF5) {
		if (printk_ratelimit())
			atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
				 __func__, vpi, vci);
		atomic_inc(&vcc->stats->rx_err);
		return;
	}

	sarb = instance->cached_vcc->sarb;

	/* SAR buffer full: the sender exceeded max_sdu without ending the PDU */
	if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) {
		atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n",
			  __func__, sarb->len, vcc);
		/* discard cells already received */
		skb_trim(sarb, 0);
	}

	memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
	__skb_put(sarb, ATM_CELL_PAYLOAD);

	if (pti & 1) {	/* end of PDU: last cell holds the AAL5 trailer */
		struct sk_buff *skb;
		unsigned int length;
		unsigned int pdu_length;

		/* big-endian 16-bit payload length from the trailer */
		length = (source[ATM_CELL_SIZE - 6] << 8) + source[ATM_CELL_SIZE - 5];

		/* guard against overflow */
		if (length > ATM_MAX_AAL5_PDU) {
			atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
				  __func__, length, vcc);
			atomic_inc(&vcc->stats->rx_err);
			goto out;
		}

		pdu_length = usbatm_pdu_length(length);

		if (sarb->len < pdu_length) {
			atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
				  __func__, pdu_length, sarb->len, vcc);
			atomic_inc(&vcc->stats->rx_err);
			goto out;
		}

		/* 0xc704dd7b is the CRC-32 residue of a valid AAL5 frame */
		if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
			atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
				  __func__, vcc);
			atomic_inc(&vcc->stats->rx_err);
			goto out;
		}

		vdbg(&instance->usb_intf->dev,
		     "%s: got packet (length: %u, pdu_length: %u, vcc: 0x%p)",
		     __func__, length, pdu_length, vcc);

		skb = dev_alloc_skb(length);
		if (!skb) {
			if (printk_ratelimit())
				atm_err(instance, "%s: no memory for skb (length: %u)!\n",
					__func__, length);
			atomic_inc(&vcc->stats->rx_drop);
			goto out;
		}

		vdbg(&instance->usb_intf->dev,
		     "%s: allocated new sk_buff (skb: 0x%p, skb->truesize: %u)",
		     __func__, skb, skb->truesize);

		if (!atm_charge(vcc, skb->truesize)) {
			atm_rldbg(instance, "%s: failed atm_charge (skb->truesize: %u)!\n",
				  __func__, skb->truesize);
			dev_kfree_skb_any(skb);
			goto out;	/* atm_charge increments rx_drop */
		}

		skb_copy_to_linear_data(skb,
					skb_tail_pointer(sarb) - pdu_length,
					length);
		__skb_put(skb, length);

		vdbg(&instance->usb_intf->dev,
		     "%s: sending skb 0x%p, skb->len %u, skb->truesize %u",
		     __func__, skb, skb->len, skb->truesize);

		PACKETDEBUG(instance, skb->data, skb->len);

		vcc->push(vcc, skb);
		atomic_inc(&vcc->stats->rx);
	out:
		/* PDU consumed (or rejected): reset the SAR buffer */
		skb_trim(sarb, 0);
	}
}
/*
 * Split a received buffer into fixed-stride cells and feed each to
 * usbatm_extract_one_cell().  A cell may be split across two urbs, so
 * a partial cell is staged in instance->cell_buf (fill level tracked in
 * instance->buf_usage) and completed on the next call.
 */
static void usbatm_extract_cells(struct usbatm_data *instance,
				 unsigned char *source, unsigned int avail_data)
{
	unsigned int stride = instance->rx_channel.stride;
	unsigned int buf_usage = instance->buf_usage;

	/* extract cells from incoming data, taking into account that
	 * the length of avail data may not be a multiple of stride */

	if (buf_usage > 0) {
		/* we have a partially received atm cell */
		unsigned char *cell_buf = instance->cell_buf;
		unsigned int space_left = stride - buf_usage;

		if (avail_data >= space_left) {
			/* add new data and process cell */
			memcpy(cell_buf + buf_usage, source, space_left);
			source += space_left;
			avail_data -= space_left;
			usbatm_extract_one_cell(instance, cell_buf);
			instance->buf_usage = 0;
		} else {
			/* not enough data to fill the cell */
			memcpy(cell_buf + buf_usage, source, avail_data);
			instance->buf_usage = buf_usage + avail_data;
			return;
		}
	}

	/* whole cells straight from the urb buffer */
	for (; avail_data >= stride; avail_data -= stride, source += stride)
		usbatm_extract_one_cell(instance, source);

	if (avail_data > 0) {
		/* length was not a multiple of stride -
		 * save remaining data for next call */
		memcpy(instance->cell_buf, source, avail_data);
		instance->buf_usage = avail_data;
	}
}
/*************
** encode **
*************/
/*
 * Encode as much of @skb as fits into @target (at most @avail_space
 * bytes) as ATM cells: a 5-byte header per cell, then up to 48 payload
 * bytes pulled from the skb.  The final cell is zero-padded and carries
 * the AAL5 trailer (length + CRC) with its PTI end-of-PDU bit set;
 * ctrl->len dropping to 0 marks the skb as fully written.  Returns the
 * number of buffer bytes produced (a multiple of the tx stride).
 */
static unsigned int usbatm_write_cells(struct usbatm_data *instance,
				       struct sk_buff *skb,
				       u8 *target, unsigned int avail_space)
{
	struct usbatm_control *ctrl = UDSL_SKB(skb);
	struct atm_vcc *vcc = ctrl->atm.vcc;
	unsigned int bytes_written;
	unsigned int stride = instance->tx_channel.stride;

	for (bytes_written = 0; bytes_written < avail_space && ctrl->len;
	     bytes_written += stride, target += stride) {
		unsigned int data_len = min_t(unsigned int, skb->len, ATM_CELL_PAYLOAD);
		unsigned int left = ATM_CELL_PAYLOAD - data_len;
		u8 *ptr = target;

		/* 5-byte cell header: GFC=0, VPI, VCI, PTI/CLP=0, HEC placeholder */
		ptr[0] = vcc->vpi >> 4;
		ptr[1] = (vcc->vpi << 4) | (vcc->vci >> 12);
		ptr[2] = vcc->vci >> 4;
		ptr[3] = vcc->vci << 4;
		ptr[4] = 0xec;
		ptr += ATM_CELL_HEADER;

		skb_copy_from_linear_data(skb, ptr, data_len);
		ptr += data_len;
		__skb_pull(skb, data_len);

		if (!left)
			continue;	/* full cell, no padding or trailer here */

		memset(ptr, 0, left);

		if (left >= ATM_AAL5_TRAILER) {	/* trailer will go in this cell */
			u8 *trailer = target + ATM_CELL_SIZE - ATM_AAL5_TRAILER;
			/* trailer[0] = 0;		UU = 0 */
			/* trailer[1] = 0;		CPI = 0 */
			trailer[2] = ctrl->len >> 8;
			trailer[3] = ctrl->len;

			/* CRC covers padding + UU/CPI/len, excluding the CRC field itself */
			ctrl->crc = ~crc32_be(ctrl->crc, ptr, left - 4);

			trailer[4] = ctrl->crc >> 24;
			trailer[5] = ctrl->crc >> 16;
			trailer[6] = ctrl->crc >> 8;
			trailer[7] = ctrl->crc;

			target[3] |= 0x2;	/* adjust PTI */

			ctrl->len = 0;		/* tag this skb finished */
		} else
			ctrl->crc = crc32_be(ctrl->crc, ptr, left);
	}

	return bytes_written;
}
/**************
** receive **
**************/
/*
 * Receive tasklet: drain completed rx urbs, extract ATM cells from their
 * buffers, then resubmit each urb.  For isochronous pipes, consecutive
 * full frames are merged into one contiguous run before extraction; a
 * short or failed frame ends the run (a failed frame also drops any
 * partial cell staged in cell_buf).  Stops early if resubmission fails
 * (the throttle timer in usbatm_submit_urb reschedules us).
 */
static void usbatm_rx_process(struct tasklet_struct *t)
{
	struct usbatm_data *instance = from_tasklet(instance, t,
						    rx_channel.tasklet);
	struct urb *urb;

	while ((urb = usbatm_pop_urb(&instance->rx_channel))) {
		vdbg(&instance->usb_intf->dev,
		     "%s: processing urb 0x%p", __func__, urb);

		if (usb_pipeisoc(urb->pipe)) {
			unsigned char *merge_start = NULL;
			unsigned int merge_length = 0;
			const unsigned int packet_size = instance->rx_channel.packet_size;
			int i;

			for (i = 0; i < urb->number_of_packets; i++) {
				if (!urb->iso_frame_desc[i].status) {
					unsigned int actual_length = urb->iso_frame_desc[i].actual_length;

					if (!merge_length)
						merge_start = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
					merge_length += actual_length;
					/* a short frame ends the contiguous run */
					if (merge_length && (actual_length < packet_size)) {
						usbatm_extract_cells(instance, merge_start, merge_length);
						merge_length = 0;
					}
				} else {
					atm_rldbg(instance, "%s: status %d in frame %d!\n", __func__, urb->status, i);
					if (merge_length)
						usbatm_extract_cells(instance, merge_start, merge_length);
					merge_length = 0;
					/* discard any partial cell: the stream has a gap */
					instance->buf_usage = 0;
				}
			}

			if (merge_length)
				usbatm_extract_cells(instance, merge_start, merge_length);
		} else
			if (!urb->status)
				usbatm_extract_cells(instance, urb->transfer_buffer, urb->actual_length);
			else
				instance->buf_usage = 0;

		if (usbatm_submit_urb(urb))
			return;
	}
}
/***********
** send **
***********/
/*
 * Transmit tasklet: pack queued skbs into tx urb buffers as ATM cells
 * and submit them.  instance->current_skb carries a partially-written
 * skb across invocations.  A popped urb whose status is -EAGAIN came
 * back from a failed submission in usbatm_submit_urb(), so its buffer
 * already holds transfer_buffer_length valid bytes and writing resumes
 * after them.  A urb is submitted once its buffer is full, or when the
 * queue empties with data pending.
 */
static void usbatm_tx_process(struct tasklet_struct *t)
{
	struct usbatm_data *instance = from_tasklet(instance, t,
						    tx_channel.tasklet);
	struct sk_buff *skb = instance->current_skb;
	struct urb *urb = NULL;
	const unsigned int buf_size = instance->tx_channel.buf_size;
	unsigned int bytes_written = 0;
	u8 *buffer = NULL;

	if (!skb)
		skb = skb_dequeue(&instance->sndqueue);

	while (skb) {
		if (!urb) {
			urb = usbatm_pop_urb(&instance->tx_channel);
			if (!urb)
				break;	/* no more senders */
			buffer = urb->transfer_buffer;
			/* -EAGAIN: buffer already holds valid unsent data */
			bytes_written = (urb->status == -EAGAIN) ?
				urb->transfer_buffer_length : 0;
		}

		bytes_written += usbatm_write_cells(instance, skb,
						    buffer + bytes_written,
						    buf_size - bytes_written);

		vdbg(&instance->usb_intf->dev,
		     "%s: wrote %u bytes from skb 0x%p to urb 0x%p",
		     __func__, bytes_written, skb, urb);

		if (!UDSL_SKB(skb)->len) {
			/* skb fully encoded: release it and fetch the next one */
			struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;

			usbatm_pop(vcc, skb);
			atomic_inc(&vcc->stats->tx);

			skb = skb_dequeue(&instance->sndqueue);
		}

		if (bytes_written == buf_size || (!skb && bytes_written)) {
			urb->transfer_buffer_length = bytes_written;

			if (usbatm_submit_urb(urb))
				break;
			urb = NULL;
		}
	}

	instance->current_skb = skb;	/* may be NULL or partially written */
}
/*
 * Drop all pending tx skbs belonging to @vcc: everything still on the
 * send queue, plus the partially-written current_skb if it is ours.
 * The tx tasklet is disabled around the current_skb check so it cannot
 * be writing that skb concurrently.
 */
static void usbatm_cancel_send(struct usbatm_data *instance,
			       struct atm_vcc *vcc)
{
	struct sk_buff *skb, *n;

	spin_lock_irq(&instance->sndqueue.lock);
	skb_queue_walk_safe(&instance->sndqueue, skb, n) {
		if (UDSL_SKB(skb)->atm.vcc == vcc) {
			atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb);
			__skb_unlink(skb, &instance->sndqueue);
			usbatm_pop(vcc, skb);
		}
	}
	spin_unlock_irq(&instance->sndqueue.lock);

	tasklet_disable(&instance->tx_channel.tasklet);
	if ((skb = instance->current_skb) && (UDSL_SKB(skb)->atm.vcc == vcc)) {
		atm_dbg(instance, "%s: popping current skb (0x%p)\n", __func__, skb);
		instance->current_skb = NULL;
		usbatm_pop(vcc, skb);
	}
	tasklet_enable(&instance->tx_channel.tasklet);
}
/*
 * ATM .send callback: validate the skb (AAL5 only, within the maximum
 * PDU size), seed its control block (remaining length + initial CRC),
 * queue it and kick the tx tasklet.  On failure the skb is released via
 * usbatm_pop() and a negative errno is returned.
 */
static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct usbatm_data *instance = vcc->dev->dev_data;
	struct usbatm_control *ctrl = UDSL_SKB(skb);
	int err;

	/* racy disconnection check - fine */
	if (!instance || instance->disconnected) {
#ifdef VERBOSE_DEBUG
		printk_ratelimited(KERN_DEBUG "%s: %s!\n", __func__, instance ? "disconnected" : "NULL instance");
#endif
		err = -ENODEV;
		goto fail;
	}

	if (vcc->qos.aal != ATM_AAL5) {
		atm_rldbg(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal);
		err = -EINVAL;
		goto fail;
	}

	if (skb->len > ATM_MAX_AAL5_PDU) {
		atm_rldbg(instance, "%s: packet too long (%d vs %d)!\n",
			  __func__, skb->len, ATM_MAX_AAL5_PDU);
		err = -EINVAL;
		goto fail;
	}

	PACKETDEBUG(instance, skb->data, skb->len);

	/* initialize the control block */
	ctrl->atm.vcc = vcc;
	ctrl->len = skb->len;
	/* start the AAL5 CRC over the payload; the trailer continues it */
	ctrl->crc = crc32_be(~0, skb->data, skb->len);

	skb_queue_tail(&instance->sndqueue, skb);
	tasklet_schedule(&instance->tx_channel.tasklet);

	return 0;

 fail:
	usbatm_pop(vcc, skb);
	return err;
}
/********************
** bean counting **
********************/
/* Final release, called when the last reference on @instance drops. */
static void usbatm_destroy_instance(struct kref *kref)
{
	struct usbatm_data *instance = container_of(kref, struct usbatm_data, refcount);

	tasklet_kill(&instance->rx_channel.tasklet);
	tasklet_kill(&instance->tx_channel.tasklet);
	usb_put_dev(instance->usb_dev);
	kfree(instance);
}
/* Take a reference on @instance. */
static void usbatm_get_instance(struct usbatm_data *instance)
{
	kref_get(&instance->refcount);
}
/* Drop a reference on @instance; frees it via usbatm_destroy_instance(). */
static void usbatm_put_instance(struct usbatm_data *instance)
{
	kref_put(&instance->refcount, usbatm_destroy_instance);
}
/**********
** ATM **
**********/
/* ATM .dev_close: drop the reference taken when the ATM device was set up. */
static void usbatm_atm_dev_close(struct atm_dev *atm_dev)
{
	struct usbatm_data *instance = atm_dev->dev_data;

	if (!instance)
		return;

	atm_dev->dev_data = NULL; /* catch bugs */
	usbatm_put_instance(instance);	/* taken in usbatm_atm_init */
}
/*
 * ATM /proc read: emit one line per call, selected by *pos (description,
 * MAC, AAL5 statistics, link state).  Returns 0 once all lines are done.
 */
static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page)
{
	struct usbatm_data *instance = atm_dev->dev_data;
	int left = *pos;

	if (!instance)
		return -ENODEV;

	if (!left--)
		return sprintf(page, "%s\n", instance->description);

	if (!left--)
		return sprintf(page, "MAC: %pM\n", atm_dev->esi);

	if (!left--)
		return sprintf(page,
			       "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
			       atomic_read(&atm_dev->stats.aal5.tx),
			       atomic_read(&atm_dev->stats.aal5.tx_err),
			       atomic_read(&atm_dev->stats.aal5.rx),
			       atomic_read(&atm_dev->stats.aal5.rx_err),
			       atomic_read(&atm_dev->stats.aal5.rx_drop));

	if (!left--) {
		if (instance->disconnected)
			return sprintf(page, "Disconnected\n");
		else
			switch (atm_dev->signal) {
			case ATM_PHY_SIG_FOUND:
				return sprintf(page, "Line up\n");
			case ATM_PHY_SIG_LOST:
				return sprintf(page, "Line down\n");
			default:
				return sprintf(page, "Line state unknown\n");
			}
	}

	return 0;
}
/*
 * ATM .open: allocate per-vcc state (including the SAR reassembly
 * buffer sized for max_sdu) and publish it on the vcc list.  The rx
 * tasklet is disabled while the list and the vcc cache are updated so
 * the receive path never sees them half-written.  instance->serialize
 * orders this against close and disconnect.
 */
static int usbatm_atm_open(struct atm_vcc *vcc)
{
	struct usbatm_data *instance = vcc->dev->dev_data;
	struct usbatm_vcc_data *new = NULL;
	int ret;
	int vci = vcc->vci;
	short vpi = vcc->vpi;

	if (!instance)
		return -ENODEV;

	/* only support AAL5 */
	if ((vcc->qos.aal != ATM_AAL5)) {
		atm_warn(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal);
		return -EINVAL;
	}

	/* sanity checks */
	if ((vcc->qos.rxtp.max_sdu < 0) || (vcc->qos.rxtp.max_sdu > ATM_MAX_AAL5_PDU)) {
		atm_dbg(instance, "%s: max_sdu %d out of range!\n", __func__, vcc->qos.rxtp.max_sdu);
		return -EINVAL;
	}

	mutex_lock(&instance->serialize);	/* vs self, usbatm_atm_close, usbatm_usb_disconnect */

	if (instance->disconnected) {
		atm_dbg(instance, "%s: disconnected!\n", __func__);
		ret = -ENODEV;
		goto fail;
	}

	if (usbatm_find_vcc(instance, vpi, vci)) {
		atm_dbg(instance, "%s: %hd/%d already in use!\n", __func__, vpi, vci);
		ret = -EADDRINUSE;
		goto fail;
	}

	new = kzalloc(sizeof(struct usbatm_vcc_data), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto fail;
	}

	new->vcc = vcc;
	new->vpi = vpi;
	new->vci = vci;

	/* SAR buffer holds a whole reassembled PDU, rounded to cell payloads */
	new->sarb = alloc_skb(usbatm_pdu_length(vcc->qos.rxtp.max_sdu), GFP_KERNEL);
	if (!new->sarb) {
		atm_err(instance, "%s: no memory for SAR buffer!\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	vcc->dev_data = new;

	tasklet_disable(&instance->rx_channel.tasklet);
	instance->cached_vcc = new;
	instance->cached_vpi = vpi;
	instance->cached_vci = vci;
	list_add(&new->list, &instance->vcc_list);
	tasklet_enable(&instance->rx_channel.tasklet);

	set_bit(ATM_VF_ADDR, &vcc->flags);
	set_bit(ATM_VF_PARTIAL, &vcc->flags);
	set_bit(ATM_VF_READY, &vcc->flags);

	mutex_unlock(&instance->serialize);

	atm_dbg(instance, "%s: allocated vcc data 0x%p\n", __func__, new);

	return 0;

 fail:
	kfree(new);
	mutex_unlock(&instance->serialize);
	return ret;
}
/*
 * ATM .close: cancel pending tx for this vcc, unpublish it from the vcc
 * list and the rx cache (with the rx tasklet disabled so the receive
 * path cannot race), then free its SAR buffer and state.
 */
static void usbatm_atm_close(struct atm_vcc *vcc)
{
	struct usbatm_data *instance = vcc->dev->dev_data;
	struct usbatm_vcc_data *vcc_data = vcc->dev_data;

	if (!instance || !vcc_data)
		return;

	usbatm_cancel_send(instance, vcc);

	mutex_lock(&instance->serialize);	/* vs self, usbatm_atm_open, usbatm_usb_disconnect */

	tasklet_disable(&instance->rx_channel.tasklet);
	if (instance->cached_vcc == vcc_data) {
		/* invalidate the one-entry lookup cache */
		instance->cached_vcc = NULL;
		instance->cached_vpi = ATM_VPI_UNSPEC;
		instance->cached_vci = ATM_VCI_UNSPEC;
	}
	list_del(&vcc_data->list);
	tasklet_enable(&instance->rx_channel.tasklet);

	kfree_skb(vcc_data->sarb);
	vcc_data->sarb = NULL;

	kfree(vcc_data);
	vcc->dev_data = NULL;

	vcc->vpi = ATM_VPI_UNSPEC;
	vcc->vci = ATM_VCI_UNSPEC;
	clear_bit(ATM_VF_READY, &vcc->flags);
	clear_bit(ATM_VF_PARTIAL, &vcc->flags);
	clear_bit(ATM_VF_ADDR, &vcc->flags);

	mutex_unlock(&instance->serialize);
}
/*
 * ATM .ioctl: only ATM_QUERYLOOP is handled (no loopback modes are
 * supported); everything else is passed back as -ENOIOCTLCMD.
 */
static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd,
			    void __user *arg)
{
	struct usbatm_data *instance = atm_dev->dev_data;

	if (!instance || instance->disconnected)
		return -ENODEV;

	if (cmd != ATM_QUERYLOOP)
		return -ENOIOCTLCMD;

	return put_user(ATM_LM_NONE, (int __user *)arg) ? -EFAULT : 0;
}
static int usbatm_atm_init(struct usbatm_data *instance)
{
struct atm_dev *atm_dev;
int ret, i;
/* ATM init. The ATM initialization scheme suffers from an intrinsic race
* condition: callbacks we register can be executed at once, before we have
* initialized the struct atm_dev. To protect against this, all callbacks
* abort if atm_dev->dev_data is NULL. */
atm_dev = atm_dev_register(instance->driver_name,
&instance->usb_intf->dev, &usbatm_atm_devops,
-1, NULL);
if (!atm_dev) {
usb_err(instance, "%s: failed to register ATM device!\n", __func__);
return -1;
}
instance->atm_dev = atm_dev;
atm_dev->ci_range.vpi_bits = ATM_CI_MAX;
atm_dev->ci_range.vci_bits = ATM_CI_MAX;
atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
/* temp init ATM device, set to 128kbit */
atm_dev->link_rate = 128 * 1000 / 424;
if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) {
atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret);
goto fail;
}
usbatm_get_instance(instance); /* dropped in usbatm_atm_dev_close */
/* ready for ATM callbacks */
mb();
atm_dev->dev_data = instance;
/* submit all rx URBs */
for (i = 0; i < num_rcv_urbs; i++)
usbatm_submit_urb(instance->urbs[i]);
return 0;
fail:
instance->atm_dev = NULL;
atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */
return ret;
}
/**********
** USB **
**********/
/*
 * Kernel-thread body for drivers whose initialization is too heavy for
 * probe context: runs the subdriver's heavy_init, then brings the ATM
 * side up.  Signals thread_started once running and thread_exited (with
 * the result) on exit; SIGTERM is allowed so disconnect can abort it.
 */
static int usbatm_do_heavy_init(void *arg)
{
	struct usbatm_data *instance = arg;
	int ret;

	allow_signal(SIGTERM);
	complete(&instance->thread_started);

	ret = instance->driver->heavy_init(instance, instance->usb_intf);

	if (!ret)
		ret = usbatm_atm_init(instance);

	mutex_lock(&instance->serialize);
	instance->thread = NULL;
	mutex_unlock(&instance->serialize);

	kthread_complete_and_exit(&instance->thread_exited, ret);
}
/*
 * Spawn the heavy-init kernel thread and wait until it has started
 * (so instance->thread is valid before probe returns).  Returns 0 or
 * the kthread_create() error.
 */
static int usbatm_heavy_init(struct usbatm_data *instance)
{
	struct task_struct *t;

	t = kthread_create(usbatm_do_heavy_init, instance, "%s",
			   instance->driver->driver_name);
	if (IS_ERR(t)) {
		usb_err(instance, "%s: failed to create kernel_thread (%ld)!\n",
			__func__, PTR_ERR(t));
		return PTR_ERR(t);
	}

	instance->thread = t;
	wake_up_process(t);
	wait_for_completion(&instance->thread_started);

	return 0;
}
/* Throttle-timer callback: re-kick the channel's tasklet after a delay. */
static void usbatm_tasklet_schedule(struct timer_list *t)
{
	struct usbatm_channel *channel = from_timer(channel, t, delay);

	tasklet_schedule(&channel->tasklet);
}
/* Initialize a channel's lock, spare-urb list, and throttle timer. */
static void usbatm_init_channel(struct usbatm_channel *channel)
{
	spin_lock_init(&channel->lock);
	INIT_LIST_HEAD(&channel->list);
	timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
}
/*
 * Core probe entry point called by subdrivers: allocate the instance
 * (with its trailing urb pointer array), build the description string,
 * run the subdriver bind hook, set up both channels and their urbs and
 * buffers, then either launch the heavy-init thread or bring the ATM
 * side up directly.  On failure everything allocated so far is undone.
 */
int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
		     struct usbatm_driver *driver)
{
	struct device *dev = &intf->dev;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usbatm_data *instance;
	char *buf;
	int error = -ENOMEM;
	int i, length;
	unsigned int maxpacket, num_packets;
	size_t size;

	/* instance init */
	size = struct_size(instance, urbs, num_rcv_urbs + num_snd_urbs);
	instance = kzalloc(size, GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	/* public fields */

	instance->driver = driver;
	strscpy(instance->driver_name, driver->driver_name,
		sizeof(instance->driver_name));

	instance->usb_dev = usb_dev;
	instance->usb_intf = intf;

	/* best-effort "<product> (<usb path>)" description; bail out early
	 * at any step and keep whatever was built so far */
	buf = instance->description;
	length = sizeof(instance->description);

	if ((i = usb_string(usb_dev, usb_dev->descriptor.iProduct, buf, length)) < 0)
		goto bind;

	buf += i;
	length -= i;

	i = scnprintf(buf, length, " (");
	buf += i;
	length -= i;

	if (length <= 0 || (i = usb_make_path(usb_dev, buf, length)) < 0)
		goto bind;

	buf += i;
	length -= i;

	snprintf(buf, length, ")");

 bind:
	if (driver->bind && (error = driver->bind(instance, intf, id)) < 0) {
		dev_err(dev, "%s: bind failed: %d!\n", __func__, error);
		goto fail_free;
	}

	/* private fields */

	kref_init(&instance->refcount);		/* dropped in usbatm_usb_disconnect */
	mutex_init(&instance->serialize);

	instance->thread = NULL;
	init_completion(&instance->thread_started);
	init_completion(&instance->thread_exited);

	INIT_LIST_HEAD(&instance->vcc_list);
	skb_queue_head_init(&instance->sndqueue);

	usbatm_init_channel(&instance->rx_channel);
	usbatm_init_channel(&instance->tx_channel);
	tasklet_setup(&instance->rx_channel.tasklet, usbatm_rx_process);
	tasklet_setup(&instance->tx_channel.tasklet, usbatm_tx_process);
	/* stride = 53-byte cell plus any subdriver-specific per-cell padding */
	instance->rx_channel.stride = ATM_CELL_SIZE + driver->rx_padding;
	instance->tx_channel.stride = ATM_CELL_SIZE + driver->tx_padding;
	instance->rx_channel.usbatm = instance->tx_channel.usbatm = instance;

	if ((instance->flags & UDSL_USE_ISOC) && driver->isoc_in)
		instance->rx_channel.endpoint = usb_rcvisocpipe(usb_dev, driver->isoc_in);
	else
		instance->rx_channel.endpoint = usb_rcvbulkpipe(usb_dev, driver->bulk_in);

	instance->tx_channel.endpoint = usb_sndbulkpipe(usb_dev, driver->bulk_out);

	/* tx buffer size must be a positive multiple of the stride */
	instance->tx_channel.buf_size = max(instance->tx_channel.stride,
			snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride));

	/* rx buffer size must be a positive multiple of the endpoint maxpacket */
	maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint);

	if ((maxpacket < 1) || (maxpacket > UDSL_MAX_BUF_SIZE)) {
		dev_err(dev, "%s: invalid endpoint %02x!\n", __func__,
			usb_pipeendpoint(instance->rx_channel.endpoint));
		error = -EINVAL;
		goto fail_unbind;
	}

	num_packets = max(1U, (rcv_buf_bytes + maxpacket / 2) / maxpacket); /* round */

	if (num_packets * maxpacket > UDSL_MAX_BUF_SIZE)
		num_packets--;

	instance->rx_channel.buf_size = num_packets * maxpacket;
	instance->rx_channel.packet_size = maxpacket;

	for (i = 0; i < 2; i++) {
		struct usbatm_channel *channel = i ?
			&instance->tx_channel : &instance->rx_channel;

		dev_dbg(dev, "%s: using %d byte buffer for %s channel 0x%p\n",
			__func__, channel->buf_size, i ? "tx" : "rx", channel);
	}

	/* initialize urbs: first num_rcv_urbs belong to rx, the rest to tx */

	for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) {
		u8 *buffer;
		struct usbatm_channel *channel = i < num_rcv_urbs ?
			&instance->rx_channel : &instance->tx_channel;
		struct urb *urb;
		unsigned int iso_packets = usb_pipeisoc(channel->endpoint) ? channel->buf_size / channel->packet_size : 0;

		urb = usb_alloc_urb(iso_packets, GFP_KERNEL);
		if (!urb) {
			error = -ENOMEM;
			goto fail_unbind;
		}

		instance->urbs[i] = urb;

		/* zero the tx padding to avoid leaking information */
		buffer = kzalloc(channel->buf_size, GFP_KERNEL);
		if (!buffer) {
			error = -ENOMEM;
			goto fail_unbind;
		}

		usb_fill_bulk_urb(urb, instance->usb_dev, channel->endpoint,
				  buffer, channel->buf_size, usbatm_complete, channel);
		if (iso_packets) {
			int j;

			urb->interval = 1;
			urb->transfer_flags = URB_ISO_ASAP;
			urb->number_of_packets = iso_packets;
			for (j = 0; j < iso_packets; j++) {
				urb->iso_frame_desc[j].offset = channel->packet_size * j;
				urb->iso_frame_desc[j].length = channel->packet_size;
			}
		}

		/* put all tx URBs on the list of spares */
		if (i >= num_rcv_urbs)
			list_add_tail(&urb->urb_list, &channel->list);

		vdbg(&intf->dev, "%s: alloced buffer 0x%p buf size %u urb 0x%p",
		     __func__, urb->transfer_buffer, urb->transfer_buffer_length, urb);
	}

	instance->cached_vpi = ATM_VPI_UNSPEC;
	instance->cached_vci = ATM_VCI_UNSPEC;

	/* staging area for a cell split across two rx urbs */
	instance->cell_buf = kmalloc(instance->rx_channel.stride, GFP_KERNEL);

	if (!instance->cell_buf) {
		error = -ENOMEM;
		goto fail_unbind;
	}

	if (!(instance->flags & UDSL_SKIP_HEAVY_INIT) && driver->heavy_init) {
		error = usbatm_heavy_init(instance);
	} else {
		complete(&instance->thread_exited);	/* pretend that heavy_init was run */
		error = usbatm_atm_init(instance);
	}

	if (error < 0)
		goto fail_unbind;

	usb_get_dev(usb_dev);
	usb_set_intfdata(intf, instance);

	return 0;

 fail_unbind:
	if (instance->driver->unbind)
		instance->driver->unbind(instance, intf);
 fail_free:
	kfree(instance->cell_buf);

	for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) {
		if (instance->urbs[i])
			kfree(instance->urbs[i]->transfer_buffer);
		usb_free_urb(instance->urbs[i]);
	}

	kfree(instance);

	return error;
}
EXPORT_SYMBOL_GPL(usbatm_usb_probe);
/*
 * Core disconnect: mark the instance disconnected, terminate a running
 * heavy-init thread (SIGTERM + wait), tear down open vccs, kill all
 * urbs and timers, neuter the tasklets by emptying the channel lists,
 * run subdriver stop/unbind hooks, free buffers, deregister the ATM
 * device and drop the probe reference.
 */
void usbatm_usb_disconnect(struct usb_interface *intf)
{
	struct device *dev = &intf->dev;
	struct usbatm_data *instance = usb_get_intfdata(intf);
	struct usbatm_vcc_data *vcc_data;
	int i;

	if (!instance) {
		dev_dbg(dev, "%s: NULL instance!\n", __func__);
		return;
	}

	usb_set_intfdata(intf, NULL);

	mutex_lock(&instance->serialize);
	instance->disconnected = 1;
	if (instance->thread != NULL)
		send_sig(SIGTERM, instance->thread, 1);
	mutex_unlock(&instance->serialize);

	/* completed by usbatm_do_heavy_init(), or by probe when there was
	 * no heavy init to run */
	wait_for_completion(&instance->thread_exited);

	mutex_lock(&instance->serialize);
	list_for_each_entry(vcc_data, &instance->vcc_list, list)
		vcc_release_async(vcc_data->vcc, -EPIPE);
	mutex_unlock(&instance->serialize);

	tasklet_disable(&instance->rx_channel.tasklet);
	tasklet_disable(&instance->tx_channel.tasklet);

	for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++)
		usb_kill_urb(instance->urbs[i]);

	del_timer_sync(&instance->rx_channel.delay);
	del_timer_sync(&instance->tx_channel.delay);

	/* turn usbatm_[rt]x_process into something close to a no-op */
	/* no need to take the spinlock */
	INIT_LIST_HEAD(&instance->rx_channel.list);
	INIT_LIST_HEAD(&instance->tx_channel.list);

	tasklet_enable(&instance->rx_channel.tasklet);
	tasklet_enable(&instance->tx_channel.tasklet);

	if (instance->atm_dev && instance->driver->atm_stop)
		instance->driver->atm_stop(instance, instance->atm_dev);

	if (instance->driver->unbind)
		instance->driver->unbind(instance, intf);

	instance->driver_data = NULL;

	for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) {
		kfree(instance->urbs[i]->transfer_buffer);
		usb_free_urb(instance->urbs[i]);
	}

	kfree(instance->cell_buf);

	/* ATM finalize */
	if (instance->atm_dev) {
		atm_dev_deregister(instance->atm_dev);
		instance->atm_dev = NULL;
	}

	usbatm_put_instance(instance);	/* taken in usbatm_usb_probe */
}
EXPORT_SYMBOL_GPL(usbatm_usb_disconnect);
/***********
** init **
***********/
/*
 * Module init: sanity-check compile-time and module-parameter limits.
 * Returns 0 on success, -EIO if usbatm_control no longer fits in
 * skb->cb, -EINVAL on out-of-range parameters.
 */
static int __init usbatm_usb_init(void)
{
	/* per-skb control data lives in skb->cb; refuse to run if it
	 * would overflow into adjacent sk_buff fields */
	if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) {
		pr_err("%s unusable with this kernel!\n", usbatm_driver_name);
		return -EIO;
	}

	if (num_rcv_urbs > UDSL_MAX_RCV_URBS)
		return -EINVAL;
	if (num_snd_urbs > UDSL_MAX_SND_URBS)
		return -EINVAL;
	if (rcv_buf_bytes < 1 || rcv_buf_bytes > UDSL_MAX_BUF_SIZE)
		return -EINVAL;
	if (snd_buf_bytes < 1 || snd_buf_bytes > UDSL_MAX_BUF_SIZE)
		return -EINVAL;

	return 0;
}
module_init(usbatm_usb_init);
/* Nothing to tear down here: each usbatm instance is cleaned up in
 * usbatm_usb_disconnect() when its device goes away. */
static void __exit usbatm_usb_exit(void)
{
}
module_exit(usbatm_usb_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/************
** debug **
************/
#ifdef VERBOSE_DEBUG
/*
 * usbatm_print_packet - hex-dump @len bytes of @data to the device debug
 * log, 16 bytes per line, each line prefixed with its offset.
 * Returns the number of bytes dumped.
 *
 * Fix: the old code rebuilt each line with
 * sprintf(buffer, "%s %2.2x", buffer, ...) - passing the destination
 * buffer as a source argument to sprintf is undefined behavior (the
 * arguments are restrict-qualified).  Append at a tracked offset instead.
 */
static int usbatm_print_packet(struct usbatm_data *instance,
			       const unsigned char *data, int len)
{
	unsigned char buffer[256];
	int i = 0, j = 0;

	for (i = 0; i < len;) {
		/* offset prefix, then up to 16 hex bytes appended in place */
		int pos = sprintf(buffer, "%.3d :", i);

		for (j = 0; (j < 16) && (i < len); j++, i++)
			pos += sprintf(buffer + pos, " %2.2x", data[i]);
		dev_dbg(&instance->usb_intf->dev, "%s", buffer);
	}
	return i;
}
#endif
| linux-master | drivers/usb/atm/usbatm.c |
// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* xusbatm.c - dumb usbatm-based driver for modems initialized in userspace
*
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
******************************************************************************/
#include <linux/module.h>
#include <linux/etherdevice.h> /* for eth_random_addr() */
#include "usbatm.h"
#define XUSBATM_DRIVERS_MAX 8
#define XUSBATM_PARM(name, type, parmtype, desc) \
static type name[XUSBATM_DRIVERS_MAX]; \
static unsigned int num_##name; \
module_param_array(name, parmtype, &num_##name, 0444); \
MODULE_PARM_DESC(name, desc)
/* Per-driver-slot module parameters; arrays are indexed by driver slot.
 * Fix: the tx_altsetting description previously said "rx altsetting"
 * (copy-paste typo), which is what modinfo reports to users. */
XUSBATM_PARM(vendor, unsigned short, ushort, "USB device vendor");
XUSBATM_PARM(product, unsigned short, ushort, "USB device product");

XUSBATM_PARM(rx_endpoint, unsigned char, byte, "rx endpoint number");
XUSBATM_PARM(tx_endpoint, unsigned char, byte, "tx endpoint number");
XUSBATM_PARM(rx_padding, unsigned char, byte, "rx padding (default 0)");
XUSBATM_PARM(tx_padding, unsigned char, byte, "tx padding (default 0)");
XUSBATM_PARM(rx_altsetting, unsigned char, byte, "rx altsetting (default 0)");
XUSBATM_PARM(tx_altsetting, unsigned char, byte, "tx altsetting (default 0)");
static const char xusbatm_driver_name[] = "xusbatm";
static struct usbatm_driver xusbatm_drivers[XUSBATM_DRIVERS_MAX];
static struct usb_device_id xusbatm_usb_ids[XUSBATM_DRIVERS_MAX + 1];
static struct usb_driver xusbatm_usb_driver;
/*
 * Scan every interface of the device's active configuration and return
 * the first one whose altsetting @altsetting contains an endpoint with
 * address @ep, or NULL if none matches.
 */
static struct usb_interface *xusbatm_find_intf(struct usb_device *usb_dev, int altsetting, u8 ep)
{
	int i, j;

	for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) {
		struct usb_interface *intf = usb_dev->actconfig->interface[i];
		struct usb_host_interface *alt;

		if (!intf)
			continue;
		alt = usb_altnum_to_altsetting(intf, altsetting);
		if (!alt)
			continue;
		for (j = 0; j < alt->desc.bNumEndpoints; j++)
			if (alt->endpoint[j].desc.bEndpointAddress == ep)
				return intf;
	}
	return NULL;
}
/*
 * Take ownership of @intf: optionally claim it for this driver, then
 * select the requested altsetting.  Returns 0 on success or a negative
 * errno (errors are logged).
 */
static int xusbatm_capture_intf(struct usbatm_data *usbatm, struct usb_device *usb_dev,
			struct usb_interface *intf, int altsetting, int claim)
{
	int ifnum = intf->altsetting->desc.bInterfaceNumber;
	int ret;

	if (claim) {
		ret = usb_driver_claim_interface(&xusbatm_usb_driver, intf, usbatm);
		if (ret) {
			usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, ifnum, ret);
			return ret;
		}
	}

	ret = usb_set_interface(usb_dev, ifnum, altsetting);
	if (ret)
		usb_err(usbatm, "%s: altsetting %2d for interface %2d failed (%d)!\n", __func__, altsetting, ifnum, ret);
	return ret;
}
/* Undo xusbatm_capture_intf: hand back an interface we explicitly
 * claimed; interfaces the USB core bound for us need no action. */
static void xusbatm_release_intf(struct usb_device *usb_dev, struct usb_interface *intf, int claimed)
{
	if (!claimed)
		return;
	usb_set_intfdata(intf, NULL);
	usb_driver_release_interface(&xusbatm_usb_driver, intf);
}
/*
 * xusbatm_bind - locate and capture the rx and tx interfaces for this modem.
 *
 * Endpoint numbers and altsettings come from module parameters, indexed
 * by the matched usb_device_id slot.  At least one of the two interfaces
 * must be the one we were probed with; the other (if different) is
 * claimed explicitly and released again on error.
 */
static int xusbatm_bind(struct usbatm_data *usbatm,
			struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	int drv_ix = id - xusbatm_usb_ids;	/* which parameter slot matched */
	int rx_alt = rx_altsetting[drv_ix];
	int tx_alt = tx_altsetting[drv_ix];
	struct usb_interface *rx_intf = xusbatm_find_intf(usb_dev, rx_alt, rx_endpoint[drv_ix]);
	struct usb_interface *tx_intf = xusbatm_find_intf(usb_dev, tx_alt, tx_endpoint[drv_ix]);
	int ret;

	usb_dbg(usbatm, "%s: binding driver %d: vendor %04x product %04x"
		" rx: ep %02x padd %d alt %2d tx: ep %02x padd %d alt %2d\n",
		__func__, drv_ix, vendor[drv_ix], product[drv_ix],
		rx_endpoint[drv_ix], rx_padding[drv_ix], rx_alt,
		tx_endpoint[drv_ix], tx_padding[drv_ix], tx_alt);

	if (!rx_intf || !tx_intf) {
		if (!rx_intf)
			usb_dbg(usbatm, "%s: no interface contains endpoint %02x in altsetting %2d\n",
				__func__, rx_endpoint[drv_ix], rx_alt);
		if (!tx_intf)
			usb_dbg(usbatm, "%s: no interface contains endpoint %02x in altsetting %2d\n",
				__func__, tx_endpoint[drv_ix], tx_alt);
		return -ENODEV;
	}

	/* the probed interface must carry at least one of the endpoints */
	if ((rx_intf != intf) && (tx_intf != intf))
		return -ENODEV;

	/* one interface cannot be in two different altsettings at once */
	if ((rx_intf == tx_intf) && (rx_alt != tx_alt)) {
		usb_err(usbatm, "%s: altsettings clash on interface %2d (%2d vs %2d)!\n", __func__,
			rx_intf->altsetting->desc.bInterfaceNumber, rx_alt, tx_alt);
		return -EINVAL;
	}

	usb_dbg(usbatm, "%s: rx If#=%2d; tx If#=%2d\n", __func__,
		rx_intf->altsetting->desc.bInterfaceNumber,
		tx_intf->altsetting->desc.bInterfaceNumber);

	/* claim only interfaces other than the one the core gave us */
	ret = xusbatm_capture_intf(usbatm, usb_dev, rx_intf, rx_alt, rx_intf != intf);
	if (ret)
		return ret;

	if ((tx_intf != rx_intf) && (ret = xusbatm_capture_intf(usbatm, usb_dev, tx_intf, tx_alt, tx_intf != intf))) {
		xusbatm_release_intf(usb_dev, rx_intf, rx_intf != intf);
		return ret;
	}

	return 0;
}
/*
 * Release every interface of the device that bind() captured; captured
 * interfaces are recognised by carrying our usbatm pointer as intfdata.
 */
static void xusbatm_unbind(struct usbatm_data *usbatm,
			   struct usb_interface *intf)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	int i;

	usb_dbg(usbatm, "%s entered\n", __func__);

	for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) {
		struct usb_interface *cur_intf = usb_dev->actconfig->interface[i];

		if (!cur_intf || usb_get_intfdata(cur_intf) != usbatm)
			continue;
		usb_set_intfdata(cur_intf, NULL);
		usb_driver_release_interface(&xusbatm_usb_driver, cur_intf);
	}
}
/* ATM-layer start hook: the modem offers no way to read a real MAC, so
 * fill the ESI with a random locally-administered address. */
static int xusbatm_atm_start(struct usbatm_data *usbatm,
			     struct atm_dev *atm_dev)
{
	atm_dbg(usbatm, "%s entered\n", __func__);

	/* use random MAC as we've no way to get it from the device */
	eth_random_addr(atm_dev->esi);

	return 0;
}
/* Dispatch probe to the usbatm core, selecting the driver slot whose
 * usb_device_id entry matched (the tables are parallel arrays). */
static int xusbatm_usb_probe(struct usb_interface *intf,
			     const struct usb_device_id *id)
{
	return usbatm_usb_probe(intf, id,
				xusbatm_drivers + (id - xusbatm_usb_ids));
}
/* USB driver glue; the id table is filled in at module init from the
 * vendor/product module parameters. */
static struct usb_driver xusbatm_usb_driver = {
	.name		= xusbatm_driver_name,
	.probe		= xusbatm_usb_probe,
	.disconnect	= usbatm_usb_disconnect,
	.id_table	= xusbatm_usb_ids
};
/*
 * Module init: validate the parallel parameter arrays, build the USB id
 * table and per-slot usbatm driver descriptors, then register the driver.
 */
static int __init xusbatm_init(void)
{
	int i;

	/* vendor/product/rx_endpoint/tx_endpoint must all be given once
	 * per driver slot; the remaining parameters default to 0 */
	if (!num_vendor ||
	    num_vendor != num_product ||
	    num_vendor != num_rx_endpoint ||
	    num_vendor != num_tx_endpoint) {
		pr_warn("xusbatm: malformed module parameters\n");
		return -EINVAL;
	}

	for (i = 0; i < num_vendor; i++) {
		/* rx endpoints are IN, tx endpoints are OUT: force the
		 * direction bit accordingly */
		rx_endpoint[i] |= USB_DIR_IN;
		tx_endpoint[i] &= USB_ENDPOINT_NUMBER_MASK;

		xusbatm_usb_ids[i].match_flags	= USB_DEVICE_ID_MATCH_DEVICE;
		xusbatm_usb_ids[i].idVendor	= vendor[i];
		xusbatm_usb_ids[i].idProduct	= product[i];

		xusbatm_drivers[i].driver_name	= xusbatm_driver_name;
		xusbatm_drivers[i].bind		= xusbatm_bind;
		xusbatm_drivers[i].unbind	= xusbatm_unbind;
		xusbatm_drivers[i].atm_start	= xusbatm_atm_start;
		xusbatm_drivers[i].bulk_in	= rx_endpoint[i];
		xusbatm_drivers[i].bulk_out	= tx_endpoint[i];
		xusbatm_drivers[i].rx_padding	= rx_padding[i];
		xusbatm_drivers[i].tx_padding	= tx_padding[i];
	}

	return usb_register(&xusbatm_usb_driver);
}
module_init(xusbatm_init);

/* Module exit: just drop the USB driver registration; per-device state
 * is torn down via usbatm_usb_disconnect. */
static void __exit xusbatm_exit(void)
{
	usb_deregister(&xusbatm_usb_driver);
}
module_exit(xusbatm_exit);

MODULE_AUTHOR("Roman Kagan, Duncan Sands");
MODULE_DESCRIPTION("Driver for USB ADSL modems initialized in userspace");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/atm/xusbatm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
*
* Copyright (C) 2004 David Brownell
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
/**
* usb_ep_autoconfig_ss() - choose an endpoint matching the ep
* descriptor and ep companion descriptor
* @gadget: The device to which the endpoint must belong.
* @desc: Endpoint descriptor, with endpoint direction and transfer mode
* initialized. For periodic transfers, the maximum packet
* size must also be initialized. This is modified on
* success.
* @ep_comp: Endpoint companion descriptor, with the required
* number of streams. Will be modified when the chosen EP
* supports a different number of streams.
*
* This routine replaces the usb_ep_autoconfig when needed
* superspeed enhancments. If such enhancemnets are required,
* the FD should call usb_ep_autoconfig_ss directly and provide
* the additional ep_comp parameter.
*
* By choosing an endpoint to use with the specified descriptor,
* this routine simplifies writing gadget drivers that work with
* multiple USB device controllers. The endpoint would be
* passed later to usb_ep_enable(), along with some descriptor.
*
* That second descriptor won't always be the same as the first one.
* For example, isochronous endpoints can be autoconfigured for high
* bandwidth, and then used in several lower bandwidth altsettings.
* Also, high and full speed descriptors will be different.
*
* Be sure to examine and test the results of autoconfiguration
* on your hardware. This code may not make the best choices
* about how to use the USB controller, and it can't know all
* the restrictions that may apply. Some combinations of driver
* and hardware won't be able to autoconfigure.
*
* On success, this returns an claimed usb_ep, and modifies the endpoint
* descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
* is initialized as if the endpoint were used at full speed and
* the bmAttribute field in the ep companion descriptor is
* updated with the assigned number of streams if it is
* different from the original value. To prevent the endpoint
* from being returned by a later autoconfig call, claims it by
* assigning ep->claimed to true.
*
* On failure, this returns a null endpoint descriptor.
*/
struct usb_ep *usb_ep_autoconfig_ss(
	struct usb_gadget *gadget,
	struct usb_endpoint_descriptor *desc,
	struct usb_ss_ep_comp_descriptor *ep_comp
)
{
	struct usb_ep	*ep;

	/* First, let the UDC's own matching hook pick an endpoint */
	if (gadget->ops->match_ep) {
		ep = gadget->ops->match_ep(gadget, desc, ep_comp);
		if (ep)
			goto found_ep;
	}

	/* Second, look at endpoints until an unclaimed one looks usable */
	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
		if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp))
			goto found_ep;
	}

	/* Fail */
	return NULL;
found_ep:

	/*
	 * If the protocol driver hasn't yet decided on wMaxPacketSize
	 * and wants to know the maximum possible, provide the info.
	 */
	if (desc->wMaxPacketSize == 0)
		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);

	/* report address; keep only the direction bit before assigning
	 * an endpoint number */
	desc->bEndpointAddress &= USB_DIR_IN;
	if (isdigit(ep->name[2])) {
		/* endpoint names of the form "ep<num>..." carry a fixed
		 * number at name[2]; reuse it */
		u8 num = simple_strtoul(&ep->name[2], NULL, 10);
		desc->bEndpointAddress |= num;
	} else if (desc->bEndpointAddress & USB_DIR_IN) {
		/* NOTE: on this failure path in_epnum stays incremented
		 * and the endpoint is left unclaimed */
		if (++gadget->in_epnum > 15)
			return NULL;
		desc->bEndpointAddress = USB_DIR_IN | gadget->in_epnum;
	} else {
		if (++gadget->out_epnum > 15)
			return NULL;
		desc->bEndpointAddress |= gadget->out_epnum;
	}

	ep->address = desc->bEndpointAddress;
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->claimed = true;
	return ep;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
/**
* usb_ep_autoconfig() - choose an endpoint matching the
* descriptor
* @gadget: The device to which the endpoint must belong.
* @desc: Endpoint descriptor, with endpoint direction and transfer mode
* initialized. For periodic transfers, the maximum packet
* size must also be initialized. This is modified on success.
*
* By choosing an endpoint to use with the specified descriptor, this
* routine simplifies writing gadget drivers that work with multiple
* USB device controllers. The endpoint would be passed later to
* usb_ep_enable(), along with some descriptor.
*
* That second descriptor won't always be the same as the first one.
* For example, isochronous endpoints can be autoconfigured for high
* bandwidth, and then used in several lower bandwidth altsettings.
* Also, high and full speed descriptors will be different.
*
* Be sure to examine and test the results of autoconfiguration on your
* hardware. This code may not make the best choices about how to use the
* USB controller, and it can't know all the restrictions that may apply.
* Some combinations of driver and hardware won't be able to autoconfigure.
*
* On success, this returns an claimed usb_ep, and modifies the endpoint
* descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
* is initialized as if the endpoint were used at full speed. Because of
* that the users must consider adjusting the autoconfigured descriptor.
* To prevent the endpoint from being returned by a later autoconfig call,
* claims it by assigning ep->claimed to true.
*
* On failure, this returns a null endpoint descriptor.
*/
struct usb_ep *usb_ep_autoconfig(
	struct usb_gadget *gadget,
	struct usb_endpoint_descriptor *desc
)
{
	/* delegate to the SuperSpeed-aware variant without a companion */
	struct usb_ep *ep = usb_ep_autoconfig_ss(gadget, desc, NULL);

	if (!ep)
		return NULL;

	/* report (variable) full speed bulk maxpacket, capped at the
	 * 64-byte full-speed bulk maximum */
	if ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_BULK) {
		int size = ep->maxpacket_limit;

		if (size > 64)
			size = 64;
		desc->wMaxPacketSize = cpu_to_le16(size);
	}

	return ep;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig);
/**
* usb_ep_autoconfig_release - releases endpoint and set it to initial state
* @ep: endpoint which should be released
*
* This function can be used during function bind for endpoints obtained
* from usb_ep_autoconfig(). It unclaims endpoint claimed by
* usb_ep_autoconfig() to make it available for other functions. Endpoint
* which was released is no longer valid and shouldn't be used in
* context of function which released it.
*/
void usb_ep_autoconfig_release(struct usb_ep *ep)
{
	/* make the endpoint available to later autoconfig calls again */
	ep->claimed = false;
	ep->driver_data = NULL;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig_release);
/**
* usb_ep_autoconfig_reset - reset endpoint autoconfig state
* @gadget: device for which autoconfig state will be reset
*
* Use this for devices where one configuration may need to assign
* endpoint resources very differently from the next one. It clears
* state such as ep->claimed and the record of assigned endpoints
* used by usb_ep_autoconfig().
*/
void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
{
	struct usb_ep	*ep;

	/* unclaim every endpoint and forget assigned endpoint numbers */
	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
		ep->claimed = false;
		ep->driver_data = NULL;
	}
	gadget->in_epnum = 0;
	gadget->out_epnum = 0;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig_reset);
| linux-master | drivers/usb/gadget/epautoconf.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>
static LIST_HEAD(func_list);
static DEFINE_MUTEX(func_lock);
/*
 * Look up a registered function driver by @name and allocate an instance
 * of it.  Returns the instance, ERR_PTR(-ENOENT) if no driver of that
 * name is registered, ERR_PTR(-EBUSY) if its module cannot be pinned, or
 * the ERR_PTR from alloc_inst().  On success the driver's module
 * refcount is held until usb_put_function_instance().
 */
static struct usb_function_instance *try_get_usb_function_instance(const char *name)
{
	struct usb_function_driver *fd;
	struct usb_function_instance *fi;

	fi = ERR_PTR(-ENOENT);
	mutex_lock(&func_lock);
	list_for_each_entry(fd, &func_list, list) {

		if (strcmp(name, fd->name))
			continue;

		if (!try_module_get(fd->mod)) {
			fi = ERR_PTR(-EBUSY);
			break;
		}
		fi = fd->alloc_inst();
		if (IS_ERR(fi))
			module_put(fd->mod);	/* undo the get on failure */
		else
			fi->fd = fd;
		break;
	}
	mutex_unlock(&func_lock);
	return fi;
}
/*
 * Allocate an instance of function driver @name, loading its module on
 * demand via the "usbfunc:" alias if it is not registered yet.
 */
struct usb_function_instance *usb_get_function_instance(const char *name)
{
	struct usb_function_instance *fi = try_get_usb_function_instance(name);
	int ret;

	if (!IS_ERR(fi))
		return fi;

	/* only a missing driver is worth a modprobe attempt; any other
	 * failure (e.g. -EBUSY) is reported as-is */
	ret = PTR_ERR(fi);
	if (ret != -ENOENT)
		return fi;

	ret = request_module("usbfunc:%s", name);
	if (ret < 0)
		return ERR_PTR(ret);

	return try_get_usb_function_instance(name);
}
EXPORT_SYMBOL_GPL(usb_get_function_instance);
/* Allocate a usb_function from instance @fi; on success the function is
 * linked back to its instance.  Returns an ERR_PTR on failure. */
struct usb_function *usb_get_function(struct usb_function_instance *fi)
{
	struct usb_function *f = fi->fd->alloc_func(fi);

	if (!IS_ERR(f))
		f->fi = fi;
	return f;
}
EXPORT_SYMBOL_GPL(usb_get_function);
void usb_put_function_instance(struct usb_function_instance *fi)
{
	struct module *mod;

	if (!fi)
		return;

	/* free_func_inst() frees @fi, so grab the module pointer first */
	mod = fi->fd->mod;
	fi->free_func_inst(fi);
	module_put(mod);	/* drop the ref taken in try_get_usb_function_instance */
}
EXPORT_SYMBOL_GPL(usb_put_function_instance);
/* Release a function obtained from usb_get_function(); NULL is a no-op. */
void usb_put_function(struct usb_function *f)
{
	if (!f)
		return;

	f->free_func(f);
}
EXPORT_SYMBOL_GPL(usb_put_function);
/*
 * Register a gadget function driver.  Returns 0 on success or -EEXIST
 * if a driver with the same name is already registered.
 */
int usb_function_register(struct usb_function_driver *newf)
{
	struct usb_function_driver *fd;
	int ret = 0;

	mutex_lock(&func_lock);
	list_for_each_entry(fd, &func_list, list) {
		if (!strcmp(fd->name, newf->name)) {
			ret = -EEXIST;
			goto out;
		}
	}
	list_add_tail(&newf->list, &func_list);
out:
	mutex_unlock(&func_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_function_register);
/* Remove a previously registered function driver from the global list. */
void usb_function_unregister(struct usb_function_driver *fd)
{
	mutex_lock(&func_lock);
	list_del(&fd->list);
	mutex_unlock(&func_lock);
}
EXPORT_SYMBOL_GPL(usb_function_unregister);
| linux-master | drivers/usb/gadget/functions.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/nls.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget_configfs.h>
#include <linux/usb/webusb.h>
#include "configfs.h"
#include "u_f.h"
#include "u_os_desc.h"
/*
 * Parse a configfs directory name as a USB LANGID and store it in the
 * string table.  Returns 0 on success, a kstrtou16 error on parse
 * failure, or -EINVAL for an unknown LANGID.
 */
int check_user_usb_string(const char *name,
		struct usb_gadget_strings *stringtab_dev)
{
	u16 num;
	int ret = kstrtou16(name, 0, &num);

	if (ret)
		return ret;

	if (!usb_validate_langid(num))
		return -EINVAL;

	stringtab_dev->language = num;
	return 0;
}
#define MAX_NAME_LEN	40
#define MAX_USB_STRING_LANGS 2

static const struct usb_descriptor_header *otg_desc[2];

/* Per-gadget configfs state: one of these backs each gadget directory. */
struct gadget_info {
	struct config_group group;
	struct config_group functions_group;
	struct config_group configs_group;
	struct config_group strings_group;
	struct config_group os_desc_group;
	struct config_group webusb_group;

	struct mutex lock;	/* serializes configfs ops on this gadget */
	struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1];
	struct list_head string_list;
	struct list_head available_func;	/* functions created under functions/ */

	struct usb_composite_driver composite;
	struct usb_composite_dev cdev;
	bool use_os_desc;
	char b_vendor_code;
	char qw_sign[OS_STRING_QW_SIGN_LEN];
	bool use_webusb;
	u16 bcd_webusb_version;
	u8 b_webusb_vendor_code;
	char landing_page[WEBUSB_URL_RAW_MAX_LENGTH];

	spinlock_t spinlock;
	bool unbind;
};

/* Map a configfs item in the gadget's root group back to its gadget_info. */
static inline struct gadget_info *to_gadget_info(struct config_item *item)
{
	return container_of(to_config_group(item), struct gadget_info, group);
}
/* Per-configuration configfs state (one per configs/<name>.<num> dir). */
struct config_usb_cfg {
	struct config_group group;
	struct config_group strings_group;
	struct list_head string_list;
	struct usb_configuration c;
	struct list_head func_list;	/* functions linked into this config */
	struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1];
};

static inline struct config_usb_cfg *to_config_usb_cfg(struct config_item *item)
{
	return container_of(to_config_group(item), struct config_usb_cfg,
			group);
}

/* Walk from a configuration back to its owning gadget via the shared cdev. */
static inline struct gadget_info *cfg_to_gadget_info(struct config_usb_cfg *cfg)
{
	return container_of(cfg->c.cdev, struct gadget_info, cdev);
}

/* Per-language string table (strings/<langid> directory). */
struct gadget_language {
	struct usb_gadget_strings stringtab_dev;
	struct usb_string strings[USB_GADGET_FIRST_AVAIL_IDX];
	char *manufacturer;
	char *product;
	char *serialnumber;

	struct config_group group;
	struct list_head list;
	struct list_head gadget_strings;
	unsigned int nstrings;
};

/* Per-language configuration-name string (configs/<c>/strings/<langid>). */
struct gadget_config_name {
	struct usb_gadget_strings stringtab_dev;
	struct usb_string strings;
	char *configuration;

	struct config_group group;
	struct list_head list;
};
#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1)
/*
 * Copy configfs string input @s into *s_copy, reusing an existing buffer
 * if one is already allocated and stripping a single trailing newline.
 * Returns 0 on success, -EOVERFLOW if the input is too long, -ENOMEM on
 * allocation failure.
 *
 * Fix: guard the newline strip against a zero-length input, which would
 * otherwise read str[-1] out of bounds.
 */
static int usb_string_copy(const char *s, char **s_copy)
{
	int ret;
	char *str;
	char *copy = *s_copy;

	ret = strlen(s);
	if (ret > USB_MAX_STRING_LEN)
		return -EOVERFLOW;

	if (copy) {
		/* reuse the old buffer; it was sized for the maximum */
		str = copy;
	} else {
		str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
		if (!str)
			return -ENOMEM;
	}
	strcpy(str, s);
	if (ret && str[ret - 1] == '\n')
		str[ret - 1] = '\0';
	*s_copy = str;
	return 0;
}
/*
 * Generators for configfs show/store methods covering simple u8/u16
 * fields of the gadget's USB device descriptor.  u16 fields are kept in
 * little-endian descriptor byte order, hence the le16 conversions.
 */
#define GI_DEVICE_DESC_SIMPLE_R_u8(__name)	\
static ssize_t gadget_dev_desc_##__name##_show(struct config_item *item, \
			char *page)	\
{	\
	return sprintf(page, "0x%02x\n", \
		to_gadget_info(item)->cdev.desc.__name); \
}

#define GI_DEVICE_DESC_SIMPLE_R_u16(__name)	\
static ssize_t gadget_dev_desc_##__name##_show(struct config_item *item, \
			char *page)	\
{	\
	return sprintf(page, "0x%04x\n", \
		le16_to_cpup(&to_gadget_info(item)->cdev.desc.__name)); \
}

#define GI_DEVICE_DESC_SIMPLE_W_u8(_name)		\
static ssize_t gadget_dev_desc_##_name##_store(struct config_item *item, \
		const char *page, size_t len)		\
{							\
	u8 val;						\
	int ret;					\
	ret = kstrtou8(page, 0, &val);			\
	if (ret)					\
		return ret;				\
	to_gadget_info(item)->cdev.desc._name = val;	\
	return len;					\
}

#define GI_DEVICE_DESC_SIMPLE_W_u16(_name)	\
static ssize_t gadget_dev_desc_##_name##_store(struct config_item *item, \
		const char *page, size_t len)		\
{							\
	u16 val;					\
	int ret;					\
	ret = kstrtou16(page, 0, &val);			\
	if (ret)					\
		return ret;				\
	to_gadget_info(item)->cdev.desc._name = cpu_to_le16p(&val);	\
	return len;					\
}

/* Convenience: emit both the show and the store method for a field. */
#define GI_DEVICE_DESC_SIMPLE_RW(_name, _type)	\
	GI_DEVICE_DESC_SIMPLE_R_##_type(_name)	\
	GI_DEVICE_DESC_SIMPLE_W_##_type(_name)

GI_DEVICE_DESC_SIMPLE_R_u16(bcdUSB);
GI_DEVICE_DESC_SIMPLE_RW(bDeviceClass, u8);
GI_DEVICE_DESC_SIMPLE_RW(bDeviceSubClass, u8);
GI_DEVICE_DESC_SIMPLE_RW(bDeviceProtocol, u8);
GI_DEVICE_DESC_SIMPLE_RW(bMaxPacketSize0, u8);
GI_DEVICE_DESC_SIMPLE_RW(idVendor, u16);
GI_DEVICE_DESC_SIMPLE_RW(idProduct, u16);
/* bcdDevice and bcdUSB get custom stores with BCD validation below. */
GI_DEVICE_DESC_SIMPLE_R_u16(bcdDevice);
/* Check that every 4-bit digit of @bcd_val is a valid BCD digit (0-9).
 * Returns 0 if valid, -EINVAL otherwise. */
static ssize_t is_valid_bcd(u16 bcd_val)
{
	int nibble;

	for (nibble = 0; nibble < 4; nibble++)
		if (((bcd_val >> (nibble * 4)) & 0xf) > 9)
			return -EINVAL;
	return 0;
}
/* Store bcdDevice after validating it is well-formed BCD. */
static ssize_t gadget_dev_desc_bcdDevice_store(struct config_item *item,
		const char *page, size_t len)
{
	u16 bcdDevice;
	int ret;

	ret = kstrtou16(page, 0, &bcdDevice);
	if (!ret)
		ret = is_valid_bcd(bcdDevice);
	if (ret)
		return ret;

	to_gadget_info(item)->cdev.desc.bcdDevice = cpu_to_le16(bcdDevice);
	return len;
}
/* Store bcdUSB after validating it is well-formed BCD. */
static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
		const char *page, size_t len)
{
	u16 bcdUSB;
	int ret;

	ret = kstrtou16(page, 0, &bcdUSB);
	if (!ret)
		ret = is_valid_bcd(bcdUSB);
	if (ret)
		return ret;

	to_gadget_info(item)->cdev.desc.bcdUSB = cpu_to_le16(bcdUSB);
	return len;
}
/* Show the name of the UDC this gadget is bound to ("" if unbound);
 * gi->lock guards against a concurrent bind/unbind freeing the name. */
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
	struct gadget_info *gi = to_gadget_info(item);
	char *udc_name;
	int ret;

	mutex_lock(&gi->lock);
	udc_name = gi->composite.gadget_driver.udc_name;
	ret = sprintf(page, "%s\n", udc_name ?: "");
	mutex_unlock(&gi->lock);

	return ret;
}
/*
 * Unbind the gadget from its UDC and free the stored UDC name.
 * Returns -ENODEV if not bound.
 * NOTE(review): callers in this file invoke this with gi->lock held;
 * presumably that is a requirement - confirm before adding new callers.
 */
static int unregister_gadget(struct gadget_info *gi)
{
	int ret;

	if (!gi->composite.gadget_driver.udc_name)
		return -ENODEV;

	ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
	if (ret)
		return ret;
	kfree(gi->composite.gadget_driver.udc_name);
	gi->composite.gadget_driver.udc_name = NULL;
	return 0;
}
/*
 * Bind or unbind the gadget: writing a UDC name binds the composite
 * driver to that controller; writing an empty string unbinds it.
 * On successful bind, ownership of @name passes to the gadget driver
 * (freed later by unregister_gadget()); on every error path @name is
 * freed here.
 */
static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
		const char *page, size_t len)
{
	struct gadget_info *gi = to_gadget_info(item);
	char *name;
	int ret;

	/* reject input with embedded NULs */
	if (strlen(page) < len)
		return -EOVERFLOW;

	name = kstrdup(page, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	if (name[len - 1] == '\n')
		name[len - 1] = '\0';

	mutex_lock(&gi->lock);

	if (!strlen(name)) {
		/* empty write: detach from the current UDC */
		ret = unregister_gadget(gi);
		if (ret)
			goto err;
		kfree(name);
	} else {
		if (gi->composite.gadget_driver.udc_name) {
			ret = -EBUSY;
			goto err;
		}
		gi->composite.gadget_driver.udc_name = name;
		ret = usb_gadget_register_driver(&gi->composite.gadget_driver);
		if (ret) {
			/* take back ownership so err: can free it */
			gi->composite.gadget_driver.udc_name = NULL;
			goto err;
		}
	}
	mutex_unlock(&gi->lock);
	return len;
err:
	kfree(name);
	mutex_unlock(&gi->lock);
	return ret;
}
/* Show the gadget's configured maximum speed as a human-readable string. */
static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
					      char *page)
{
	enum usb_device_speed speed = to_gadget_info(item)->composite.max_speed;

	return sprintf(page, "%s\n", usb_speed_string(speed));
}
/*
 * Set the gadget's maximum speed from a string.  Rejected while bound to
 * a UDC.  Note the comparison order: "super-speed-plus" must be tested
 * before "super-speed", which is its prefix.
 */
static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
					       const char *page, size_t len)
{
	struct gadget_info *gi = to_gadget_info(item);

	mutex_lock(&gi->lock);

	/* Prevent changing of max_speed after the driver is binded */
	if (gi->composite.gadget_driver.udc_name)
		goto err;

	if (strncmp(page, "super-speed-plus", 16) == 0)
		gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
	else if (strncmp(page, "super-speed", 11) == 0)
		gi->composite.max_speed = USB_SPEED_SUPER;
	else if (strncmp(page, "high-speed", 10) == 0)
		gi->composite.max_speed = USB_SPEED_HIGH;
	else if (strncmp(page, "full-speed", 10) == 0)
		gi->composite.max_speed = USB_SPEED_FULL;
	else if (strncmp(page, "low-speed", 9) == 0)
		gi->composite.max_speed = USB_SPEED_LOW;
	else
		goto err;

	gi->composite.gadget_driver.max_speed = gi->composite.max_speed;

	mutex_unlock(&gi->lock);
	return len;
err:
	mutex_unlock(&gi->lock);
	return -EINVAL;
}
/* configfs attribute declarations and the gadget root attribute table. */
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceSubClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceProtocol);
CONFIGFS_ATTR(gadget_dev_desc_, bMaxPacketSize0);
CONFIGFS_ATTR(gadget_dev_desc_, idVendor);
CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
CONFIGFS_ATTR(gadget_dev_desc_, max_speed);

static struct configfs_attribute *gadget_root_attrs[] = {
	&gadget_dev_desc_attr_bDeviceClass,
	&gadget_dev_desc_attr_bDeviceSubClass,
	&gadget_dev_desc_attr_bDeviceProtocol,
	&gadget_dev_desc_attr_bMaxPacketSize0,
	&gadget_dev_desc_attr_idVendor,
	&gadget_dev_desc_attr_idProduct,
	&gadget_dev_desc_attr_bcdDevice,
	&gadget_dev_desc_attr_bcdUSB,
	&gadget_dev_desc_attr_UDC,
	&gadget_dev_desc_attr_max_speed,
	NULL,
};
/* container_of helpers mapping configfs items to their owning objects. */
static inline struct gadget_language *to_gadget_language(struct config_item *item)
{
	return container_of(to_config_group(item), struct gadget_language,
			 group);
}

static inline struct gadget_config_name *to_gadget_config_name(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct gadget_config_name,
			 group);
}

static inline struct usb_function_instance *to_usb_function_instance(
		struct config_item *item)
{
	return container_of(to_config_group(item),
			 struct usb_function_instance, group);
}
/* configfs release for the gadget root item: frees the gadget_info once
 * all child lists have been emptied (WARN if anything is left behind). */
static void gadget_info_attr_release(struct config_item *item)
{
	struct gadget_info *gi = to_gadget_info(item);

	WARN_ON(!list_empty(&gi->cdev.configs));
	WARN_ON(!list_empty(&gi->string_list));
	WARN_ON(!list_empty(&gi->available_func));
	kfree(gi->composite.gadget_driver.function);
	kfree(gi->composite.gadget_driver.driver.name);
	kfree(gi);
}

static struct configfs_item_operations gadget_root_item_ops = {
	.release                = gadget_info_attr_release,
};

/* configfs release for a configuration directory: unlink it from the
 * composite device and free it (all functions must be unlinked first). */
static void gadget_config_attr_release(struct config_item *item)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(item);

	WARN_ON(!list_empty(&cfg->c.functions));
	list_del(&cfg->c.list);
	kfree(cfg->c.label);
	kfree(cfg);
}
/*
 * Symlink handler: link a function instance into a configuration.
 * Only allowed while unbound from a UDC; the function must belong to
 * this gadget and may be linked into the config at most once.  On
 * success a usb_function is allocated and stashed on cfg->func_list
 * until the gadget is bound.
 */
static int config_usb_cfg_link(
	struct config_item *usb_cfg_ci,
	struct config_item *usb_func_ci)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
	struct gadget_info *gi = cfg_to_gadget_info(cfg);

	struct usb_function_instance *fi =
			to_usb_function_instance(usb_func_ci);
	struct usb_function_instance *a_fi = NULL, *iter;
	struct usb_function *f;
	int ret;

	mutex_lock(&gi->lock);
	/*
	 * Make sure this function is from within our _this_ gadget and not
	 * from another gadget or a random directory.
	 * Also a function instance can only be linked once.
	 */

	if (gi->composite.gadget_driver.udc_name) {
		ret = -EINVAL;
		goto out;
	}

	/* reject instances created under a different gadget */
	list_for_each_entry(iter, &gi->available_func, cfs_list) {
		if (iter != fi)
			continue;
		a_fi = iter;
		break;
	}
	if (!a_fi) {
		ret = -EINVAL;
		goto out;
	}

	/* reject a second link of the same instance into this config */
	list_for_each_entry(f, &cfg->func_list, list) {
		if (f->fi == fi) {
			ret = -EEXIST;
			goto out;
		}
	}

	f = usb_get_function(fi);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto out;
	}

	/* stash the function until we bind it to the gadget */
	list_add_tail(&f->list, &cfg->func_list);
	ret = 0;
out:
	mutex_unlock(&gi->lock);
	return ret;
}
/*
 * Symlink removal handler: unlink a function instance from a
 * configuration.  If the gadget is currently bound to a UDC it is
 * force-unbound first so the stashed usb_function can be dropped safely.
 */
static void config_usb_cfg_unlink(
	struct config_item *usb_cfg_ci,
	struct config_item *usb_func_ci)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
	struct gadget_info *gi = cfg_to_gadget_info(cfg);

	struct usb_function_instance *fi =
			to_usb_function_instance(usb_func_ci);
	struct usb_function *f;

	/*
	 * ideally I would like to forbid to unlink functions while a gadget is
	 * bound to an UDC. Since this isn't possible at the moment, we simply
	 * force an unbind, the function is available here and then we can
	 * remove the function.
	 */
	mutex_lock(&gi->lock);
	if (gi->composite.gadget_driver.udc_name)
		unregister_gadget(gi);
	WARN_ON(gi->composite.gadget_driver.udc_name);

	list_for_each_entry(f, &cfg->func_list, list) {
		if (f->fi == fi) {
			list_del(&f->list);
			usb_put_function(f);
			mutex_unlock(&gi->lock);
			return;
		}
	}
	mutex_unlock(&gi->lock);
	WARN(1, "Unable to locate function to unbind\n");
}

static struct configfs_item_operations gadget_config_item_ops = {
	.release                = gadget_config_attr_release,
	.allow_link             = config_usb_cfg_link,
	.drop_link              = config_usb_cfg_unlink,
};
/* Show/store MaxPower for a configuration.
 * NOTE(review): the DIV_ROUND_UP(val, 8) > 0xff bound matches the
 * SuperSpeed bMaxPower encoding (8 mA units) - confirm units expected
 * by callers before changing. */
static ssize_t gadget_config_desc_MaxPower_show(struct config_item *item,
		char *page)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(item);

	return sprintf(page, "%u\n", cfg->c.MaxPower);
}

static ssize_t gadget_config_desc_MaxPower_store(struct config_item *item,
		const char *page, size_t len)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(item);
	u16 val;
	int ret;

	ret = kstrtou16(page, 0, &val);
	if (ret)
		return ret;
	if (DIV_ROUND_UP(val, 8) > 0xff)
		return -ERANGE;
	cfg->c.MaxPower = val;
	return len;
}

/* Show/store bmAttributes; only the reserved "one" bit plus
 * self-powered and remote-wakeup flags are accepted. */
static ssize_t gadget_config_desc_bmAttributes_show(struct config_item *item,
		char *page)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(item);

	return sprintf(page, "0x%02x\n", cfg->c.bmAttributes);
}

static ssize_t gadget_config_desc_bmAttributes_store(struct config_item *item,
		const char *page, size_t len)
{
	struct config_usb_cfg *cfg = to_config_usb_cfg(item);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret)
		return ret;
	if (!(val & USB_CONFIG_ATT_ONE))
		return -EINVAL;
	if (val & ~(USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER |
				USB_CONFIG_ATT_WAKEUP))
		return -EINVAL;
	cfg->c.bmAttributes = val;
	return len;
}

CONFIGFS_ATTR(gadget_config_desc_, MaxPower);
CONFIGFS_ATTR(gadget_config_desc_, bmAttributes);

static struct configfs_attribute *gadget_config_attrs[] = {
	&gadget_config_desc_attr_MaxPower,
	&gadget_config_desc_attr_bmAttributes,
	NULL,
};

static const struct config_item_type gadget_config_type = {
	.ct_item_ops	= &gadget_config_item_ops,
	.ct_attrs	= gadget_config_attrs,
	.ct_owner	= THIS_MODULE,
};

static const struct config_item_type gadget_root_type = {
	.ct_item_ops	= &gadget_root_item_ops,
	.ct_attrs	= gadget_root_attrs,
	.ct_owner	= THIS_MODULE,
};
/* Initialize the embedded composite device's lock and lists. */
static void composite_init_dev(struct usb_composite_dev *cdev)
{
	spin_lock_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->configs);
	INIT_LIST_HEAD(&cdev->gstrings);
}
/*
 * mkdir handler for the functions/ group.  Directory names have the form
 * FUNC.INSTANCE; FUNC selects the function driver and INSTANCE names the
 * instance.  The created instance is tracked on gi->available_func so
 * config_usb_cfg_link() can verify symlinks point into this gadget.
 */
static struct config_group *function_make(
		struct config_group *group,
		const char *name)
{
	struct gadget_info *gi;
	struct usb_function_instance *fi;
	char buf[MAX_NAME_LEN];
	char *func_name;
	char *instance_name;
	int ret;

	ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
	if (ret >= MAX_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	/* split "FUNC.INSTANCE" at the first dot */
	func_name = buf;
	instance_name = strchr(func_name, '.');
	if (!instance_name) {
		pr_err("Unable to locate . in FUNC.INSTANCE\n");
		return ERR_PTR(-EINVAL);
	}
	*instance_name = '\0';
	instance_name++;

	fi = usb_get_function_instance(func_name);
	if (IS_ERR(fi))
		return ERR_CAST(fi);

	ret = config_item_set_name(&fi->group.cg_item, "%s", name);
	if (ret) {
		usb_put_function_instance(fi);
		return ERR_PTR(ret);
	}
	if (fi->set_inst_name) {
		ret = fi->set_inst_name(fi, instance_name);
		if (ret) {
			usb_put_function_instance(fi);
			return ERR_PTR(ret);
		}
	}

	gi = container_of(group, struct gadget_info, functions_group);

	mutex_lock(&gi->lock);
	list_add_tail(&fi->cfs_list, &gi->available_func);
	mutex_unlock(&gi->lock);
	return &fi->group;
}
/*
 * configfs "rmdir" handler for the functions/ directory: unlink the
 * function instance from the gadget's list and drop the item reference,
 * which triggers the instance's release path.
 */
static void function_drop(
		struct config_group *group,
		struct config_item *item)
{
	struct gadget_info *gi = container_of(group, struct gadget_info,
					      functions_group);
	struct usb_function_instance *fi = to_usb_function_instance(item);

	mutex_lock(&gi->lock);
	list_del(&fi->cfs_list);
	mutex_unlock(&gi->lock);
	config_item_put(item);
}
/* mkdir/rmdir operations for the functions/ directory. */
static struct configfs_group_operations functions_ops = {
.make_group = &function_make,
.drop_item = &function_drop,
};
static const struct config_item_type functions_type = {
.ct_group_ops = &functions_ops,
.ct_owner = THIS_MODULE,
};
/* Generates show/store for the per-config "configuration" string attribute. */
GS_STRINGS_RW(gadget_config_name, configuration);
static struct configfs_attribute *gadget_config_name_langid_attrs[] = {
&gadget_config_name_attr_configuration,
NULL,
};
/*
 * Release a per-configuration string holder once its configfs item goes
 * away: unlink it and free the duplicated string plus the holder itself.
 */
static void gadget_config_name_attr_release(struct config_item *item)
{
	struct gadget_config_name *cn = to_gadget_config_name(item);

	list_del(&cn->list);
	kfree(cn->configuration);
	kfree(cn);
}
/* Generate item ops and the strings/<lang> group machinery for config names. */
USB_CONFIG_STRING_RW_OPS(gadget_config_name);
USB_CONFIG_STRINGS_LANG(gadget_config_name, config_usb_cfg);
/*
 * configfs "mkdir" handler for the configs/ directory.
 *
 * The directory name must be "label.bConfigurationValue".  A new
 * config_usb_cfg is allocated, given defaults (MaxPower from Kconfig,
 * bmAttributes with only the mandatory ATT_ONE bit), registered with the
 * composite device, and returned with a default "strings" subgroup.
 */
static struct config_group *config_desc_make(
struct config_group *group,
const char *name)
{
struct gadget_info *gi;
struct config_usb_cfg *cfg;
char buf[MAX_NAME_LEN];
char *num_str;
u8 num;
int ret;
gi = container_of(group, struct gadget_info, configs_group);
ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
if (ret >= MAX_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
/* Split "label.number" at the dot; both halves are required. */
num_str = strchr(buf, '.');
if (!num_str) {
pr_err("Unable to locate . in name.bConfigurationValue\n");
return ERR_PTR(-EINVAL);
}
*num_str = '\0';
num_str++;
if (!strlen(buf))
return ERR_PTR(-EINVAL);
ret = kstrtou8(num_str, 0, &num);
if (ret)
return ERR_PTR(ret);
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return ERR_PTR(-ENOMEM);
cfg->c.label = kstrdup(buf, GFP_KERNEL);
if (!cfg->c.label) {
ret = -ENOMEM;
goto err;
}
cfg->c.bConfigurationValue = num;
/* Defaults until userspace overrides via the MaxPower/bmAttributes files. */
cfg->c.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW;
cfg->c.bmAttributes = USB_CONFIG_ATT_ONE;
INIT_LIST_HEAD(&cfg->string_list);
INIT_LIST_HEAD(&cfg->func_list);
config_group_init_type_name(&cfg->group, name,
&gadget_config_type);
config_group_init_type_name(&cfg->strings_group, "strings",
&gadget_config_name_strings_type);
configfs_add_default_group(&cfg->strings_group, &cfg->group);
ret = usb_add_config_only(&gi->cdev, &cfg->c);
if (ret)
goto err;
return &cfg->group;
err:
/* kfree(NULL) is a no-op, so this path is safe before label is set. */
kfree(cfg->c.label);
kfree(cfg);
return ERR_PTR(ret);
}
/*
 * configfs "rmdir" handler for a configuration directory: drop the final
 * item reference so the configuration's release path runs.
 */
static void config_desc_drop(struct config_group *group,
			     struct config_item *item)
{
	config_item_put(item);
}
/* mkdir/rmdir operations for the configs/ directory. */
static struct configfs_group_operations config_desc_ops = {
.make_group = &config_desc_make,
.drop_item = &config_desc_drop,
};
static const struct config_item_type config_desc_type = {
.ct_group_ops = &config_desc_ops,
.ct_owner = THIS_MODULE,
};
/* Generate show/store for the three standard device-level strings. */
GS_STRINGS_RW(gadget_language, manufacturer);
GS_STRINGS_RW(gadget_language, product);
GS_STRINGS_RW(gadget_language, serialnumber);
static struct configfs_attribute *gadget_language_langid_attrs[] = {
&gadget_language_attr_manufacturer,
&gadget_language_attr_product,
&gadget_language_attr_serialnumber,
NULL,
};
/*
 * Release a language directory: unlink it from the gadget's language list
 * and free the three duplicated strings along with the holder.
 */
static void gadget_language_attr_release(struct config_item *item)
{
	struct gadget_language *gs = to_gadget_language(item);

	list_del(&gs->list);
	kfree(gs->manufacturer);
	kfree(gs->product);
	kfree(gs->serialnumber);
	kfree(gs);
}
/* Item ops for a language directory; only release is needed. */
static struct configfs_item_operations gadget_language_langid_item_ops = {
.release = gadget_language_attr_release,
};
/*
 * Show the string's assigned USB string descriptor index (read-only).
 * Use sysfs_emit() rather than bare sprintf() for PAGE_SIZE bounds
 * checking, consistent with the webusb_* show handlers in this file.
 */
static ssize_t gadget_string_id_show(struct config_item *item, char *page)
{
	struct gadget_string *string = to_gadget_string(item);

	return sysfs_emit(page, "%u\n", string->usb_string.id);
}
CONFIGFS_ATTR_RO(gadget_string_, id);
/*
 * Show the stored string value.  Note the snprintf bound is the size of
 * the embedded string buffer, not PAGE_SIZE, so output cannot exceed the
 * stored capacity; the returned value is snprintf's would-be length.
 */
static ssize_t gadget_string_s_show(struct config_item *item, char *page)
{
struct gadget_string *string = to_gadget_string(item);
int ret;
ret = snprintf(page, sizeof(string->string), "%s\n", string->string);
return ret;
}
/*
 * Store a new string value, bounded by both the buffer size and
 * USB_MAX_STRING_LEN.  Returns strscpy()'s count of copied characters.
 * NOTE(review): no newline stripping is done here, so a trailing '\n'
 * written by "echo" becomes part of the stored string — confirm intended.
 */
static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
size_t len)
{
struct gadget_string *string = to_gadget_string(item);
int size = min(sizeof(string->string), len + 1);
if (len > USB_MAX_STRING_LEN)
return -EINVAL;
return strscpy(string->string, page, size);
}
CONFIGFS_ATTR(gadget_string_, s);
/* Attributes of a user-created string item: read-only id, writable s. */
static struct configfs_attribute *gadget_string_attrs[] = {
&gadget_string_attr_id,
&gadget_string_attr_s,
NULL,
};
/* Free the gadget_string once its configfs item is released. */
static void gadget_string_release(struct config_item *item)
{
	kfree(to_gadget_string(item));
}
/* Item ops and type for a user-created string item. */
static struct configfs_item_operations gadget_string_item_ops = {
.release = gadget_string_release,
};
static const struct config_item_type gadget_string_type = {
.ct_item_ops = &gadget_string_item_ops,
.ct_attrs = gadget_string_attrs,
.ct_owner = THIS_MODULE,
};
/*
 * configfs "mkdir" inside a language directory: create one extra USB
 * string.  The string gets the next free descriptor id (nstrings starts
 * at 3 for the manufacturer/product/serial defaults) and is appended to
 * the language's string list.
 */
static struct config_item *gadget_language_string_make(struct config_group *group,
const char *name)
{
struct gadget_language *language;
struct gadget_string *string;
language = to_gadget_language(&group->cg_item);
string = kzalloc(sizeof(*string), GFP_KERNEL);
if (!string)
return ERR_PTR(-ENOMEM);
/* Assign the current count as this string's id, then grow the count. */
string->usb_string.id = language->nstrings++;
string->usb_string.s = string->string;
list_add_tail(&string->list, &language->gadget_strings);
config_item_init_type_name(&string->item, name, &gadget_string_type);
return &string->item;
}
/*
 * configfs "rmdir" of a user string: unlink it and renumber the remaining
 * strings so ids stay a contiguous run starting at
 * USB_GADGET_FIRST_AVAIL_IDX.
 */
static void gadget_language_string_drop(struct config_group *group,
struct config_item *item)
{
struct gadget_language *language;
struct gadget_string *string;
unsigned int i = USB_GADGET_FIRST_AVAIL_IDX;
language = to_gadget_language(&group->cg_item);
string = to_gadget_string(item);
list_del(&string->list);
language->nstrings--;
/* Reset the ids for the language's strings to guarantee a continuous set */
list_for_each_entry(string, &language->gadget_strings, list)
string->usb_string.id = i++;
}
/* mkdir/rmdir ops for items inside a language directory. */
static struct configfs_group_operations gadget_language_langid_group_ops = {
.make_item = gadget_language_string_make,
.drop_item = gadget_language_string_drop,
};
static struct config_item_type gadget_language_type = {
.ct_item_ops = &gadget_language_langid_item_ops,
.ct_group_ops = &gadget_language_langid_group_ops,
.ct_attrs = gadget_language_langid_attrs,
.ct_owner = THIS_MODULE,
};
/*
 * configfs "mkdir" in the strings/ directory: create a language.  The
 * directory name must parse as a USB LANGID; duplicates are rejected with
 * -EEXIST and the total language count is capped at MAX_USB_STRING_LANGS.
 */
static struct config_group *gadget_language_make(struct config_group *group,
const char *name)
{
struct gadget_info *gi;
struct gadget_language *gs;
struct gadget_language *new;
int langs = 0;
int ret;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
/* Validate the directory name as a hex LANGID and record it. */
ret = check_user_usb_string(name, &new->stringtab_dev);
if (ret)
goto err;
config_group_init_type_name(&new->group, name,
&gadget_language_type);
gi = container_of(group, struct gadget_info, strings_group);
ret = -EEXIST;
list_for_each_entry(gs, &gi->string_list, list) {
if (gs->stringtab_dev.language == new->stringtab_dev.language)
goto err;
langs++;
}
ret = -EOVERFLOW;
if (langs >= MAX_USB_STRING_LANGS)
goto err;
list_add_tail(&new->list, &gi->string_list);
INIT_LIST_HEAD(&new->gadget_strings);
/* We have the default manufacturer, product and serialnumber strings */
new->nstrings = 3;
return &new->group;
err:
kfree(new);
return ERR_PTR(ret);
}
/*
 * configfs "rmdir" of a language directory: release the final reference
 * so gadget_language_attr_release() runs.
 */
static void gadget_language_drop(struct config_group *group,
				 struct config_item *item)
{
	config_item_put(item);
}
/* mkdir/rmdir ops and type for the strings/ directory. */
static struct configfs_group_operations gadget_language_group_ops = {
.make_group = &gadget_language_make,
.drop_item = &gadget_language_drop,
};
static struct config_item_type gadget_language_strings_type = {
.ct_group_ops = &gadget_language_group_ops,
.ct_owner = THIS_MODULE,
};
/* Map a webusb/ config_item back to its owning gadget_info. */
static inline struct gadget_info *webusb_item_to_gadget_info(
struct config_item *item)
{
return container_of(to_config_group(item),
struct gadget_info, webusb_group);
}
/* Show whether the WebUSB descriptors will be exposed. */
static ssize_t webusb_use_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "%d\n",
webusb_item_to_gadget_info(item)->use_webusb);
}
/* Enable/disable WebUSB; parsed as a bool, updated under gi->lock. */
static ssize_t webusb_use_store(struct config_item *item, const char *page,
size_t len)
{
struct gadget_info *gi = webusb_item_to_gadget_info(item);
int ret;
bool use;
ret = kstrtobool(page, &use);
if (ret)
return ret;
mutex_lock(&gi->lock);
gi->use_webusb = use;
mutex_unlock(&gi->lock);
return len;
}
/* Show the WebUSB spec version in BCD form (e.g. 0x0100). */
static ssize_t webusb_bcdVersion_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "0x%04x\n",
webusb_item_to_gadget_info(item)->bcd_webusb_version);
}
/* Store the WebUSB bcdVersion; rejected unless it is valid BCD. */
static ssize_t webusb_bcdVersion_store(struct config_item *item,
const char *page, size_t len)
{
struct gadget_info *gi = webusb_item_to_gadget_info(item);
u16 bcdVersion;
int ret;
ret = kstrtou16(page, 0, &bcdVersion);
if (ret)
return ret;
/* is_valid_bcd() returns 0 for valid input, an errno otherwise. */
ret = is_valid_bcd(bcdVersion);
if (ret)
return ret;
mutex_lock(&gi->lock);
gi->bcd_webusb_version = bcdVersion;
mutex_unlock(&gi->lock);
return len;
}
/* Show the vendor code used for WebUSB vendor-specific requests. */
static ssize_t webusb_bVendorCode_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "0x%02x\n",
webusb_item_to_gadget_info(item)->b_webusb_vendor_code);
}
/* Store the WebUSB vendor code (any u8), updated under gi->lock. */
static ssize_t webusb_bVendorCode_store(struct config_item *item,
const char *page, size_t len)
{
struct gadget_info *gi = webusb_item_to_gadget_info(item);
int ret;
u8 b_vendor_code;
ret = kstrtou8(page, 0, &b_vendor_code);
if (ret)
return ret;
mutex_lock(&gi->lock);
gi->b_webusb_vendor_code = b_vendor_code;
mutex_unlock(&gi->lock);
return len;
}
/* Show the stored WebUSB landing-page URL. */
static ssize_t webusb_landingPage_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "%s\n", webusb_item_to_gadget_info(item)->landing_page);
}
/*
 * Store the landing-page URL.  The URL (minus trailing newline) must fit
 * the raw buffer, and — after accounting for a URL-scheme prefix that the
 * descriptor encodes as a single byte — must also fit the one-byte
 * bLength of the WebUSB URL descriptor.
 * NOTE(review): page[l - 1] is read before any length check; a zero-length
 * write would index page[-1] — confirm configfs never passes len == 0.
 */
static ssize_t webusb_landingPage_store(struct config_item *item, const char *page,
size_t len)
{
struct gadget_info *gi = webusb_item_to_gadget_info(item);
unsigned int bytes_to_strip = 0;
int l = len;
if (page[l - 1] == '\n') {
--l;
++bytes_to_strip;
}
if (l > sizeof(gi->landing_page)) {
pr_err("webusb: landingPage URL too long\n");
return -EINVAL;
}
// validation
/* bytes_to_strip is repurposed here: scheme bytes the descriptor elides. */
if (strncasecmp(page, "https://", 8) == 0)
bytes_to_strip = 8;
else if (strncasecmp(page, "http://", 7) == 0)
bytes_to_strip = 7;
else
bytes_to_strip = 0;
if (l > U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + bytes_to_strip) {
pr_err("webusb: landingPage URL %d bytes too long for given URL scheme\n",
l - U8_MAX + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH - bytes_to_strip);
return -EINVAL;
}
mutex_lock(&gi->lock);
// ensure 0 bytes are set, in case the new landing page is shorter then the old one.
memcpy_and_pad(gi->landing_page, sizeof(gi->landing_page), page, l, 0);
mutex_unlock(&gi->lock);
return len;
}
/* Attribute wiring and item type for the webusb/ directory. */
CONFIGFS_ATTR(webusb_, use);
CONFIGFS_ATTR(webusb_, bVendorCode);
CONFIGFS_ATTR(webusb_, bcdVersion);
CONFIGFS_ATTR(webusb_, landingPage);
static struct configfs_attribute *webusb_attrs[] = {
&webusb_attr_use,
&webusb_attr_bcdVersion,
&webusb_attr_bVendorCode,
&webusb_attr_landingPage,
NULL,
};
static struct config_item_type webusb_type = {
.ct_attrs = webusb_attrs,
.ct_owner = THIS_MODULE,
};
/* Map an os_desc/ config_item back to its owning gadget_info. */
static inline struct gadget_info *os_desc_item_to_gadget_info(
struct config_item *item)
{
return container_of(to_config_group(item),
struct gadget_info, os_desc_group);
}
/*
 * Show whether the Microsoft OS descriptors will be exposed.
 * Use sysfs_emit() rather than bare sprintf() for PAGE_SIZE bounds
 * checking, consistent with webusb_use_show() in this file.
 */
static ssize_t os_desc_use_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "%d\n",
			  os_desc_item_to_gadget_info(item)->use_os_desc);
}
/* Enable/disable the OS descriptors; parsed as a bool, set under gi->lock. */
static ssize_t os_desc_use_store(struct config_item *item, const char *page,
size_t len)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int ret;
bool use;
ret = kstrtobool(page, &use);
if (ret)
return ret;
mutex_lock(&gi->lock);
gi->use_os_desc = use;
mutex_unlock(&gi->lock);
return len;
}
/*
 * Show the vendor code used for OS-descriptor vendor requests.
 * Use sysfs_emit() rather than bare sprintf() for PAGE_SIZE bounds
 * checking, consistent with webusb_bVendorCode_show() in this file.
 */
static ssize_t os_desc_b_vendor_code_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "0x%02x\n",
			  os_desc_item_to_gadget_info(item)->b_vendor_code);
}
/* Store the OS-descriptor vendor code (any u8), updated under gi->lock. */
static ssize_t os_desc_b_vendor_code_store(struct config_item *item,
const char *page, size_t len)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int ret;
u8 b_vendor_code;
ret = kstrtou8(page, 0, &b_vendor_code);
if (ret)
return ret;
mutex_lock(&gi->lock);
gi->b_vendor_code = b_vendor_code;
mutex_unlock(&gi->lock);
return len;
}
/*
 * Show the OS string "qwSignature": decode the stored UTF-16LE signature
 * to UTF-8 into the page buffer and append a newline.
 */
static ssize_t os_desc_qw_sign_show(struct config_item *item, char *page)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int res;
res = utf16s_to_utf8s((wchar_t *) gi->qw_sign, OS_STRING_QW_SIGN_LEN,
UTF16_LITTLE_ENDIAN, page, PAGE_SIZE - 1);
page[res++] = '\n';
return res;
}
/*
 * Store the OS string signature: take at most OS_STRING_QW_SIGN_LEN/2
 * input characters (UTF-16 doubles the size), strip a trailing newline,
 * and encode to UTF-16LE under gi->lock.
 * NOTE(review): page[l - 1] is read before checking l > 0 — confirm
 * configfs never passes len == 0.
 */
static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
size_t len)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int res, l;
l = min((int)len, OS_STRING_QW_SIGN_LEN >> 1);
if (page[l - 1] == '\n')
--l;
mutex_lock(&gi->lock);
res = utf8s_to_utf16s(page, l,
UTF16_LITTLE_ENDIAN, (wchar_t *) gi->qw_sign,
OS_STRING_QW_SIGN_LEN);
/* Report the full write as consumed on success. */
if (res > 0)
res = len;
mutex_unlock(&gi->lock);
return res;
}
/* Attribute wiring for the os_desc/ directory. */
CONFIGFS_ATTR(os_desc_, use);
CONFIGFS_ATTR(os_desc_, b_vendor_code);
CONFIGFS_ATTR(os_desc_, qw_sign);
static struct configfs_attribute *os_desc_attrs[] = {
&os_desc_attr_use,
&os_desc_attr_b_vendor_code,
&os_desc_attr_qw_sign,
NULL,
};
/*
 * Symlink handler for os_desc/: designate one existing configuration as
 * the one carrying the OS descriptors.  The target must be a registered
 * config of this gadget, and only one link may exist at a time (-EBUSY).
 */
static int os_desc_link(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
struct config_usb_cfg *c_target = to_config_usb_cfg(usb_cfg_ci);
struct usb_configuration *c = NULL, *iter;
int ret;
mutex_lock(&gi->lock);
/* Verify the link target is actually one of this cdev's configs. */
list_for_each_entry(iter, &cdev->configs, list) {
if (iter != &c_target->c)
continue;
c = iter;
break;
}
if (!c) {
ret = -EINVAL;
goto out;
}
if (cdev->os_desc_config) {
ret = -EBUSY;
goto out;
}
cdev->os_desc_config = &c_target->c;
ret = 0;
out:
mutex_unlock(&gi->lock);
return ret;
}
/*
 * Symlink removal for os_desc/: if the gadget is currently bound to a
 * UDC, unregister it first (the config set is changing), then clear the
 * OS-descriptor config pointer.
 */
static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
mutex_lock(&gi->lock);
if (gi->composite.gadget_driver.udc_name)
unregister_gadget(gi);
cdev->os_desc_config = NULL;
/* unregister_gadget() must have cleared the UDC name. */
WARN_ON(gi->composite.gadget_driver.udc_name);
mutex_unlock(&gi->lock);
}
/* Link/unlink ops and item type for the os_desc/ directory. */
static struct configfs_item_operations os_desc_ops = {
.allow_link = os_desc_link,
.drop_link = os_desc_unlink,
};
static struct config_item_type os_desc_type = {
.ct_item_ops = &os_desc_ops,
.ct_attrs = os_desc_attrs,
.ct_owner = THIS_MODULE,
};
/* Map an extended-property config_item back to its container. */
static inline struct usb_os_desc_ext_prop
*to_usb_os_desc_ext_prop(struct config_item *item)
{
return container_of(item, struct usb_os_desc_ext_prop, item);
}
static ssize_t ext_prop_type_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_usb_os_desc_ext_prop(item)->type);
}
/*
 * Store the extended property's type.  When the type flips between the
 * binary/LE32/BE32 family and the unicode family, the recorded data_len
 * is doubled or halved to keep it consistent with how ext_prop_data_show
 * and ext_prop_data_store account for unicode payloads.
 */
static ssize_t ext_prop_type_store(struct config_item *item,
const char *page, size_t len)
{
struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item);
struct usb_os_desc *desc = to_usb_os_desc(ext_prop->item.ci_parent);
u8 type;
int ret;
ret = kstrtou8(page, 0, &type);
if (ret)
return ret;
if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI)
return -EINVAL;
/* opts_mutex is only present once the descriptor is bound to options. */
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
if ((ext_prop->type == USB_EXT_PROP_BINARY ||
ext_prop->type == USB_EXT_PROP_LE32 ||
ext_prop->type == USB_EXT_PROP_BE32) &&
(type == USB_EXT_PROP_UNICODE ||
type == USB_EXT_PROP_UNICODE_ENV ||
type == USB_EXT_PROP_UNICODE_LINK))
ext_prop->data_len <<= 1;
else if ((ext_prop->type == USB_EXT_PROP_UNICODE ||
ext_prop->type == USB_EXT_PROP_UNICODE_ENV ||
ext_prop->type == USB_EXT_PROP_UNICODE_LINK) &&
(type == USB_EXT_PROP_BINARY ||
type == USB_EXT_PROP_LE32 ||
type == USB_EXT_PROP_BE32))
ext_prop->data_len >>= 1;
ext_prop->type = type;
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
return len;
}
/*
 * Show the raw property data.  For unicode types data_len counts UTF-16
 * bytes, but the stored backing data is the UTF-8 source, so halve the
 * length before copying.
 */
static ssize_t ext_prop_data_show(struct config_item *item, char *page)
{
struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item);
int len = ext_prop->data_len;
if (ext_prop->type == USB_EXT_PROP_UNICODE ||
ext_prop->type == USB_EXT_PROP_UNICODE_ENV ||
ext_prop->type == USB_EXT_PROP_UNICODE_LINK)
len >>= 1;
memcpy(page, ext_prop->data, len);
return len;
}
/*
 * Store new property data: duplicate the input (dropping one trailing
 * '\n' or '\0'), swap it in, and re-account the descriptor's total
 * ext_prop_len.  Unicode types count double bytes plus a 2-byte
 * terminator, mirroring ext_prop_type_store's bookkeeping.
 * NOTE(review): page[len - 1] is read before checking len > 0 — confirm
 * configfs never passes len == 0.
 */
static ssize_t ext_prop_data_store(struct config_item *item,
const char *page, size_t len)
{
struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item);
struct usb_os_desc *desc = to_usb_os_desc(ext_prop->item.ci_parent);
char *new_data;
size_t ret_len = len;
if (page[len - 1] == '\n' || page[len - 1] == '\0')
--len;
new_data = kmemdup(page, len, GFP_KERNEL);
if (!new_data)
return -ENOMEM;
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
kfree(ext_prop->data);
ext_prop->data = new_data;
/* Subtract the old length, install the new, then add it back. */
desc->ext_prop_len -= ext_prop->data_len;
ext_prop->data_len = len;
desc->ext_prop_len += ext_prop->data_len;
if (ext_prop->type == USB_EXT_PROP_UNICODE ||
ext_prop->type == USB_EXT_PROP_UNICODE_ENV ||
ext_prop->type == USB_EXT_PROP_UNICODE_LINK) {
desc->ext_prop_len -= ext_prop->data_len;
ext_prop->data_len <<= 1;
ext_prop->data_len += 2;
desc->ext_prop_len += ext_prop->data_len;
}
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
return ret_len;
}
/* Attribute wiring and release for an extended-property item. */
CONFIGFS_ATTR(ext_prop_, type);
CONFIGFS_ATTR(ext_prop_, data);
static struct configfs_attribute *ext_prop_attrs[] = {
&ext_prop_attr_type,
&ext_prop_attr_data,
NULL,
};
static void usb_os_desc_ext_prop_release(struct config_item *item)
{
struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item);
kfree(ext_prop); /* frees a whole chunk */
}
static struct configfs_item_operations ext_prop_ops = {
.release = usb_os_desc_ext_prop_release,
};
/*
 * configfs "mkdir" of an extended property under an interface directory.
 * The property struct and its per-item config_item_type are allocated as
 * one chunk (vla_group), which is why the release callback frees a single
 * allocation.  ext_prop_len grows by the 14-byte fixed header plus the
 * UTF-16 size of the property name (2*strlen + 2-byte terminator).
 */
static struct config_item *ext_prop_make(
struct config_group *group,
const char *name)
{
struct usb_os_desc_ext_prop *ext_prop;
struct config_item_type *ext_prop_type;
struct usb_os_desc *desc;
char *vlabuf;
vla_group(data_chunk);
vla_item(data_chunk, struct usb_os_desc_ext_prop, ext_prop, 1);
vla_item(data_chunk, struct config_item_type, ext_prop_type, 1);
vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
if (!vlabuf)
return ERR_PTR(-ENOMEM);
ext_prop = vla_ptr(vlabuf, data_chunk, ext_prop);
ext_prop_type = vla_ptr(vlabuf, data_chunk, ext_prop_type);
desc = container_of(group, struct usb_os_desc, group);
ext_prop_type->ct_item_ops = &ext_prop_ops;
ext_prop_type->ct_attrs = ext_prop_attrs;
ext_prop_type->ct_owner = desc->owner;
config_item_init_type_name(&ext_prop->item, name, ext_prop_type);
ext_prop->name = kstrdup(name, GFP_KERNEL);
if (!ext_prop->name) {
kfree(vlabuf);
return ERR_PTR(-ENOMEM);
}
desc->ext_prop_len += 14;
ext_prop->name_len = 2 * strlen(ext_prop->name) + 2;
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
desc->ext_prop_len += ext_prop->name_len;
list_add_tail(&ext_prop->entry, &desc->ext_prop);
++desc->ext_prop_count;
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
return &ext_prop->item;
}
/*
 * configfs "rmdir" of an extended property: unlink it, undo the length
 * accounting (name + data + 14-byte header), free the name, and drop the
 * reference (the item release frees the combined chunk).
 */
static void ext_prop_drop(struct config_group *group, struct config_item *item)
{
struct usb_os_desc_ext_prop *ext_prop = to_usb_os_desc_ext_prop(item);
struct usb_os_desc *desc = to_usb_os_desc(&group->cg_item);
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
list_del(&ext_prop->entry);
--desc->ext_prop_count;
kfree(ext_prop->name);
desc->ext_prop_len -= (ext_prop->name_len + ext_prop->data_len + 14);
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
config_item_put(item);
}
/* mkdir/rmdir ops for extended properties inside an interface group. */
static struct configfs_group_operations interf_grp_ops = {
.make_item = &ext_prop_make,
.drop_item = &ext_prop_drop,
};
/* Show the 8-byte compatible ID verbatim (raw bytes, not a string). */
static ssize_t interf_grp_compatible_id_show(struct config_item *item,
char *page)
{
memcpy(page, to_usb_os_desc(item)->ext_compat_id, 8);
return 8;
}
/* Store up to 8 bytes of compatible ID, dropping a trailing newline. */
static ssize_t interf_grp_compatible_id_store(struct config_item *item,
const char *page, size_t len)
{
struct usb_os_desc *desc = to_usb_os_desc(item);
int l;
l = min_t(int, 8, len);
if (page[l - 1] == '\n')
--l;
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
memcpy(desc->ext_compat_id, page, l);
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
return len;
}
/* Show the 8-byte sub-compatible ID (second half of ext_compat_id). */
static ssize_t interf_grp_sub_compatible_id_show(struct config_item *item,
char *page)
{
memcpy(page, to_usb_os_desc(item)->ext_compat_id + 8, 8);
return 8;
}
/* Store up to 8 bytes of sub-compatible ID, dropping a trailing newline. */
static ssize_t interf_grp_sub_compatible_id_store(struct config_item *item,
const char *page, size_t len)
{
struct usb_os_desc *desc = to_usb_os_desc(item);
int l;
l = min_t(int, 8, len);
if (page[l - 1] == '\n')
--l;
if (desc->opts_mutex)
mutex_lock(desc->opts_mutex);
memcpy(desc->ext_compat_id + 8, page, l);
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
return len;
}
CONFIGFS_ATTR(interf_grp_, compatible_id);
CONFIGFS_ATTR(interf_grp_, sub_compatible_id);
static struct configfs_attribute *interf_grp_attrs[] = {
&interf_grp_attr_compatible_id,
&interf_grp_attr_sub_compatible_id,
NULL
};
/*
 * Build the os_desc default group for a function: one "interface.<name>"
 * subgroup per interface, each exposing compatible-ID attributes and an
 * ext_prop mkdir handler.  The group, its type, and the shared interface
 * type are allocated as one chunk owned by the returned group.
 */
struct config_group *usb_os_desc_prepare_interf_dir(
struct config_group *parent,
int n_interf,
struct usb_os_desc **desc,
char **names,
struct module *owner)
{
struct config_group *os_desc_group;
struct config_item_type *os_desc_type, *interface_type;
vla_group(data_chunk);
vla_item(data_chunk, struct config_group, os_desc_group, 1);
vla_item(data_chunk, struct config_item_type, os_desc_type, 1);
vla_item(data_chunk, struct config_item_type, interface_type, 1);
char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
if (!vlabuf)
return ERR_PTR(-ENOMEM);
os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
interface_type = vla_ptr(vlabuf, data_chunk, interface_type);
os_desc_type->ct_owner = owner;
config_group_init_type_name(os_desc_group, "os_desc", os_desc_type);
configfs_add_default_group(os_desc_group, parent);
interface_type->ct_group_ops = &interf_grp_ops;
interface_type->ct_attrs = interf_grp_attrs;
interface_type->ct_owner = owner;
/* Wire each interface descriptor into a named default subgroup. */
while (n_interf--) {
struct usb_os_desc *d;
d = desc[n_interf];
d->owner = owner;
config_group_init_type_name(&d->group, "", interface_type);
config_item_set_name(&d->group.cg_item, "interface.%s",
names[n_interf]);
configfs_add_default_group(&d->group, os_desc_group);
}
return os_desc_group;
}
EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
/*
 * Placeholder for composite bind/unbind slots that must never be called
 * directly — the real work happens in configfs_composite_bind/unbind.
 */
static int configfs_do_nothing(struct usb_composite_dev *cdev)
{
WARN_ON(1);
return -EINVAL;
}
/* Implemented in composite.c; used by the bind path below. */
int composite_dev_prepare(struct usb_composite_driver *composite,
struct usb_composite_dev *dev);
int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
struct usb_ep *ep0);
/*
 * Detach all functions from every configuration: move each function back
 * to its config's func_list (in reverse bind order), call its unbind, and
 * reset per-config interface/speed state so the gadget can be re-bound.
 */
static void purge_configs_funcs(struct gadget_info *gi)
{
struct usb_configuration *c;
list_for_each_entry(c, &gi->cdev.configs, list) {
struct usb_function *f, *tmp;
struct config_usb_cfg *cfg;
cfg = container_of(c, struct config_usb_cfg, c);
/* Reverse order: unbind functions opposite to how they were added. */
list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
f->name, f);
f->unbind(c, f);
}
}
c->next_interface_id = 0;
memset(c->interface, 0, sizeof(c->interface));
c->superspeed_plus = 0;
c->superspeed = 0;
c->highspeed = 0;
c->fullspeed = 0;
}
}
/*
 * Build per-language string tables from the configured languages and
 * attach them to the composite device via usb_gstrings_attach().
 *
 * Every language must carry the same number of strings.  The temporary
 * tables are always freed before returning (usb_gstrings_attach copies
 * what it needs).  Returns the attached usb_string array, NULL when no
 * languages are configured, or an ERR_PTR on failure.
 */
static struct usb_string *
configfs_attach_gadget_strings(struct gadget_info *gi)
{
struct usb_gadget_strings **gadget_strings;
struct gadget_language *language;
struct gadget_string *string;
unsigned int nlangs = 0;
struct list_head *iter;
struct usb_string *us;
unsigned int i = 0;
int nstrings = -1;
unsigned int j;
list_for_each(iter, &gi->string_list)
nlangs++;
/* Bail out early if no languages are configured */
if (!nlangs)
return NULL;
gadget_strings = kcalloc(nlangs + 1, /* including NULL terminator */
sizeof(struct usb_gadget_strings *), GFP_KERNEL);
if (!gadget_strings)
return ERR_PTR(-ENOMEM);
list_for_each_entry(language, &gi->string_list, list) {
struct usb_string *stringtab;
/* First language fixes the expected string count. */
if (nstrings == -1) {
nstrings = language->nstrings;
} else if (nstrings != language->nstrings) {
pr_err("languages must contain the same number of strings\n");
us = ERR_PTR(-EINVAL);
goto cleanup;
}
stringtab = kcalloc(language->nstrings + 1, sizeof(struct usb_string),
GFP_KERNEL);
if (!stringtab) {
us = ERR_PTR(-ENOMEM);
goto cleanup;
}
stringtab[USB_GADGET_MANUFACTURER_IDX].id = USB_GADGET_MANUFACTURER_IDX;
stringtab[USB_GADGET_MANUFACTURER_IDX].s = language->manufacturer;
stringtab[USB_GADGET_PRODUCT_IDX].id = USB_GADGET_PRODUCT_IDX;
stringtab[USB_GADGET_PRODUCT_IDX].s = language->product;
stringtab[USB_GADGET_SERIAL_IDX].id = USB_GADGET_SERIAL_IDX;
stringtab[USB_GADGET_SERIAL_IDX].s = language->serialnumber;
/* User-defined strings follow the three fixed ones. */
j = USB_GADGET_FIRST_AVAIL_IDX;
list_for_each_entry(string, &language->gadget_strings, list) {
memcpy(&stringtab[j], &string->usb_string, sizeof(struct usb_string));
j++;
}
language->stringtab_dev.strings = stringtab;
gadget_strings[i] = &language->stringtab_dev;
i++;
}
us = usb_gstrings_attach(&gi->cdev, gadget_strings, nstrings);
cleanup:
/* Temporary tables are no longer needed whether or not attach worked. */
list_for_each_entry(language, &gi->string_list, list) {
kfree(language->stringtab_dev.strings);
language->stringtab_dev.strings = NULL;
}
kfree(gadget_strings);
return us;
}
/*
 * Gadget-driver bind callback: wire the configfs-described gadget to a
 * freshly attached UDC.
 *
 * Validates that at least one configuration exists and each has at least
 * one function, attaches strings/WebUSB/OS-descriptor state, allocates
 * the OTG descriptor if needed, then adds every queued function to its
 * configuration.  On failure the partially bound functions are purged and
 * the composite device cleaned up.
 */
static int configfs_composite_bind(struct usb_gadget *gadget,
struct usb_gadget_driver *gdriver)
{
struct usb_composite_driver *composite = to_cdriver(gdriver);
struct gadget_info *gi = container_of(composite,
struct gadget_info, composite);
struct usb_composite_dev *cdev = &gi->cdev;
struct usb_configuration *c;
struct usb_string *s;
unsigned i;
int ret;
/* the gi->lock is hold by the caller */
gi->unbind = 0;
cdev->gadget = gadget;
set_gadget_data(gadget, cdev);
ret = composite_dev_prepare(composite, cdev);
if (ret)
return ret;
/* and now the gadget bind */
ret = -EINVAL;
if (list_empty(&gi->cdev.configs)) {
pr_err("Need at least one configuration in %s.\n",
gi->composite.name);
goto err_comp_cleanup;
}
list_for_each_entry(c, &gi->cdev.configs, list) {
struct config_usb_cfg *cfg;
cfg = container_of(c, struct config_usb_cfg, c);
if (list_empty(&cfg->func_list)) {
pr_err("Config %s/%d of %s needs at least one function.\n",
c->label, c->bConfigurationValue,
gi->composite.name);
goto err_comp_cleanup;
}
}
/* init all strings */
if (!list_empty(&gi->string_list)) {
s = configfs_attach_gadget_strings(gi);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto err_comp_cleanup;
}
gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
gi->cdev.desc.iSerialNumber = s[USB_GADGET_SERIAL_IDX].id;
gi->cdev.usb_strings = s;
}
/* Propagate WebUSB settings configured via the webusb/ directory. */
if (gi->use_webusb) {
cdev->use_webusb = true;
cdev->bcd_webusb_version = gi->bcd_webusb_version;
cdev->b_webusb_vendor_code = gi->b_webusb_vendor_code;
memcpy(cdev->landing_page, gi->landing_page, WEBUSB_URL_RAW_MAX_LENGTH);
}
/* Propagate Microsoft OS descriptor settings from os_desc/. */
if (gi->use_os_desc) {
cdev->use_os_string = true;
cdev->b_vendor_code = gi->b_vendor_code;
memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
}
if (gadget_is_otg(gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc) {
ret = -ENOMEM;
goto err_comp_cleanup;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
/* Go through all configs, attach all functions */
list_for_each_entry(c, &gi->cdev.configs, list) {
struct config_usb_cfg *cfg;
struct usb_function *f;
struct usb_function *tmp;
struct gadget_config_name *cn;
if (gadget_is_otg(gadget))
c->descriptors = otg_desc;
/* Properly configure the bmAttributes wakeup bit */
check_remote_wakeup_config(gadget, c);
cfg = container_of(c, struct config_usb_cfg, c);
if (!list_empty(&cfg->string_list)) {
i = 0;
list_for_each_entry(cn, &cfg->string_list, list) {
cfg->gstrings[i] = &cn->stringtab_dev;
cn->stringtab_dev.strings = &cn->strings;
cn->strings.s = cn->configuration;
i++;
}
cfg->gstrings[i] = NULL;
s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto err_comp_cleanup;
}
c->iConfiguration = s[0].id;
}
/* Move queued functions into the live config one at a time;
 * on failure put the function back so unbind stays balanced. */
list_for_each_entry_safe(f, tmp, &cfg->func_list, list) {
list_del(&f->list);
ret = usb_add_function(c, f);
if (ret) {
list_add(&f->list, &cfg->func_list);
goto err_purge_funcs;
}
}
ret = usb_gadget_check_config(cdev->gadget);
if (ret)
goto err_purge_funcs;
usb_ep_autoconfig_reset(cdev->gadget);
}
if (cdev->use_os_string) {
ret = composite_os_desc_req_prepare(cdev, gadget->ep0);
if (ret)
goto err_purge_funcs;
}
usb_ep_autoconfig_reset(cdev->gadget);
return 0;
err_purge_funcs:
purge_configs_funcs(gi);
err_comp_cleanup:
composite_dev_cleanup(cdev);
return ret;
}
/*
 * Gadget-driver unbind callback: mark the gadget as unbinding under the
 * spinlock (so concurrent setup/disconnect/etc. bail out), tear down the
 * OTG descriptor, functions, and composite state, then clear the gadget
 * pointers under the lock again.
 */
static void configfs_composite_unbind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
/* the gi->lock is hold by the caller */
cdev = get_gadget_data(gadget);
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
gi->unbind = 1;
spin_unlock_irqrestore(&gi->spinlock, flags);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
purge_configs_funcs(gi);
composite_dev_cleanup(cdev);
usb_ep_autoconfig_reset(cdev->gadget);
spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
cdev->deactivations = 0;
gadget->deactivated = false;
set_gadget_data(gadget, NULL);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
/*
 * ep0 setup handler.  gadget data is re-read under gi->spinlock so a
 * concurrent unbind (which sets gi->unbind and clears the data) is seen
 * before composite_setup() is entered.
 */
static int configfs_composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
int ret;
cdev = get_gadget_data(gadget);
if (!cdev)
return 0;
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
/* Re-check under the lock: unbind may have raced with us. */
cdev = get_gadget_data(gadget);
if (!cdev || gi->unbind) {
spin_unlock_irqrestore(&gi->spinlock, flags);
return 0;
}
ret = composite_setup(gadget, ctrl);
spin_unlock_irqrestore(&gi->spinlock, flags);
return ret;
}
/*
 * Disconnect handler; same unbind-race guard as configfs_composite_setup:
 * re-read gadget data under gi->spinlock before forwarding.
 */
static void configfs_composite_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
cdev = get_gadget_data(gadget);
if (!cdev)
return;
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
cdev = get_gadget_data(gadget);
if (!cdev || gi->unbind) {
spin_unlock_irqrestore(&gi->spinlock, flags);
return;
}
composite_disconnect(gadget);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
/*
 * Bus-reset handler; same unbind-race guard as configfs_composite_setup.
 */
static void configfs_composite_reset(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
cdev = get_gadget_data(gadget);
if (!cdev)
return;
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
cdev = get_gadget_data(gadget);
if (!cdev || gi->unbind) {
spin_unlock_irqrestore(&gi->spinlock, flags);
return;
}
composite_reset(gadget);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
/*
 * Suspend handler; same unbind-race guard as configfs_composite_setup.
 */
static void configfs_composite_suspend(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
cdev = get_gadget_data(gadget);
if (!cdev)
return;
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
cdev = get_gadget_data(gadget);
if (!cdev || gi->unbind) {
spin_unlock_irqrestore(&gi->spinlock, flags);
return;
}
composite_suspend(gadget);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
/*
 * Resume handler; same unbind-race guard as configfs_composite_setup.
 */
static void configfs_composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
unsigned long flags;
cdev = get_gadget_data(gadget);
if (!cdev)
return;
gi = container_of(cdev, struct gadget_info, cdev);
spin_lock_irqsave(&gi->spinlock, flags);
cdev = get_gadget_data(gadget);
if (!cdev || gi->unbind) {
spin_unlock_irqrestore(&gi->spinlock, flags);
return;
}
composite_resume(gadget);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
/* Template gadget driver copied into each gadget_info in gadgets_make(). */
static const struct usb_gadget_driver configfs_driver_template = {
.bind = configfs_composite_bind,
.unbind = configfs_composite_unbind,
.setup = configfs_composite_setup,
.reset = configfs_composite_reset,
.disconnect = configfs_composite_disconnect,
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
.max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
},
.match_existing_only = 1,
};
/*
 * Top-level configfs "mkdir": create a new gadget.  Sets up all default
 * subdirectories (functions/, configs/, strings/, os_desc/, webusb/),
 * fills in the device descriptor defaults, and clones the driver
 * template with a per-gadget driver name and function name.
 */
static struct config_group *gadgets_make(
struct config_group *group,
const char *name)
{
struct gadget_info *gi;
gi = kzalloc(sizeof(*gi), GFP_KERNEL);
if (!gi)
return ERR_PTR(-ENOMEM);
config_group_init_type_name(&gi->group, name, &gadget_root_type);
config_group_init_type_name(&gi->functions_group, "functions",
&functions_type);
configfs_add_default_group(&gi->functions_group, &gi->group);
config_group_init_type_name(&gi->configs_group, "configs",
&config_desc_type);
configfs_add_default_group(&gi->configs_group, &gi->group);
config_group_init_type_name(&gi->strings_group, "strings",
&gadget_language_strings_type);
configfs_add_default_group(&gi->strings_group, &gi->group);
config_group_init_type_name(&gi->os_desc_group, "os_desc",
&os_desc_type);
configfs_add_default_group(&gi->os_desc_group, &gi->group);
config_group_init_type_name(&gi->webusb_group, "webusb",
&webusb_type);
configfs_add_default_group(&gi->webusb_group, &gi->group);
/* Real bind/unbind live in the gadget driver; these must never run. */
gi->composite.bind = configfs_do_nothing;
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
composite_init_dev(&gi->cdev);
gi->cdev.desc.bLength = USB_DT_DEVICE_SIZE;
gi->cdev.desc.bDescriptorType = USB_DT_DEVICE;
gi->cdev.desc.bcdDevice = cpu_to_le16(get_default_bcdDevice());
gi->composite.gadget_driver = configfs_driver_template;
gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
"configfs-gadget.%s", name);
if (!gi->composite.gadget_driver.driver.name)
goto err;
gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
gi->composite.name = gi->composite.gadget_driver.function;
if (!gi->composite.gadget_driver.function)
goto out_free_driver_name;
return &gi->group;
out_free_driver_name:
kfree(gi->composite.gadget_driver.driver.name);
err:
kfree(gi);
return ERR_PTR(-ENOMEM);
}
/* Drop a gadget directory: release the configfs item reference. */
static void gadgets_drop(struct config_group *group, struct config_item *item)
{
	config_item_put(item);
}
/* mkdir/rmdir operations for the top-level usb_gadget/ directory */
static struct configfs_group_operations gadgets_ops = {
	.make_group     = &gadgets_make,
	.drop_item      = &gadgets_drop,
};

static const struct config_item_type gadgets_type = {
	.ct_group_ops   = &gadgets_ops,
	.ct_owner       = THIS_MODULE,
};

/* root of the configfs gadget hierarchy, mounted as "usb_gadget" */
static struct configfs_subsystem gadget_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "usb_gadget",
			.ci_type = &gadgets_type,
		},
	},
	.su_mutex = __MUTEX_INITIALIZER(gadget_subsys.su_mutex),
};
/**
 * unregister_gadget_item() - force-unregister the gadget behind a
 * configfs item
 * @item: configfs item of the gadget to tear down
 *
 * Exported for function drivers that need to pull the gadget off the
 * bus themselves; serialized against other configfs operations by
 * gi->lock.
 */
void unregister_gadget_item(struct config_item *item)
{
	struct gadget_info *gi = to_gadget_info(item);

	mutex_lock(&gi->lock);
	unregister_gadget(gi);
	mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
/* Register the "usb_gadget" configfs subsystem at module load. */
static int __init gadget_cfs_init(void)
{
	config_group_init(&gadget_subsys.su_group);
	return configfs_register_subsystem(&gadget_subsys);
}
module_init(gadget_cfs_init);
/* Unregister the "usb_gadget" configfs subsystem at module unload. */
static void __exit gadget_cfs_exit(void)
{
	configfs_unregister_subsystem(&gadget_subsys);
}
module_exit(gadget_cfs_exit);
| linux-master | drivers/usb/gadget/configfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* u_f.c -- USB function utilities for Gadget stack
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <[email protected]>
*/
#include "u_f.h"
#include <linux/usb/ch9.h>
/**
 * alloc_ep_req - allocate a usb_request with a kmalloc'ed buffer
 * @ep: endpoint the request will be queued on
 * @len: requested buffer length; for OUT endpoints the length is
 *       rounded up via usb_ep_align()
 *
 * Returns the request, or NULL on allocation failure.  The caller owns
 * both the request and its buffer.
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return NULL;

	if (usb_endpoint_dir_out(ep->desc))
		req->length = usb_ep_align(ep, len);
	else
		req->length = len;

	req->buf = kmalloc(req->length, GFP_ATOMIC);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}
EXPORT_SYMBOL_GPL(alloc_ep_req);
| linux-master | drivers/usb/gadget/u_f.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* composite.c - infrastructure for Composite USB Gadgets
*
* Copyright (C) 2006-2008 David Brownell
*/
/* #define VERBOSE_DEBUG */
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/utsname.h>
#include <linux/bitfield.h>
#include <linux/uuid.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
#include <linux/usb/webusb.h>
#include <asm/unaligned.h>
#include "u_os_desc.h"
/**
* struct usb_os_string - represents OS String to be reported by a gadget
 * @bLength: total length of the entire descriptor, always 0x12
* @bDescriptorType: USB_DT_STRING
* @qwSignature: the OS String proper
* @bMS_VendorCode: code used by the host for subsequent requests
* @bPad: not used, must be zero
*/
struct usb_os_string {
__u8 bLength;
__u8 bDescriptorType;
__u8 qwSignature[OS_STRING_QW_SIGN_LEN];
__u8 bMS_VendorCode;
__u8 bPad;
} __packed;
/*
* The code in this file is utility code, used to build a gadget driver
* from one or more "function" drivers, one or more "configuration"
* objects, and a "usb_composite_driver" by gluing them together along
* with the relevant device-wide data.
*/
/*
 * The gadget strings table pointers live in the container's stash
 * area; return them typed as a usb_gadget_strings pointer array.
 */
static struct usb_gadget_strings **get_containers_gs(
		struct usb_gadget_string_container *uc)
{
	return (struct usb_gadget_strings **)uc->stash;
}
/**
 * function_descriptors() - get function descriptors for speed
 * @f: the function
 * @speed: the speed
 *
 * Returns the descriptors or NULL if not set.
 */
static struct usb_descriptor_header **
function_descriptors(struct usb_function *f,
		     enum usb_device_speed speed)
{
	struct usb_descriptor_header **descriptors;

	/*
	 * NOTE: we try to help gadget drivers which might not be setting
	 * max_speed appropriately.
	 */
	/* fall through from higher to lower speeds until a table exists */
	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		descriptors = f->ssp_descriptors;
		if (descriptors)
			break;
		fallthrough;
	case USB_SPEED_SUPER:
		descriptors = f->ss_descriptors;
		if (descriptors)
			break;
		fallthrough;
	case USB_SPEED_HIGH:
		descriptors = f->hs_descriptors;
		if (descriptors)
			break;
		fallthrough;
	default:
		/* full/low speed (and any unlisted enum value) use fs tables */
		descriptors = f->fs_descriptors;
	}

	/*
	 * if we can't find any descriptors at all, then this gadget deserves to
	 * Oops with a NULL pointer dereference
	 */
	return descriptors;
}
/**
 * next_desc() - advance to the next desc_type descriptor
 * @t: current position within a NULL-terminated descriptor array
 * @desc_type: descriptor type to look for
 *
 * Return: pointer to the next entry whose bDescriptorType matches
 * @desc_type, or NULL when the end of the array is reached.
 */
static struct usb_descriptor_header**
next_desc(struct usb_descriptor_header **t, u8 desc_type)
{
	while (*t) {
		if ((*t)->bDescriptorType == desc_type)
			return t;
		t++;
	}
	return NULL;
}
/*
 * for_each_desc() - iterate over desc_type descriptors in the
 * descriptors list
 * @start: pointer within descriptor array.
 * @iter_desc: desc_type descriptor to use as the loop cursor
 * @desc_type: wanted descriptor type
 */
#define for_each_desc(start, iter_desc, desc_type) \
	for (iter_desc = next_desc(start, desc_type); \
	     iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type))
/**
 * config_ep_by_speed_and_alt() - configures the given endpoint
 * according to gadget speed.
 * @g: pointer to the gadget
 * @f: usb function
 * @_ep: the endpoint to configure
 * @alt: alternate setting number
 *
 * Return: error code, 0 on success
 *
 * This function chooses the right descriptors for a given
 * endpoint according to gadget speed and saves it in the
 * endpoint desc field. If the endpoint already has a descriptor
 * assigned to it - overwrites it with currently corresponding
 * descriptor. The endpoint maxpacket field is updated according
 * to the chosen descriptor.
 * Note: the supplied function should hold all the descriptors
 * for supported speeds
 */
int config_ep_by_speed_and_alt(struct usb_gadget *g,
		struct usb_function *f,
		struct usb_ep *_ep,
		u8 alt)
{
	struct usb_endpoint_descriptor *chosen_desc = NULL;
	struct usb_interface_descriptor *int_desc = NULL;
	struct usb_descriptor_header **speed_desc = NULL;

	struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
	int want_comp_desc = 0;

	struct usb_descriptor_header **d_spd; /* cursor for speed desc */
	struct usb_composite_dev *cdev;
	bool incomplete_desc = false;

	if (!g || !f || !_ep)
		return -EIO;

	/* select desired speed; fall back to the next lower speed's table */
	switch (g->speed) {
	case USB_SPEED_SUPER_PLUS:
		if (f->ssp_descriptors) {
			speed_desc = f->ssp_descriptors;
			want_comp_desc = 1;
			break;
		}
		incomplete_desc = true;
		fallthrough;
	case USB_SPEED_SUPER:
		if (f->ss_descriptors) {
			speed_desc = f->ss_descriptors;
			want_comp_desc = 1;
			break;
		}
		incomplete_desc = true;
		fallthrough;
	case USB_SPEED_HIGH:
		if (f->hs_descriptors) {
			speed_desc = f->hs_descriptors;
			break;
		}
		incomplete_desc = true;
		fallthrough;
	default:
		speed_desc = f->fs_descriptors;
	}

	cdev = get_gadget_data(g);
	if (incomplete_desc)
		WARNING(cdev,
			"%s doesn't hold the descriptors for current speed\n",
			f->name);

	/* find correct alternate setting descriptor */
	for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
		int_desc = (struct usb_interface_descriptor *)*d_spd;

		if (int_desc->bAlternateSetting == alt) {
			/* restart the endpoint scan from this interface */
			speed_desc = d_spd;
			goto intf_found;
		}
	}
	return -EIO;

intf_found:
	/* find the endpoint descriptor matching _ep's address */
	for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) {
		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
		if (chosen_desc->bEndpointAddress == _ep->address)
			goto ep_found;
	}
	return -EIO;

ep_found:
	/* commit results */
	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
	_ep->desc = chosen_desc;
	_ep->comp_desc = NULL;
	_ep->maxburst = 0;
	_ep->mult = 1;

	/* HS periodic endpoints encode the mult in wMaxPacketSize bits 12:11 */
	if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
				usb_endpoint_xfer_int(_ep->desc)))
		_ep->mult = usb_endpoint_maxp_mult(_ep->desc);

	if (!want_comp_desc)
		return 0;

	/*
	 * Companion descriptor should follow EP descriptor
	 * USB 3.0 spec, #9.6.7
	 */
	comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
	if (!comp_desc ||
	    (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
		return -EIO;
	_ep->comp_desc = comp_desc;
	if (g->speed >= USB_SPEED_SUPER) {
		switch (usb_endpoint_type(_ep->desc)) {
		case USB_ENDPOINT_XFER_ISOC:
			/* mult: bits 1:0 of bmAttributes */
			_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
			fallthrough;
		case USB_ENDPOINT_XFER_BULK:
		case USB_ENDPOINT_XFER_INT:
			_ep->maxburst = comp_desc->bMaxBurst + 1;
			break;
		default:
			if (comp_desc->bMaxBurst != 0)
				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
			_ep->maxburst = 1;
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt);
/**
 * config_ep_by_speed() - configures the given endpoint
 * according to gadget speed.
 * @g: pointer to the gadget
 * @f: usb function
 * @_ep: the endpoint to configure
 *
 * Return: error code, 0 on success
 *
 * Convenience wrapper around config_ep_by_speed_and_alt() for
 * alternate setting zero.  See that function for the full contract.
 */
int config_ep_by_speed(struct usb_gadget *g,
			struct usb_function *f,
			struct usb_ep *_ep)
{
	return config_ep_by_speed_and_alt(g, f, _ep, 0);
}
EXPORT_SYMBOL_GPL(config_ep_by_speed);
/**
 * usb_add_function() - add a function to a configuration
 * @config: the configuration
 * @function: the function being added
 * Context: single threaded during gadget setup
 *
 * After initialization, each configuration must have one or more
 * functions added to it. Adding a function involves calling its @bind()
 * method to allocate resources such as interface and string identifiers
 * and endpoints.
 *
 * This function returns the value of the function's bind(), which is
 * zero for success else a negative errno value.
 */
int usb_add_function(struct usb_configuration *config,
		struct usb_function *function)
{
	int value = -EINVAL;

	DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
			function->name, function,
			config->label, config);

	/* set_alt() and disable() are mandatory callbacks */
	if (!function->set_alt || !function->disable)
		goto done;

	function->config = config;
	list_add_tail(&function->list, &config->functions);

	if (function->bind_deactivated) {
		value = usb_function_deactivate(function);
		if (value)
			goto done;
	}

	/* REVISIT *require* function->bind? */
	if (function->bind) {
		value = function->bind(config, function);
		if (value < 0) {
			list_del(&function->list);
			function->config = NULL;
		}
	} else
		value = 0;

	/* We allow configurations that don't work at both speeds.
	 * If we run into a lowspeed Linux system, treat it the same
	 * as full speed ... it's the function drivers that will need
	 * to avoid bulk and ISO transfers.
	 */
	if (!config->fullspeed && function->fs_descriptors)
		config->fullspeed = true;
	if (!config->highspeed && function->hs_descriptors)
		config->highspeed = true;
	if (!config->superspeed && function->ss_descriptors)
		config->superspeed = true;
	if (!config->superspeed_plus && function->ssp_descriptors)
		config->superspeed_plus = true;

done:
	if (value)
		DBG(config->cdev, "adding '%s'/%p --> %d\n",
				function->name, function, value);
	return value;
}
EXPORT_SYMBOL_GPL(usb_add_function);
/**
 * usb_remove_function() - remove a function from a configuration
 * @c: the configuration the function belongs to
 * @f: the function being removed
 *
 * Disables the function, clears its endpoint bookkeeping, unlinks it
 * from the configuration's list and calls its unbind() — which may
 * free @f, so @f must not be touched afterwards.  A bind_deactivated
 * function gets its deactivation balanced via usb_function_activate().
 */
void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
{
	if (f->disable)
		f->disable(f);

	bitmap_zero(f->endpoints, 32);
	list_del(&f->list);
	if (f->unbind)
		f->unbind(c, f);

	if (f->bind_deactivated)
		usb_function_activate(f);
}
EXPORT_SYMBOL_GPL(usb_remove_function);
/**
 * usb_function_deactivate - prevent function and gadget enumeration
 * @function: the function that isn't yet ready to respond
 *
 * Blocks response of the gadget driver to host enumeration by
 * preventing the data line pullup from being activated. This is
 * normally called during @bind() processing to change from the
 * initial "ready to respond" state, or when a required resource
 * becomes available.
 *
 * For example, drivers that serve as a passthrough to a userspace
 * daemon can block enumeration unless that daemon (such as an OBEX,
 * MTP, or print server) is ready to handle host requests.
 *
 * Not all systems support software control of their USB peripheral
 * data pullups.
 *
 * Returns zero on success, else negative errno.
 */
int usb_function_deactivate(struct usb_function *function)
{
	struct usb_composite_dev *cdev = function->config->cdev;
	unsigned long flags;
	int status = 0;

	spin_lock_irqsave(&cdev->lock, flags);

	/* only the first deactivation actually touches the UDC */
	if (cdev->deactivations == 0) {
		/*
		 * NOTE(review): the spinlock is dropped across the gadget
		 * call — presumably usb_gadget_deactivate() may sleep;
		 * confirm before relying on atomic context here.
		 */
		spin_unlock_irqrestore(&cdev->lock, flags);
		status = usb_gadget_deactivate(cdev->gadget);
		spin_lock_irqsave(&cdev->lock, flags);
	}

	if (status == 0)
		cdev->deactivations++;

	spin_unlock_irqrestore(&cdev->lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(usb_function_deactivate);
/**
 * usb_function_activate - allow function and gadget enumeration
 * @function: function on which usb_function_activate() was called
 *
 * Reverses effect of usb_function_deactivate(). If no more functions
 * are delaying their activation, the gadget driver will respond to
 * host enumeration procedures.
 *
 * Returns zero on success, else negative errno.
 */
int usb_function_activate(struct usb_function *function)
{
	struct usb_composite_dev *cdev = function->config->cdev;
	unsigned long flags;
	int status = 0;

	spin_lock_irqsave(&cdev->lock, flags);

	if (WARN_ON(cdev->deactivations == 0))
		status = -EINVAL;
	else {
		cdev->deactivations--;
		/* last balancing activation re-enables the UDC */
		if (cdev->deactivations == 0) {
			/* lock dropped across the gadget call, as in
			 * usb_function_deactivate() */
			spin_unlock_irqrestore(&cdev->lock, flags);
			status = usb_gadget_activate(cdev->gadget);
			spin_lock_irqsave(&cdev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cdev->lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(usb_function_activate);
/**
 * usb_interface_id() - allocate an unused interface ID
 * @config: configuration associated with the interface
 * @function: function handling the interface
 * Context: single threaded during gadget setup
 *
 * Called from usb_function.bind() callbacks to allocate new interface
 * IDs, which the function driver then stores in interface,
 * association, CDC union, and other descriptors, and uses to route
 * control requests (e.g. set_alt()) to the function.
 *
 * All interface identifiers should be allocated through this routine
 * so different functions never assign different meanings to the same
 * identifier.  Since identifiers are configuration-specific, functions
 * used in more than one configuration need multiple versions of the
 * relevant descriptors.
 *
 * Returns the interface ID which was allocated; or -ENODEV if no
 * more interface IDs can be allocated.
 */
int usb_interface_id(struct usb_configuration *config,
		struct usb_function *function)
{
	unsigned id = config->next_interface_id;

	if (id >= MAX_CONFIG_INTERFACES)
		return -ENODEV;

	config->interface[id] = function;
	config->next_interface_id = id + 1;
	return id;
}
EXPORT_SYMBOL_GPL(usb_interface_id);
/**
 * usb_func_wakeup - sends function wake notification to the host.
 * @func: function that sends the remote wakeup notification.
 *
 * Applicable to devices operating at enhanced superspeed when usb
 * functions are put in function suspend state and armed for function
 * remote wakeup. On completion, function wake notification is sent. If
 * the device is in low power state it tries to bring the device to active
 * state before sending the wake notification. Since it is a synchronous
 * call, caller must take care of not calling it in interrupt context.
 * For devices operating at lower speeds returns negative errno.
 *
 * Returns zero on success, else negative errno.
 */
int usb_func_wakeup(struct usb_function *func)
{
	struct usb_gadget *gadget = func->config->cdev->gadget;
	int id;

	if (!gadget->ops->func_wakeup)
		return -EOPNOTSUPP;

	/* host must have armed this function via SET_FEATURE first */
	if (!func->func_wakeup_armed) {
		ERROR(func->config->cdev, "not armed for func remote wakeup\n");
		return -EINVAL;
	}

	/* the wake notification carries the function's first interface id */
	for (id = 0; id < MAX_CONFIG_INTERFACES; id++)
		if (func->config->interface[id] == func)
			break;

	if (id == MAX_CONFIG_INTERFACES) {
		ERROR(func->config->cdev, "Invalid function\n");
		return -EINVAL;
	}

	return gadget->ops->func_wakeup(gadget, id);
}
EXPORT_SYMBOL_GPL(usb_func_wakeup);
/*
 * Encode a configuration's power budget (mA) into the bMaxPower
 * descriptor field: 2 mA units below SuperSpeed (capped at 500 mA),
 * 8 mA units at SuperSpeed and above (capped at 900 mA).
 */
static u8 encode_bMaxPower(enum usb_device_speed speed,
		struct usb_configuration *c)
{
	unsigned int mA = c->MaxPower;

	/* a bus-powered config with no explicit budget gets the default */
	if (!mA && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
		mA = CONFIG_USB_GADGET_VBUS_DRAW;

	if (!mA)
		return 0;

	if (speed < USB_SPEED_SUPER)
		return min(mA, 500U) / 2;

	/*
	 * USB 3.x supports up to 900mA, but since 900 isn't divisible
	 * by 8 the integral division will effectively cap to 896mA.
	 */
	return min(mA, 900U) / 8;
}
/*
 * Clear USB_CONFIG_ATT_WAKEUP from a configuration whose gadget
 * controller cannot actually signal remote wakeup, so the host is not
 * told about a capability the device cannot deliver.
 */
void check_remote_wakeup_config(struct usb_gadget *g,
				struct usb_configuration *c)
{
	if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes) {
		/* Reset the rw bit if gadget is not capable of it */
		if (!g->wakeup_capable && g->ops->set_remote_wakeup) {
			WARN(c->cdev, "Clearing wakeup bit for config c.%d\n",
			     c->bConfigurationValue);
			c->bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
		}
	}
}
/*
 * Write one configuration descriptor plus all of its function
 * descriptors into @buf (at most USB_COMP_EP0_BUFSIZ bytes).
 * @type selects USB_DT_CONFIG vs USB_DT_OTHER_SPEED_CONFIG.
 * Returns the total length written, or a negative errno.
 */
static int config_buf(struct usb_configuration *config,
		enum usb_device_speed speed, void *buf, u8 type)
{
	struct usb_config_descriptor *c = buf;
	void *next = buf + USB_DT_CONFIG_SIZE;
	int len;
	struct usb_function *f;
	int status;

	len = USB_COMP_EP0_BUFSIZ - USB_DT_CONFIG_SIZE;
	/* write the config descriptor */
	c = buf;
	c->bLength = USB_DT_CONFIG_SIZE;
	c->bDescriptorType = type;
	/* wTotalLength is written later */
	c->bNumInterfaces = config->next_interface_id;
	c->bConfigurationValue = config->bConfigurationValue;
	c->iConfiguration = config->iConfiguration;
	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
	c->bMaxPower = encode_bMaxPower(speed, config);

	/* There may be e.g. OTG descriptors */
	if (config->descriptors) {
		status = usb_descriptor_fillbuf(next, len,
				config->descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	/* add each function's descriptors */
	list_for_each_entry(f, &config->functions, list) {
		struct usb_descriptor_header **descriptors;

		descriptors = function_descriptors(f, speed);
		if (!descriptors)
			continue;
		status = usb_descriptor_fillbuf(next, len,
			(const struct usb_descriptor_header **) descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	len = next - buf;
	c->wTotalLength = cpu_to_le16(len);
	return len;
}
/*
 * Build the configuration descriptor with *index* w_value (low byte)
 * into the ep0 request buffer; the high byte of @w_value carries the
 * descriptor type (CONFIG vs OTHER_SPEED_CONFIG).  The OS Descriptors
 * config, if present, is considered first and skipped inside the loop.
 */
static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c;
	struct list_head *pos;
	u8 type = w_value >> 8;
	enum usb_device_speed speed = USB_SPEED_UNKNOWN;

	if (gadget->speed >= USB_SPEED_SUPER)
		speed = gadget->speed;
	else if (gadget_is_dualspeed(gadget)) {
		int hs = 0;

		if (gadget->speed == USB_SPEED_HIGH)
			hs = 1;
		/* OTHER_SPEED_CONFIG describes the "other" speed */
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			hs = !hs;
		if (hs)
			speed = USB_SPEED_HIGH;
	}

	/* This is a lookup by config *INDEX* */
	w_value &= 0xff;

	pos = &cdev->configs;
	c = cdev->os_desc_config;
	if (c)
		goto check_config;

	while ((pos = pos->next) != &cdev->configs) {
		c = list_entry(pos, typeof(*c), list);

		/* skip OS Descriptors config which is handled separately */
		if (c == cdev->os_desc_config)
			continue;

check_config:
		/* ignore configs that won't work at this speed */
		switch (speed) {
		case USB_SPEED_SUPER_PLUS:
			if (!c->superspeed_plus)
				continue;
			break;
		case USB_SPEED_SUPER:
			if (!c->superspeed)
				continue;
			break;
		case USB_SPEED_HIGH:
			if (!c->highspeed)
				continue;
			break;
		default:
			if (!c->fullspeed)
				continue;
		}

		if (w_value == 0)
			return config_buf(c, speed, cdev->req->buf, type);
		w_value--;
	}
	return -EINVAL;
}
static int count_configs(struct usb_composite_dev *cdev, unsigned type)
{
struct usb_gadget *gadget = cdev->gadget;
struct usb_configuration *c;
unsigned count = 0;
int hs = 0;
int ss = 0;
int ssp = 0;
if (gadget_is_dualspeed(gadget)) {
if (gadget->speed == USB_SPEED_HIGH)
hs = 1;
if (gadget->speed == USB_SPEED_SUPER)
ss = 1;
if (gadget->speed == USB_SPEED_SUPER_PLUS)
ssp = 1;
if (type == USB_DT_DEVICE_QUALIFIER)
hs = !hs;
}
list_for_each_entry(c, &cdev->configs, list) {
/* ignore configs that won't work at this speed */
if (ssp) {
if (!c->superspeed_plus)
continue;
} else if (ss) {
if (!c->superspeed)
continue;
} else if (hs) {
if (!c->highspeed)
continue;
} else {
if (!c->fullspeed)
continue;
}
count++;
}
return count;
}
/**
 * bos_desc() - prepares the BOS descriptor.
 * @cdev: pointer to usb_composite device to generate the bos
 *	descriptor for
 *
 * This function generates the BOS (Binary Device Object)
 * descriptor and its device capabilities descriptors. The BOS
 * descriptor should be supported by a SuperSpeed device.
 *
 * The descriptor is assembled in cdev->req->buf; each capability is
 * appended at the current wTotalLength offset and the header's
 * bNumDeviceCaps/wTotalLength fields are bumped as it goes.
 * Returns the total length written.
 */
static int bos_desc(struct usb_composite_dev *cdev)
{
	struct usb_ext_cap_descriptor *usb_ext;
	struct usb_dcd_config_params dcd_config_params;
	struct usb_bos_descriptor *bos = cdev->req->buf;
	unsigned int besl = 0;

	bos->bLength = USB_DT_BOS_SIZE;
	bos->bDescriptorType = USB_DT_BOS;
	bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
	bos->bNumDeviceCaps = 0;

	/* Get Controller configuration */
	if (cdev->gadget->ops->get_config_params) {
		cdev->gadget->ops->get_config_params(cdev->gadget,
						     &dcd_config_params);
	} else {
		dcd_config_params.besl_baseline =
			USB_DEFAULT_BESL_UNSPECIFIED;
		dcd_config_params.besl_deep =
			USB_DEFAULT_BESL_UNSPECIFIED;
		dcd_config_params.bU1devExitLat =
			USB_DEFAULT_U1_DEV_EXIT_LAT;
		dcd_config_params.bU2DevExitLat =
			cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
	}

	if (dcd_config_params.besl_baseline != USB_DEFAULT_BESL_UNSPECIFIED)
		besl = USB_BESL_BASELINE_VALID |
			USB_SET_BESL_BASELINE(dcd_config_params.besl_baseline);

	if (dcd_config_params.besl_deep != USB_DEFAULT_BESL_UNSPECIFIED)
		besl |= USB_BESL_DEEP_VALID |
			USB_SET_BESL_DEEP(dcd_config_params.besl_deep);

	/*
	 * A SuperSpeed device shall include the USB2.0 extension descriptor
	 * and shall support LPM when operating in USB2.0 HS mode.
	 */
	if (cdev->gadget->lpm_capable) {
		usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
		bos->bNumDeviceCaps++;
		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
		usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
		usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
		usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
		usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
						    USB_BESL_SUPPORT | besl);
	}

	/*
	 * The Superspeed USB Capability descriptor shall be implemented by all
	 * SuperSpeed devices.
	 */
	if (gadget_is_superspeed(cdev->gadget)) {
		struct usb_ss_cap_descriptor *ss_cap;

		ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
		bos->bNumDeviceCaps++;
		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
		ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
		ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
		ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
		ss_cap->bmAttributes = 0; /* LTM is not supported yet */
		ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
						      USB_FULL_SPEED_OPERATION |
						      USB_HIGH_SPEED_OPERATION |
						      USB_5GBPS_OPERATION);
		ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
		ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
		ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
	}

	/* The SuperSpeedPlus USB Device Capability descriptor */
	if (gadget_is_superspeed_plus(cdev->gadget)) {
		struct usb_ssp_cap_descriptor *ssp_cap;
		u8 ssac = 1;
		u8 ssic;
		int i;

		/* ssac is a zero-based count of sublink speed attributes */
		if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x2)
			ssac = 3;

		/*
		 * Paired RX and TX sublink speed attributes share
		 * the same SSID.
		 */
		ssic = (ssac + 1) / 2 - 1;

		ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
		bos->bNumDeviceCaps++;

		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(ssac));
		ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac);
		ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
		ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
		ssp_cap->bReserved = 0;
		ssp_cap->wReserved = 0;

		ssp_cap->bmAttributes =
			cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) |
				    FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic));

		ssp_cap->wFunctionalitySupport =
			cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID, 0) |
				    FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) |
				    FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1));

		/*
		 * Use 1 SSID if the gadget supports up to gen2x1 or not
		 * specified:
		 * - SSID 0 for symmetric RX/TX sublink speed of 10 Gbps.
		 *
		 * Use 1 SSID if the gadget supports up to gen1x2:
		 * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
		 *
		 * Use 2 SSIDs if the gadget supports up to gen2x2:
		 * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
		 * - SSID 1 for symmetric RX/TX sublink speed of 10 Gbps.
		 */
		for (i = 0; i < ssac + 1; i++) {
			u8 ssid;
			u8 mantissa;
			u8 type;

			ssid = i >> 1;

			if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x1 ||
			    cdev->gadget->max_ssp_rate == USB_SSP_GEN_UNKNOWN)
				mantissa = 10;
			else
				mantissa = 5 << ssid;

			/* even entries describe RX, odd entries TX */
			if (i % 2)
				type = USB_SSP_SUBLINK_SPEED_ST_SYM_TX;
			else
				type = USB_SSP_SUBLINK_SPEED_ST_SYM_RX;

			ssp_cap->bmSublinkSpeedAttr[i] =
				cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) |
					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE,
						       USB_SSP_SUBLINK_SPEED_LSE_GBPS) |
					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST, type) |
					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP,
						       USB_SSP_SUBLINK_SPEED_LP_SSP) |
					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, mantissa));
		}
	}

	/* The WebUSB Platform Capability descriptor */
	if (cdev->use_webusb) {
		struct usb_plat_dev_cap_descriptor *webusb_cap;
		struct usb_webusb_cap_data *webusb_cap_data;
		guid_t webusb_uuid = WEBUSB_UUID;

		webusb_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
		webusb_cap_data = (struct usb_webusb_cap_data *) webusb_cap->CapabilityData;
		bos->bNumDeviceCaps++;
		le16_add_cpu(&bos->wTotalLength,
			USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE));

		webusb_cap->bLength = USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE);
		webusb_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
		webusb_cap->bDevCapabilityType = USB_PLAT_DEV_CAP_TYPE;
		webusb_cap->bReserved = 0;
		export_guid(webusb_cap->UUID, &webusb_uuid);

		if (cdev->bcd_webusb_version != 0)
			webusb_cap_data->bcdVersion = cpu_to_le16(cdev->bcd_webusb_version);
		else
			webusb_cap_data->bcdVersion = WEBUSB_VERSION_1_00;

		webusb_cap_data->bVendorCode = cdev->b_webusb_vendor_code;

		if (strnlen(cdev->landing_page, sizeof(cdev->landing_page)) > 0)
			webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_PRESENT;
		else
			webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_NOT_PRESENT;
	}

	return le16_to_cpu(bos->wTotalLength);
}
/* Build a DEVICE_QUALIFIER descriptor into the ep0 request buffer. */
static void device_qual(struct usb_composite_dev *cdev)
{
	struct usb_qualifier_descriptor *qual = cdev->req->buf;

	qual->bLength = sizeof(*qual);
	qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	/* POLICY: same bcdUSB and device type info at both speeds */
	qual->bcdUSB = cdev->desc.bcdUSB;
	qual->bDeviceClass = cdev->desc.bDeviceClass;
	qual->bDeviceSubClass = cdev->desc.bDeviceSubClass;
	qual->bDeviceProtocol = cdev->desc.bDeviceProtocol;
	/* ASSUME same EP0 fifo size at both speeds */
	qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;
	qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
	qual->bRESERVED = 0;
}
/*-------------------------------------------------------------------------*/

/* Disable every function in the current config and forget the config. */
static void reset_config(struct usb_composite_dev *cdev)
{
	struct usb_function *f;

	DBG(cdev, "reset config\n");

	list_for_each_entry(f, &cdev->config->functions, list) {
		if (f->disable)
			f->disable(f);

		/* Section 9.1.1.6, disable remote wakeup when device is reset */
		f->func_wakeup_armed = false;

		bitmap_zero(f->endpoints, 32);
	}
	cdev->config = NULL;
	cdev->delayed_status = 0;
}
/*
 * Handle SET_CONFIGURATION: tear down the old config (if any), select
 * the new one, put every interface into altsetting 0, and set up the
 * power budget and remote-wakeup state.  Returns
 * USB_GADGET_DELAYED_STATUS when at least one function defers the
 * status stage, otherwise 0 or a negative errno.
 */
static int set_config(struct usb_composite_dev *cdev,
		const struct usb_ctrlrequest *ctrl, unsigned number)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c = NULL, *iter;
	int result = -EINVAL;
	/* default/unconfigured current draw; OTG allows only 8 mA */
	unsigned power = gadget_is_otg(gadget) ? 8 : 100;
	int tmp;

	if (number) {
		list_for_each_entry(iter, &cdev->configs, list) {
			if (iter->bConfigurationValue != number)
				continue;
			/*
			 * We disable the FDs of the previous
			 * configuration only if the new configuration
			 * is a valid one
			 */
			if (cdev->config)
				reset_config(cdev);
			c = iter;
			result = 0;
			break;
		}
		if (result < 0)
			goto done;
	} else { /* Zero configuration value - need to reset the config */
		if (cdev->config)
			reset_config(cdev);
		result = 0;
	}

	DBG(cdev, "%s config #%d: %s\n",
	    usb_speed_string(gadget->speed),
	    number, c ? c->label : "unconfigured");

	if (!c)
		goto done;

	usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
	cdev->config = c;

	/* Initialize all interfaces by setting them to altsetting zero. */
	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
		struct usb_function *f = c->interface[tmp];
		struct usb_descriptor_header **descriptors;

		if (!f)
			break;

		/*
		 * Record which endpoints are used by the function. This is used
		 * to dispatch control requests targeted at that endpoint to the
		 * function's setup callback instead of the current
		 * configuration's setup callback.
		 */
		descriptors = function_descriptors(f, gadget->speed);
		for (; *descriptors; ++descriptors) {
			struct usb_endpoint_descriptor *ep;
			int addr;

			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
				continue;

			ep = (struct usb_endpoint_descriptor *)*descriptors;
			/* fold direction bit + ep number into one bit index */
			addr = ((ep->bEndpointAddress & 0x80) >> 3)
			     | (ep->bEndpointAddress & 0x0f);
			set_bit(addr, f->endpoints);
		}

		result = f->set_alt(f, tmp, 0);
		if (result < 0) {
			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
			    tmp, f->name, f, result);
			reset_config(cdev);
			goto done;
		}

		if (result == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			    "%s: interface %d (%s) requested delayed status\n",
			    __func__, tmp, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
			    cdev->delayed_status);
		}
	}

	/* when we return, be sure our power usage is valid */
	if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
		power = c->MaxPower;
	else
		power = CONFIG_USB_GADGET_VBUS_DRAW;

	if (gadget->speed < USB_SPEED_SUPER)
		power = min(power, 500U);
	else
		power = min(power, 900U);

	if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes)
		usb_gadget_set_remote_wakeup(gadget, 1);
	else
		usb_gadget_set_remote_wakeup(gadget, 0);
done:
	if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);
	else
		usb_gadget_clear_selfpowered(gadget);

	usb_gadget_vbus_draw(gadget, power);
	if (result >= 0 && cdev->delayed_status)
		result = USB_GADGET_DELAYED_STATUS;
	return result;
}
int usb_add_config_only(struct usb_composite_dev *cdev,
struct usb_configuration *config)
{
struct usb_configuration *c;
if (!config->bConfigurationValue)
return -EINVAL;
/* Prevent duplicate configuration identifiers */
list_for_each_entry(c, &cdev->configs, list) {
if (c->bConfigurationValue == config->bConfigurationValue)
return -EBUSY;
}
config->cdev = cdev;
list_add_tail(&config->list, &cdev->configs);
INIT_LIST_HEAD(&config->functions);
config->next_interface_id = 0;
memset(config->interface, 0, sizeof(config->interface));
return 0;
}
EXPORT_SYMBOL_GPL(usb_add_config_only);
/**
* usb_add_config() - add a configuration to a device.
* @cdev: wraps the USB gadget
* @config: the configuration, with bConfigurationValue assigned
* @bind: the configuration's bind function
* Context: single threaded during gadget setup
*
* One of the main tasks of a composite @bind() routine is to
* add each of the configurations it supports, using this routine.
*
* This function returns the value of the configuration's @bind(), which
* is zero for success else a negative errno value. Binding configurations
* assigns global resources including string IDs, and per-configuration
* resources such as interface IDs and endpoints.
*/
int usb_add_config(struct usb_composite_dev *cdev,
		struct usb_configuration *config,
		int (*bind)(struct usb_configuration *))
{
	int status = -EINVAL;

	/* a bind() callback is mandatory; bail out before touching lists */
	if (!bind)
		goto done;

	DBG(cdev, "adding config #%u '%s'/%p\n",
			config->bConfigurationValue,
			config->label, config);

	/* link the config into cdev->configs (checks for duplicates) */
	status = usb_add_config_only(cdev, config);
	if (status)
		goto done;

	status = bind(config);

	/* let the UDC veto the bound configuration (e.g. ep shortage) */
	if (status == 0)
		status = usb_gadget_check_config(cdev->gadget);

	if (status < 0) {
		/* bind() failed: unwind every function it managed to add,
		 * then unlink the configuration again.
		 */
		while (!list_empty(&config->functions)) {
			struct usb_function		*f;

			f = list_first_entry(&config->functions,
					struct usb_function, list);
			list_del(&f->list);
			if (f->unbind) {
				DBG(cdev, "unbind function '%s'/%p\n",
					f->name, f);
				f->unbind(config, f);
				/* may free memory for "f" */
			}
		}
		list_del(&config->list);
		config->cdev = NULL;
	} else {
		unsigned	i;

		DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n",
			config->bConfigurationValue, config,
			config->superspeed_plus ? " superplus" : "",
			config->superspeed ? " super" : "",
			config->highspeed ? " high" : "",
			config->fullspeed
				? (gadget_is_dualspeed(cdev->gadget)
					? " full"
					: " full/low")
				: "");

		/* log each interface the bind() populated */
		for (i = 0; i < MAX_CONFIG_INTERFACES; i++) {
			struct usb_function	*f = config->interface[i];

			if (!f)
				continue;
			DBG(cdev, "  interface %d = %s/%p\n",
				i, f->name, f);
		}
	}

	/* set_alt(), or next bind(), sets up ep->claimed as needed */
	usb_ep_autoconfig_reset(cdev->gadget);

done:
	if (status)
		DBG(cdev, "added config '%s'/%u --> %d\n", config->label,
				config->bConfigurationValue, status);
	return status;
}
EXPORT_SYMBOL_GPL(usb_add_config);
/* Tear down one configuration: remove and unbind all of its functions,
 * unlink it from cdev->configs, then run the configuration's own
 * unbind() callback, which may free @config itself.
 */
static void remove_config(struct usb_composite_dev *cdev,
			      struct usb_configuration *config)
{
	/* usb_remove_function() unlinks each entry, so keep taking the
	 * head until the list drains.
	 */
	while (!list_empty(&config->functions)) {
		struct usb_function		*f;

		f = list_first_entry(&config->functions,
				struct usb_function, list);

		usb_remove_function(config, f);
	}
	list_del(&config->list);
	if (config->unbind) {
		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
		config->unbind(config);
			/* may free memory for "c" */
	}
}
/**
* usb_remove_config() - remove a configuration from a device.
* @cdev: wraps the USB gadget
* @config: the configuration
*
* Drivers must call usb_gadget_disconnect before calling this function
* to disconnect the device from the host and make sure the host will not
* try to enumerate the device while we are changing the config list.
*/
/* Detach @config from @cdev; if it is the active configuration, reset
 * it first under the device lock so no ep0 traffic races the teardown.
 * The actual unbind work runs outside the spinlock since it may sleep.
 */
void usb_remove_config(struct usb_composite_dev *cdev,
		      struct usb_configuration *config)
{
	unsigned long irqflags;

	spin_lock_irqsave(&cdev->lock, irqflags);

	if (cdev->config == config)
		reset_config(cdev);

	spin_unlock_irqrestore(&cdev->lock, irqflags);

	remove_config(cdev, config);
}
/*-------------------------------------------------------------------------*/
/* We support strings in multiple languages ... string descriptor zero
* says which languages are supported. The typical case will be that
* only one language (probably English) is used, with i18n handled on
* the host side.
*/
/* Merge the language codes of every table in @sp into the LANGID array
 * at @buf (already zero-filled by the caller), skipping codes that are
 * already present and never scanning past USB_MAX_STRING_LEN slots.
 */
static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
{
	const struct usb_gadget_strings *table;
	__le16 code;
	__le16 *slot;

	for (; *sp; sp++) {
		table = *sp;
		code = cpu_to_le16(table->language);

		/* find this code, or the first free slot / array end */
		slot = buf;
		while (*slot && slot < &buf[USB_MAX_STRING_LEN] &&
				*slot != code)
			slot++;

		/* append unless the code is already recorded */
		if (*slot != code)
			*slot = code;
	}
}
/* Search the NULL-terminated table array @sp for the string @id in
 * language @language, copying the descriptor into @buf on success.
 * Returns the descriptor length, or -EINVAL when no table matches.
 */
static int lookup_string(
	struct usb_gadget_strings	**sp,
	void				*buf,
	u16			language,
	int			id
)
{
	struct usb_gadget_strings *table;
	int len;

	for (; *sp; sp++) {
		table = *sp;
		if (table->language != language)
			continue;
		len = usb_gadget_get_string(table, id, buf);
		if (len > 0)
			return len;
	}
	return -EINVAL;
}
/* Build the string descriptor @id in @language into @buf.
 *
 * id == 0 produces string descriptor zero (the supported-LANGID list),
 * gathered from the driver, every configuration, every function, and
 * all attached gstring containers.  The Microsoft OS string descriptor
 * is synthesized when enabled.  Otherwise each string table known to
 * the device is searched in turn.  Returns the descriptor length or
 * -EINVAL when nothing matches.
 */
static int get_string(struct usb_composite_dev *cdev,
		void *buf, u16 language, int id)
{
	struct usb_composite_driver	*composite = cdev->driver;
	struct usb_gadget_string_container *uc;
	struct usb_configuration	*c;
	struct usb_function		*f;
	int				len;

	/* Yes, not only is USB's i18n support probably more than most
	 * folk will ever care about ... also, it's all supported here.
	 * (Except for UTF8 support for Unicode's "Astral Planes".)
	 */

	/* 0 == report all available language codes */
	if (id == 0) {
		struct usb_string_descriptor	*s = buf;
		struct usb_gadget_strings	**sp;

		/* zero the full 256-byte ep0 buffer so collect_langs()
		 * always sees a terminated LANGID array
		 */
		memset(s, 0, 256);
		s->bDescriptorType = USB_DT_STRING;

		sp = composite->strings;
		if (sp)
			collect_langs(sp, s->wData);

		list_for_each_entry(c, &cdev->configs, list) {
			sp = c->strings;
			if (sp)
				collect_langs(sp, s->wData);

			list_for_each_entry(f, &c->functions, list) {
				sp = f->strings;
				if (sp)
					collect_langs(sp, s->wData);
			}
		}
		list_for_each_entry(uc, &cdev->gstrings, list) {
			struct usb_gadget_strings **sp;

			sp = get_containers_gs(uc);
			collect_langs(sp, s->wData);
		}

		/* count the LANGIDs actually collected */
		for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++)
			continue;
		if (!len)
			return -EINVAL;

		/* bLength covers header (2 bytes) plus 2 bytes per LANGID */
		s->bLength = 2 * (len + 1);
		return s->bLength;
	}

	/* Microsoft OS string descriptor: fixed signature plus the vendor
	 * code the host must use for the follow-up OS feature requests.
	 */
	if (cdev->use_os_string && language == 0 && id == OS_STRING_IDX) {
		struct usb_os_string *b = buf;
		b->bLength = sizeof(*b);
		b->bDescriptorType = USB_DT_STRING;
		compiletime_assert(
			sizeof(b->qwSignature) == sizeof(cdev->qw_sign),
			"qwSignature size must be equal to qw_sign");
		memcpy(&b->qwSignature, cdev->qw_sign, sizeof(b->qwSignature));
		b->bMS_VendorCode = cdev->b_vendor_code;
		b->bPad = 0;
		return sizeof(*b);
	}

	list_for_each_entry(uc, &cdev->gstrings, list) {
		struct usb_gadget_strings **sp;

		sp = get_containers_gs(uc);
		len = lookup_string(sp, buf, language, id);
		if (len > 0)
			return len;
	}

	/* String IDs are device-scoped, so we look up each string
	 * table we're told about.  These lookups are infrequent;
	 * simpler-is-better here.
	 */
	if (composite->strings) {
		len = lookup_string(composite->strings, buf, language, id);
		if (len > 0)
			return len;
	}
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->strings) {
			len = lookup_string(c->strings, buf, language, id);
			if (len > 0)
				return len;
		}
		list_for_each_entry(f, &c->functions, list) {
			if (!f->strings)
				continue;
			len = lookup_string(f->strings, buf, language, id);
			if (len > 0)
				return len;
		}
	}
	return -EINVAL;
}
/**
* usb_string_id() - allocate an unused string ID
* @cdev: the device whose string descriptor IDs are being allocated
* Context: single threaded during gadget setup
*
* @usb_string_id() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then store that ID in the appropriate descriptors and string table.
*
* All string identifier should be allocated using this,
* @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
* that for example different functions don't wrongly assign different
* meanings to the same identifier.
*/
int usb_string_id(struct usb_composite_dev *cdev)
{
	/* string id 0 is reserved by the USB spec for the list of
	 * supported languages; 255 is avoided as well (possibly
	 * reserved too -- mina86).
	 */
	if (cdev->next_string_id >= 254)
		return -ENODEV;

	return ++cdev->next_string_id;
}
EXPORT_SYMBOL_GPL(usb_string_id);
/**
* usb_string_ids_tab() - allocate unused string IDs in batch
* @cdev: the device whose string descriptor IDs are being allocated
* @str: an array of usb_string objects to assign numbers to
* Context: single threaded during gadget setup
*
* @usb_string_ids() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then copy IDs from the string table to the appropriate descriptors
* and string table for other languages.
*
* All string identifier should be allocated using this,
* @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
* example different functions don't wrongly assign different meanings
* to the same identifier.
*/
int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
{
	int id = cdev->next_string_id;

	/* hand out consecutive ids, refusing to go past 254 (id 0 is
	 * reserved, 255 avoided)
	 */
	while (str->s) {
		if (unlikely(id >= 254))
			return -ENODEV;
		str->id = ++id;
		str++;
	}

	/* commit the allocation only after the whole table fits */
	cdev->next_string_id = id;

	return 0;
}
EXPORT_SYMBOL_GPL(usb_string_ids_tab);
/* Deep-copy @n_gstrings language tables (each holding @n_strings
 * entries) into one kmalloc'd container.  The container's stash holds,
 * in order: a NULL-terminated array of table pointers, then each
 * usb_gadget_strings, then its usb_string array (NULL-terminated).
 * Only the usb_string.s pointers are shared with the source, not the
 * character data.  Returns the container or ERR_PTR(-ENOMEM).
 */
static struct usb_gadget_string_container *copy_gadget_strings(
		struct usb_gadget_strings **sp, unsigned n_gstrings,
		unsigned n_strings)
{
	struct usb_gadget_string_container *uc;
	struct usb_gadget_strings **gs_array;
	struct usb_gadget_strings *gs;
	struct usb_string *s;
	unsigned mem;
	unsigned n_gs;
	unsigned n_s;
	void *stash;

	/* one allocation sized for: container header, pointer array
	 * (plus NULL terminator), the tables, and per-table string
	 * arrays (each with a terminator entry)
	 */
	mem = sizeof(*uc);
	mem += sizeof(void *) * (n_gstrings + 1);
	mem += sizeof(struct usb_gadget_strings) * n_gstrings;
	mem += sizeof(struct usb_string) * (n_strings + 1) * (n_gstrings);
	uc = kmalloc(mem, GFP_KERNEL);
	if (!uc)
		return ERR_PTR(-ENOMEM);
	gs_array = get_containers_gs(uc);
	stash = uc->stash;
	/* skip past the pointer array; carve the rest sequentially */
	stash += sizeof(void *) * (n_gstrings + 1);
	for (n_gs = 0; n_gs < n_gstrings; n_gs++) {
		struct usb_string *org_s;

		gs_array[n_gs] = stash;
		gs = gs_array[n_gs];
		stash += sizeof(struct usb_gadget_strings);
		gs->language = sp[n_gs]->language;
		gs->strings = stash;
		org_s = sp[n_gs]->strings;

		for (n_s = 0; n_s < n_strings; n_s++) {
			s = stash;
			stash += sizeof(struct usb_string);
			/* never propagate a NULL string pointer */
			if (org_s->s)
				s->s = org_s->s;
			else
				s->s = "";
			org_s++;
		}
		/* terminate this table's usb_string array */
		s = stash;
		s->s = NULL;
		stash += sizeof(struct usb_string);
	}
	gs_array[n_gs] = NULL;
	return uc;
}
/**
* usb_gstrings_attach() - attach gadget strings to a cdev and assign ids
* @cdev: the device whose string descriptor IDs are being allocated
* and attached.
* @sp: an array of usb_gadget_strings to attach.
* @n_strings: number of entries in each usb_strings array (sp[]->strings)
*
* This function will create a deep copy of usb_gadget_strings and usb_string
* and attach it to the cdev. The actual string (usb_string.s) will not be
* copied but only a referenced will be made. The struct usb_gadget_strings
* array may contain multiple languages and should be NULL terminated.
* The ->language pointer of each struct usb_gadget_strings has to contain the
* same amount of entries.
* For instance: sp[0] is en-US, sp[1] is es-ES. It is expected that the first
* usb_string entry of es-ES contains the translation of the first usb_string
* entry of en-US. Therefore both entries become the same id assign.
*/
struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
				    struct usb_gadget_strings **sp, unsigned n_strings)
{
	struct usb_gadget_string_container *uc;
	struct usb_gadget_strings **n_gs;
	unsigned n_gstrings = 0;
	unsigned i;
	int ret;

	/* count the NULL-terminated language tables */
	for (i = 0; sp[i]; i++)
		n_gstrings++;

	if (!n_gstrings)
		return ERR_PTR(-EINVAL);

	/* deep-copy the tables so the caller's arrays may be transient */
	uc = copy_gadget_strings(sp, n_gstrings, n_strings);
	if (IS_ERR(uc))
		return ERR_CAST(uc);

	n_gs = get_containers_gs(uc);
	/* allocate ids against the first (reference) language only */
	ret = usb_string_ids_tab(cdev, n_gs[0]->strings);
	if (ret)
		goto err;

	/* propagate the same ids to every translation, entry by entry */
	for (i = 1; i < n_gstrings; i++) {
		struct usb_string *m_s;
		struct usb_string *s;
		unsigned n;

		m_s = n_gs[0]->strings;
		s = n_gs[i]->strings;
		for (n = 0; n < n_strings; n++) {
			s->id = m_s->id;
			s++;
			m_s++;
		}
	}
	list_add_tail(&uc->list, &cdev->gstrings);
	return n_gs[0]->strings;
err:
	kfree(uc);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(usb_gstrings_attach);
/**
* usb_string_ids_n() - allocate unused string IDs in batch
* @c: the device whose string descriptor IDs are being allocated
* @n: number of string IDs to allocate
* Context: single threaded during gadget setup
*
 * Returns the first ID of the allocated range; this ID and the next
 * @n-1 IDs are now valid. Note that this only holds for non-zero @n:
 * when @n is zero, the returned value is one past the last previously
 * allocated ID and does not denote a newly valid ID.
*
* @usb_string_ids_n() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then store that ID in the appropriate descriptors and string table.
*
* All string identifier should be allocated using this,
* @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
* example different functions don't wrongly assign different meanings
* to the same identifier.
*/
int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
{
	unsigned first = c->next_string_id;

	/* the batch must fit entirely below id 255 (id 0 reserved);
	 * checking n > 254 first also guards the sum against wrap
	 */
	if (unlikely(n > 254 || first + n > 254))
		return -ENODEV;

	c->next_string_id += n;
	return first + 1;
}
EXPORT_SYMBOL_GPL(usb_string_ids_n);
/*-------------------------------------------------------------------------*/
/* ep0 request completion: log short/failed transfers, then clear the
 * pending flag for whichever of the two device-owned ep0 requests just
 * finished.
 */
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct usb_composite_dev *cdev;

	if (req->status || req->actual != req->length)
		DBG((struct usb_composite_dev *) ep->driver_data,
				"setup complete --> %d, %d/%d\n",
				req->status, req->actual, req->length);

	/*
	 * REVISIT: the same ep0 requests are shared with function drivers
	 * so they don't have to maintain the same ->complete() stubs.
	 *
	 * Because of that, we need to check for the validity of ->context
	 * here, even though we know we've set it to something useful.
	 */
	if (!req->context)
		return;

	cdev = req->context;

	if (req == cdev->req)
		cdev->setup_pending = false;
	else if (req == cdev->os_desc_req)
		cdev->os_desc_pending = false;
	else
		WARN(1, "unknown request %p\n", req);
}
/* Queue @req on ep0 and, on success, mark the matching pending flag so
 * composite_setup_complete() can clear it later.
 */
static int composite_ep0_queue(struct usb_composite_dev *cdev,
		struct usb_request *req, gfp_t gfp_flags)
{
	int ret = usb_ep_queue(cdev->gadget->ep0, req, gfp_flags);

	if (ret)
		return ret;

	if (req == cdev->req)
		cdev->setup_pending = true;
	else if (req == cdev->os_desc_req)
		cdev->os_desc_pending = true;
	else
		WARN(1, "unknown request %p\n", req);

	return ret;
}
/* Count the interfaces of @c that carry a Microsoft extended-compat-ID
 * OS descriptor.  The result must fit the single count byte of the
 * feature descriptor header, hence the BUG_ON.
 */
static int count_ext_compat(struct usb_configuration *c)
{
	int res = 0;
	int i;

	for (i = 0; i < c->next_interface_id; ++i) {
		struct usb_function *f = c->interface[i];
		int j;

		for (j = 0; j < f->os_desc_n; ++j) {
			struct usb_os_desc *d;

			/* only entries describing this interface count */
			if (f->os_desc_table[j].if_id != i)
				continue;

			d = f->os_desc_table[j].os_desc;
			if (d && d->ext_compat_id)
				++res;
		}
	}
	BUG_ON(res > 255);
	return res;
}
/* Serialize the Microsoft extended-compat-ID feature descriptor into
 * @buf: a 16-byte header (filled by the caller) followed by one 24-byte
 * record per interface that has an ext_compat_id.  Returns the number
 * of bytes produced, stopping early if another full record would not
 * fit in USB_COMP_EP0_OS_DESC_BUFSIZ.
 */
static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
{
	int i, count;

	/* skip the 16-byte header the caller has already prepared */
	count = 16;
	buf += 16;
	for (i = 0; i < c->next_interface_id; ++i) {
		struct usb_function *f;
		int j;

		f = c->interface[i];
		for (j = 0; j < f->os_desc_n; ++j) {
			struct usb_os_desc *d;

			if (i != f->os_desc_table[j].if_id)
				continue;

			d = f->os_desc_table[j].os_desc;
			if (d && d->ext_compat_id) {
				/* record: interface number, reserved 0x01,
				 * 16-byte compat/subcompat ids, 6 pad bytes
				 */
				*buf++ = i;
				*buf++ = 0x01;
				memcpy(buf, d->ext_compat_id, 16);
				buf += 22;
			} else {
				/* empty record: only the reserved 0x01 byte
				 * at offset 1 is written; the rest stays as
				 * the caller's zero-fill
				 */
				++buf;
				*buf = 0x01;
				buf += 23;
			}
			count += 24;
			/* stop before overflowing the OS-descriptor buffer */
			if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
				return count;
		}
	}

	return count;
}
/* Return the number of extended properties attached to @interface of
 * configuration @c, or 0 when the interface has no matching OS
 * descriptor with an ext_compat_id.
 */
static int count_ext_prop(struct usb_configuration *c, int interface)
{
	struct usb_function *f = c->interface[interface];
	int j;

	for (j = 0; j < f->os_desc_n; ++j) {
		struct usb_os_desc *d;

		if (f->os_desc_table[j].if_id != interface)
			continue;

		d = f->os_desc_table[j].os_desc;
		if (d && d->ext_compat_id)
			return d->ext_prop_count;
	}
	return 0;
}
/* Total length of the extended-properties feature descriptor for
 * @interface: the 10-byte header plus the interface's accumulated
 * property bytes, capped at 4096.
 */
static int len_ext_prop(struct usb_configuration *c, int interface)
{
	struct usb_function *f = c->interface[interface];
	struct usb_os_desc *d;
	int j;
	int res = 10;	/* header length */

	for (j = 0; j < f->os_desc_n; ++j) {
		if (f->os_desc_table[j].if_id != interface)
			continue;

		d = f->os_desc_table[j].os_desc;
		if (d)
			return min(res + d->ext_prop_len, 4096);
	}
	return res;
}
/* Serialize the extended-properties feature descriptor for @interface
 * into @buf: a 10-byte header (filled by the caller) followed by one
 * variable-length record per property.  Returns the byte count, a
 * truncated count when the buffer would overflow, or a negative errno
 * on a malformed/unsupported property.
 */
static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
{
	struct usb_function *f;
	struct usb_os_desc *d;
	struct usb_os_desc_ext_prop *ext_prop;
	int j, count, n, ret;

	f = c->interface[interface];
	count = 10; /* header length */
	buf += 10;
	for (j = 0; j < f->os_desc_n; ++j) {
		if (interface != f->os_desc_table[j].if_id)
			continue;

		d = f->os_desc_table[j].os_desc;
		if (d)
			list_for_each_entry(ext_prop, &d->ext_prop, entry) {
				/* record size: data + name + 14 fixed bytes
				 * (size, type, name-length, data-length)
				 */
				n = ext_prop->data_len +
					ext_prop->name_len + 14;
				/* stop before overrunning the ep0 buffer */
				if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
					return count;
				usb_ext_prop_put_size(buf, n);
				usb_ext_prop_put_type(buf, ext_prop->type);
				/* ret is the offset where the value goes */
				ret = usb_ext_prop_put_name(buf, ext_prop->name,
							    ext_prop->name_len);
				if (ret < 0)
					return ret;
				switch (ext_prop->type) {
				case USB_EXT_PROP_UNICODE:
				case USB_EXT_PROP_UNICODE_ENV:
				case USB_EXT_PROP_UNICODE_LINK:
					usb_ext_prop_put_unicode(buf, ret,
							ext_prop->data,
							ext_prop->data_len);
					break;
				case USB_EXT_PROP_BINARY:
					usb_ext_prop_put_binary(buf, ret,
							ext_prop->data,
							ext_prop->data_len);
					break;
				case USB_EXT_PROP_LE32:
					/* not implemented */
				case USB_EXT_PROP_BE32:
					/* not implemented */
				default:
					return -EINVAL;
				}
				buf += n;
				count += n;
			}
	}

	return count;
}
/*
* The setup() callback implements all the ep0 functionality that's
* not handled lower down, in hardware or the hardware driver(like
* device and endpoint feature flags, and their status). It's all
* housekeeping for the gadget function we're implementing. Most of
* the work is in config and function specific setup.
*/
int
composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	struct usb_request		*req = cdev->req;
	int				value = -EOPNOTSUPP;
	int				status = 0;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u8				intf = w_index & 0xFF;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	struct usb_function		*f = NULL;
	struct usb_function		*iter;
	u8				endp;

	/* Clamp IN transfers that exceed the preallocated ep0 buffer;
	 * an oversized OUT request cannot be honored and stalls (done:).
	 */
	if (w_length > USB_COMP_EP0_BUFSIZ) {
		if (ctrl->bRequestType & USB_DIR_IN) {
			/* Cast away the const, we are going to overwrite on purpose. */
			__le16 *temp = (__le16 *)&ctrl->wLength;

			*temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
			w_length = USB_COMP_EP0_BUFSIZ;
		} else {
			goto done;
		}
	}

	/* partial re-init of the response message; the function or the
	 * gadget might need to intercept e.g. a control-OUT completion
	 * when we delegate to it.
	 */
	req->zero = 0;
	req->context = cdev;
	req->complete = composite_setup_complete;
	req->length = 0;
	gadget->ep0->driver_data = cdev;

	/*
	 * Don't let non-standard requests match any of the cases below
	 * by accident.
	 */
	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
		goto unknown;

	switch (ctrl->bRequest) {

	/* we handle all standard USB descriptors */
	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			cdev->desc.bNumConfigurations =
				count_configs(cdev, USB_DT_DEVICE);
			cdev->desc.bMaxPacketSize0 =
				cdev->gadget->ep0->maxpacket;
			/* bcdUSB reflects the negotiated speed and the
			 * presence of a BOS descriptor (LPM / WebUSB)
			 */
			if (gadget_is_superspeed(gadget)) {
				if (gadget->speed >= USB_SPEED_SUPER) {
					cdev->desc.bcdUSB = cpu_to_le16(0x0320);
					/* SuperSpeed ep0: 2^9 = 512 bytes */
					cdev->desc.bMaxPacketSize0 = 9;
				} else {
					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
				}
			} else {
				if (gadget->lpm_capable || cdev->use_webusb)
					cdev->desc.bcdUSB = cpu_to_le16(0x0201);
				else
					cdev->desc.bcdUSB = cpu_to_le16(0x0200);
			}

			value = min(w_length, (u16) sizeof cdev->desc);
			memcpy(req->buf, &cdev->desc, value);
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			device_qual(cdev);
			value = min_t(int, w_length,
				sizeof(struct usb_qualifier_descriptor));
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			fallthrough;
		case USB_DT_CONFIG:
			value = config_desc(cdev, w_value);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_STRING:
			value = get_string(cdev, req->buf,
					w_index, w_value & 0xff);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_BOS:
			if (gadget_is_superspeed(gadget) ||
			    gadget->lpm_capable || cdev->use_webusb) {
				value = bos_desc(cdev);
				value = min(w_length, (u16) value);
			}
			break;
		case USB_DT_OTG:
			if (gadget_is_otg(gadget)) {
				struct usb_configuration *config;
				int otg_desc_len = 0;

				/* the OTG descriptor is the first entry of
				 * any configuration's descriptor list
				 */
				if (cdev->config)
					config = cdev->config;
				else
					config = list_first_entry(
							&cdev->configs,
						struct usb_configuration, list);
				if (!config)
					goto done;

				if (gadget->otg_caps &&
					(gadget->otg_caps->otg_rev >= 0x0200))
					otg_desc_len += sizeof(
						struct usb_otg20_descriptor);
				else
					otg_desc_len += sizeof(
						struct usb_otg_descriptor);

				value = min_t(int, w_length, otg_desc_len);
				memcpy(req->buf, config->descriptors[0], value);
			}
			break;
		}
		break;

	/* any number of configs can work */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unknown;
		if (gadget_is_otg(gadget)) {
			if (gadget->a_hnp_support)
				DBG(cdev, "HNP available\n");
			else if (gadget->a_alt_hnp_support)
				DBG(cdev, "HNP on another port\n");
			else
				VDBG(cdev, "HNP inactive\n");
		}
		spin_lock(&cdev->lock);
		value = set_config(cdev, ctrl, w_value);
		spin_unlock(&cdev->lock);
		break;
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		if (cdev->config)
			*(u8 *)req->buf = cdev->config->bConfigurationValue;
		else
			*(u8 *)req->buf = 0;
		value = min(w_length, (u16) 1);
		break;

	/* function drivers must handle get/set altsetting */
	case USB_REQ_SET_INTERFACE:
		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;

		/*
		 * If there's no get_alt() method, we know only altsetting zero
		 * works. There is no need to check if set_alt() is not NULL
		 * as we check this in usb_add_function().
		 */
		if (w_value && !f->get_alt)
			break;

		spin_lock(&cdev->lock);
		value = f->set_alt(f, w_index, w_value);
		if (value == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			 "%s: interface %d (%s) requested delayed status\n",
					__func__, intf, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
					cdev->delayed_status);
		}
		spin_unlock(&cdev->lock);
		break;
	case USB_REQ_GET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		/* lots of interfaces only need altsetting zero... */
		value = f->get_alt ? f->get_alt(f, w_index) : 0;
		if (value < 0)
			break;
		*((u8 *)req->buf) = value;
		value = min(w_length, (u16) 1);
		break;
	case USB_REQ_GET_STATUS:
		/* OTG HNP polling uses GetStatus with a special selector */
		if (gadget_is_otg(gadget) && gadget->hnp_polling_support &&
						(w_index == OTG_STS_SELECTOR)) {
			if (ctrl->bRequestType != (USB_DIR_IN |
							USB_RECIP_DEVICE))
				goto unknown;
			*((u8 *)req->buf) = gadget->host_request_flag;
			value = 1;
			break;
		}

		/*
		 * USB 3.0 additions:
		 * Function driver should handle get_status request. If such cb
		 * wasn't supplied we respond with default value = 0
		 * Note: function driver should supply such cb only for the
		 * first interface of the function
		 */
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
			goto unknown;
		value = 2;	/* This is the length of the get_status reply */
		put_unaligned_le16(0, req->buf);
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;

		if (f->get_status) {
			status = f->get_status(f);
			if (status < 0)
				break;
		} else {
			/* Set D0 and D1 bits based on func wakeup capability */
			if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) {
				status |= USB_INTRF_STAT_FUNC_RW_CAP;
				if (f->func_wakeup_armed)
					status |= USB_INTRF_STAT_FUNC_RW;
			}
		}

		put_unaligned_le16(status & 0x0000ffff, req->buf);
		break;
	/*
	 * Function drivers should handle SetFeature/ClearFeature
	 * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
	 * only for the first interface of the function
	 */
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
			goto unknown;
		switch (w_value) {
		case USB_INTRF_FUNC_SUSPEND:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			if (!f)
				break;
			value = 0;
			if (f->func_suspend) {
				/* suspend options travel in the high byte
				 * of wIndex
				 */
				value = f->func_suspend(f, w_index >> 8);
			/* SetFeature(FUNCTION_SUSPEND) */
			} else if (ctrl->bRequest == USB_REQ_SET_FEATURE) {
				if (!(f->config->bmAttributes &
				      USB_CONFIG_ATT_WAKEUP) &&
				    (w_index & USB_INTRF_FUNC_SUSPEND_RW))
					break;

				f->func_wakeup_armed = !!(w_index &
							  USB_INTRF_FUNC_SUSPEND_RW);

				if (w_index & USB_INTRF_FUNC_SUSPEND_LP) {
					if (f->suspend && !f->func_suspended) {
						f->suspend(f);
						f->func_suspended = true;
					}
				/*
				 * Handle cases where host sends function resume
				 * through SetFeature(FUNCTION_SUSPEND) but low power
				 * bit reset
				 */
				} else {
					if (f->resume && f->func_suspended) {
						f->resume(f);
						f->func_suspended = false;
					}
				}
			/* ClearFeature(FUNCTION_SUSPEND) */
			} else if (ctrl->bRequest == USB_REQ_CLEAR_FEATURE) {
				f->func_wakeup_armed = false;

				if (f->resume && f->func_suspended) {
					f->resume(f);
					f->func_suspended = false;
				}
			}

			if (value < 0) {
				ERROR(cdev,
				      "func_suspend() returned error %d\n",
				      value);
				value = 0;
			}
			break;
		}
		break;
	default:
unknown:
		/*
		 * OS descriptors handling
		 */
		if (cdev->use_os_string && cdev->os_desc_config &&
		    (ctrl->bRequestType & USB_TYPE_VENDOR) &&
		    ctrl->bRequest == cdev->b_vendor_code) {
			struct usb_configuration	*os_desc_cfg;
			u8				*buf;
			int				interface;
			int				count = 0;

			/* these requests use the dedicated, larger
			 * os_desc_req instead of the normal ep0 request
			 */
			req = cdev->os_desc_req;
			req->context = cdev;
			req->complete = composite_setup_complete;
			buf = req->buf;
			os_desc_cfg = cdev->os_desc_config;
			w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
			memset(buf, 0, w_length);
			buf[5] = 0x01;	/* bcdVersion 1.0, low byte */
			switch (ctrl->bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_DEVICE:
				/* wIndex 4: extended compat ID descriptor */
				if (w_index != 0x4 || (w_value >> 8))
					break;
				buf[6] = w_index;
				/* Number of ext compat interfaces */
				count = count_ext_compat(os_desc_cfg);
				buf[8] = count;
				count *= 24; /* 24 B/ext compat desc */
				count += 16; /* header */
				put_unaligned_le32(count, buf);
				value = w_length;
				if (w_length > 0x10) {
					value = fill_ext_compat(os_desc_cfg, buf);
					value = min_t(u16, w_length, value);
				}
				break;
			case USB_RECIP_INTERFACE:
				/* wIndex 5: extended properties descriptor */
				if (w_index != 0x5 || (w_value >> 8))
					break;
				interface = w_value & 0xFF;
				if (interface >= MAX_CONFIG_INTERFACES ||
				    !os_desc_cfg->interface[interface])
					break;
				buf[6] = w_index;
				count = count_ext_prop(os_desc_cfg,
					interface);
				put_unaligned_le16(count, buf + 8);
				count = len_ext_prop(os_desc_cfg,
					interface);
				put_unaligned_le32(count, buf);
				value = w_length;
				if (w_length > 0x0A) {
					value = fill_ext_prop(os_desc_cfg,
							      interface, buf);
					if (value >= 0)
						value = min_t(u16, w_length, value);
				}
				break;
			}

			goto check_value;
		}

		/*
		 * WebUSB URL descriptor handling, following:
		 * https://wicg.github.io/webusb/#device-requests
		 */
		if (cdev->use_webusb &&
		    ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR) &&
		    w_index == WEBUSB_GET_URL &&
		    w_value == WEBUSB_LANDING_PAGE_PRESENT &&
		    ctrl->bRequest == cdev->b_webusb_vendor_code) {
			unsigned int	landing_page_length;
			unsigned int	landing_page_offset;
			struct webusb_url_descriptor *url_descriptor =
					(struct webusb_url_descriptor *)cdev->req->buf;

			url_descriptor->bDescriptorType = WEBUSB_URL_DESCRIPTOR_TYPE;

			/* the URL scheme prefix is encoded as bScheme and
			 * stripped from the stored URL string
			 */
			if (strncasecmp(cdev->landing_page, "https://",  8) == 0) {
				landing_page_offset = 8;
				url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTPS;
			} else if (strncasecmp(cdev->landing_page, "http://", 7) == 0) {
				landing_page_offset = 7;
				url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTP;
			} else {
				landing_page_offset = 0;
				url_descriptor->bScheme = WEBUSB_URL_SCHEME_NONE;
			}

			/* NOTE(review): the length math below folds
			 * landing_page_offset into both the strnlen bound and
			 * the w_length truncation; verify against the WebUSB
			 * spec that truncated replies stay within bounds.
			 */
			landing_page_length = strnlen(cdev->landing_page,
				sizeof(url_descriptor->URL)
				- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);

			if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length)
				landing_page_length = w_length
					- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;

			memcpy(url_descriptor->URL,
				cdev->landing_page + landing_page_offset,
				landing_page_length - landing_page_offset);
			url_descriptor->bLength = landing_page_length
				- landing_page_offset + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH;

			value = url_descriptor->bLength;

			goto check_value;
		}

		VDBG(cdev,
			"non-core control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

		/* functions always handle their interfaces and endpoints...
		 * punt other recipients (other, WUSB, ...) to the current
		 * configuration code.
		 */
		if (cdev->config) {
			list_for_each_entry(f, &cdev->config->functions, list)
				if (f->req_match &&
				    f->req_match(f, ctrl, false))
					goto try_fun_setup;
		} else {
			struct usb_configuration *c;
			list_for_each_entry(c, &cdev->configs, list)
				list_for_each_entry(f, &c->functions, list)
					if (f->req_match &&
					    f->req_match(f, ctrl, true))
						goto try_fun_setup;
		}
		f = NULL;

		switch (ctrl->bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_INTERFACE:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			break;

		case USB_RECIP_ENDPOINT:
			if (!cdev->config)
				break;
			/* fold the direction bit into bits 4..7 to index the
			 * functions' endpoint bitmap (matches set_config())
			 */
			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
			list_for_each_entry(iter, &cdev->config->functions, list) {
				if (test_bit(endp, iter->endpoints)) {
					f = iter;
					break;
				}
			}
			break;
		}
try_fun_setup:
		if (f && f->setup)
			value = f->setup(f, ctrl);
		else {
			struct usb_configuration	*c;

			c = cdev->config;
			if (!c)
				goto done;

			/* try current config's setup */
			if (c->setup) {
				value = c->setup(c, ctrl);
				goto done;
			}

			/* try the only function in the current config */
			if (!list_is_singular(&c->functions))
				goto done;
			f = list_first_entry(&c->functions, struct usb_function,
					     list);
			if (f->setup)
				value = f->setup(f, ctrl);
		}

		goto done;
	}

check_value:
	/* respond with data transfer before status phase? */
	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
		req->length = value;
		req->context = cdev;
		req->zero = value < w_length;
		value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(gadget->ep0, req);
		}
	} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
		WARN(cdev,
			"%s: Delayed status not supported for w_length != 0",
			__func__);
	}

done:
	/* device either stalls (value < 0) or reports success */
	return value;
}
/* Common disconnect path: under the device lock, clear the suspended
 * flag, tear down the active configuration, and notify the composite
 * driver's disconnect() callback.
 */
static void __composite_disconnect(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	unsigned long			flags;

	/* REVISIT:  should we have config and device level
	 * disconnect callbacks?
	 */
	spin_lock_irqsave(&cdev->lock, flags);
	cdev->suspended = 0;
	if (cdev->config)
		reset_config(cdev);
	if (cdev->driver->disconnect)
		cdev->driver->disconnect(cdev);
	spin_unlock_irqrestore(&cdev->lock, flags);
}
/* UDC-driver disconnect hook: drop VBUS current to zero before running
 * the shared disconnect path.
 */
void composite_disconnect(struct usb_gadget *gadget)
{
	usb_gadget_vbus_draw(gadget, 0);
	__composite_disconnect(gadget);
}
/* UDC-driver reset hook: fall back to the 100 mA unconfigured budget,
 * then run the shared disconnect path to drop the old configuration.
 */
void composite_reset(struct usb_gadget *gadget)
{
	/*
	 * Section 1.4.13 Standard Downstream Port of the USB battery charging
	 * specification v1.2 states that a device connected on a SDP shall only
	 * draw at max 100mA while in a connected, but unconfigured state.
	 */
	usb_gadget_vbus_draw(gadget, 100);
	__composite_disconnect(gadget);
}
/*-------------------------------------------------------------------------*/
/* sysfs "suspended" attribute: report whether the gadget is currently
 * suspended (cdev->suspended, 0 or 1).
 */
static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	/* sysfs_emit() bounds the write to PAGE_SIZE as required for
	 * sysfs show() callbacks; raw sprintf() gives no such guarantee.
	 */
	return sysfs_emit(buf, "%d\n", cdev->suspended);
}
static DEVICE_ATTR_RO(suspended);
/* Undo composite_bind(): remove every configuration, optionally call
 * the composite driver's unbind(), release ep0 resources and the
 * override-manufacturer string, then free the composite device.
 */
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	struct usb_gadget_strings	*gstr = cdev->driver->strings[0];
	struct usb_string		*dev_str = gstr->strings;

	/* composite_disconnect() must already have been called
	 * by the underlying peripheral controller driver!
	 * so there's no i/o concurrency that could affect the
	 * state protected by cdev->lock.
	 */
	WARN_ON(cdev->config);

	while (!list_empty(&cdev->configs)) {
		struct usb_configuration	*c;
		c = list_first_entry(&cdev->configs,
				struct usb_configuration, list);
		remove_config(cdev, c);
	}
	if (cdev->driver->unbind && unbind_driver)
		cdev->driver->unbind(cdev);

	composite_dev_cleanup(cdev);

	/* don't leave the string table pointing at memory freed below */
	if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
		dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";

	kfree(cdev->def_manufacturer);
	kfree(cdev);
	set_gadget_data(gadget, NULL);
}
/* Gadget-driver ->unbind hook: full teardown, including the composite
 * driver's own unbind callback.
 */
static void composite_unbind(struct usb_gadget *gadget)
{
	__composite_unbind(gadget, true);
}
/*
 * update_unchanged_dev_desc - merge the driver's template descriptor.
 * @new: live device descriptor, possibly carrying user overrides
 * @old: the composite driver's template descriptor
 *
 * Copies @old over @new, then restores any fields of @new that were
 * already explicitly set (non-zero) before the copy.  An unset
 * bcdDevice falls back to a kernel-version-derived default.
 */
static void update_unchanged_dev_desc(struct usb_device_descriptor *new,
		const struct usb_device_descriptor *old)
{
	__le16 idVendor;
	__le16 idProduct;
	__le16 bcdDevice;
	u8 iSerialNumber;
	u8 iManufacturer;
	u8 iProduct;

	/*
	 * these variables may have been set in
	 * usb_composite_overwrite_options()
	 */
	idVendor = new->idVendor;
	idProduct = new->idProduct;
	bcdDevice = new->bcdDevice;
	iSerialNumber = new->iSerialNumber;
	iManufacturer = new->iManufacturer;
	iProduct = new->iProduct;

	*new = *old;

	/* put back whatever was explicitly overridden */
	if (idVendor)
		new->idVendor = idVendor;
	if (idProduct)
		new->idProduct = idProduct;
	if (bcdDevice)
		new->bcdDevice = bcdDevice;
	else
		new->bcdDevice = cpu_to_le16(get_default_bcdDevice());
	if (iSerialNumber)
		new->iSerialNumber = iSerialNumber;
	if (iManufacturer)
		new->iManufacturer = iManufacturer;
	if (iProduct)
		new->iProduct = iProduct;
}
/*
 * composite_dev_prepare - allocate the shared ep0 machinery.
 * @composite: the composite driver being bound
 * @cdev: the composite device under construction
 *
 * Preallocates the ep0 control request and its buffer, creates the
 * "suspended" sysfs attribute, and resets endpoint autoconfig state.
 * Returns 0 or a negative errno; on failure everything allocated here
 * is released again (cdev->req is reset to NULL).
 */
int composite_dev_prepare(struct usb_composite_driver *composite,
		struct usb_composite_dev *cdev)
{
	struct usb_gadget *gadget = cdev->gadget;
	int ret = -ENOMEM;

	/* preallocate control response and buffer */
	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!cdev->req)
		return -ENOMEM;

	cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
	if (!cdev->req->buf)
		goto fail;

	ret = device_create_file(&gadget->dev, &dev_attr_suspended);
	if (ret)
		goto fail_dev;

	cdev->req->complete = composite_setup_complete;
	cdev->req->context = cdev;
	gadget->ep0->driver_data = cdev;

	cdev->driver = composite;

	/*
	 * As per USB compliance update, a device that is actively drawing
	 * more than 100mA from USB must report itself as bus-powered in
	 * the GetStatus(DEVICE) call.
	 */
	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);

	/* interface and string IDs start at zero via kzalloc.
	 * we force endpoints to start unassigned; few controller
	 * drivers will zero ep->driver_data.
	 */
	usb_ep_autoconfig_reset(gadget);
	return 0;
fail_dev:
	kfree(cdev->req->buf);
fail:
	usb_ep_free_request(gadget->ep0, cdev->req);
	cdev->req = NULL;
	return ret;
}
int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
struct usb_ep *ep0)
{
int ret = 0;
cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
if (!cdev->os_desc_req) {
ret = -ENOMEM;
goto end;
}
cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
cdev->os_desc_req->complete = composite_setup_complete;
end:
return ret;
}
/*
 * composite_dev_cleanup - release everything composite_dev_prepare()
 * and composite_os_desc_req_prepare() allocated.
 * @cdev: the composite device being torn down
 *
 * Frees gadget string containers, dequeues and frees any pending ep0
 * requests, removes the sysfs attribute, and lets UDCs with dynamic
 * endpoint allocation dispose of their endpoints.
 */
void composite_dev_cleanup(struct usb_composite_dev *cdev)
{
	struct usb_gadget_string_container *uc, *tmp;
	struct usb_ep *ep, *tmp_ep;

	list_for_each_entry_safe(uc, tmp, &cdev->gstrings, list) {
		list_del(&uc->list);
		kfree(uc);
	}
	if (cdev->os_desc_req) {
		/* a queued request must be dequeued before it is freed */
		if (cdev->os_desc_pending)
			usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);

		kfree(cdev->os_desc_req->buf);
		cdev->os_desc_req->buf = NULL;
		usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
		cdev->os_desc_req = NULL;
	}
	if (cdev->req) {
		if (cdev->setup_pending)
			usb_ep_dequeue(cdev->gadget->ep0, cdev->req);

		kfree(cdev->req->buf);
		cdev->req->buf = NULL;
		usb_ep_free_request(cdev->gadget->ep0, cdev->req);
		cdev->req = NULL;
	}
	cdev->next_string_id = 0;
	device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);

	/*
	 * Some UDC backends have a dynamic EP allocation scheme.
	 *
	 * In that case, the dispose() callback is used to notify the
	 * backend that the EPs are no longer in use.
	 *
	 * Note: The UDC backend can remove the EP from the ep_list as
	 * a result, so we need to use the _safe list iterator.
	 */
	list_for_each_entry_safe(ep, tmp_ep,
			&cdev->gadget->ep_list, ep_list) {
		if (ep->ops->dispose)
			ep->ops->dispose(ep);
	}
}
/*
 * composite_bind - gadget-driver ->bind hook for composite devices.
 * @gadget: the controller being bound
 * @gdriver: generic driver, embedded in a struct usb_composite_driver
 *
 * Allocates and initializes the composite device, runs the composite
 * driver's bind() (which registers configurations and functions),
 * optionally prepares the OS-descriptor request, and merges the
 * template device descriptor.  On any failure the partial state is
 * unwound via __composite_unbind(..., false).
 */
static int composite_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *gdriver)
{
	struct usb_composite_dev *cdev;
	struct usb_composite_driver *composite = to_cdriver(gdriver);
	int status = -ENOMEM;

	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
	if (!cdev)
		return status;

	spin_lock_init(&cdev->lock);
	cdev->gadget = gadget;
	set_gadget_data(gadget, cdev);
	INIT_LIST_HEAD(&cdev->configs);
	INIT_LIST_HEAD(&cdev->gstrings);

	status = composite_dev_prepare(composite, cdev);
	if (status)
		goto fail;

	/* composite gadget needs to assign strings for whole device (like
	 * serial number), register function drivers, potentially update
	 * power state and consumption, etc
	 */
	status = composite->bind(cdev);
	if (status < 0)
		goto fail;

	/* bind() may have set use_os_string; allocate its request now */
	if (cdev->use_os_string) {
		status = composite_os_desc_req_prepare(cdev, gadget->ep0);
		if (status)
			goto fail;
	}

	update_unchanged_dev_desc(&cdev->desc, composite->dev);

	/* has userspace failed to provide a serial number? */
	if (composite->needs_serial && !cdev->desc.iSerialNumber)
		WARNING(cdev, "userspace failed to provide iSerialNumber\n");

	INFO(cdev, "%s ready\n", composite->name);
	return 0;

fail:
	__composite_unbind(gadget, false);
	return status;
}
/*-------------------------------------------------------------------------*/
/*
 * composite_suspend - gadget-level suspend handler.
 * @gadget: the suspended peripheral
 *
 * Propagates the suspend to every function of the active configuration
 * and then to the composite driver, records the suspended state, and
 * drops the bus current budget to the 2 mA suspend allowance (while
 * reporting as self-powered).
 */
void composite_suspend(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;

	/* REVISIT: should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "suspend\n");
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->suspend)
				f->suspend(f);
		}
	}
	if (cdev->driver->suspend)
		cdev->driver->suspend(cdev);

	cdev->suspended = 1;

	usb_gadget_set_selfpowered(gadget);
	usb_gadget_vbus_draw(gadget, 2);
}
/*
 * composite_resume - gadget-level resume handler.
 * @gadget: the resumed peripheral
 *
 * Notifies the composite driver and every (not function-suspended)
 * function, then restores the VBUS current budget: the configuration's
 * MaxPower capped at 500 mA (below SuperSpeed) or 900 mA, or the
 * unconfigured 100 mA limit when no config is active.
 */
void composite_resume(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;
	unsigned maxpower;

	/* REVISIT: should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "resume\n");
	if (cdev->driver->resume)
		cdev->driver->resume(cdev);

	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			/*
			 * Check for func_suspended flag to see if the function is
			 * in USB3 FUNCTION_SUSPEND state. In this case resume is
			 * done via FUNCTION_SUSPEND feature selector.
			 */
			if (f->resume && !f->func_suspended)
				f->resume(f);
		}

		maxpower = cdev->config->MaxPower ?
			cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
		if (gadget->speed < USB_SPEED_SUPER)
			maxpower = min(maxpower, 500U);
		else
			maxpower = min(maxpower, 900U);

		/* drawing more than 100 mA means we are bus-powered */
		if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
			usb_gadget_clear_selfpowered(gadget);

		usb_gadget_vbus_draw(gadget, maxpower);
	} else {
		maxpower = CONFIG_USB_GADGET_VBUS_DRAW;
		maxpower = min(maxpower, 100U);
		usb_gadget_vbus_draw(gadget, maxpower);
	}

	cdev->suspended = 0;
}
/*-------------------------------------------------------------------------*/
/* Template gadget driver: usb_composite_probe() copies this into each
 * registered composite driver, then personalizes name/function/speed.
 */
static const struct usb_gadget_driver composite_driver_template = {
	.bind		= composite_bind,
	.unbind		= composite_unbind,

	.setup		= composite_setup,
	.reset		= composite_reset,
	.disconnect	= composite_disconnect,

	.suspend	= composite_suspend,
	.resume		= composite_resume,

	.driver	= {
		.owner		= THIS_MODULE,
	},
};
/**
* usb_composite_probe() - register a composite driver
* @driver: the driver to register
*
* Context: single threaded during gadget setup
*
* This function is used to register drivers using the composite driver
* framework. The return value is zero, or a negative errno value.
* Those values normally come from the driver's @bind method, which does
* all the work of setting up the driver to match the hardware.
*
* On successful return, the gadget is ready to respond to requests from
* the host, unless one of its components invokes usb_gadget_disconnect()
* while it was binding. That would usually be done in order to wait for
* some userspace participation.
*/
int usb_composite_probe(struct usb_composite_driver *driver)
{
	struct usb_gadget_driver *gd;

	/* the device descriptor and bind() method are mandatory */
	if (!driver || !driver->dev || !driver->bind)
		return -EINVAL;

	if (!driver->name)
		driver->name = "composite";

	/* start from the shared template, then personalize it */
	gd = &driver->gadget_driver;
	*gd = composite_driver_template;
	gd->function = (char *) driver->name;
	gd->driver.name = driver->name;
	gd->max_speed = driver->max_speed;

	return usb_gadget_register_driver(gd);
}
EXPORT_SYMBOL_GPL(usb_composite_probe);
/**
* usb_composite_unregister() - unregister a composite driver
* @driver: the driver to unregister
*
* This function is used to unregister drivers using the composite
* driver framework.
*/
void usb_composite_unregister(struct usb_composite_driver *driver)
{
	/* unbinds via composite_unbind() and drops the UDC binding */
	usb_gadget_unregister_driver(&driver->gadget_driver);
}
EXPORT_SYMBOL_GPL(usb_composite_unregister);
/**
* usb_composite_setup_continue() - Continue with the control transfer
* @cdev: the composite device who's control transfer was kept waiting
*
* This function must be called by the USB function driver to continue
* with the control transfer's data/status stage in case it had requested to
* delay the data/status stages. A USB function's setup handler (e.g. set_alt())
* can request the composite framework to delay the setup request's data/status
* stages by returning USB_GADGET_DELAYED_STATUS.
*/
void usb_composite_setup_continue(struct usb_composite_dev *cdev)
{
	int value;
	struct usb_request *req = cdev->req;
	unsigned long flags;

	DBG(cdev, "%s\n", __func__);
	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->delayed_status == 0) {
		/* no function asked to delay the status stage */
		WARN(cdev, "%s: Unexpected call\n", __func__);

	} else if (--cdev->delayed_status == 0) {
		/* last outstanding delay released: queue the zero-length
		 * status-stage response now
		 */
		DBG(cdev, "%s: Completing delayed status\n", __func__);
		req->length = 0;
		req->context = cdev;
		value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(cdev->gadget->ep0, req);
		}
	}

	spin_unlock_irqrestore(&cdev->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_composite_setup_continue);
static char *composite_default_mfr(struct usb_gadget *gadget)
{
return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname,
init_utsname()->release, gadget->name);
}
/*
 * usb_composite_overwrite_options - apply module-parameter overrides.
 * @cdev: the composite device
 * @covr: user-supplied overrides (IDs and strings); zero/NULL fields
 *	are left alone
 *
 * Patches the device descriptor and device-level string table with the
 * values the user passed in.  When no manufacturer is given and the
 * table entry is empty, a default "<sysname> <release> with <udc>"
 * string is generated and owned by @cdev (freed at unbind).
 */
void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
		struct usb_composite_overwrite *covr)
{
	struct usb_device_descriptor *desc = &cdev->desc;
	struct usb_gadget_strings *gstr = cdev->driver->strings[0];
	struct usb_string *dev_str = gstr->strings;

	if (covr->idVendor)
		desc->idVendor = cpu_to_le16(covr->idVendor);

	if (covr->idProduct)
		desc->idProduct = cpu_to_le16(covr->idProduct);

	if (covr->bcdDevice)
		desc->bcdDevice = cpu_to_le16(covr->bcdDevice);

	if (covr->serial_number) {
		desc->iSerialNumber = dev_str[USB_GADGET_SERIAL_IDX].id;
		dev_str[USB_GADGET_SERIAL_IDX].s = covr->serial_number;
	}
	if (covr->manufacturer) {
		desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id;
		dev_str[USB_GADGET_MANUFACTURER_IDX].s = covr->manufacturer;

	} else if (!strlen(dev_str[USB_GADGET_MANUFACTURER_IDX].s)) {
		/* fall back to a generated default manufacturer string */
		desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id;
		cdev->def_manufacturer = composite_default_mfr(cdev->gadget);
		dev_str[USB_GADGET_MANUFACTURER_IDX].s = cdev->def_manufacturer;
	}

	if (covr->product) {
		desc->iProduct = dev_str[USB_GADGET_PRODUCT_IDX].id;
		dev_str[USB_GADGET_PRODUCT_IDX].s = covr->product;
	}
}
EXPORT_SYMBOL_GPL(usb_composite_overwrite_options);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/composite.c |
// SPDX-License-Identifier: LGPL-2.1+
/*
* Copyright (C) 2003 David Brownell
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/nls.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
* @buf: at least 256 bytes, must be 16-bit aligned
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
* Returns length of descriptor (always even) or negative errno
*
* If your driver needs stings in multiple languages, you'll probably
* "switch (wIndex) { ... }" in your ep0 string descriptor logic,
* using this routine after choosing which set of UTF-8 strings to use.
* Note that US-ASCII is a strict subset of UTF-8; any string bytes with
* the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
* characters (which are also widely used in C strings).
*/
int
usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf)
{
	struct usb_string *s;
	int len;

	/* descriptor 0 reports the (single) supported language ID */
	if (id == 0) {
		buf[0] = 4;
		buf[1] = USB_DT_STRING;
		buf[2] = (u8) table->language;
		buf[3] = (u8) (table->language >> 8);
		return 4;
	}

	/* scan the table for a matching string id */
	for (s = table->strings; s && s->s; s++) {
		if (s->id == id)
			break;
	}

	/* unrecognized: stall. */
	if (!s || !s->s)
		return -EINVAL;

	/* length byte, descriptor type byte, then UTF16-LE payload */
	len = min((size_t) USB_MAX_STRING_LEN, strlen(s->s));
	len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
			(wchar_t *) &buf[2], USB_MAX_STRING_LEN);
	if (len < 0)
		return -EINVAL;
	buf[0] = (len + 1) * 2;
	buf[1] = USB_DT_STRING;
	return buf[0];
}
EXPORT_SYMBOL_GPL(usb_gadget_get_string);
/**
* usb_validate_langid - validate usb language identifiers
* @langid: usb language identifier
*
* Returns true for valid language identifier, otherwise false.
*/
bool usb_validate_langid(u16 langid)
{
	u16 primary = langid & 0x3ff;	/* bits [9:0]   */
	u16 sub = langid >> 10;		/* bits [15:10] */

	/* a zero sublanguage is never valid */
	if (!sub)
		return false;

	/* primary language 0 and the reserved ranges are invalid */
	if (primary == 0)
		return false;
	if (primary >= 0x62 && primary <= 0xfe)
		return false;
	if (primary >= 0x100)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(usb_validate_langid);
| linux-master | drivers/usb/gadget/usbstring.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* usb/gadget/config.c -- simplify building config descriptors
*
* Copyright (C) 2003 David Brownell
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
/**
* usb_descriptor_fillbuf - fill buffer with descriptors
* @buf: Buffer to be filled
* @buflen: Size of buf
* @src: Array of descriptor pointers, terminated by null pointer.
*
* Copies descriptors into the buffer, returning the length or a
* negative error code if they can't all be copied. Useful when
* assembling descriptors for an associated set of interfaces used
* as part of configuring a composite device; or in other cases where
* sets of descriptors need to be marshaled.
*/
int
usb_descriptor_fillbuf(void *buf, unsigned buflen,
		const struct usb_descriptor_header **src)
{
	u8 *dest = buf;

	if (!src)
		return -EINVAL;

	/* copy descriptors until the terminating NULL entry,
	 * failing if any one would overflow the buffer
	 */
	while (*src) {
		unsigned len = (*src)->bLength;

		if (len > buflen)
			return -EINVAL;
		memcpy(dest, *src, len);
		buflen -= len;
		dest += len;
		src++;
	}
	return dest - (u8 *)buf;
}
EXPORT_SYMBOL_GPL(usb_descriptor_fillbuf);
/**
* usb_gadget_config_buf - builts a complete configuration descriptor
* @config: Header for the descriptor, including characteristics such
* as power requirements and number of interfaces.
* @desc: Null-terminated vector of pointers to the descriptors (interface,
* endpoint, etc) defining all functions in this device configuration.
* @buf: Buffer for the resulting configuration descriptor.
* @length: Length of buffer. If this is not big enough to hold the
* entire configuration descriptor, an error code will be returned.
*
* This copies descriptors into the response buffer, building a descriptor
* for that configuration. It returns the buffer length or a negative
* status code. The config.wTotalLength field is set to match the length
* of the result, but other descriptor fields (including power usage and
* interface count) must be set by the caller.
*
* Gadget drivers could use this when constructing a config descriptor
* in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the
* resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
*/
int usb_gadget_config_buf(
	const struct usb_config_descriptor *config,
	void *buf,
	unsigned length,
	const struct usb_descriptor_header **desc
)
{
	struct usb_config_descriptor *cp = buf;
	int len;

	/* config descriptor first */
	if (length < USB_DT_CONFIG_SIZE || !desc)
		return -EINVAL;
	*cp = *config;

	/* then interface/endpoint/class/vendor/... */
	len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8 *)buf,
			length - USB_DT_CONFIG_SIZE, desc);
	if (len < 0)
		return len;
	len += USB_DT_CONFIG_SIZE;
	/* wTotalLength is only 16 bits wide */
	if (len > 0xffff)
		return -EINVAL;

	/* patch up the config descriptor */
	cp->bLength = USB_DT_CONFIG_SIZE;
	cp->bDescriptorType = USB_DT_CONFIG;
	cp->wTotalLength = cpu_to_le16(len);
	/* bit 7 (USB 1.1 "bus powered") must always be set */
	cp->bmAttributes |= USB_CONFIG_ATT_ONE;
	return len;
}
EXPORT_SYMBOL_GPL(usb_gadget_config_buf);
/**
* usb_copy_descriptors - copy a vector of USB descriptors
* @src: null-terminated vector to copy
* Context: initialization code, which may sleep
*
* This makes a copy of a vector of USB descriptors. Its primary use
* is to support usb_function objects which can have multiple copies,
* each needing different descriptors. Functions may have static
* tables of descriptors, which are used as templates and customized
* with identifiers (for interfaces, strings, endpoints, and more)
* as needed by a given function instance.
*/
struct usb_descriptor_header **
usb_copy_descriptors(struct usb_descriptor_header **src)
{
	struct usb_descriptor_header **tmp;
	unsigned bytes;
	unsigned n_desc;
	void *mem;
	struct usb_descriptor_header **ret;

	/* count descriptors and their sizes; then add vector size */
	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
		bytes += (*tmp)->bLength;
	bytes += (n_desc + 1) * sizeof(*tmp);

	/* one allocation holds both the pointer vector and the copies */
	mem = kmalloc(bytes, GFP_KERNEL);
	if (!mem)
		return NULL;

	/* fill in pointers starting at "tmp",
	 * to descriptors copied starting at "mem";
	 * and return "ret"
	 */
	tmp = mem;
	ret = mem;
	mem += (n_desc + 1) * sizeof(*tmp);
	while (*src) {
		memcpy(mem, *src, (*src)->bLength);
		*tmp = mem;
		tmp++;
		mem += (*src)->bLength;
		src++;
	}
	/* NULL-terminate the vector, mirroring the input */
	*tmp = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(usb_copy_descriptors);
/*
 * usb_assign_descriptors - install per-speed descriptor copies on a
 * function.
 * @f: the usb_function to populate
 * @fs: full-speed descriptor vector, or NULL
 * @hs: high-speed descriptor vector, or NULL
 * @ss: super-speed descriptor vector, or NULL
 * @ssp: super-speed-plus descriptor vector, or NULL
 *
 * Each non-NULL vector is deep-copied via usb_copy_descriptors().
 * Returns 0 or -ENOMEM; on failure all copies made so far are freed.
 */
int usb_assign_descriptors(struct usb_function *f,
		struct usb_descriptor_header **fs,
		struct usb_descriptor_header **hs,
		struct usb_descriptor_header **ss,
		struct usb_descriptor_header **ssp)
{
	/* super-speed-plus descriptor falls back to super-speed one,
	 * if such a descriptor was provided, thus avoiding a NULL
	 * pointer dereference if a 5gbps capable gadget is used with
	 * a 10gbps capable config (device port + cable + host port)
	 */
	if (!ssp)
		ssp = ss;

	if (fs) {
		f->fs_descriptors = usb_copy_descriptors(fs);
		if (!f->fs_descriptors)
			goto err;
	}
	if (hs) {
		f->hs_descriptors = usb_copy_descriptors(hs);
		if (!f->hs_descriptors)
			goto err;
	}
	if (ss) {
		f->ss_descriptors = usb_copy_descriptors(ss);
		if (!f->ss_descriptors)
			goto err;
	}
	if (ssp) {
		f->ssp_descriptors = usb_copy_descriptors(ssp);
		if (!f->ssp_descriptors)
			goto err;
	}
	return 0;
err:
	usb_free_all_descriptors(f);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_assign_descriptors);
/* Release every per-speed descriptor copy a function owns and clear the
 * pointers so a later usb_assign_descriptors() starts from a clean slate.
 */
void usb_free_all_descriptors(struct usb_function *f)
{
	usb_free_descriptors(f->fs_descriptors);
	usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->ss_descriptors);
	usb_free_descriptors(f->ssp_descriptors);

	f->fs_descriptors = NULL;
	f->hs_descriptors = NULL;
	f->ss_descriptors = NULL;
	f->ssp_descriptors = NULL;
}
EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
/* Allocate a zeroed OTG descriptor sized for the gadget's OTG revision:
 * OTG 2.0 adds the bcdOTG field, so its descriptor is larger.
 * Returns NULL on allocation failure.
 */
struct usb_descriptor_header *usb_otg_descriptor_alloc(
				struct usb_gadget *gadget)
{
	unsigned length;

	if (gadget->otg_caps && gadget->otg_caps->otg_rev >= 0x0200)
		length = sizeof(struct usb_otg20_descriptor);
	else
		length = sizeof(struct usb_otg_descriptor);

	return kzalloc(length, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_otg_descriptor_alloc);
/*
 * usb_otg_descriptor_init - fill in a previously allocated OTG
 * descriptor.
 * @gadget: the gadget whose otg_caps determine revision and attributes
 * @otg_desc: descriptor from usb_otg_descriptor_alloc()
 *
 * Lays the buffer out as either a legacy OTG 1.x descriptor or an
 * OTG 2.0 descriptor (with bcdOTG), based on gadget->otg_caps.
 * Returns 0, or -EINVAL if @otg_desc is NULL.
 */
int usb_otg_descriptor_init(struct usb_gadget *gadget,
		struct usb_descriptor_header *otg_desc)
{
	struct usb_otg_descriptor *otg1x_desc;
	struct usb_otg20_descriptor *otg20_desc;
	struct usb_otg_caps *otg_caps = gadget->otg_caps;
	u8 otg_attributes = 0;

	if (!otg_desc)
		return -EINVAL;

	if (otg_caps && otg_caps->otg_rev) {
		if (otg_caps->hnp_support)
			otg_attributes |= USB_OTG_HNP;
		if (otg_caps->srp_support)
			otg_attributes |= USB_OTG_SRP;
		/* ADP only exists from OTG 2.0 on */
		if (otg_caps->adp_support && (otg_caps->otg_rev >= 0x0200))
			otg_attributes |= USB_OTG_ADP;
	} else {
		/* no capability info: advertise the legacy default */
		otg_attributes = USB_OTG_SRP | USB_OTG_HNP;
	}

	if (otg_caps && (otg_caps->otg_rev >= 0x0200)) {
		otg20_desc = (struct usb_otg20_descriptor *)otg_desc;
		otg20_desc->bLength = sizeof(struct usb_otg20_descriptor);
		otg20_desc->bDescriptorType = USB_DT_OTG;
		otg20_desc->bmAttributes = otg_attributes;
		otg20_desc->bcdOTG = cpu_to_le16(otg_caps->otg_rev);
	} else {
		otg1x_desc = (struct usb_otg_descriptor *)otg_desc;
		otg1x_desc->bLength = sizeof(struct usb_otg_descriptor);
		otg1x_desc->bDescriptorType = USB_DT_OTG;
		otg1x_desc->bmAttributes = otg_attributes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_otg_descriptor_init);
| linux-master | drivers/usb/gadget/config.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* at91_udc -- driver for at91-series USB peripheral controller
*
* Copyright (C) 2004 by Thomas Rathbone
* Copyright (C) 2005 by HP Labs
* Copyright (C) 2005 by David Brownell
*/
#undef VERBOSE_DEBUG
#undef PACKET_TRACE
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-matrix.h>
#include "at91_udc.h"
/*
* This controller is simple and PIO-only. It's used in many AT91-series
* full speed USB controllers, including the at91rm9200 (arm920T, with MMU),
* at91sam926x (arm926ejs, with MMU), and several no-mmu versions.
*
* This driver expects the board has been wired with two GPIOs supporting
* a VBUS sensing IRQ, and a D+ pullup. (They may be omitted, but the
* testing hasn't covered such cases.)
*
* The pullup is most important (so it's integrated on sam926x parts). It
* provides software control over whether the host enumerates the device.
*
* The VBUS sensing helps during enumeration, and allows both USB clocks
* (and the transceiver) to stay gated off until they're necessary, saving
* power. During USB suspend, the 48 MHz clock is gated off in hardware;
* it may also be gated off by software during some Linux sleep states.
*/
#define DRIVER_VERSION "3 May 2006"
static const char driver_name [] = "at91_udc";
/*
 * Endpoint table for the AT91 UDC: ep0 is control-only, ep3 is
 * interrupt-only, the remaining endpoints accept any transfer type.
 * The caps field is consumed by the endpoint autoconfig code.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO("ep0",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep2",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep3-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep4",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep5",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),

#undef EP_INFO
};
#define ep0name ep_info[0].name
#define VBUS_POLL_TIMEOUT msecs_to_jiffies(1000)
#define at91_udp_read(udc, reg) \
__raw_readl((udc)->udp_baseaddr + (reg))
#define at91_udp_write(udc, reg, val) \
__raw_writel((val), (udc)->udp_baseaddr + (reg))
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
static const char debug_filename[] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
/*
 * proc_ep_show - dump one endpoint's state into the /proc seq_file.
 * @s: output seq_file
 * @ep: endpoint to describe
 *
 * Reads the endpoint CSR under udc->lock and decodes its bits, then
 * lists any queued requests.
 */
static void proc_ep_show(struct seq_file *s, struct at91_ep *ep)
{
	static char *types[] = {
		"control", "out-iso", "out-bulk", "out-int",
		"BOGUS", "in-iso", "in-bulk", "in-int"};

	u32 csr;
	struct at91_request *req;
	unsigned long flags;
	struct at91_udc *udc = ep->udc;

	spin_lock_irqsave(&udc->lock, flags);

	csr = __raw_readl(ep->creg);

	/* NOTE: not collecting per-endpoint irq statistics... */

	seq_printf(s, "\n");
	seq_printf(s, "%s, maxpacket %d %s%s %s%s\n",
			ep->ep.name, ep->ep.maxpacket,
			ep->is_in ? "in" : "out",
			ep->is_iso ? " iso" : "",
			ep->is_pingpong
				? (ep->fifo_bank ? "pong" : "ping")
				: "",
			ep->stopped ? " stopped" : "");
	/* decode the AT91 UDP CSR register bit-by-bit */
	seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n",
		csr,
		(csr & 0x07ff0000) >> 16,
		(csr & (1 << 15)) ? "enabled" : "disabled",
		(csr & (1 << 11)) ? "DATA1" : "DATA0",
		types[(csr & 0x700) >> 8],

		/* iff type is control then print current direction */
		(!(csr & 0x700))
			? ((csr & (1 << 7)) ? " IN" : " OUT")
			: "",
		(csr & (1 << 6)) ? " rxdatabk1" : "",
		(csr & (1 << 5)) ? " forcestall" : "",
		(csr & (1 << 4)) ? " txpktrdy" : "",
		(csr & (1 << 3)) ? " stallsent" : "",
		(csr & (1 << 2)) ? " rxsetup" : "",
		(csr & (1 << 1)) ? " rxdatabk0" : "",
		(csr & (1 << 0)) ? " txcomp" : "");
	if (list_empty (&ep->queue))
		seq_printf(s, "\t(queue empty)\n");

	else list_for_each_entry (req, &ep->queue, queue) {
		unsigned length = req->req.actual;

		seq_printf(s, "\treq %p len %d/%d buf %p\n",
				&req->req, length,
				req->req.length, req->req.buf);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
/*
 * proc_irq_show - decode an interrupt mask/status register value.
 * @s: output seq_file
 * @label: register name to print
 * @mask: raw register value (IMR or ISR)
 *
 * Prints the named global-event bits plus one "epN" entry per set
 * endpoint bit (bits 0..7).
 */
static void proc_irq_show(struct seq_file *s, const char *label, u32 mask)
{
	int i;

	seq_printf(s, "%s %04x:%s%s" FOURBITS, label, mask,
		(mask & (1 << 13)) ? " wakeup" : "",
		(mask & (1 << 12)) ? " endbusres" : "",

		(mask & (1 << 11)) ? " sofint" : "",
		(mask & (1 << 10)) ? " extrsm" : "",
		(mask & (1 << 9)) ? " rxrsm" : "",
		(mask & (1 << 8)) ? " rxsusp" : "");
	for (i = 0; i < 8; i++) {
		if (mask & (1 << i))
			seq_printf(s, " ep%d", i);
	}
	seq_printf(s, "\n");
}
/*
 * proc_udc_show - /proc/driver/udc seq_file body.
 * @s: output seq_file (s->private is the at91_udc)
 * @unused: unused
 *
 * Dumps overall controller state; register contents are only read (and
 * endpoints only shown) when the interface clocks are running.
 */
static int proc_udc_show(struct seq_file *s, void *unused)
{
	struct at91_udc *udc = s->private;
	struct at91_ep *ep;
	u32 tmp;

	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);

	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
		udc->vbus ? "present" : "off",
		udc->enabled
			? (udc->vbus ? "active" : "enabled")
			: "disabled",
		udc->gadget.is_selfpowered ? "self" : "VBUS",
		udc->suspended ? ", suspended" : "",
		udc->driver ? udc->driver->driver.name : "(none)");

	/* don't access registers when interface isn't clocked */
	if (!udc->clocked) {
		seq_printf(s, "(not clocked)\n");
		return 0;
	}

	tmp = at91_udp_read(udc, AT91_UDP_FRM_NUM);
	seq_printf(s, "frame %05x:%s%s frame=%d\n", tmp,
		(tmp & AT91_UDP_FRM_OK) ? " ok" : "",
		(tmp & AT91_UDP_FRM_ERR) ? " err" : "",
		(tmp & AT91_UDP_NUM));

	tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
	seq_printf(s, "glbstate %02x:%s" FOURBITS "\n", tmp,
		(tmp & AT91_UDP_RMWUPE) ? " rmwupe" : "",
		(tmp & AT91_UDP_RSMINPR) ? " rsminpr" : "",
		(tmp & AT91_UDP_ESR) ? " esr" : "",
		(tmp & AT91_UDP_CONFG) ? " confg" : "",
		(tmp & AT91_UDP_FADDEN) ? " fadden" : "");

	tmp = at91_udp_read(udc, AT91_UDP_FADDR);
	seq_printf(s, "faddr %03x:%s fadd=%d\n", tmp,
		(tmp & AT91_UDP_FEN) ? " fen" : "",
		(tmp & AT91_UDP_FADD));

	proc_irq_show(s, "imr   ", at91_udp_read(udc, AT91_UDP_IMR));
	proc_irq_show(s, "isr   ", at91_udp_read(udc, AT91_UDP_ISR));

	/* endpoint registers are only meaningful when the UDC is live */
	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
			if (ep->ep.desc)
				proc_ep_show(s, ep);
		}
	}
	return 0;
}
/* Create the /proc/driver/udc status file for this controller. */
static void create_debug_file(struct at91_udc *udc)
{
	udc->pde = proc_create_single_data(debug_filename, 0, NULL,
					   proc_udc_show, udc);
}
/* Remove the /proc/driver/udc entry, if it was ever created. */
static void remove_debug_file(struct at91_udc *udc)
{
	if (udc->pde)
		remove_proc_entry(debug_filename, NULL);
}
#else
static inline void create_debug_file(struct at91_udc *udc) {}
static inline void remove_debug_file(struct at91_udc *udc) {}
#endif
/*-------------------------------------------------------------------------*/
/*
 * done - complete a request and hand it back to the gadget driver.
 * @ep: endpoint the request was queued on
 * @req: the finished (or aborted) request
 * @status: completion status; only applied while the request is still
 *	marked -EINPROGRESS
 *
 * Caller holds udc->lock; it is dropped around the gadget driver's
 * completion callback (which may requeue) and reacquired afterwards.
 */
static void done(struct at91_ep *ep, struct at91_request *req, int status)
{
	unsigned stopped = ep->stopped;
	struct at91_udc *udc = ep->udc;

	list_del_init(&req->queue);
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	if (status && status != -ESHUTDOWN)
		VDBG("%s done %p, status %d\n", ep->ep.name, req, status);

	/* temporarily mark the ep stopped across the callback */
	ep->stopped = 1;
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
	ep->stopped = stopped;

	/* ep0 is always ready; other endpoints need a non-empty queue */
	if (list_empty(&ep->queue) && ep->int_mask != (1 << 0))
		at91_udp_write(udc, AT91_UDP_IDR, ep->int_mask);
}
/*-------------------------------------------------------------------------*/
/* bits indicating OUT fifo has data ready */
#define RX_DATA_READY (AT91_UDP_RX_DATA_BK0 | AT91_UDP_RX_DATA_BK1)
/*
* Endpoint FIFO CSR bits have a mix of bits, making it unsafe to just write
* back most of the value you just read (because of side effects, including
* bits that may change after reading and before writing).
*
* Except when changing a specific bit, always write values which:
* - clear SET_FX bits (setting them could change something)
* - set CLR_FX bits (clearing them could change something)
*
* There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE
* that shouldn't normally be changed.
*
* NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains,
* implying a need to wait for one write to complete (test relevant bits)
* before starting the next write. This shouldn't be an issue given how
* infrequently we write, except maybe for write-then-read idioms.
*/
#define SET_FX (AT91_UDP_TXPKTRDY)
#define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP \
| AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)
/* pull OUT packet data from the endpoint's fifo */
/*
 * read_fifo - pull OUT packet data from the endpoint's fifo.
 * @ep: OUT endpoint with data ready
 * @req: request being filled
 *
 * Drains one packet (per ping-pong bank) into the request buffer,
 * acknowledges the bank in the CSR (observing the SET_FX/CLR_FX write
 * discipline described above), and completes the request on a short
 * packet or a full buffer.  Returns nonzero when the request finished.
 */
static int read_fifo (struct at91_ep *ep, struct at91_request *req)
{
	u32 __iomem *creg = ep->creg;
	u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
	u32 csr;
	u8 *buf;
	unsigned int count, bufferspace, is_done;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	/*
	 * there might be nothing to read if ep_queue() calls us,
	 * or if we already emptied both pingpong buffers
	 */
rescan:
	csr = __raw_readl(creg);
	if ((csr & RX_DATA_READY) == 0)
		return 0;

	/* byte count lives in CSR bits [26:16] */
	count = (csr & AT91_UDP_RXBYTECNT) >> 16;
	if (count > ep->ep.maxpacket)
		count = ep->ep.maxpacket;
	if (count > bufferspace) {
		DBG("%s buffer overflow\n", ep->ep.name);
		req->req.status = -EOVERFLOW;
		count = bufferspace;
	}
	__raw_readsb(dreg, buf, count);

	/* release and swap pingpong mem bank */
	csr |= CLR_FX;
	if (ep->is_pingpong) {
		if (ep->fifo_bank == 0) {
			csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
			ep->fifo_bank = 1;
		} else {
			csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK1);
			ep->fifo_bank = 0;
		}
	} else
		csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
	__raw_writel(csr, creg);

	req->req.actual += count;
	/* a short packet, or a filled buffer, ends the transfer */
	is_done = (count < ep->ep.maxpacket);
	if (count == bufferspace)
		is_done = 1;

	PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count,
			is_done ? " (done)" : "");

	/*
	 * avoid extra trips through IRQ logic for packets already in
	 * the fifo ... maybe preventing an extra (expensive) OUT-NAK
	 */
	if (is_done)
		done(ep, req, 0);
	else if (ep->is_pingpong) {
		/*
		 * One dummy read to delay the code because of a HW glitch:
		 * CSR returns bad RXCOUNT when read too soon after updating
		 * RX_DATA_BK flags.
		 */
		csr = __raw_readl(creg);

		bufferspace -= count;
		buf += count;
		goto rescan;
	}

	return is_done;
}
/* load fifo for an IN packet */
/*
 * write_fifo - load the endpoint fifo for one IN packet.
 * @ep: IN endpoint to load
 * @req: request supplying the data (may yield a ZLP)
 *
 * Writes up to one maxpacket of data, sets TXPKTRDY (following the
 * SET_FX/CLR_FX CSR write discipline), and completes the request when
 * this was the final packet.  Returns nonzero when the request finished.
 */
static int write_fifo(struct at91_ep *ep, struct at91_request *req)
{
	u32 __iomem *creg = ep->creg;
	u32 csr = __raw_readl(creg);
	u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
	unsigned total, count, is_last;
	u8 *buf;

	/*
	 * TODO: allow for writing two packets to the fifo ... that'll
	 * reduce the amount of IN-NAKing, but probably won't affect
	 * throughput much. (Unlike preventing OUT-NAKing!)
	 */

	/*
	 * If ep_queue() calls us, the queue is empty and possibly in
	 * odd states like TXCOMP not yet cleared (we do it, saving at
	 * least one IRQ) or the fifo not yet being free. Those aren't
	 * issues normally (IRQ handler fast path).
	 */
	if (unlikely(csr & (AT91_UDP_TXCOMP | AT91_UDP_TXPKTRDY))) {
		if (csr & AT91_UDP_TXCOMP) {
			csr |= CLR_FX;
			csr &= ~(SET_FX | AT91_UDP_TXCOMP);
			__raw_writel(csr, creg);
			csr = __raw_readl(creg);
		}
		if (csr & AT91_UDP_TXPKTRDY)
			return 0;
	}

	buf = req->req.buf + req->req.actual;
	prefetch(buf);
	total = req->req.length - req->req.actual;
	if (ep->ep.maxpacket < total) {
		count = ep->ep.maxpacket;
		is_last = 0;
	} else {
		count = total;
		/* a full-size final packet needs a trailing ZLP if
		 * the request asked for one (req->zero)
		 */
		is_last = (count < ep->ep.maxpacket) || !req->req.zero;
	}

	/*
	 * Write the packet, maybe it's a ZLP.
	 *
	 * NOTE: incrementing req->actual before we receive the ACK means
	 * gadget driver IN bytecounts can be wrong in fault cases. That's
	 * fixable with PIO drivers like this one (save "count" here, and
	 * do the increment later on TX irq), but not for most DMA hardware.
	 *
	 * So all gadget drivers must accept that potential error. Some
	 * hardware supports precise fifo status reporting, letting them
	 * recover when the actual bytecount matters (e.g. for USB Test
	 * and Measurement Class devices).
	 */
	__raw_writesb(dreg, buf, count);
	csr &= ~SET_FX;
	csr |= CLR_FX | AT91_UDP_TXPKTRDY;
	__raw_writel(csr, creg);
	req->req.actual += count;

	PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count,
			is_last ? " (done)" : "");
	if (is_last)
		done(ep, req, 0);
	return is_last;
}
/*
 * Abort every request queued on "ep", completing each one with the
 * given status, and leave the endpoint stopped.
 */
static void nuke(struct at91_ep *ep, int status)
{
	ep->stopped = 1;

	if (list_empty(&ep->queue))
		return;

	VDBG("%s %s\n", __func__, ep->ep.name);
	do {
		struct at91_request *req;

		req = list_entry(ep->queue.next, struct at91_request, queue);
		done(ep, req, status);
	} while (!list_empty(&ep->queue));
}
/*-------------------------------------------------------------------------*/
/*
 * Configure and enable an endpoint per the given descriptor.
 * Validates type/maxpacket combinations supported by the hardware,
 * programs the endpoint's CSR, and resets its fifo.
 */
static int at91_ep_enable(struct usb_ep *_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
	struct at91_udc *udc;
	u16 maxpacket;
	u32 tmp;
	unsigned long flags;

	if (!_ep || !ep
			|| !desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| (maxpacket = usb_endpoint_maxp(desc)) == 0
			|| maxpacket > ep->maxpacket) {
		DBG("bad ep or descriptor\n");
		return -EINVAL;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		DBG("bogus device state\n");
		return -ESHUTDOWN;
	}

	/* per-type maxpacket constraints; bulk allows only power-of-two sizes */
	tmp = usb_endpoint_type(desc);
	switch (tmp) {
	case USB_ENDPOINT_XFER_CONTROL:
		DBG("only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		if (maxpacket > 64)
			goto bogus_max;
		break;
	case USB_ENDPOINT_XFER_BULK:
		switch (maxpacket) {
		case 8:
		case 16:
		case 32:
		case 64:
			goto ok;
		}
bogus_max:
		DBG("bogus maxpacket %d\n", maxpacket);
		return -EINVAL;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->is_pingpong) {
			DBG("iso requires double buffering\n");
			return -EINVAL;
		}
		break;
	}

ok:
	spin_lock_irqsave(&udc->lock, flags);

	/* initialize endpoint to match this descriptor */
	ep->is_in = usb_endpoint_dir_in(desc);
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
	ep->stopped = 0;
	/* build the CSR EPTYPE field: type code, +4 for IN, at bits 8.. */
	if (ep->is_in)
		tmp |= 0x04;
	tmp <<= 8;
	tmp |= AT91_UDP_EPEDS;
	__raw_writel(tmp, ep->creg);

	ep->ep.maxpacket = maxpacket;

	/*
	 * reset/init endpoint fifo.  NOTE:  leaves fifo_bank alone,
	 * since endpoint resets don't reset hw pingpong state.
	 */
	at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
	at91_udp_write(udc, AT91_UDP_RST_EP, 0);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * Disable an endpoint: abort queued requests, restore its pristine
 * software config, and (if the controller is clocked) reset the fifo
 * and clear the endpoint's CSR.  ep0 cannot be disabled.
 */
static int at91_ep_disable (struct usb_ep * _ep)
{
	struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
	struct at91_udc *udc = ep->udc;
	unsigned long flags;

	if (ep == &ep->udc->ep[0])
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	nuke(ep, -ESHUTDOWN);

	/* restore the endpoint's pristine config */
	ep->ep.desc = NULL;
	ep->ep.maxpacket = ep->maxpacket;

	/* reset fifos and endpoint; registers are only reachable when clocked */
	if (ep->udc->clocked) {
		at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
		at91_udp_write(udc, AT91_UDP_RST_EP, 0);
		__raw_writel(0, ep->creg);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * this is a PIO-only driver, so there's nothing
 * interesting for request or buffer allocation.
 */

/*
 * Allocate a request wrapper for this endpoint; returns the embedded
 * usb_request, or NULL on allocation failure.  Freed by
 * at91_ep_free_request().
 */
static struct usb_request *
at91_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct at91_request *req;

	/* kernel idiom: size the allocation from the pointer, not the type */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/* release a request allocated by at91_ep_alloc_request() */
static void at91_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct at91_request *req = container_of(_req, struct at91_request, req);

	/* the request must no longer be linked into an endpoint queue */
	BUG_ON(!list_empty(&req->queue));
	kfree(req);
}
/*
 * Queue a request on an endpoint.  If the endpoint queue is empty and
 * not stopped, the transfer is kickstarted immediately; ep0 gets extra
 * handling for control transfers (CONFG toggling, status-only stages).
 * Returns 0 on success or a negative errno.
 */
static int at91_ep_queue(struct usb_ep *_ep,
			struct usb_request *_req, gfp_t gfp_flags)
{
	struct at91_request *req;
	struct at91_ep *ep;
	struct at91_udc *udc;
	int status;
	unsigned long flags;

	req = container_of(_req, struct at91_request, req);
	ep = container_of(_ep, struct at91_ep, ep);

	if (!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)) {
		DBG("invalid request\n");
		return -EINVAL;
	}

	if (!_ep || (!ep->ep.desc && ep->ep.name != ep0name)) {
		DBG("invalid ep\n");
		return -EINVAL;
	}

	udc = ep->udc;

	if (!udc || !udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		DBG("invalid device\n");
		return -EINVAL;
	}

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* try to kickstart any empty and idle queue */
	if (list_empty(&ep->queue) && !ep->stopped) {
		int is_ep0;

		/*
		 * If this control request has a non-empty DATA stage, this
		 * will start that stage.  It works just like a non-control
		 * request (until the status stage starts, maybe early).
		 *
		 * If the data stage is empty, then this starts a successful
		 * IN/STATUS stage.  (Unsuccessful ones use set_halt.)
		 */
		is_ep0 = (ep->ep.name == ep0name);
		if (is_ep0) {
			u32 tmp;

			/* only valid while a SETUP is awaiting a response */
			if (!udc->req_pending) {
				status = -EINVAL;
				goto done;
			}

			/*
			 * defer changing CONFG until after the gadget driver
			 * reconfigures the endpoints.
			 */
			if (udc->wait_for_config_ack) {
				tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
				tmp ^= AT91_UDP_CONFG;
				VDBG("toggle config\n");
				at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
			}
			if (req->req.length == 0) {
ep0_in_status:
				/* zero-length DATA: start IN/STATUS at once */
				PACKET("ep0 in/status\n");
				status = 0;
				tmp = __raw_readl(ep->creg);
				tmp &= ~SET_FX;
				tmp |= CLR_FX | AT91_UDP_TXPKTRDY;
				__raw_writel(tmp, ep->creg);
				udc->req_pending = 0;
				goto done;
			}
		}

		if (ep->is_in)
			status = write_fifo(ep, req);
		else {
			status = read_fifo(ep, req);

			/* IN/STATUS stage is otherwise triggered by irq */
			if (status && is_ep0)
				goto ep0_in_status;
		}
	} else
		status = 0;

	/* requests not finished above stay queued, with the ep irq enabled */
	if (req && !status) {
		list_add_tail (&req->queue, &ep->queue);
		at91_udp_write(udc, AT91_UDP_IER, ep->int_mask);
	}
done:
	spin_unlock_irqrestore(&udc->lock, flags);
	return (status < 0) ? status : 0;
}
/*
 * Remove a queued request from an endpoint, completing it with
 * -ECONNRESET.  Fails if the request isn't actually queued here.
 */
static int at91_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct at91_ep *ep;
	struct at91_request *req = NULL, *iter;
	unsigned long flags;
	struct at91_udc *udc;

	ep = container_of(_ep, struct at91_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	udc = ep->udc;

	spin_lock_irqsave(&udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * Set or clear an endpoint halt (stall).  Not supported for iso
 * endpoints, and refused with -EAGAIN for IN endpoints that still
 * have pending data, to keep data-then-stall sequencing correct.
 */
static int at91_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
	struct at91_udc *udc = ep->udc;
	u32 __iomem *creg;
	u32 csr;
	unsigned long flags;
	int status = 0;

	if (!_ep || ep->is_iso || !ep->udc->clocked)
		return -EINVAL;

	creg = ep->creg;
	spin_lock_irqsave(&udc->lock, flags);

	csr = __raw_readl(creg);

	/*
	 * fail with still-busy IN endpoints, ensuring correct sequencing
	 * of data tx then stall.  note that the fifo rx bytecount isn't
	 * completely accurate as a tx bytecount.
	 */
	if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0))
		status = -EAGAIN;
	else {
		csr |= CLR_FX;
		csr &= ~SET_FX;
		if (value) {
			csr |= AT91_UDP_FORCESTALL;
			VDBG("halt %s\n", ep->ep.name);
		} else {
			/* clearing halt also resets the fifo/data toggle */
			at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
			at91_udp_write(udc, AT91_UDP_RST_EP, 0);
			csr &= ~AT91_UDP_FORCESTALL;
		}
		__raw_writel(csr, creg);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return status;
}
/* per-endpoint operations exported to the gadget core */
static const struct usb_ep_ops at91_ep_ops = {
	.enable		= at91_ep_enable,
	.disable	= at91_ep_disable,
	.alloc_request	= at91_ep_alloc_request,
	.free_request	= at91_ep_free_request,
	.queue		= at91_ep_queue,
	.dequeue	= at91_ep_dequeue,
	.set_halt	= at91_ep_set_halt,
	/* there's only imprecise fifo status reporting */
};
/*-------------------------------------------------------------------------*/
static int at91_get_frame(struct usb_gadget *gadget)
{
struct at91_udc *udc = to_udc(gadget);
if (!to_udc(gadget)->clocked)
return -EINVAL;
return at91_udp_read(udc, AT91_UDP_FRM_NUM) & AT91_UDP_NUM;
}
/*
 * Signal remote wakeup to the host, but only while suspended and only
 * if the host previously enabled it (ESR set in GLB_STAT).
 */
static int at91_wakeup(struct usb_gadget *gadget)
{
	struct at91_udc *udc = to_udc(gadget);
	u32 glbstate;
	unsigned long flags;

	DBG("%s\n", __func__ );
	spin_lock_irqsave(&udc->lock, flags);

	if (!udc->clocked || !udc->suspended)
		goto done;

	/* NOTE:  some "early versions" handle ESR differently ... */

	glbstate = at91_udp_read(udc, AT91_UDP_GLB_STAT);
	if (!(glbstate & AT91_UDP_ESR))
		goto done;
	glbstate |= AT91_UDP_ESR;
	at91_udp_write(udc, AT91_UDP_GLB_STAT, glbstate);

done:
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* reinit == restore initial software state */
static void udc_reinit(struct at91_udc *udc)
{
	u32 i;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
	/* this hardware can't reliably report a stalled ep0 to the core */
	udc->gadget.quirk_stall_not_supp = 1;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct at91_ep *ep = &udc->ep[i];

		/* ep0 stays off the gadget's endpoint list */
		if (i != 0)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		ep->ep.desc = NULL;
		ep->stopped = 0;
		ep->fifo_bank = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
		ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i);
		/* initialize one queue per endpoint */
		INIT_LIST_HEAD(&ep->queue);
	}
}
/*
 * Handle a USB bus reset: tear down all endpoint activity and notify
 * the gadget core.  Caller holds udc->lock; it is dropped around the
 * usb_gadget_udc_reset() callback.
 */
static void reset_gadget(struct at91_udc *udc)
{
	struct usb_gadget_driver *driver = udc->driver;
	int i;

	/* don't notify a driver that never saw a connection */
	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->suspended = 0;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct at91_ep *ep = &udc->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}

	udc_reinit(udc);
}
/*
 * Stop all USB activity (disconnect path).  Mirrors reset_gadget()
 * but invokes the driver's disconnect() callback instead of a reset
 * notification.  Caller holds udc->lock; dropped around the callback.
 */
static void stop_activity(struct at91_udc *udc)
{
	struct usb_gadget_driver *driver = udc->driver;
	int i;

	/* don't notify a driver that never saw a connection */
	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->suspended = 0;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct at91_ep *ep = &udc->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}

	udc_reinit(udc);
}
/* ungate the interface and function clocks; no-op when already on */
static void clk_on(struct at91_udc *udc)
{
	if (!udc->clocked) {
		udc->clocked = 1;
		clk_enable(udc->iclk);
		clk_enable(udc->fclk);
	}
}
/* gate both clocks; link speed becomes unknown while they're off */
static void clk_off(struct at91_udc *udc)
{
	if (udc->clocked) {
		udc->clocked = 0;
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		clk_disable(udc->fclk);
		clk_disable(udc->iclk);
	}
}
/*
 * activate/deactivate link with host; minimize power usage for
 * inactive links by cutting clocks and transceiver power.
 */
static void pullup(struct at91_udc *udc, int is_on)
{
	/* never present ourselves without a bound driver and VBUS */
	if (!udc->enabled || !udc->vbus)
		is_on = 0;
	DBG("%sactive\n", is_on ? "" : "in");

	if (is_on) {
		clk_on(udc);
		at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
		at91_udp_write(udc, AT91_UDP_TXVC, 0);
	} else {
		stop_activity(udc);
		at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
		/* disable the transceiver before gating the clocks */
		at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
		clk_off(udc);
	}

	/* board/SoC-specific pullup control, if any */
	if (udc->caps && udc->caps->pullup)
		udc->caps->pullup(udc, is_on);
}
/* vbus is here! turn everything on that's ready */
static int at91_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct at91_udc *udc = to_udc(gadget);
	unsigned long flags;

	/* VDBG("vbus %s\n", is_active ? "on" : "off"); */
	spin_lock_irqsave(&udc->lock, flags);
	udc->vbus = (is_active != 0);
	/* with no bound gadget driver, keep the pullup forced off */
	pullup(udc, udc->driver ? is_active : 0);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* gadget core request to (de)activate the D+ pullup */
static int at91_pullup(struct usb_gadget *gadget, int is_on)
{
	struct at91_udc *udc = to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* normalize to 0/1 and remember it as the "enabled" state */
	is_on = !!is_on;
	udc->enabled = is_on;
	pullup(udc, is_on);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* record whether the device reports itself as self-powered */
static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct at91_udc *udc = to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	gadget->is_selfpowered = !!is_on;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* forward declarations for the udc_start/udc_stop ops defined below */
static int at91_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver);
static int at91_stop(struct usb_gadget *gadget);

/* controller-level operations exported to the gadget core */
static const struct usb_gadget_ops at91_udc_ops = {
	.get_frame		= at91_get_frame,
	.wakeup			= at91_wakeup,
	.set_selfpowered	= at91_set_selfpowered,
	.vbus_session		= at91_vbus_session,
	.pullup			= at91_pullup,
	.udc_start		= at91_start,
	.udc_stop		= at91_stop,

	/*
	 * VBUS-powered devices may also want to support bigger
	 * power budgets after an appropriate SET_CONFIGURATION.
	 */
	/* .vbus_power = at91_vbus_power, */
};
/*-------------------------------------------------------------------------*/
/*
 * Service one non-control endpoint: ack completion/stall flags and
 * move data for the request at the head of its queue.  Returns the
 * value of write_fifo()/read_fifo() (nonzero when a request finished),
 * or 0 when there was nothing to do.
 */
static int handle_ep(struct at91_ep *ep)
{
	struct at91_request *req;
	u32 __iomem *creg = ep->creg;
	u32 csr = __raw_readl(creg);

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct at91_request, queue);
	else
		req = NULL;

	if (ep->is_in) {
		/* ack TX completion / stall-sent before refilling the fifo */
		if (csr & (AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)) {
			csr |= CLR_FX;
			csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP);
			__raw_writel(csr, creg);
		}
		if (req)
			return write_fifo(ep, req);

	} else {
		if (csr & AT91_UDP_STALLSENT) {
			/* STALLSENT bit == ISOERR */
			if (ep->is_iso && req)
				req->req.status = -EILSEQ;
			csr |= CLR_FX;
			csr &= ~(SET_FX | AT91_UDP_STALLSENT);
			__raw_writel(csr, creg);
			csr = __raw_readl(creg);
		}
		if (req && (csr & RX_DATA_READY))
			return read_fifo(ep, req);
	}
	return 0;
}
/* 8-byte SETUP packet, viewed as raw fifo bytes or a decoded request */
union setup {
	u8			raw[8];
	struct usb_ctrlrequest	r;
};
/*
 * Read and decode a SETUP packet on ep0.  A few standard requests that
 * touch hardware (address, configuration, device/endpoint features and
 * status) are handled right here; everything else is passed up to the
 * gadget driver's setup() callback.  Caller holds udc->lock; it is
 * dropped around that callback.
 */
static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
{
	u32 __iomem *creg = ep->creg;
	/* fifo data register sits at a fixed offset from the CSR */
	u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
	unsigned rxcount, i = 0;
	u32 tmp;
	union setup pkt;
	int status = 0;

	/* read and ack SETUP; hard-fail for bogus packets */
	rxcount = (csr & AT91_UDP_RXBYTECNT) >> 16;
	if (likely(rxcount == 8)) {
		while (rxcount--)
			pkt.raw[i++] = __raw_readb(dreg);
		/* program the ep0 data direction to match the request */
		if (pkt.r.bRequestType & USB_DIR_IN) {
			csr |= AT91_UDP_DIR;
			ep->is_in = 1;
		} else {
			csr &= ~AT91_UDP_DIR;
			ep->is_in = 0;
		}
	} else {
		/* REVISIT this happens sometimes under load; why?? */
		ERR("SETUP len %d, csr %08x\n", rxcount, csr);
		status = -EINVAL;
	}
	csr |= CLR_FX;
	csr &= ~(SET_FX | AT91_UDP_RXSETUP);
	__raw_writel(csr, creg);
	udc->wait_for_addr_ack = 0;
	udc->wait_for_config_ack = 0;
	ep->stopped = 0;
	if (unlikely(status != 0))
		goto stall;

#define w_index		le16_to_cpu(pkt.r.wIndex)
#define w_value		le16_to_cpu(pkt.r.wValue)
#define w_length	le16_to_cpu(pkt.r.wLength)

	VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
			pkt.r.bRequestType, pkt.r.bRequest,
			w_value, w_index, w_length);

	/*
	 * A few standard requests get handled here, ones that touch
	 * hardware ... notably for device and endpoint features.
	 */
	udc->req_pending = 1;
	csr = __raw_readl(creg);
	csr |= CLR_FX;
	csr &= ~SET_FX;
	switch ((pkt.r.bRequestType << 8) | pkt.r.bRequest) {

	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
			| USB_REQ_SET_ADDRESS:
		__raw_writel(csr | AT91_UDP_TXPKTRDY, creg);
		udc->addr = w_value;
		udc->wait_for_addr_ack = 1;
		udc->req_pending = 0;
		/* FADDR is set later, when we ack host STATUS */
		return;

	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
			| USB_REQ_SET_CONFIGURATION:
		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_CONFG;
		/* CONFG needs toggling only when its state must change */
		if (pkt.r.wValue)
			udc->wait_for_config_ack = (tmp == 0);
		else
			udc->wait_for_config_ack = (tmp != 0);
		if (udc->wait_for_config_ack)
			VDBG("wait for config\n");
		/* CONFG is toggled later, if gadget driver succeeds */
		break;

	/*
	 * Hosts may set or clear remote wakeup status, and
	 * devices may report they're VBUS powered.
	 */
	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
			| USB_REQ_GET_STATUS:
		tmp = (udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED);
		if (at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_ESR)
			tmp |= (1 << USB_DEVICE_REMOTE_WAKEUP);
		PACKET("get device status\n");
		/* two-byte little-endian status reply */
		__raw_writeb(tmp, dreg);
		__raw_writeb(0, dreg);
		goto write_in;
		/* then STATUS starts later, automatically */

	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
			| USB_REQ_SET_FEATURE:
		if (w_value != USB_DEVICE_REMOTE_WAKEUP)
			goto stall;
		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
		tmp |= AT91_UDP_ESR;
		at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
		goto succeed;

	case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
			| USB_REQ_CLEAR_FEATURE:
		if (w_value != USB_DEVICE_REMOTE_WAKEUP)
			goto stall;
		tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
		tmp &= ~AT91_UDP_ESR;
		at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
		goto succeed;

	/*
	 * Interfaces have no feature settings; this is pretty useless.
	 * we won't even insist the interface exists...
	 */
	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
			| USB_REQ_GET_STATUS:
		PACKET("get interface status\n");
		__raw_writeb(0, dreg);
		__raw_writeb(0, dreg);
		goto write_in;
		/* then STATUS starts later, automatically */

	case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
			| USB_REQ_SET_FEATURE:
	case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
			| USB_REQ_CLEAR_FEATURE:
		goto stall;

	/*
	 * Hosts may clear bulk/intr endpoint halt after the gadget
	 * driver sets it (not widely used); or set it (for testing)
	 */
	case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
			| USB_REQ_GET_STATUS:
		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
		/* NOTE(review): address taken before bounds check; never
		 * dereferenced unless the check below passes */
		ep = &udc->ep[tmp];
		if (tmp >= NUM_ENDPOINTS || (tmp && !ep->ep.desc))
			goto stall;

		/* direction in the request must match the endpoint's */
		if (tmp) {
			if ((w_index & USB_DIR_IN)) {
				if (!ep->is_in)
					goto stall;
			} else if (ep->is_in)
				goto stall;
		}
		PACKET("get %s status\n", ep->ep.name);
		if (__raw_readl(ep->creg) & AT91_UDP_FORCESTALL)
			tmp = (1 << USB_ENDPOINT_HALT);
		else
			tmp = 0;
		__raw_writeb(tmp, dreg);
		__raw_writeb(0, dreg);
		goto write_in;
		/* then STATUS starts later, automatically */

	case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
			| USB_REQ_SET_FEATURE:
		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
		ep = &udc->ep[tmp];
		if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
			goto stall;
		if (!ep->ep.desc || ep->is_iso)
			goto stall;
		if ((w_index & USB_DIR_IN)) {
			if (!ep->is_in)
				goto stall;
		} else if (ep->is_in)
			goto stall;

		tmp = __raw_readl(ep->creg);
		tmp &= ~SET_FX;
		tmp |= CLR_FX | AT91_UDP_FORCESTALL;
		__raw_writel(tmp, ep->creg);
		goto succeed;

	case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
			| USB_REQ_CLEAR_FEATURE:
		tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
		ep = &udc->ep[tmp];
		if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
			goto stall;
		/* ep0 halt clears itself */
		if (tmp == 0)
			goto succeed;
		if (!ep->ep.desc || ep->is_iso)
			goto stall;
		if ((w_index & USB_DIR_IN)) {
			if (!ep->is_in)
				goto stall;
		} else if (ep->is_in)
			goto stall;

		/* reset the fifo/data toggle along with clearing the stall */
		at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
		at91_udp_write(udc, AT91_UDP_RST_EP, 0);
		tmp = __raw_readl(ep->creg);
		tmp |= CLR_FX;
		tmp &= ~(SET_FX | AT91_UDP_FORCESTALL);
		__raw_writel(tmp, ep->creg);
		if (!list_empty(&ep->queue))
			handle_ep(ep);
		goto succeed;
	}

#undef w_value
#undef w_index
#undef w_length

	/* pass request up to the gadget driver */
	if (udc->driver) {
		spin_unlock(&udc->lock);
		status = udc->driver->setup(&udc->gadget, &pkt.r);
		spin_lock(&udc->lock);
	}
	else
		status = -ENODEV;
	if (status < 0) {
stall:
		VDBG("req %02x.%02x protocol STALL; stat %d\n",
				pkt.r.bRequestType, pkt.r.bRequest, status);
		csr |= AT91_UDP_FORCESTALL;
		__raw_writel(csr, creg);
		udc->req_pending = 0;
	}
	return;

succeed:
	/* immediate successful (IN) STATUS after zero length DATA */
	PACKET("ep0 in/status\n");
write_in:
	csr |= AT91_UDP_TXPKTRDY;
	__raw_writel(csr, creg);
	udc->req_pending = 0;
}
/*
 * Service ep0: handle stalls, new SETUP packets, IN acknowledgements
 * (including deferred SET_ADDRESS completion) and OUT data/status
 * stages of control transfers.  Called with udc->lock held.
 */
static void handle_ep0(struct at91_udc *udc)
{
	struct at91_ep *ep0 = &udc->ep[0];
	u32 __iomem *creg = ep0->creg;
	u32 csr = __raw_readl(creg);
	struct at91_request *req;

	if (unlikely(csr & AT91_UDP_STALLSENT)) {
		nuke(ep0, -EPROTO);
		udc->req_pending = 0;
		csr |= CLR_FX;
		csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_FORCESTALL);
		__raw_writel(csr, creg);
		VDBG("ep0 stalled\n");
		csr = __raw_readl(creg);
	}
	if (csr & AT91_UDP_RXSETUP) {
		/* a new SETUP cancels anything still in flight on ep0 */
		nuke(ep0, 0);
		udc->req_pending = 0;
		handle_setup(udc, ep0, csr);
		return;
	}

	if (list_empty(&ep0->queue))
		req = NULL;
	else
		req = list_entry(ep0->queue.next, struct at91_request, queue);

	/* host ACKed an IN packet that we sent */
	if (csr & AT91_UDP_TXCOMP) {
		csr |= CLR_FX;
		csr &= ~(SET_FX | AT91_UDP_TXCOMP);

		/* write more IN DATA? */
		if (req && ep0->is_in) {
			if (handle_ep(ep0))
				udc->req_pending = 0;

			/*
			 * Ack after:
			 *  - last IN DATA packet (including GET_STATUS)
			 *  - IN/STATUS for OUT DATA
			 *  - IN/STATUS for any zero-length DATA stage
			 * except for the IN DATA case, the host should send
			 * an OUT status later, which we'll ack.
			 */
		} else {
			udc->req_pending = 0;
			__raw_writel(csr, creg);

			/*
			 * SET_ADDRESS takes effect only after the STATUS
			 * (to the original address) gets acked.
			 */
			if (udc->wait_for_addr_ack) {
				u32 tmp;

				at91_udp_write(udc, AT91_UDP_FADDR,
						AT91_UDP_FEN | udc->addr);
				tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
				tmp &= ~AT91_UDP_FADDEN;
				/* address 0 means "unaddressed" again */
				if (udc->addr)
					tmp |= AT91_UDP_FADDEN;
				at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);

				udc->wait_for_addr_ack = 0;
				VDBG("address %d\n", udc->addr);
			}
		}
	}

	/* OUT packet arrived ... */
	else if (csr & AT91_UDP_RX_DATA_BK0) {
		csr |= CLR_FX;
		csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);

		/* OUT DATA stage */
		if (!ep0->is_in) {
			if (req) {
				if (handle_ep(ep0)) {
					/* send IN/STATUS */
					PACKET("ep0 in/status\n");
					csr = __raw_readl(creg);
					csr &= ~SET_FX;
					csr |= CLR_FX | AT91_UDP_TXPKTRDY;
					__raw_writel(csr, creg);
					udc->req_pending = 0;
				}
			} else if (udc->req_pending) {
				/*
				 * AT91 hardware has a hard time with this
				 * "deferred response" mode for control-OUT
				 * transfers.  (For control-IN it's fine.)
				 *
				 * The normal solution leaves OUT data in the
				 * fifo until the gadget driver is ready.
				 * We couldn't do that here without disabling
				 * the IRQ that tells about SETUP packets,
				 * e.g. when the host gets impatient...
				 *
				 * Working around it by copying into a buffer
				 * would almost be a non-deferred response,
				 * except that it wouldn't permit reliable
				 * stalling of the request.  Instead, demand
				 * that gadget drivers not use this mode.
				 */
				DBG("no control-OUT deferred responses!\n");
				__raw_writel(csr | AT91_UDP_FORCESTALL, creg);
				udc->req_pending = 0;
			}

		/* STATUS stage for control-IN; ack.  */
		} else {
			PACKET("ep0 out/status ACK\n");
			__raw_writel(csr, creg);

			/* "early" status stage */
			if (req)
				done(ep0, req, 0);
		}
	}
}
/*
 * Main UDC interrupt handler.  Rescans up to five times so back-to-back
 * events (reset, suspend/resume, per-endpoint traffic) are drained in
 * one invocation.  Temporarily ungates the clocks if they were off.
 */
static irqreturn_t at91_udc_irq (int irq, void *_udc)
{
	struct at91_udc *udc = _udc;
	u32 rescans = 5;
	int disable_clock = 0;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	if (!udc->clocked) {
		clk_on(udc);
		disable_clock = 1;
	}

	while (rescans--) {
		u32 status;

		/* only look at enabled interrupt sources */
		status = at91_udp_read(udc, AT91_UDP_ISR)
			& at91_udp_read(udc, AT91_UDP_IMR);
		if (!status)
			break;

		/* USB reset irq:  not maskable */
		if (status & AT91_UDP_ENDBUSRES) {
			at91_udp_write(udc, AT91_UDP_IDR, ~MINIMUS_INTERRUPTUS);
			at91_udp_write(udc, AT91_UDP_IER, MINIMUS_INTERRUPTUS);
			/* Atmel code clears this irq twice */
			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
			VDBG("end bus reset\n");
			udc->addr = 0;
			reset_gadget(udc);

			/* enable ep0 */
			at91_udp_write(udc, AT91_UDP_CSR(0),
					AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL);
			udc->gadget.speed = USB_SPEED_FULL;
			udc->suspended = 0;
			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_EP(0));

			/*
			 * NOTE:  this driver keeps clocks off unless the
			 * USB host is present.  That saves power, but for
			 * boards that don't support VBUS detection, both
			 * clocks need to be active most of the time.
			 */

		/* host initiated suspend (3+ms bus idle) */
		} else if (status & AT91_UDP_RXSUSP) {
			at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXSUSP);
			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXRSM);
			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXSUSP);
			/* VDBG("bus suspend\n"); */
			if (udc->suspended)
				continue;
			udc->suspended = 1;

			/*
			 * NOTE:  when suspending a VBUS-powered device, the
			 * gadget driver should switch into slow clock mode
			 * and then into standby to avoid drawing more than
			 * 500uA power (2500uA for some high-power configs).
			 */
			if (udc->driver && udc->driver->suspend) {
				spin_unlock(&udc->lock);
				udc->driver->suspend(&udc->gadget);
				spin_lock(&udc->lock);
			}

		/* host initiated resume */
		} else if (status & AT91_UDP_RXRSM) {
			at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
			at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXSUSP);
			at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
			/* VDBG("bus resume\n"); */
			if (!udc->suspended)
				continue;
			udc->suspended = 0;

			/*
			 * NOTE:  for a VBUS-powered device, the gadget driver
			 * would normally want to switch out of slow clock
			 * mode into normal mode.
			 */
			if (udc->driver && udc->driver->resume) {
				spin_unlock(&udc->lock);
				udc->driver->resume(&udc->gadget);
				spin_lock(&udc->lock);
			}

		/* endpoint IRQs are cleared by handling them */
		} else {
			int i;
			unsigned mask = 1;
			struct at91_ep *ep = &udc->ep[1];

			/* bit N of status flags endpoint N */
			if (status & mask)
				handle_ep0(udc);
			for (i = 1; i < NUM_ENDPOINTS; i++) {
				mask <<= 1;
				if (status & mask)
					handle_ep(ep);
				ep++;
			}
		}
	}

	if (disable_clock)
		clk_off(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/* propagate a changed VBUS level to the session handler */
static void at91_vbus_update(struct at91_udc *udc, unsigned value)
{
	if (value == udc->vbus)
		return;
	at91_vbus_session(&udc->gadget, value);
}
/* GPIO interrupt on the VBUS sense pin (non-polled boards) */
static irqreturn_t at91_vbus_irq(int irq, void *_udc)
{
	struct at91_udc *udc = _udc;

	/* vbus needs at least brief debouncing */
	udelay(10);
	at91_vbus_update(udc, gpiod_get_value(udc->board.vbus_pin));

	return IRQ_HANDLED;
}
/*
 * Workqueue half of VBUS polling: sample the (possibly sleeping) GPIO
 * and re-arm the poll timer.
 */
static void at91_vbus_timer_work(struct work_struct *work)
{
	struct at91_udc *udc = container_of(work, struct at91_udc,
					    vbus_timer_work);

	at91_vbus_update(udc, gpiod_get_value_cansleep(udc->board.vbus_pin));

	if (!timer_pending(&udc->vbus_timer))
		mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
}
/* periodic VBUS poll; defers the GPIO read to process context */
static void at91_vbus_timer(struct timer_list *t)
{
	struct at91_udc *udc = from_timer(udc, t, vbus_timer);

	/*
	 * If we are polling vbus it is likely that the gpio is on an
	 * bus such as i2c or spi which may sleep, so schedule some work
	 * to read the vbus gpio
	 */
	schedule_work(&udc->vbus_timer_work);
}
/* bind a gadget driver to the controller (udc_start op) */
static int at91_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct at91_udc *udc = container_of(gadget, struct at91_udc, gadget);

	udc->driver = driver;
	udc->gadget.dev.of_node = udc->pdev->dev.of_node;
	udc->enabled = 1;
	udc->gadget.is_selfpowered = 1;

	return 0;
}
/* unbind the gadget driver: mask all UDC interrupts and forget it */
static int at91_stop(struct usb_gadget *gadget)
{
	struct at91_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct at91_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);
	udc->enabled = 0;
	at91_udp_write(udc, AT91_UDP_IDR, ~0);
	spin_unlock_irqrestore(&udc->lock, flags);

	udc->driver = NULL;

	return 0;
}
/*-------------------------------------------------------------------------*/
/* force disconnect on reboot so the host sees the device go away */
static void at91udc_shutdown(struct platform_device *dev)
{
	struct at91_udc *udc = platform_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* reuse the drvdata pointer fetched above rather than re-fetching */
	pullup(udc, 0);
	spin_unlock_irqrestore(&udc->lock, flags);
}
/*
 * at91rm9200 variant setup: per-endpoint maxpacket limits, plus the
 * board-level D+ pullup GPIO (required on this SoC).
 */
static int at91rm9200_udc_init(struct at91_udc *udc)
{
	struct at91_ep *ep;
	int i;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];

		switch (i) {
		case 0:
		case 3:
			ep->maxpacket = 8;
			break;
		case 1 ... 2:
			ep->maxpacket = 64;
			break;
		case 4 ... 5:
			ep->maxpacket = 256;
			break;
		}
	}

	if (!udc->board.pullup_pin) {
		DBG("no D+ pullup?\n");
		return -ENODEV;
	}

	/* start with the pullup off (respecting active-low wiring) */
	gpiod_direction_output(udc->board.pullup_pin,
			       gpiod_is_active_low(udc->board.pullup_pin));

	return 0;
}
/* rm9200: the D+ pullup is driven by a board GPIO */
static void at91rm9200_udc_pullup(struct at91_udc *udc, int is_on)
{
	gpiod_set_value(udc->board.pullup_pin, is_on);
}
/* SoC-specific hooks for the at91rm9200 */
static const struct at91_udc_caps at91rm9200_udc_caps = {
	.init = at91rm9200_udc_init,
	.pullup = at91rm9200_udc_pullup,
};
/* at91sam9260 variant setup: per-endpoint maxpacket limits only */
static int at91sam9260_udc_init(struct at91_udc *udc)
{
	struct at91_ep *ep;
	int i;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];

		switch (i) {
		case 0 ... 3:
			ep->maxpacket = 64;
			break;
		case 4 ... 5:
			ep->maxpacket = 512;
			break;
		}
	}

	return 0;
}
/* sam9260: the D+ pullup is the PUON bit in the TXVC register */
static void at91sam9260_udc_pullup(struct at91_udc *udc, int is_on)
{
	u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC);

	txvc &= ~AT91_UDP_TXVC_PUON;
	if (is_on)
		txvc |= AT91_UDP_TXVC_PUON;
	at91_udp_write(udc, AT91_UDP_TXVC, txvc);
}
/* SoC-specific hooks for the at91sam9260 */
static const struct at91_udc_caps at91sam9260_udc_caps = {
	.init = at91sam9260_udc_init,
	.pullup = at91sam9260_udc_pullup,
};
/*
 * at91sam9261 variant setup: maxpacket limits plus a handle on the bus
 * matrix syscon, which controls the pullup on this SoC.
 */
static int at91sam9261_udc_init(struct at91_udc *udc)
{
	struct at91_ep *ep;
	int i;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];

		switch (i) {
		case 0:
			ep->maxpacket = 8;
			break;
		case 1 ... 3:
			ep->maxpacket = 64;
			break;
		case 4 ... 5:
			ep->maxpacket = 256;
			break;
		}
	}

	udc->matrix = syscon_regmap_lookup_by_phandle(udc->pdev->dev.of_node,
						      "atmel,matrix");
	return PTR_ERR_OR_ZERO(udc->matrix);
}
/* sam9261: the D+ pullup lives in the bus matrix USBPUCR register */
static void at91sam9261_udc_pullup(struct at91_udc *udc, int is_on)
{
	u32 usbpucr = is_on ? AT91_MATRIX_USBPUCR_PUON : 0;

	regmap_update_bits(udc->matrix, AT91SAM9261_MATRIX_USBPUCR,
			   AT91_MATRIX_USBPUCR_PUON, usbpucr);
}
/* SoC-specific hooks for the at91sam9261 */
static const struct at91_udc_caps at91sam9261_udc_caps = {
	.init = at91sam9261_udc_init,
	.pullup = at91sam9261_udc_pullup,
};
/* at91sam9263 variant setup: ep0-3 take 64-byte packets, ep4-5 256 */
static int at91sam9263_udc_init(struct at91_udc *udc)
{
	struct at91_ep *ep;
	int i;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];

		switch (i) {
		case 0 ... 3:
			ep->maxpacket = 64;
			break;
		case 4 ... 5:
			ep->maxpacket = 256;
			break;
		}
	}

	return 0;
}
/* SoC-specific hooks for the at91sam9263 (pullup works like sam9260) */
static const struct at91_udc_caps at91sam9263_udc_caps = {
	.init = at91sam9263_udc_init,
	.pullup = at91sam9260_udc_pullup,
};
/* devicetree match table: one entry per supported SoC variant */
static const struct of_device_id at91_udc_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-udc",
		.data = &at91rm9200_udc_caps,
	},
	{
		.compatible = "atmel,at91sam9260-udc",
		.data = &at91sam9260_udc_caps,
	},
	{
		.compatible = "atmel,at91sam9261-udc",
		.data = &at91sam9261_udc_caps,
	},
	{
		.compatible = "atmel,at91sam9263-udc",
		.data = &at91sam9263_udc_caps,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, at91_udc_dt_ids);
/*
 * Parse the devicetree node: optional VBUS-polling flag, optional VBUS
 * and pullup GPIOs (both treated as absent on lookup failure), and the
 * SoC-variant capabilities from the match table.
 */
static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
{
	struct at91_udc_data *board = &udc->board;
	const struct of_device_id *match;
	u32 val;

	if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
		board->vbus_polled = 1;

	board->vbus_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
						 "atmel,vbus", 0, GPIOD_IN,
						 "udc_vbus");
	if (IS_ERR(board->vbus_pin))
		board->vbus_pin = NULL;

	board->pullup_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
						   "atmel,pullup", 0,
						   GPIOD_ASIS, "udc_pullup");
	if (IS_ERR(board->pullup_pin))
		board->pullup_pin = NULL;

	match = of_match_node(at91_udc_dt_ids, np);
	if (match)
		udc->caps = match->data;
}
/*
 * Probe: allocate state, parse DT, map registers, run per-SoC init,
 * acquire and sanity-configure clocks, quiesce the UDP block (it may
 * have been used by the bootloader), hook up UDC and VBUS interrupts,
 * then register the gadget.
 */
static int at91udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct at91_udc *udc;
	int retval;
	struct at91_ep *ep;
	int i;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	/* init software state */
	udc->gadget.dev.parent = dev;
	at91udc_of_init(udc, pdev->dev.of_node);
	udc->pdev = pdev;
	udc->enabled = 0;
	spin_lock_init(&udc->lock);

	udc->gadget.ops = &at91_udc_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	udc->gadget.name = driver_name;
	udc->gadget.dev.init_name = "gadget";

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->ep.name = ep_info[i].name;
		ep->ep.caps = ep_info[i].caps;
		ep->ep.ops = &at91_ep_ops;
		ep->udc = udc;
		ep->int_mask = BIT(i);
		/* all endpoints except 0 and 3 are double-buffered */
		if (i != 0 && i != 3)
			ep->is_pingpong = 1;
	}

	udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->udp_baseaddr))
		return PTR_ERR(udc->udp_baseaddr);

	/* per-SoC fixups (FIFO sizes, matrix regmap, ...) */
	if (udc->caps && udc->caps->init) {
		retval = udc->caps->init(udc);
		if (retval)
			return retval;
	}

	udc_reinit(udc);

	/* get interface and function clocks */
	udc->iclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(udc->iclk))
		return PTR_ERR(udc->iclk);
	udc->fclk = devm_clk_get(dev, "hclk");
	if (IS_ERR(udc->fclk))
		return PTR_ERR(udc->fclk);

	/* don't do anything until we have both gadget driver and VBUS */
	clk_set_rate(udc->fclk, 48000000);
	retval = clk_prepare(udc->fclk);
	if (retval)
		return retval;
	retval = clk_prepare_enable(udc->iclk);
	if (retval)
		goto err_unprepare_fclk;

	/* disable the transceiver and mask everything while quiescing */
	at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
	at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
	/* Clear all pending interrupts - UDP may be used by bootloader. */
	at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
	/* keep iclk prepared but gated until the gadget connects */
	clk_disable(udc->iclk);

	/* request UDC and maybe VBUS irqs */
	udc->udp_irq = retval = platform_get_irq(pdev, 0);
	if (retval < 0)
		goto err_unprepare_iclk;
	retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
				  driver_name, udc);
	if (retval) {
		DBG("request irq %d failed\n", udc->udp_irq);
		goto err_unprepare_iclk;
	}

	if (udc->board.vbus_pin) {
		gpiod_direction_input(udc->board.vbus_pin);

		/*
		 * Get the initial state of VBUS - we cannot expect
		 * a pending interrupt.
		 */
		udc->vbus = gpiod_get_value_cansleep(udc->board.vbus_pin);
		if (udc->board.vbus_polled) {
			/* GPIO can't interrupt: poll VBUS from a timer */
			INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
			timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
			mod_timer(&udc->vbus_timer,
				  jiffies + VBUS_POLL_TIMEOUT);
		} else {
			retval = devm_request_irq(dev,
					gpiod_to_irq(udc->board.vbus_pin),
					at91_vbus_irq, 0, driver_name, udc);
			if (retval) {
				DBG("request vbus irq %d failed\n",
				    desc_to_gpio(udc->board.vbus_pin));
				goto err_unprepare_iclk;
			}
		}
	} else {
		DBG("no VBUS detection, assuming always-on\n");
		udc->vbus = 1;
	}

	retval = usb_add_gadget_udc(dev, &udc->gadget);
	if (retval)
		goto err_unprepare_iclk;
	dev_set_drvdata(dev, udc);
	device_init_wakeup(dev, 1);
	create_debug_file(udc);

	INFO("%s version %s\n", driver_name, DRIVER_VERSION);
	return 0;

err_unprepare_iclk:
	clk_unprepare(udc->iclk);
err_unprepare_fclk:
	clk_unprepare(udc->fclk);
	DBG("%s probe failed, %d\n", driver_name, retval);
	return retval;
}
/*
 * Remove: unregister the gadget, force-disconnect from the host, and
 * unprepare the clocks (IRQs and memory are devm-managed).
 */
static int at91udc_remove(struct platform_device *pdev)
{
	struct at91_udc *udc = platform_get_drvdata(pdev);
	unsigned long flags;

	DBG("remove\n");

	usb_del_gadget_udc(&udc->gadget);
	/* a bound gadget driver should already have been unbound */
	if (udc->driver)
		return -EBUSY;

	/* drop the D+ pullup under the lock so irq paths see it */
	spin_lock_irqsave(&udc->lock, flags);
	pullup(udc, 0);
	spin_unlock_irqrestore(&udc->lock, flags);

	device_init_wakeup(&pdev->dev, 0);
	remove_debug_file(udc);
	clk_unprepare(udc->fclk);
	clk_unprepare(udc->iclk);
	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: either keep the link up as a wakeup source, or force a
 * disconnect when wakeup isn't possible (no bound driver, enumeration
 * in progress, or the SoC is entering slow-clock mode).
 */
static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct at91_udc *udc = platform_get_drvdata(pdev);
	int wake = udc->driver && device_may_wakeup(&pdev->dev);
	unsigned long flags;

	/* Unless we can act normally to the host (letting it wake us up
	 * whenever it has work for us) force disconnect.  Wakeup requires
	 * PLLB for USB events (signaling for reset, wakeup, or incoming
	 * tokens) and VBUS irqs (on systems which support them).
	 */
	if ((!udc->suspended && udc->addr)
			|| !wake
			|| at91_suspend_entering_slow_clock()) {
		spin_lock_irqsave(&udc->lock, flags);
		pullup(udc, 0);
		wake = 0;
		spin_unlock_irqrestore(&udc->lock, flags);
	} else
		enable_irq_wake(udc->udp_irq);

	udc->active_suspend = wake;
	/* a (non-polled) VBUS irq must also be wake-capable */
	if (udc->board.vbus_pin && !udc->board.vbus_polled && wake)
		enable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
	return 0;
}
/*
 * Resume: undo the wake enables from suspend; if we had forced a
 * disconnect, re-enable the pull-up so the host re-enumerates us.
 */
static int at91udc_resume(struct platform_device *pdev)
{
	struct at91_udc *udc = platform_get_drvdata(pdev);
	unsigned long flags;

	if (udc->board.vbus_pin && !udc->board.vbus_polled &&
	    udc->active_suspend)
		disable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));

	/* maybe reconnect to host; if so, clocks on */
	if (udc->active_suspend)
		disable_irq_wake(udc->udp_irq);
	else {
		spin_lock_irqsave(&udc->lock, flags);
		pullup(udc, 1);
		spin_unlock_irqrestore(&udc->lock, flags);
	}
	return 0;
}
#else
#define at91udc_suspend NULL
#define at91udc_resume NULL
#endif
/* .probe comes from module_platform_driver_probe() (device not hotpluggable) */
static struct platform_driver at91_udc_driver = {
	.remove = at91udc_remove,
	.shutdown = at91udc_shutdown,
	.suspend = at91udc_suspend,
	.resume = at91udc_resume,
	.driver = {
		.name = driver_name,
		.of_match_table = at91_udc_dt_ids,
	},
};

module_platform_driver_probe(at91_udc_driver, at91udc_probe);

MODULE_DESCRIPTION("AT91 udc driver");
MODULE_AUTHOR("Thomas Rathbone, David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_udc");
| linux-master | drivers/usb/gadget/udc/at91_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* omap_udc.c -- for OMAP full speed udc; most chips support OTG.
*
* Copyright (C) 2004 Texas Instruments, Inc.
* Copyright (C) 2004-2005 David Brownell
*
* OMAP2 & DMA support by Kyungmin Park <[email protected]>
*/
#undef DEBUG
#undef VERBOSE
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <asm/mach-types.h>
#include <linux/omap-dma.h>
#include <linux/platform_data/usb-omap1.h>
#include <linux/soc/ti/omap1-usb.h>
#include <linux/soc/ti/omap1-soc.h>
#include <linux/soc/ti/omap1-io.h>
#include "omap_udc.h"
#undef USB_TRACE
/* bulk DMA seems to be behaving for both IN and OUT */
#define USE_DMA
/* ISO too */
#define USE_ISO
#define DRIVER_DESC "OMAP UDC driver"
#define DRIVER_VERSION "4 October 2004"
#define OMAP_DMA_USB_W2FC_TX0 29
#define OMAP_DMA_USB_W2FC_RX0 26
/*
* The OMAP UDC needs _very_ early endpoint setup: before enabling the
* D+ pullup to allow enumeration. That's too early for the gadget
* framework to use from usb_endpoint_enable(), which happens after
* enumeration as part of activating an interface. (But if we add an
* optional new "UDC not yet running" state to the gadget driver model,
* even just during driver binding, the endpoint autoconfig logic is the
* natural spot to manufacture new endpoints.)
*
* So instead of using endpoint enable calls to control the hardware setup,
* this driver defines a "fifo mode" parameter. It's used during driver
* initialization to choose among a set of pre-defined endpoint configs.
* See omap_udc_setup() for available modes, or to add others. That code
* lives in an init section, so use this driver as a module if you need
* to change the fifo mode after the kernel boots.
*
* Gadget drivers normally ignore endpoints they don't care about, and
* won't include them in configuration descriptors. That means only
* misbehaving hosts would even notice they exist.
*/
#ifdef USE_ISO
static unsigned fifo_mode = 3;
#else
static unsigned fifo_mode;
#endif
/* "modprobe omap_udc fifo_mode=42", or else as a kernel
* boot parameter "omap_udc:fifo_mode=42"
*/
module_param(fifo_mode, uint, 0);
MODULE_PARM_DESC(fifo_mode, "endpoint configuration");
#ifdef USE_DMA
static bool use_dma = 1;
/* "modprobe omap_udc use_dma=y", or else as a kernel
* boot parameter "omap_udc:use_dma=y"
*/
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable DMA");
#else /* !USE_DMA */
/* save a bit of code */
#define use_dma 0
#endif /* !USE_DMA */
static const char driver_name[] = "omap_udc";
static const char driver_desc[] = DRIVER_DESC;
/*-------------------------------------------------------------------------*/
/* there's a notion of "current endpoint" for modifying endpoint
* state, and PIO access to its FIFO.
*/
/* Latch the "current endpoint" (number + direction) into UDC_EP_NUM. */
static void use_ep(struct omap_ep *ep, u16 select)
{
	u16 epnum = ep->bEndpointAddress & 0x0f;
	u16 dir = (ep->bEndpointAddress & USB_DIR_IN) ? UDC_EP_DIR : 0;

	omap_writew(epnum | dir | select, UDC_EP_NUM);
	/* when select, MUST deselect later !! */
}
/* Clear the endpoint-select bit set by use_ep(ep, UDC_EP_SEL). */
static inline void deselect_ep(void)
{
	omap_writew(omap_readw(UDC_EP_NUM) & ~UDC_EP_SEL, UDC_EP_NUM);
	/* 6 wait states before TX will happen */
}
static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops.enable: validate the descriptor against the fixed hardware
 * endpoint, reset endpoint state, clear any halt, and (for bulk) try to
 * claim a DMA channel.  PIO OUT endpoints are primed to receive.
 */
static int omap_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_udc *udc;
	unsigned long flags;
	u16 maxp;

	/* catch various bogus parameters */
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->maxpacket < usb_endpoint_maxp(desc)) {
		DBG("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}
	maxp = usb_endpoint_maxp(desc);
	/* bulk must use exactly the hardware FIFO size */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& maxp != ep->maxpacket)
			|| usb_endpoint_maxp(desc) > ep->maxpacket
			|| !desc->wMaxPacketSize) {
		DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

#ifdef USE_ISO
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
				&& desc->bInterval != 1)) {
		/* hardware wants period = 1; USB allows 2^(Interval-1) */
		DBG("%s, unsupported ISO period %dms\n", _ep->name,
				1 << (desc->bInterval - 1));
		return -EDOM;
	}
#else
	if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DBG("%s, ISO nyet\n", _ep->name);
		return -EDOM;
	}
#endif

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DBG("%s, %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		DBG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&udc->lock, flags);

	ep->ep.desc = desc;
	ep->irqs = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = maxp;

	/* set endpoint to initial state */
	ep->dma_channel = 0;
	ep->has_dma = 0;
	ep->lch = -1;
	use_ep(ep, UDC_EP_SEL);
	omap_writew(udc->clr_halt, UDC_CTRL);
	ep->ackwait = 0;
	deselect_ep();

	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		list_add(&ep->iso, &udc->iso);

	/* maybe assign a DMA channel to this endpoint */
	if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
		/* FIXME ISO can dma, but prefers first channel */
		dma_channel_claim(ep, 0);

	/* PIO OUT may RX packets */
	if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
			&& !ep->has_dma
			&& !(ep->bEndpointAddress & USB_DIR_IN)) {
		omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
		ep->ackwait = 1 + ep->double_buf;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	VDBG("%s enabled\n", _ep->name);
	return 0;
}
static void nuke(struct omap_ep *, int status);
/*
 * usb_ep_ops.disable: tear the endpoint down, failing all queued
 * requests with -ESHUTDOWN and halting the hardware endpoint.
 */
static int omap_ep_disable(struct usb_ep *_ep)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	unsigned long flags;

	if (!_ep || !ep->ep.desc) {
		DBG("%s, %s not enabled\n", __func__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->ep.desc = NULL;
	/* complete everything still queued with -ESHUTDOWN */
	nuke(ep, -ESHUTDOWN);
	ep->ep.maxpacket = ep->maxpacket;
	ep->has_dma = 0;
	omap_writew(UDC_SET_HALT, UDC_CTRL);
	/* safe even if the ep was never on the iso list (list_del_init) */
	list_del_init(&ep->iso);
	del_timer(&ep->timer);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	VDBG("%s disabled\n", _ep->name);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.alloc_request: allocate a zeroed request wrapper. */
static struct usb_request *
omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct omap_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (req) {
		INIT_LIST_HEAD(&req->queue);
		return &req->req;
	}
	return NULL;
}
/* usb_ep_ops.free_request: release a request from omap_alloc_request(). */
static void
omap_free_request(struct usb_ep *ep, struct usb_request *_req)
{
	kfree(container_of(_req, struct omap_req, req));
}
/*-------------------------------------------------------------------------*/
/*
 * Complete a request: unlink it, fix up its status, unmap any DMA
 * buffer, and invoke the gadget's completion callback with the udc
 * lock dropped (callers hold it).
 */
static void
done(struct omap_ep *ep, struct omap_req *req, int status)
{
	struct omap_udc *udc = ep->udc;
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (use_dma && ep->has_dma)
		usb_gadget_unmap_request(&udc->gadget, &req->req,
				(ep->bEndpointAddress & USB_DIR_IN));

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG("complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&ep->udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
#define UDC_FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
#define UDC_FIFO_UNWRITABLE (UDC_EP_HALTED | UDC_FIFO_FULL)
#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
/*
 * Copy up to @max bytes of the request into the (pre-selected) TX FIFO,
 * 16 bits at a time when the buffer is even-aligned.  Advances
 * req->req.actual and returns the number of bytes written.
 */
static inline int
write_packet(u8 *buf, struct omap_req *req, unsigned max)
{
	unsigned count = min(req->req.length - req->req.actual, max);
	unsigned remaining = count;

	req->req.actual += count;

	if (likely((((int)buf) & 1) == 0)) {
		u16 *wp = (u16 *)buf;

		while (remaining >= 2) {
			omap_writew(*wp++, UDC_DATA);
			remaining -= 2;
		}
		buf = (u8 *)wp;
	}
	while (remaining--)
		omap_writeb(*buf++, UDC_DATA);
	return count;
}
/* FIXME change r/w fifo calling convention */
/* return: 0 = still running, 1 = completed, negative = errno */
/*
 * PIO write of one packet to the currently-selected IN endpoint FIFO.
 * Returns 1 and completes the request once the last (possibly short or
 * zero-length) packet has been loaded; 0 while more packets remain.
 */
static int write_fifo(struct omap_ep *ep, struct omap_req *req)
{
	u8 *buf;
	unsigned count;
	int is_last;
	u16 ep_stat;

	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	/* PIO-IN isn't double buffered except for iso */
	ep_stat = omap_readw(UDC_STAT_FLG);
	if (ep_stat & UDC_FIFO_UNWRITABLE)
		return 0;

	count = ep->ep.maxpacket;
	count = write_packet(buf, req, count);
	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
	ep->ackwait = 1;

	/* last packet is often short (sometimes a zlp) */
	if (count != ep->ep.maxpacket)
		is_last = 1;
	else if (req->req.length == req->req.actual
			&& !req->req.zero)
		is_last = 1;
	else
		is_last = 0;

	/* NOTE: requests complete when all IN data is in a
	 * FIFO (or sometimes later, if a zlp was needed).
	 * Use usb_ep_fifo_status() where needed.
	 */
	if (is_last)
		done(ep, req, 0);
	return is_last;
}
/*
 * Copy up to @avail bytes from the (pre-selected) RX FIFO into the
 * request buffer, 16 bits at a time when the buffer is even-aligned.
 * Advances req->req.actual and returns the number of bytes read.
 */
static inline int
read_packet(u8 *buf, struct omap_req *req, unsigned avail)
{
	unsigned count = min(req->req.length - req->req.actual, avail);
	unsigned remaining = count;

	req->req.actual += count;

	if (likely((((int)buf) & 1) == 0)) {
		u16 *wp = (u16 *)buf;

		while (remaining >= 2) {
			*wp++ = omap_readw(UDC_DATA);
			remaining -= 2;
		}
		buf = (u8 *)wp;
	}
	while (remaining--)
		*buf++ = omap_readb(UDC_DATA);
	return count;
}
/* return: 0 = still running, 1 = queue empty, negative = errno */
/*
 * PIO read of one packet from the currently-selected OUT endpoint FIFO.
 * A short packet (or an exactly-filled request) completes the request;
 * overflow data beyond the request buffer is drained and flagged with
 * -EOVERFLOW.  Returns 1 when the request completed, else 0.
 */
static int read_fifo(struct omap_ep *ep, struct omap_req *req)
{
	u8 *buf;
	unsigned count, avail;
	int is_last;

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	for (;;) {
		u16 ep_stat = omap_readw(UDC_STAT_FLG);

		is_last = 0;
		if (ep_stat & FIFO_EMPTY) {
			if (!ep->double_buf)
				break;
			/* double-buffered: remember to re-enable the fifo */
			ep->fnf = 1;
		}
		if (ep_stat & UDC_EP_HALTED)
			break;

		if (ep_stat & UDC_FIFO_FULL)
			avail = ep->ep.maxpacket;
		else {
			/* partial fill: byte count comes from RXFSTAT */
			avail = omap_readw(UDC_RXFSTAT);
			ep->fnf = ep->double_buf;
		}
		count = read_packet(buf, req, avail);

		/* partial packet reads may not be errors */
		if (count < ep->ep.maxpacket) {
			is_last = 1;
			/* overflowed this request?  flush extra data */
			if (count != avail) {
				req->req.status = -EOVERFLOW;
				avail -= count;
				while (avail--)
					omap_readw(UDC_DATA);
			}
		} else if (req->req.length == req->req.actual)
			is_last = 1;
		else
			is_last = 0;

		/* ep0 handles its own completion elsewhere */
		if (!ep->bEndpointAddress)
			break;
		if (is_last)
			done(ep, req, 0);
		break;
	}
	return is_last;
}
/*-------------------------------------------------------------------------*/
/*
 * Bytes already moved by an IN (tx) DMA that started at @start, derived
 * from the channel's current source position.  Returns 0 when nothing
 * moved or when the count can't be trusted (omap15xx).
 */
static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
{
	dma_addr_t end;

	/* IN-DMA needs this on fault/cancel paths, so 15xx misreports
	 * the last transfer's bytecount by more than a FIFO's worth.
	 */
	if (cpu_is_omap15xx())
		return 0;

	end = omap_get_dma_src_pos(ep->lch);
	if (end == ep->dma_counter)
		return 0;

	/* only the low 16 bits are compared; rebuild the high half
	 * from @start and handle a 64K wrap
	 */
	end |= start & (0xffff << 16);
	if (end < start)
		end += 0x10000;
	return end - start;
}
/*
 * Bytes already moved by an OUT (rx) DMA that started at @start,
 * derived from the channel's current destination position.
 */
static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
{
	dma_addr_t end;

	end = omap_get_dma_dst_pos(ep->lch);
	if (end == ep->dma_counter)
		return 0;

	/* low 16 bits only; rebuild high half, handle 15xx off-by-one
	 * and a 64K wrap
	 */
	end |= start & (0xffff << 16);
	if (cpu_is_omap15xx())
		end++;
	if (end < start)
		end += 0x10000;
	return end - start;
}
/* Each USB transfer request using DMA maps to one or more DMA transfers.
* When DMA completion isn't request completion, the UDC continues with
* the next DMA transfer for that USB transfer.
*/
/*
 * Start the next IN (tx) DMA leg for @req.  Short transfers are
 * byte-counted with the end-of-transfer flag set; otherwise whole
 * packets are counted.  req->dma_bytes records what this leg will move.
 */
static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
{
	u16 txdma_ctrl, w;
	unsigned length = req->req.length - req->req.actual;
	const int sync_mode = cpu_is_omap15xx()
		? OMAP_DMA_SYNC_FRAME
		: OMAP_DMA_SYNC_ELEMENT;
	int dma_trigger = 0;

	/* measure length in either bytes or packets */
	if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
			|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
		txdma_ctrl = UDC_TXN_EOT | length;
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
				length, 1, sync_mode, dma_trigger, 0);
	} else {
		/* cap at the hardware's per-leg packet count */
		length = min(length / ep->maxpacket,
				(unsigned) UDC_TXN_TSC + 1);
		txdma_ctrl = length;
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
				ep->ep.maxpacket >> 1, length, sync_mode,
				dma_trigger, 0);
		length *= ep->maxpacket;
	}

	omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
		0, 0);

	omap_start_dma(ep->lch);
	/* remember start position so dma_src_len() can detect progress */
	ep->dma_counter = omap_get_dma_src_pos(ep->lch);
	w = omap_readw(UDC_DMA_IRQ_EN);
	w |= UDC_TX_DONE_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
	req->dma_bytes = length;
}
/*
 * An IN (tx) DMA leg finished (or was cancelled): account the bytes
 * moved; either leave the request queued for another leg / a zlp, or
 * stop the channel and complete it.
 */
static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
{
	u16 w;

	if (status == 0) {
		req->req.actual += req->dma_bytes;

		/* return if this request needs to send data or zlp */
		if (req->req.actual < req->req.length)
			return;
		if (req->req.zero
				&& req->dma_bytes != 0
				&& (req->req.actual % ep->maxpacket) == 0)
			return;
	} else
		/* fault/cancel: ask the DMA engine how far it got */
		req->req.actual += dma_src_len(ep, req->req.dma
				+ req->req.actual);

	/* tx completion */
	omap_stop_dma(ep->lch);
	w = omap_readw(UDC_DMA_IRQ_EN);
	w &= ~UDC_TX_DONE_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	done(ep, req, status);
}
/*
 * Start the next OUT (rx) DMA leg for @req: whole packets only (partial
 * packet OUT reads are rejected at queue time), then enable the FIFO so
 * the host can start filling it.
 */
static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
{
	unsigned packets = req->req.length - req->req.actual;
	int dma_trigger = 0;
	u16 w;

	/* set up this DMA transfer, enable the fifo, start */
	packets /= ep->ep.maxpacket;
	packets = min(packets, (unsigned)UDC_RXN_TC + 1);
	req->dma_bytes = packets * ep->ep.maxpacket;
	omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
			ep->ep.maxpacket >> 1, packets,
			OMAP_DMA_SYNC_ELEMENT,
			dma_trigger, 0);
	omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
		0, 0);
	/* remember start position so dma_dest_len() can detect progress */
	ep->dma_counter = omap_get_dma_dst_pos(ep->lch);

	omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
	w = omap_readw(UDC_DMA_IRQ_EN);
	w |= UDC_RX_EOT_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);

	omap_start_dma(ep->lch);
}
/*
 * An OUT (rx) DMA leg finished.  When @one is set (the RX "SB" status
 * bit — presumably "short block"; TODO confirm against the TRM) the
 * hardware count is one byte high.  Decide whether another leg is
 * needed; otherwise stop the channel and complete the request.
 */
static void
finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
{
	u16 count, w;

	if (status == 0)
		ep->dma_counter = (u16) (req->req.dma + req->req.actual);
	count = dma_dest_len(ep, req->req.dma + req->req.actual);
	count += req->req.actual;
	if (one)
		count--;
	if (count <= req->req.length)
		req->req.actual = count;

	/* short transfer (or error) ends this request's DMA */
	if (count != req->dma_bytes || status)
		omap_stop_dma(ep->lch);

	/* if this wasn't short, request may need another transfer */
	else if (req->req.actual < req->req.length)
		return;

	/* rx completion */
	w = omap_readw(UDC_DMA_IRQ_EN);
	w &= ~UDC_RX_EOT_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	done(ep, req, status);
}
/*
 * Handle the UDC's DMA status interrupts: finish the current leg for
 * the endpoint the hardware reports in UDC_DMAN_STAT, ack the irq, then
 * start the next leg (or next queued request) if any.
 */
static void dma_irq(struct omap_udc *udc, u16 irq_src)
{
	u16 dman_stat = omap_readw(UDC_DMAN_STAT);
	struct omap_ep *ep;
	struct omap_req *req;

	/* IN dma: tx to host */
	if (irq_src & UDC_TXN_DONE) {
		/* IN endpoints live at ep[16..] */
		ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
		ep->irqs++;
		/* can see TXN_DONE after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_in_dma(ep, req, 0);
		}
		omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);

		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_in_dma(ep, req);
		}
	}

	/* OUT dma: rx from host */
	if (irq_src & UDC_RXN_EOT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* can see RXN_EOT after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
		}
		omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);

		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_out_dma(ep, req);
		}
	}

	if (irq_src & UDC_RXN_CNT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* omap15xx does this unasked... */
		VDBG("%s, RX_CNT irq?\n", ep->ep.name);
		omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
	}
}
/* DMA engine error callback: logged only; transfer cleanup is a TODO. */
static void dma_error(int lch, u16 ch_status, void *data)
{
	struct omap_ep *ep = data;

	/* if ch_status & OMAP_DMA_DROP_IRQ ... */
	/* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
	ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);

	/* complete current transfer ... */
}
/*
 * Claim one of the three USB DMA channels for @ep, program its fixed
 * FIFO-side parameters, and restart any queued transfer (with DMA if
 * the claim succeeded, else falling back to PIO).  @channel is a
 * preferred channel 1..3, or 0/out-of-range to pick any free one.
 */
static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
{
	u16 reg;
	int status, restart, is_in;
	int dma_channel;

	is_in = ep->bEndpointAddress & USB_DIR_IN;
	if (is_in)
		reg = omap_readw(UDC_TXDMA_CFG);
	else
		reg = omap_readw(UDC_RXDMA_CFG);
	reg |= UDC_DMA_REQ;		/* "pulse" activated */

	ep->dma_channel = 0;
	ep->lch = -1;
	if (channel == 0 || channel > 3) {
		/* each channel owns one config nibble; find a free one */
		if ((reg & 0x0f00) == 0)
			channel = 3;
		else if ((reg & 0x00f0) == 0)
			channel = 2;
		else if ((reg & 0x000f) == 0)	/* preferred for ISO */
			channel = 1;
		else {
			status = -EMLINK;
			goto just_restart;
		}
	}
	/* bind this endpoint number into the channel's config nibble */
	reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
	ep->dma_channel = channel;

	if (is_in) {
		dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_TXDMA_CFG);
			/* EMIFF or SDRC */
			omap_set_dma_src_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_src_data_pack(ep->lch, 1);
			/* TIPB */
			omap_set_dma_dest_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
		}
	} else {
		dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_RXDMA_CFG);
			/* TIPB */
			omap_set_dma_src_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
			/* EMIFF or SDRC */
			omap_set_dma_dest_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_dest_data_pack(ep->lch, 1);
		}
	}
	if (status)
		ep->dma_channel = 0;
	else {
		ep->has_dma = 1;
		omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
		/* channel type P: hw synch (fifo) */
		if (!cpu_is_omap15xx())
			omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
	}

just_restart:
	/* restart any queue, even if the claim failed */
	restart = !ep->stopped && !list_empty(&ep->queue);

	if (status)
		DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
			restart ? " (restart)" : "");
	else
		DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
			is_in ? 't' : 'r',
			ep->dma_channel - 1, ep->lch,
			restart ? " (restart)" : "");

	if (restart) {
		struct omap_req *req;
		req = container_of(ep->queue.next, struct omap_req, queue);
		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else {
			/* PIO fallback */
			use_ep(ep, UDC_EP_SEL);
			(is_in ? write_fifo : read_fifo)(ep, req);
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}
}
/*
 * Release @ep's USB DMA channel: abort any active request, unbind the
 * endpoint from the channel's config nibble, wait for the hardware to
 * quiesce, then free the DMA logical channel.  ep->has_dma stays set
 * until the endpoint is fully quiesced by the caller.
 */
static void dma_channel_release(struct omap_ep *ep)
{
	int shift = 4 * (ep->dma_channel - 1);
	u16 mask = 0x0f << shift;
	struct omap_req *req;
	int active;

	/* abort any active usb transfer request */
	if (!list_empty(&ep->queue))
		req = container_of(ep->queue.next, struct omap_req, queue);
	else
		req = NULL;

	active = omap_get_dma_active_status(ep->lch);

	DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
			active ? "active" : "idle",
			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
			ep->dma_channel - 1, req);

	/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
	 * OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
	 */

	/* wait till current packet DMA finishes, and fifo empties */
	if (ep->bEndpointAddress & USB_DIR_IN) {
		omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_TXDMA_CFG);
		if (req) {
			finish_in_dma(ep, req, -ECONNRESET);

			/* clear FIFO; hosts probably won't empty it */
			use_ep(ep, UDC_EP_SEL);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			deselect_ep();
		}
		while (omap_readw(UDC_TXDMA_CFG) & mask)
			udelay(10);
	} else {
		omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_RXDMA_CFG);

		/* dma empties the fifo */
		while (omap_readw(UDC_RXDMA_CFG) & mask)
			udelay(10);
		if (req)
			finish_out_dma(ep, req, -ECONNRESET, 0);
	}
	omap_free_dma(ep->lch);
	ep->dma_channel = 0;
	ep->lch = -1;
	/* has_dma still set, till endpoint is fully quiesced */
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops.queue: validate and enqueue a request.  If the endpoint is
 * idle the transfer is kicked off immediately (DMA or PIO); ep0 has its
 * own state machine here, including the empty-DATA-stage shortcut.
 * Requests still in flight are appended for the irq handler to advance.
 */
static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req *req = container_of(_req, struct omap_req, req);
	struct omap_udc *udc;
	unsigned long flags;
	int is_iso = 0;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		DBG("%s, bad params\n", __func__);
		return -EINVAL;
	}
	if (!_ep || (!ep->ep.desc && ep->bEndpointAddress)) {
		DBG("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
		is_iso = 1;
	}

	/* this isn't bogus, but OMAP DMA isn't the only hardware to
	 * have a hard time with partial packet reads...  reject it.
	 */
	if (use_dma
			&& ep->has_dma
			&& ep->bEndpointAddress != 0
			&& (ep->bEndpointAddress & USB_DIR_IN) == 0
			&& (req->req.length % ep->ep.maxpacket) != 0) {
		DBG("%s, no partial packet OUT reads\n", __func__);
		return -EMSGSIZE;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	if (use_dma && ep->has_dma)
		usb_gadget_map_request(&udc->gadget, &req->req,
				(ep->bEndpointAddress & USB_DIR_IN));

	VDBG("%s queue req %p, len %d buf %p\n",
			ep->ep.name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&udc->lock, flags);

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;

	/* maybe kickstart non-iso i/o queues */
	if (is_iso) {
		u16 w;

		/* ISO transfers are paced by SOF interrupts */
		w = omap_readw(UDC_IRQ_EN);
		w |= UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
		int is_in;

		if (ep->bEndpointAddress == 0) {
			if (!udc->ep0_pending || !list_empty(&ep->queue)) {
				spin_unlock_irqrestore(&udc->lock, flags);
				return -EL2HLT;
			}

			/* empty DATA stage? */
			is_in = udc->ep0_in;
			if (!req->req.length) {

				/* chip became CONFIGURED or ADDRESSED
				 * earlier; drivers may already have queued
				 * requests to non-control endpoints
				 */
				if (udc->ep0_set_config) {
					u16 irq_en = omap_readw(UDC_IRQ_EN);

					irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
					if (!udc->ep0_reset_config)
						irq_en |= UDC_EPN_RX_IE
							| UDC_EPN_TX_IE;
					omap_writew(irq_en, UDC_IRQ_EN);
				}

				/* STATUS for zero length DATA stages is
				 * always an IN ... even for IN transfers,
				 * a weird case which seem to stall OMAP.
				 */
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
				omap_writew(UDC_CLR_EP, UDC_CTRL);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);

				/* cleanup */
				udc->ep0_pending = 0;
				done(ep, req, 0);
				req = NULL;

			/* non-empty DATA stage */
			} else if (is_in) {
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
			} else {
				/* wait for the SETUP handshake to finish */
				if (udc->ep0_setup)
					goto irq_wait;
				omap_writew(UDC_EP_SEL, UDC_EP_NUM);
			}
		} else {
			is_in = ep->bEndpointAddress & USB_DIR_IN;
			if (!ep->has_dma)
				use_ep(ep, UDC_EP_SEL);
			/* if ISO: SOF IRQs must be enabled/disabled! */
		}

		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else if (req) {
			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
				req = NULL;
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}

irq_wait:
	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * usb_ep_ops.dequeue: cancel a queued request.  If it is the head of an
 * active DMA queue, the DMA channel is released (which aborts it with
 * -ECONNRESET) and reclaimed so the remaining queue restarts.
 */
static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req *req = NULL, *iter;
	unsigned long flags;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
		int channel = ep->dma_channel;

		/* releasing the channel cancels the request,
		 * reclaiming the channel restarts the queue
		 */
		dma_channel_release(ep);
		dma_channel_claim(ep, channel);
	} else
		done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops.set_halt: set or clear an endpoint halt.  ep0 only does
 * protocol stalls; non-ISO endpoints get real hardware halts, with any
 * active DMA temporarily released around the halt.
 */
static int omap_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	unsigned long flags;
	int status = -EOPNOTSUPP;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* just use protocol stalls for ep0; real halts are annoying */
	if (ep->bEndpointAddress == 0) {
		if (!ep->udc->ep0_pending)
			status = -EINVAL;
		else if (value) {
			if (ep->udc->ep0_set_config) {
				WARNING("error changing config?\n");
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			ep->udc->ep0_pending = 0;
			status = 0;
		} else /* NOP */
			status = 0;

	/* otherwise, all active non-ISO endpoints can halt */
	} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->ep.desc) {

		/* IN endpoints must already be idle */
		if ((ep->bEndpointAddress & USB_DIR_IN)
				&& !list_empty(&ep->queue)) {
			status = -EAGAIN;
			goto done;
		}

		if (value) {
			int channel;

			/* park the DMA channel while halted */
			if (use_dma && ep->dma_channel
					&& !list_empty(&ep->queue)) {
				channel = ep->dma_channel;
				dma_channel_release(ep);
			} else
				channel = 0;

			use_ep(ep, UDC_EP_SEL);
			/* only halt with an empty FIFO */
			if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
				omap_writew(UDC_SET_HALT, UDC_CTRL);
				status = 0;
			} else
				status = -EAGAIN;
			deselect_ep();

			if (channel)
				dma_channel_claim(ep, channel);
		} else {
			use_ep(ep, 0);
			omap_writew(ep->udc->clr_halt, UDC_CTRL);
			ep->ackwait = 0;
			/* re-prime OUT endpoints to receive */
			if (!(ep->bEndpointAddress & USB_DIR_IN)) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
		}
	}
done:
	VDBG("%s %s halt stat %d\n", ep->ep.name,
		value ? "set" : "clear", status);

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return status;
}
/* per-endpoint operations exported to the gadget core;
 * fifo_status/fifo_flush are intentionally not implemented
 */
static const struct usb_ep_ops omap_ep_ops = {
	.enable		= omap_ep_enable,
	.disable	= omap_ep_disable,

	.alloc_request	= omap_alloc_request,
	.free_request	= omap_free_request,

	.queue		= omap_ep_queue,
	.dequeue	= omap_ep_dequeue,

	.set_halt	= omap_ep_set_halt,
	/* fifo_status ... report bytes in fifo */
	/* fifo_flush ... flush fifo */
};
/*-------------------------------------------------------------------------*/
/* usb_gadget_ops.get_frame: report the frame number from the SOF
 * register, or -EL2NSYNC while the timestamp bit says it isn't valid.
 */
static int omap_get_frame(struct usb_gadget *gadget)
{
	u16 sof = omap_readw(UDC_SOF);

	if (!(sof & UDC_TS_OK))
		return -EL2NSYNC;
	return sof & UDC_TS;
}
/*
 * usb_gadget_ops.wakeup: while suspended, signal remote wakeup if the
 * host (or OTG rules) allow it; while detached, try SRP through an
 * external transceiver.  Returns 0 on success, -EHOSTUNREACH otherwise.
 */
static int omap_wakeup(struct usb_gadget *gadget)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	int		retval = -EHOSTUNREACH;

	udc = container_of(gadget, struct omap_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstat & UDC_SUS) {
		/* NOTE: OTG spec erratum says that OTG devices may
		 * issue wakeups without host enable.
		 */
		if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
			DBG("remote wakeup...\n");
			omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
			retval = 0;
		}

	/* NOTE: non-OTG systems may use SRP TOO... */
	} else if (!(udc->devstat & UDC_ATT)) {
		if (!IS_ERR_OR_NULL(udc->transceiver))
			retval = otg_start_srp(udc->transceiver->otg);
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return retval;
}
/*
 * usb_gadget_ops.set_selfpowered: record the power source in the
 * gadget struct and mirror it into SYSCON1's SELF_PWR bit, which the
 * controller reports back to the host.
 */
static int
omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct omap_udc	*udc = container_of(gadget, struct omap_udc, gadget);
	unsigned long	flags;
	u16		v;

	gadget->is_selfpowered = (is_selfpowered != 0);
	spin_lock_irqsave(&udc->lock, flags);
	v = omap_readw(UDC_SYSCON1);
	if (is_selfpowered)
		v |= UDC_SELF_PWR;
	else
		v &= ~UDC_SELF_PWR;
	omap_writew(v, UDC_SYSCON1);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* True only when every precondition for driving the D+ pullup holds:
 * a bound gadget driver, softconnect asserted, and an active VBUS session.
 */
static int can_pullup(struct omap_udc *udc)
{
	if (!udc->driver)
		return 0;
	if (!udc->softconnect)
		return 0;
	return udc->vbus_active ? 1 : 0;
}
/* Assert the D+ pullup so the host can see us; on non-OTG, non-15xx
 * parts also flag a valid B-session in OTG_CTRL.  Finally enable
 * device-state-change interrupts.
 */
static void pullup_enable(struct omap_udc *udc)
{
	u16 syscon = omap_readw(UDC_SYSCON1);

	syscon |= UDC_PULLUP_EN;
	omap_writew(syscon, UDC_SYSCON1);
	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 otg_ctrl = omap_readl(OTG_CTRL);

		otg_ctrl |= OTG_BSESSVLD;
		omap_writel(otg_ctrl, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
}
/* Drop the D+ pullup (host sees a disconnect); on non-OTG, non-15xx
 * parts first clear the B-session-valid flag in OTG_CTRL.
 */
static void pullup_disable(struct omap_udc *udc)
{
	u16 syscon;

	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 otg_ctrl = omap_readl(OTG_CTRL);

		otg_ctrl &= ~OTG_BSESSVLD;
		omap_writel(otg_ctrl, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
	syscon = omap_readw(UDC_SYSCON1);
	syscon &= ~UDC_PULLUP_EN;
	omap_writew(syscon, UDC_SYSCON1);
}
/* the single controller instance this driver manages; used directly by
 * the clock and /proc helpers below
 */
static struct omap_udc *udc;
/* Gate the controller's dc/hhc clocks on or off; a no-op until both
 * clocks have been acquired.  A short delay after enabling lets things
 * settle before register access resumes.
 */
static void omap_udc_enable_clock(int enable)
{
	if (!udc || !udc->dc_clk || !udc->hhc_clk)
		return;

	if (!enable) {
		clk_disable(udc->hhc_clk);
		clk_disable(udc->dc_clk);
		return;
	}

	clk_enable(udc->dc_clk);
	clk_enable(udc->hhc_clk);
	udelay(100);
}
/*
* Called by whatever detects VBUS sessions: external transceiver
* driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
*/
/*
 * Called by whatever detects VBUS sessions: external transceiver
 * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
 *
 * Note the ordering: clocks come up before the pullup is (possibly)
 * enabled, and go down only after the pullup has been dropped.
 */
static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	u32 l;

	udc = container_of(gadget, struct omap_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);
	VDBG("VBUS %s\n", is_active ? "on" : "off");
	udc->vbus_active = (is_active != 0);
	if (cpu_is_omap15xx()) {
		/* "software" detect, ignored if !VBUS_MODE_1510 */
		l = omap_readl(FUNC_MUX_CTRL_0);
		if (is_active)
			l |= VBUS_CTRL_1510;
		else
			l &= ~VBUS_CTRL_1510;
		omap_writel(l, FUNC_MUX_CTRL_0);
	}
	if (udc->dc_clk != NULL && is_active) {
		if (!udc->clk_requested) {
			omap_udc_enable_clock(1);
			udc->clk_requested = 1;
		}
	}
	if (can_pullup(udc))
		pullup_enable(udc);
	else
		pullup_disable(udc);
	if (udc->dc_clk != NULL && !is_active) {
		if (udc->clk_requested) {
			omap_udc_enable_clock(0);
			udc->clk_requested = 0;
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* usb_gadget_ops.vbus_draw: pass the negotiated current budget to an
 * external transceiver when present; otherwise report "not supported".
 */
static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct omap_udc *udc = container_of(gadget, struct omap_udc, gadget);

	if (IS_ERR_OR_NULL(udc->transceiver))
		return -EOPNOTSUPP;
	return usb_phy_set_power(udc->transceiver, mA);
}
/* usb_gadget_ops.pullup: latch the softconnect request under the lock,
 * then (de)assert the D+ pullup according to can_pullup().
 */
static int omap_pullup(struct usb_gadget *gadget, int is_on)
{
	struct omap_udc *udc = container_of(gadget, struct omap_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->softconnect = (is_on != 0);
	if (can_pullup(udc))
		pullup_enable(udc);
	else
		pullup_disable(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* forward declarations for the udc_start/udc_stop hooks defined below */
static int omap_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int omap_udc_stop(struct usb_gadget *g);

/* gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops omap_gadget_ops = {
	.get_frame		= omap_get_frame,
	.wakeup			= omap_wakeup,
	.set_selfpowered	= omap_set_selfpowered,
	.vbus_session		= omap_vbus_session,
	.vbus_draw		= omap_vbus_draw,
	.pullup			= omap_pullup,
	.udc_start		= omap_udc_start,
	.udc_stop		= omap_udc_stop,
};
/*-------------------------------------------------------------------------*/
/* dequeue ALL requests; caller holds udc->lock */
static void nuke(struct omap_ep *ep, int status)
{
	struct omap_req	*req;

	ep->stopped = 1;

	/* releasing the DMA channel cancels any request it was moving
	 * (see omap_ep_dequeue)
	 */
	if (use_dma && ep->dma_channel)
		dma_channel_release(ep);

	use_ep(ep, 0);
	omap_writew(UDC_CLR_EP, UDC_CTRL);
	if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
		omap_writew(UDC_SET_HALT, UDC_CTRL);

	/* complete every queued request with the caller's status */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct omap_req, queue);
		done(ep, req, status);
	}
}
/* caller holds udc->lock */
static void udc_quiesce(struct omap_udc *udc)
{
	struct omap_ep	*ep;

	/* mark the link down, then flush ep0 and every other endpoint */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	nuke(&udc->ep[0], -ESHUTDOWN);
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
		nuke(ep, -ESHUTDOWN);
}
/*-------------------------------------------------------------------------*/
/*
 * Mirror the HNP-related DEVSTAT bits into the gadget struct (only when
 * acting as the B-device, i.e. OTG_ID is set), and pre-arm HNP in
 * OTG_CTRL when the host has enabled it.  No-op on non-OTG builds.
 */
static void update_otg(struct omap_udc *udc)
{
	u16	devstat;

	if (!gadget_is_otg(&udc->gadget))
		return;

	if (omap_readl(OTG_CTRL) & OTG_ID)
		devstat = omap_readw(UDC_DEVSTAT);
	else
		devstat = 0;

	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);

	/* Enable HNP early, avoiding races on suspend irq path.
	 * ASSUMES OTG state machine B_BUS_REQ input is true.
	 */
	if (udc->gadget.b_hnp_enable) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l |= OTG_B_HNPEN | OTG_B_BUSREQ;
		l &= ~OTG_PULLUP;
		omap_writel(l, OTG_CTRL);
	}
}
/*
 * Handle all ep0 interrupt traffic: the SETUP phase plus the IN (TX)
 * and OUT (RX) data/status phases of control transfers.  A handful of
 * ch9 requests are decoded here; everything else is delegated to the
 * gadget driver's setup() callback.  Called from omap_udc_irq() with
 * udc->lock held.
 */
static void ep0_irq(struct omap_udc *udc, u16 irq_src)
{
	struct omap_ep	*ep0 = &udc->ep[0];
	struct omap_req	*req = NULL;

	ep0->irqs++;

	/* Clear any pending requests and then scrub any rx/tx state
	 * before starting to handle the SETUP request.
	 */
	if (irq_src & UDC_SETUP) {
		u16	ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);

		nuke(ep0, 0);
		if (ack) {
			omap_writew(ack, UDC_IRQ_SRC);
			irq_src = UDC_SETUP;
		}
	}

	/* IN/OUT packets mean we're in the DATA or STATUS stage.
	 * This driver only uses protocol stalls (ep0 never halts),
	 * and if we got this far the gadget driver already had a
	 * chance to stall. Tries to be forgiving of host oddities.
	 *
	 * NOTE: the last chance gadget drivers have to stall control
	 * requests is during their request completion callback.
	 */
	if (!list_empty(&ep0->queue))
		req = container_of(ep0->queue.next, struct omap_req, queue);

	/* IN == TX to host */
	if (irq_src & UDC_EP0_TX) {
		int	stat;

		omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (udc->ep0_in) {
				/* write next IN packet from response,
				 * or set up the status stage.
				 */
				if (req)
					stat = write_fifo(ep0, req);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (!req && udc->ep0_pending) {
					omap_writew(UDC_EP_SEL, UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(0, UDC_EP_NUM);
					udc->ep0_pending = 0;
				} /* else: 6 wait states before it'll tx */
			} else {
				/* ack status stage of OUT transfer */
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
			req = NULL;
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		} else {
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		}
	}

	/* OUT == RX from host */
	if (irq_src & UDC_EP0_RX) {
		int	stat;

		omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (!udc->ep0_in) {
				stat = 0;
				/* read next OUT packet of request, maybe
				 * reactivating the fifo; stall on errors.
				 */
				stat = read_fifo(ep0, req);
				if (!req || stat < 0) {
					omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
					udc->ep0_pending = 0;
					stat = 0;
				} else if (stat == 0)
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(0, UDC_EP_NUM);

				/* activate status stage */
				if (stat == 1) {
					done(ep0, req, 0);
					/* that may have STALLed ep0... */
					omap_writew(UDC_EP_SEL | UDC_EP_DIR,
							UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(UDC_EP_DIR, UDC_EP_NUM);
					udc->ep0_pending = 0;
				}
			} else {
				/* ack status stage of IN transfer */
				omap_writew(0, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(0, UDC_EP_NUM);
		} else {
			omap_writew(0, UDC_EP_NUM);
		}
	}

	/* SETUP starts all control transfers */
	if (irq_src & UDC_SETUP) {
		union u {
			u16			word[4];
			struct usb_ctrlrequest	r;
		} u;
		int			status = -EINVAL;
		struct omap_ep		*ep;

		/* read the (latest) SETUP message */
		do {
			omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
			/* two bytes at a time */
			u.word[0] = omap_readw(UDC_DATA);
			u.word[1] = omap_readw(UDC_DATA);
			u.word[2] = omap_readw(UDC_DATA);
			u.word[3] = omap_readw(UDC_DATA);
			omap_writew(0, UDC_EP_NUM);
		} while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* Delegate almost all control requests to the gadget driver,
		 * except for a handful of ch9 status/feature requests that
		 * hardware doesn't autodecode _and_ the gadget API hides.
		 */
		udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		udc->ep0_set_config = 0;
		udc->ep0_pending = 1;
		ep0->stopped = 0;
		ep0->ackwait = 0;
		switch (u.r.bRequest) {
		case USB_REQ_SET_CONFIGURATION:
			/* udc needs to know when ep != 0 is valid */
			if (u.r.bRequestType != USB_RECIP_DEVICE)
				goto delegate;
			if (w_length != 0)
				goto do_stall;
			udc->ep0_set_config = 1;
			udc->ep0_reset_config = (w_value == 0);
			VDBG("set config %d\n", w_value);

			/* update udc NOW since gadget driver may start
			 * queueing requests immediately; clear config
			 * later if it fails the request.
			 */
			if (udc->ep0_reset_config)
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			else
				omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
			update_otg(udc);
			goto delegate;
		case USB_REQ_CLEAR_FEATURE:
			/* clear endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (ep != ep0) {
				if (w_index & USB_DIR_IN)
					ep += 16;
				if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
						|| !ep->ep.desc)
					goto do_stall;
				use_ep(ep, 0);
				omap_writew(udc->clr_halt, UDC_CTRL);
				ep->ackwait = 0;
				if (!(ep->bEndpointAddress & USB_DIR_IN)) {
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					ep->ackwait = 1 + ep->double_buf;
				}
				/* NOTE: assumes the host behaves sanely,
				 * only clearing real halts. Else we may
				 * need to kill pending transfers and then
				 * restart the queue... very messy for DMA!
				 */
			}
			VDBG("%s halt cleared by host\n", ep->name);
			goto ep0out_status_stage;
		case USB_REQ_SET_FEATURE:
			/* set endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
					|| ep == ep0 || !ep->ep.desc)
				goto do_stall;
			if (use_dma && ep->has_dma) {
				/* this has rude side-effects (aborts) and
				 * can't really work if DMA-IN is active
				 */
				DBG("%s host set_halt, NYET\n", ep->name);
				goto do_stall;
			}
			use_ep(ep, 0);
			/* can't halt if fifo isn't empty... */
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_HALT, UDC_CTRL);
			VDBG("%s halted by host\n", ep->name);
ep0out_status_stage:
			status = 0;
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			udc->ep0_pending = 0;
			break;
		case USB_REQ_GET_STATUS:
			/* USB_ENDPOINT_HALT status? */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto intf_status;

			/* ep0 never stalls */
			if (!(w_index & 0xf))
				goto zero_status;

			/* only active endpoints count */
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (!ep->ep.desc)
				goto do_stall;

			/* iso never stalls */
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
				goto zero_status;

			/* FIXME don't assume non-halted endpoints!! */
			ERR("%s status, can't report\n", ep->ep.name);
			goto do_stall;

intf_status:
			/* return interface status. if we were pedantic,
			 * we'd detect non-existent interfaces, and stall.
			 */
			if (u.r.bRequestType
					!= (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;

zero_status:
			/* return two zero bytes */
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(0, UDC_DATA);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			status = 0;
			VDBG("GET_STATUS, interface %d\n", w_index);
			/* next, status stage */
			break;
		default:
delegate:
			/* activate the ep0out fifo right away */
			if (!udc->ep0_in && w_length) {
				omap_writew(0, UDC_EP_NUM);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			}

			/* gadget drivers see class/vendor specific requests,
			 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
			 * and more
			 */
			VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length);

#undef	w_value
#undef	w_index
#undef	w_length

			/* The gadget driver may return an error here,
			 * causing an immediate protocol stall.
			 *
			 * Else it must issue a response, either queueing a
			 * response buffer for the DATA stage, or halting ep0
			 * (causing a protocol stall, not a real halt). A
			 * zero length buffer means no DATA stage.
			 *
			 * It's fine to issue that response after the setup()
			 * call returns, and this IRQ was handled.
			 */
			udc->ep0_setup = 1;
			spin_unlock(&udc->lock);
			status = udc->driver->setup(&udc->gadget, &u.r);
			spin_lock(&udc->lock);
			udc->ep0_setup = 0;
		}

		if (status < 0) {
do_stall:
			VDBG("req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, status);
			if (udc->ep0_set_config) {
				if (udc->ep0_reset_config)
					WARNING("error resetting config?\n");
				else
					omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			udc->ep0_pending = 0;
		}
	}
}
/*-------------------------------------------------------------------------*/
#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)

/*
 * Handle a device-state-change interrupt: edge-detects reset, attach,
 * and suspend transitions against the cached devstat, forwarding
 * disconnect/suspend/resume events to the gadget driver (with the lock
 * dropped around each callback).  Called with udc->lock held.
 */
static void devstate_irq(struct omap_udc *udc, u16 irq_src)
{
	u16	devstat, change;

	devstat = omap_readw(UDC_DEVSTAT);
	change = devstat ^ udc->devstat;
	udc->devstat = devstat;

	if (change & (UDC_USB_RESET|UDC_ATT)) {
		udc_quiesce(udc);

		if (change & UDC_ATT) {
			/* driver for any external transceiver will
			 * have called omap_vbus_session() already
			 */
			if (devstat & UDC_ATT) {
				udc->gadget.speed = USB_SPEED_FULL;
				VDBG("connect\n");
				if (IS_ERR_OR_NULL(udc->transceiver))
					pullup_enable(udc);
				/* if (driver->connect) call it */
			} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				udc->gadget.speed = USB_SPEED_UNKNOWN;
				if (IS_ERR_OR_NULL(udc->transceiver))
					pullup_disable(udc);
				DBG("disconnect, gadget %s\n",
					udc->driver->driver.name);
				if (udc->driver->disconnect) {
					spin_unlock(&udc->lock);
					udc->driver->disconnect(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
			change &= ~UDC_ATT;
		}

		if (change & UDC_USB_RESET) {
			if (devstat & UDC_USB_RESET) {
				VDBG("RESET=1\n");
			} else {
				udc->gadget.speed = USB_SPEED_FULL;
				INFO("USB reset done, gadget %s\n",
					udc->driver->driver.name);
				/* ep0 traffic is legal from now on */
				omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
						UDC_IRQ_EN);
			}
			change &= ~UDC_USB_RESET;
		}
	}
	if (change & UDC_SUS) {
		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
			/* FIXME tell isp1301 to suspend/resume (?) */
			if (devstat & UDC_SUS) {
				VDBG("suspend\n");
				update_otg(udc);
				/* HNP could be under way already */
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->suspend) {
					spin_unlock(&udc->lock);
					udc->driver->suspend(&udc->gadget);
					spin_lock(&udc->lock);
				}
				if (!IS_ERR_OR_NULL(udc->transceiver))
					usb_phy_set_suspend(
							udc->transceiver, 1);
			} else {
				VDBG("resume\n");
				if (!IS_ERR_OR_NULL(udc->transceiver))
					usb_phy_set_suspend(
							udc->transceiver, 0);
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->resume) {
					spin_unlock(&udc->lock);
					udc->driver->resume(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
		}
		change &= ~UDC_SUS;
	}
	if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
		update_otg(udc);
		change &= ~OTG_FLAGS;
	}

	change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
	if (change)
		VDBG("devstat %03x, ignore change %03x\n",
			devstat,  change);

	omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
}
/*
 * Main UDC interrupt handler: dispatches device-state changes, ep0
 * control traffic, and (when enabled) DMA completion events; anything
 * left over besides SOF/epN bits is logged as unhandled.
 */
static irqreturn_t omap_udc_irq(int irq, void *_udc)
{
	struct omap_udc	*udc = _udc;
	u16		irq_src;
	irqreturn_t	status = IRQ_NONE;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	irq_src = omap_readw(UDC_IRQ_SRC);

	/* Device state change (usb ch9 stuff) */
	if (irq_src & UDC_DS_CHG) {
		devstate_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~UDC_DS_CHG;
	}

	/* EP0 control transfers */
	if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
		ep0_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
	}

	/* DMA transfer completion */
	if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
		dma_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
	}

	irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
	if (irq_src)
		DBG("udc_irq, unhandled %03x\n", irq_src);
	spin_unlock_irqrestore(&udc->lock, flags);

	return status;
}
/* workaround for seemingly-lost IRQs for RX ACKs... */
#define PIO_OUT_TIMEOUT	(jiffies + HZ/3)
#define HALF_FULL(f)	(!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))

/*
 * Timer callback that polls an OUT endpoint for an ACK whose interrupt
 * apparently never arrived; if a packet is sitting in the fifo, drain
 * it into the head request and re-arm the fifo.  Re-arms itself while
 * the endpoint stays active.
 */
static void pio_out_timer(struct timer_list *t)
{
	struct omap_ep	*ep = from_timer(ep, t, timer);
	unsigned long	flags;
	u16		stat_flg;

	spin_lock_irqsave(&ep->udc->lock, flags);
	if (!list_empty(&ep->queue) && ep->ackwait) {
		use_ep(ep, UDC_EP_SEL);
		stat_flg = omap_readw(UDC_STAT_FLG);

		if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
				|| (ep->double_buf && HALF_FULL(stat_flg)))) {
			struct omap_req	*req;

			VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			(void) read_fifo(ep, req);
			omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			ep->ackwait = 1 + ep->double_buf;
		} else
			deselect_ep();
	}
	mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
}
/*
 * PIO interrupt handler for the non-control endpoints: services one
 * OUT (RX) or one IN (TX) endpoint per invocation, OUT first to avoid
 * wasteful NAKs, moving data between fifo and the head request.
 */
static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
{
	u16		epn_stat, irq_src;
	irqreturn_t	status = IRQ_NONE;
	struct omap_ep	*ep;
	int		epnum;
	struct omap_udc	*udc = _dev;
	struct omap_req	*req;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	epn_stat = omap_readw(UDC_EPN_STAT);
	irq_src = omap_readw(UDC_IRQ_SRC);

	/* handle OUT first, to avoid some wasteful NAKs */
	if (irq_src & UDC_EPN_RX) {
		epnum = (epn_stat >> 8) & 0x0f;
		omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
		status = IRQ_HANDLED;
		ep = &udc->ep[epnum];
		ep->irqs++;

		omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
		ep->fnf = 0;
		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
			ep->ackwait--;
			if (!list_empty(&ep->queue)) {
				int stat;
				req = container_of(ep->queue.next,
						struct omap_req, queue);
				stat = read_fifo(ep, req);
				if (!ep->double_buf)
					ep->fnf = 1;
			}
		}
		/* min 6 clock delay before clearing EP_SEL ... */
		epn_stat = omap_readw(UDC_EPN_STAT);
		epn_stat = omap_readw(UDC_EPN_STAT);
		omap_writew(epnum, UDC_EP_NUM);

		/* enabling fifo _after_ clearing ACK, contrary to docs,
		 * reduces lossage; timer still needed though (sigh).
		 */
		if (ep->fnf) {
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			ep->ackwait = 1 + ep->double_buf;
		}
		/* back up the IRQ with the lost-ACK polling timer */
		mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
	}

	/* then IN transfers */
	else if (irq_src & UDC_EPN_TX) {
		epnum = epn_stat & 0x0f;
		omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
		status = IRQ_HANDLED;
		ep = &udc->ep[16 + epnum];
		ep->irqs++;

		omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
			ep->ackwait = 0;
			if (!list_empty(&ep->queue)) {
				req = container_of(ep->queue.next,
						struct omap_req, queue);
				(void) write_fifo(ep, req);
			}
		}
		/* min 6 clock delay before clearing EP_SEL ... */
		epn_stat = omap_readw(UDC_EPN_STAT);
		epn_stat = omap_readw(UDC_EPN_STAT);
		omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
		/* then 6 clocks before it'd tx */
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return status;
}
#ifdef	USE_ISO
/*
 * SOF interrupt handler: walks every PIO isochronous endpoint and
 * moves one packet per frame; disables the SOF interrupt once no
 * endpoint has queued work left.
 */
static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
{
	struct omap_udc	*udc = _dev;
	struct omap_ep	*ep;
	int		pending = 0;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* handle all non-DMA ISO transfers */
	list_for_each_entry(ep, &udc->iso, iso) {
		u16		stat;
		struct omap_req	*req;

		if (ep->has_dma || list_empty(&ep->queue))
			continue;
		req = list_entry(ep->queue.next, struct omap_req, queue);

		use_ep(ep, UDC_EP_SEL);
		stat = omap_readw(UDC_STAT_FLG);

		/* NOTE: like the other controller drivers, this isn't
		 * currently reporting lost or damaged frames.
		 */
		if (ep->bEndpointAddress & USB_DIR_IN) {
			if (stat & UDC_MISS_IN)
				/* done(ep, req, -EPROTO) */;
			else
				write_fifo(ep, req);
		} else {
			int	status = 0;

			if (stat & UDC_NO_RXPACKET)
				status = -EREMOTEIO;
			else if (stat & UDC_ISO_ERR)
				status = -EILSEQ;
			else if (stat & UDC_DATA_FLUSH)
				status = -ENOSR;

			if (status)
				/* done(ep, req, status) */;
			else
				read_fifo(ep, req);
		}
		deselect_ep();
		/* 6 wait states before next EP */

		ep->irqs++;
		if (!list_empty(&ep->queue))
			pending = 1;
	}
	if (!pending) {
		u16 w;

		/* nothing queued anywhere: stop taking SOF interrupts */
		w = omap_readw(UDC_IRQ_EN);
		w &= ~UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	}
	omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
#endif
/*-------------------------------------------------------------------------*/
/* Boards that cannot sense VBUS (OSK, SX1); on these the driver must
 * fake a VBUS session while a gadget driver is bound.
 */
static inline int machine_without_vbus_sense(void)
{
	if (machine_is_omap_osk())
		return 1;
	return machine_is_sx1();
}
/*
 * usb_gadget_ops.udc_start: bind a gadget driver.  Resets endpoint
 * state (halting every non-ISO endpoint), hooks up the driver, then
 * either hands the connection to an external transceiver or drives the
 * pullup directly.  Boards without VBUS sensing get a faked session.
 * Clocks are held only for the duration of the register writes.
 */
static int omap_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	int		status;
	struct omap_ep	*ep;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* reset state */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		ep->irqs = 0;
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
			continue;
		use_ep(ep, 0);
		omap_writew(UDC_SET_HALT, UDC_CTRL);
	}
	udc->ep0_pending = 0;
	udc->ep[0].irqs = 0;
	udc->softconnect = 1;

	/* hook up the driver */
	udc->driver = driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(1);

	/* acknowledge any stale interrupt state before going live */
	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);

	/* connect to bus through transceiver */
	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		status = otg_set_peripheral(udc->transceiver->otg,
						&udc->gadget);
		if (status < 0) {
			ERR("can't bind to transceiver\n");
			udc->driver = NULL;
			goto done;
		}
	} else {
		status = 0;
		if (can_pullup(udc))
			pullup_enable(udc);
		else
			pullup_disable(udc);
	}

	/* boards that don't have VBUS sensing can't autogate 48MHz;
	 * can't enter deep sleep while a gadget driver is active.
	 */
	if (machine_without_vbus_sense())
		omap_vbus_session(&udc->gadget, 1);

done:
	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(0);

	return status;
}
/*
 * usb_gadget_ops.udc_start counterpart: unbind the gadget driver.
 * Tears the faked VBUS session down, detaches from the transceiver or
 * drops the pullup, quiesces every endpoint, then forgets the driver.
 */
static int omap_udc_stop(struct usb_gadget *g)
{
	unsigned long	flags;

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(1);

	if (machine_without_vbus_sense())
		omap_vbus_session(&udc->gadget, 0);

	if (!IS_ERR_OR_NULL(udc->transceiver))
		(void) otg_set_peripheral(udc->transceiver->otg, NULL);
	else
		pullup_disable(udc);

	spin_lock_irqsave(&udc->lock, flags);
	udc_quiesce(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	udc->driver = NULL;

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(0);

	return 0;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
static const char proc_filename[] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS "%s%s%s%s%s%s%s%s"
/*
 * Dump one endpoint's state (DMA channel, status flags, queued
 * requests) into the /proc seq_file.  Called with the udc lock held by
 * proc_udc_show(); selects the endpoint to read its STAT_FLG.
 */
static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
{
	u16		stat_flg;
	struct omap_req	*req;
	char		buf[20];

	use_ep(ep, 0);

	if (use_dma && ep->has_dma)
		snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
			ep->dma_channel - 1, ep->lch);
	else
		buf[0] = 0;

	stat_flg = omap_readw(UDC_STAT_FLG);
	seq_printf(s,
		"\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
		ep->name, buf,
		ep->double_buf ? "dbuf " : "",
		/* GNU statement expression: decode ackwait inline */
		({ char *s;
		switch (ep->ackwait) {
		case 0:
			s = "";
			break;
		case 1:
			s = "(ackw) ";
			break;
		case 2:
			s = "(ackw2) ";
			break;
		default:
			s = "(?) ";
			break;
		} s; }),
		ep->irqs, stat_flg,
		(stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
		(stat_flg & UDC_MISS_IN) ? "miss_in " : "",
		(stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
		(stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
		(stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
		(stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
		(stat_flg & UDC_EP_HALTED) ? "HALT " : "",
		(stat_flg & UDC_STALL) ? "STALL " : "",
		(stat_flg & UDC_NAK) ? "NAK " : "",
		(stat_flg & UDC_ACK) ? "ACK " : "",
		(stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
		(stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
		(stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");

	if (list_empty(&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else
		list_for_each_entry(req, &ep->queue, queue) {
			unsigned	length = req->req.actual;

			/* for the head request, add whatever DMA has
			 * already moved beyond req.actual
			 */
			if (use_dma && buf[0]) {
				length += ((ep->bEndpointAddress & USB_DIR_IN)
						? dma_src_len : dma_dest_len)
					(ep, req->req.dma + length);
				buf[0] = 0;
			}
			seq_printf(s, "\treq %p len %d/%d buf %p\n",
					&req->req, length,
					req->req.length, req->req.buf);
		}
}
/*
 * Decode a 2-bit transceiver wiring-mode field for the /proc dump.
 * Mode 0 reads as "*6wire" when the port is enabled, "unused" when not;
 * out-of-range values decode as "unknown".
 */
static char *trx_mode(unsigned m, int enabled)
{
	if (m == 0)
		return enabled ? "*6wire" : "unused";
	if (m == 1)
		return "4wire";
	if (m == 2)
		return "3wire";
	if (m == 3)
		return "6wire";
	return "unknown";
}
/*
 * Dump the OTG controller registers (revision, transceiver control,
 * syscon1/2, ctrl, and the irq/test registers) into the /proc
 * seq_file.  Called only on non-15xx parts, from proc_udc_show() with
 * the udc lock held.
 *
 * Cleanup: the old "(UNKNOWN)" and zero initializers for ctrl_name and
 * trans were dead stores -- both variables are unconditionally
 * assigned before first use below -- so they are gone.
 */
static int proc_otg_show(struct seq_file *s)
{
	u32		tmp;
	u32		trans;
	char		*ctrl_name;

	tmp = omap_readl(OTG_REV);
	ctrl_name = "transceiver_ctrl";
	trans = omap_readw(USB_TRANSCEIVER_CTRL);
	seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
		tmp >> 4, tmp & 0xf, ctrl_name, trans);
	tmp = omap_readw(OTG_SYSCON_1);
	seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
			FOURBITS "\n", tmp,
		trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
		trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
		(USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710())
			? "internal"
			: trx_mode(USB0_TRX_MODE(tmp), 1),
		(tmp & OTG_IDLE_EN) ? " !otg" : "",
		(tmp & HST_IDLE_EN) ? " !host" : "",
		(tmp & DEV_IDLE_EN) ? " !dev" : "",
		(tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
	tmp = omap_readl(OTG_SYSCON_2);
	seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
			" b_ase_brst=%d hmc=%d\n", tmp,
		(tmp & OTG_EN) ? " otg_en" : "",
		(tmp & USBX_SYNCHRO) ? " synchro" : "",
		/* much more SRP stuff */
		(tmp & SRP_DATA) ? " srp_data" : "",
		(tmp & SRP_VBUS) ? " srp_vbus" : "",
		(tmp & OTG_PADEN) ? " otg_paden" : "",
		(tmp & HMC_PADEN) ? " hmc_paden" : "",
		(tmp & UHOST_EN) ? " uhost_en" : "",
		(tmp & HMC_TLLSPEED) ? " tllspeed" : "",
		(tmp & HMC_TLLATTACH) ? " tllattach" : "",
		B_ASE_BRST(tmp),
		OTG_HMC(tmp));
	tmp = omap_readl(OTG_CTRL);
	seq_printf(s, "otg_ctrl    %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
		(tmp & OTG_ASESSVLD) ? " asess" : "",
		(tmp & OTG_BSESSEND) ? " bsess_end" : "",
		(tmp & OTG_BSESSVLD) ? " bsess" : "",
		(tmp & OTG_VBUSVLD) ? " vbus" : "",
		(tmp & OTG_ID) ? " id" : "",
		(tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
		(tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
		(tmp & OTG_A_BUSREQ) ? " a_bus" : "",
		(tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
		(tmp & OTG_B_BUSREQ) ? " b_bus" : "",
		(tmp & OTG_BUSDROP) ? " busdrop" : "",
		(tmp & OTG_PULLDOWN) ? " down" : "",
		(tmp & OTG_PULLUP) ? " up" : "",
		(tmp & OTG_DRV_VBUS) ? " drv" : "",
		(tmp & OTG_PD_VBUS) ? " pd_vb" : "",
		(tmp & OTG_PU_VBUS) ? " pu_vb" : "",
		(tmp & OTG_PU_ID) ? " pu_id" : ""
		);
	tmp = omap_readw(OTG_IRQ_EN);
	seq_printf(s, "otg_irq_en  %04x" "\n", tmp);
	tmp = omap_readw(OTG_IRQ_SRC);
	seq_printf(s, "otg_irq_src %04x" "\n", tmp);
	tmp = omap_readw(OTG_OUTCTRL);
	seq_printf(s, "otg_outctrl %04x" "\n", tmp);
	tmp = omap_readw(OTG_TEST);
	seq_printf(s, "otg_test    %04x" "\n", tmp);
	return 0;
}
/*
 * /proc/driver/udc seq_file renderer: dumps driver/controller
 * identification, OTG state (non-15xx), the main UDC registers, DMA
 * configuration, and -- while attached and addressed -- the per-endpoint
 * state.  Takes the udc lock for the whole dump; bails out early when
 * the pullup is off since the rest of the registers would be stale.
 */
static int proc_udc_show(struct seq_file *s, void *_)
{
	u32		tmp;
	struct omap_ep	*ep;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "%s, version: " DRIVER_VERSION
#ifdef	USE_ISO
		" (iso)"
#endif
		"%s\n",
		driver_desc,
		use_dma ?  " (dma)" : "");

	tmp = omap_readw(UDC_REV) & 0xff;
	seq_printf(s,
		"UDC rev %d.%d, fifo mode %d, gadget %s\n"
		"hmc %d, transceiver %s\n",
		tmp >> 4, tmp & 0xf,
		fifo_mode,
		udc->driver ? udc->driver->driver.name : "(none)",
		HMC,
		udc->transceiver
			? udc->transceiver->label
			: (cpu_is_omap1710()
				? "external" : "(none)"));
	seq_printf(s, "ULPD control %04x req %04x status %04x\n",
		omap_readw(ULPD_CLOCK_CTRL),
		omap_readw(ULPD_SOFT_REQ),
		omap_readw(ULPD_STATUS_REQ));

	/* OTG controller registers */
	if (!cpu_is_omap15xx())
		proc_otg_show(s);

	tmp = omap_readw(UDC_SYSCON1);
	seq_printf(s, "\nsyscon1     %04x" EIGHTBITS "\n", tmp,
		(tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
		(tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
		(tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
		(tmp & UDC_NAK_EN) ? " nak" : "",
		(tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
		(tmp & UDC_SELF_PWR) ? " self_pwr" : "",
		(tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
		(tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
	/* syscon2 is write-only */

	/* UDC controller registers */
	if (!(tmp & UDC_PULLUP_EN)) {
		seq_printf(s, "(suspended)\n");
		spin_unlock_irqrestore(&udc->lock, flags);
		return 0;
	}

	tmp = omap_readw(UDC_DEVSTAT);
	seq_printf(s, "devstat     %04x" EIGHTBITS "%s%s\n", tmp,
		(tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
		(tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
		(tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
		(tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
		(tmp & UDC_USB_RESET) ? " usb_reset" : "",
		(tmp & UDC_SUS) ? " SUS" : "",
		(tmp & UDC_CFG) ? " CFG" : "",
		(tmp & UDC_ADD) ? " ADD" : "",
		(tmp & UDC_DEF) ? " DEF" : "",
		(tmp & UDC_ATT) ? " ATT" : "");
	seq_printf(s, "sof         %04x\n", omap_readw(UDC_SOF));
	tmp = omap_readw(UDC_IRQ_EN);
	seq_printf(s, "irq_en      %04x" FOURBITS "%s\n", tmp,
		(tmp & UDC_SOF_IE) ? " sof" : "",
		(tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
		(tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
		(tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
		(tmp & UDC_EP0_IE) ? " ep0" : "");
	tmp = omap_readw(UDC_IRQ_SRC);
	seq_printf(s, "irq_src     %04x" EIGHTBITS "%s%s\n", tmp,
		(tmp & UDC_TXN_DONE) ? " txn_done" : "",
		(tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
		(tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
		(tmp & UDC_IRQ_SOF) ? " sof" : "",
		(tmp & UDC_EPN_RX) ? " epn_rx" : "",
		(tmp & UDC_EPN_TX) ? " epn_tx" : "",
		(tmp & UDC_DS_CHG) ? " ds_chg" : "",
		(tmp & UDC_SETUP) ? " setup" : "",
		(tmp & UDC_EP0_RX) ? " ep0out" : "",
		(tmp & UDC_EP0_TX) ? " ep0in" : "");
	if (use_dma) {
		unsigned i;

		tmp = omap_readw(UDC_DMA_IRQ_EN);
		seq_printf(s, "dma_irq_en  %04x%s" EIGHTBITS "\n", tmp,
			(tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
			(tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
			(tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",

			(tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
			(tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
			(tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",

			(tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
			(tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
			(tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");

		tmp = omap_readw(UDC_RXDMA_CFG);
		seq_printf(s, "rxdma_cfg   %04x\n", tmp);
		if (tmp) {
			/* only dump channels that are mapped */
			for (i = 0; i < 3; i++) {
				if ((tmp & (0x0f << (i * 4))) == 0)
					continue;
				seq_printf(s, "rxdma[%d]    %04x\n", i,
						omap_readw(UDC_RXDMA(i + 1)));
			}
		}
		tmp = omap_readw(UDC_TXDMA_CFG);
		seq_printf(s, "txdma_cfg   %04x\n", tmp);
		if (tmp) {
			for (i = 0; i < 3; i++) {
				if (!(tmp & (0x0f << (i * 4))))
					continue;
				seq_printf(s, "txdma[%d]    %04x\n", i,
						omap_readw(UDC_TXDMA(i + 1)));
			}
		}
	}

	tmp = omap_readw(UDC_DEVSTAT);
	if (tmp & UDC_ATT) {
		proc_ep_show(s, &udc->ep[0]);
		if (tmp & UDC_ADD) {
			list_for_each_entry(ep, &udc->gadget.ep_list,
					ep.ep_list) {
				if (ep->ep.desc)
					proc_ep_show(s, ep);
			}
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* Create the /proc file (single-show seq file) exposing UDC state. */
static void create_proc_file(void)
{
	proc_create_single(proc_filename, 0, NULL, proc_udc_show);
}
/* Remove the /proc file created by create_proc_file(). */
static void remove_proc_file(void)
{
	remove_proc_entry(proc_filename, NULL);
}
#else
/* procfs support compiled out: these become no-ops */
static inline void create_proc_file(void) {}
static inline void remove_proc_file(void) {}
#endif
/*-------------------------------------------------------------------------*/
/* Before this controller can enumerate, we need to pick an endpoint
 * configuration, or "fifo_mode". That involves allocating 2KB of packet
 * buffer space among the endpoints we'll be operating.
 *
 * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
 * UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that
 * capability yet though.
 */
/*
 * omap_ep_setup - configure one hardware endpoint and its driver state
 * @name: endpoint name, e.g. "ep1in-bulk"
 * @addr: endpoint address (number, ORed with USB_DIR_IN for IN endpoints)
 * @type: USB_ENDPOINT_XFER_* transfer type
 * @buf: byte offset of this endpoint's packet buffer in the 2KB FIFO RAM
 * @maxp: maximum packet size in bytes
 * @dbuf: nonzero to request double buffering
 *
 * Programs the UDC_EP_RX/UDC_EP_TX config register for the endpoint and
 * fills in the corresponding struct omap_ep.  Returns @buf advanced past
 * the space this endpoint consumed (i.e. the offset for the next one).
 */
static unsigned
omap_ep_setup(char *name, u8 addr, u8 type,
		unsigned buf, unsigned maxp, int dbuf)
{
	struct omap_ep *ep;
	u16 epn_rxtx = 0;

	/* OUT endpoints first, then IN */
	ep = &udc->ep[addr & 0xf];
	if (addr & USB_DIR_IN)
		ep += 16;

	/* in case of ep init table bugs */
	BUG_ON(ep->name[0]);

	/* chip setup ... bit values are same for IN, OUT */
	if (type == USB_ENDPOINT_XFER_ISOC) {
		/* ISO maxpacket (8..512) is encoded in bits 14:12 */
		switch (maxp) {
		case 8:
			epn_rxtx = 0 << 12;
			break;
		case 16:
			epn_rxtx = 1 << 12;
			break;
		case 32:
			epn_rxtx = 2 << 12;
			break;
		case 64:
			epn_rxtx = 3 << 12;
			break;
		case 128:
			epn_rxtx = 4 << 12;
			break;
		case 256:
			epn_rxtx = 5 << 12;
			break;
		case 512:
			epn_rxtx = 6 << 12;
			break;
		default:
			BUG();
		}
		epn_rxtx |= UDC_EPN_RX_ISO;
		/* ISO endpoints are always double buffered */
		dbuf = 1;
	} else {
		/* double-buffering "not supported" on 15xx,
		 * and ignored for PIO-IN on newer chips
		 * (for more reliable behavior)
		 */
		if (!use_dma || cpu_is_omap15xx())
			dbuf = 0;

		/* non-ISO maxpacket is limited to 64 bytes */
		switch (maxp) {
		case 8:
			epn_rxtx = 0 << 12;
			break;
		case 16:
			epn_rxtx = 1 << 12;
			break;
		case 32:
			epn_rxtx = 2 << 12;
			break;
		case 64:
			epn_rxtx = 3 << 12;
			break;
		default:
			BUG();
		}
		if (dbuf && addr)
			epn_rxtx |= UDC_EPN_RX_DB;
		timer_setup(&ep->timer, pio_out_timer, 0);
	}
	if (addr)
		epn_rxtx |= UDC_EPN_RX_VALID;

	/* buffer offsets are in units of 8 bytes */
	BUG_ON(buf & 0x07);
	epn_rxtx |= buf >> 3;

	DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
		name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);

	if (addr & USB_DIR_IN)
		omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
	else
		omap_writew(epn_rxtx, UDC_EP_RX(addr));

	/* next endpoint's buffer starts after this one's */
	buf += maxp;
	if (dbuf)
		buf += maxp;
	BUG_ON(buf > 2048);

	/* set up driver data structures */
	BUG_ON(strlen(name) >= sizeof ep->name);
	strscpy(ep->name, name, sizeof(ep->name));
	INIT_LIST_HEAD(&ep->queue);
	INIT_LIST_HEAD(&ep->iso);
	ep->bEndpointAddress = addr;
	ep->bmAttributes = type;
	ep->double_buf = dbuf;
	ep->udc = udc;

	/* advertise endpoint capabilities to the gadget layer */
	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ep->ep.caps.type_control = true;
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep->ep.caps.type_iso = true;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep->ep.caps.type_bulk = true;
		break;
	case USB_ENDPOINT_XFER_INT:
		ep->ep.caps.type_int = true;
		break;
	}

	if (addr & USB_DIR_IN)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->ep.name = ep->name;
	ep->ep.ops = &omap_ep_ops;
	ep->maxpacket = maxp;
	usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
	list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

	return buf;
}
/*
 * Final release for the gadget device: runs when the last reference to
 * udc->gadget.dev is dropped (registered via usb_add_gadget_udc_release()).
 * Undoes what omap_udc_probe()/omap_udc_setup() established, then frees udc.
 */
static void omap_udc_release(struct device *dev)
{
	pullup_disable(udc);
	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		usb_put_phy(udc->transceiver);
		udc->transceiver = NULL;
	}
	/* quiesce the controller before tearing down clocks */
	omap_writew(0, UDC_SYSCON1);
	remove_proc_file();
	if (udc->dc_clk) {
		if (udc->clk_requested)
			omap_udc_enable_clock(0);
		clk_unprepare(udc->hhc_clk);
		clk_unprepare(udc->dc_clk);
		clk_put(udc->hhc_clk);
		clk_put(udc->dc_clk);
	}
	/* unblock omap_udc_remove(), which waits for this teardown */
	if (udc->done)
		complete(udc->done);
	kfree(udc);
}
/*
 * omap_udc_setup - reset the hardware, allocate the driver state, and carve
 * the 2KB packet buffer RAM into endpoints according to the selected
 * fifo_mode.  On success the global "udc" is valid and endpoint config
 * registers are locked (UDC_CFG_LOCK).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV for an
 * unsupported fifo_mode.
 */
static int
omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
{
	unsigned tmp, buf;

	/* abolish any previous hardware state */
	omap_writew(0, UDC_SYSCON1);
	omap_writew(0, UDC_IRQ_EN);
	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
	omap_writew(0, UDC_DMA_IRQ_EN);
	omap_writew(0, UDC_RXDMA_CFG);
	omap_writew(0, UDC_TXDMA_CFG);

	/* UDC_PULLUP_EN gates the chip clock */
	/* OTG_SYSCON_1 |= DEV_IDLE_EN; */

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	spin_lock_init(&udc->lock);
	udc->gadget.ops = &omap_gadget_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->iso);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.max_speed = USB_SPEED_FULL;
	udc->gadget.name = driver_name;
	udc->gadget.quirk_ep_out_aligned_size = 1;
	udc->transceiver = xceiv;

	/* ep0 is special; put it right after the SETUP buffer */
	buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
			8 /* after SETUP */, 64 /* maxpacket */, 0);
	list_del_init(&udc->ep[0].ep.ep_list);

	/* initially disable all non-ep0 endpoints */
	for (tmp = 1; tmp < 15; tmp++) {
		omap_writew(0, UDC_EP_RX(tmp));
		omap_writew(0, UDC_EP_TX(tmp));
	}

/* helpers keep the per-mode tables below compact; each advances "buf" */
#define OMAP_BULK_EP(name, addr) \
	buf = omap_ep_setup(name "-bulk", addr, \
			USB_ENDPOINT_XFER_BULK, buf, 64, 1);
#define OMAP_INT_EP(name, addr, maxp) \
	buf = omap_ep_setup(name "-int", addr, \
			USB_ENDPOINT_XFER_INT, buf, maxp, 0);
#define OMAP_ISO_EP(name, addr, maxp) \
	buf = omap_ep_setup(name "-iso", addr, \
			USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);

	switch (fifo_mode) {
	case 0:
		OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
		OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
		break;
	case 1:
		OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
		OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);

		OMAP_BULK_EP("ep3in", USB_DIR_IN | 3);
		OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
		OMAP_INT_EP("ep10in", USB_DIR_IN | 10, 16);

		OMAP_BULK_EP("ep5in", USB_DIR_IN | 5);
		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
		OMAP_INT_EP("ep11in", USB_DIR_IN | 11, 16);

		OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
		OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
		OMAP_INT_EP("ep12in", USB_DIR_IN | 12, 16);

		OMAP_BULK_EP("ep7in", USB_DIR_IN | 7);
		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
		OMAP_INT_EP("ep13in", USB_DIR_IN | 13, 16);
		OMAP_INT_EP("ep13out", USB_DIR_OUT | 13, 16);

		OMAP_BULK_EP("ep8in", USB_DIR_IN | 8);
		OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
		OMAP_INT_EP("ep14in", USB_DIR_IN | 14, 16);
		OMAP_INT_EP("ep14out", USB_DIR_OUT | 14, 16);

		OMAP_BULK_EP("ep15in", USB_DIR_IN | 15);
		OMAP_BULK_EP("ep15out", USB_DIR_OUT | 15);
		break;

#ifdef USE_ISO
	case 2: /* mixed iso/bulk */
		OMAP_ISO_EP("ep1in", USB_DIR_IN | 1, 256);
		OMAP_ISO_EP("ep2out", USB_DIR_OUT | 2, 256);
		OMAP_ISO_EP("ep3in", USB_DIR_IN | 3, 128);
		OMAP_ISO_EP("ep4out", USB_DIR_OUT | 4, 128);

		OMAP_INT_EP("ep5in", USB_DIR_IN | 5, 16);

		OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
		OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
		OMAP_INT_EP("ep8in", USB_DIR_IN | 8, 16);
		break;
	case 3: /* mixed bulk/iso */
		OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
		OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
		OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);

		OMAP_BULK_EP("ep4in", USB_DIR_IN | 4);
		OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
		OMAP_INT_EP("ep6in", USB_DIR_IN | 6, 16);

		OMAP_ISO_EP("ep7in", USB_DIR_IN | 7, 256);
		OMAP_ISO_EP("ep8out", USB_DIR_OUT | 8, 256);
		OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
		break;
#endif

	/* add more modes as needed */

	default:
		ERR("unsupported fifo_mode #%d\n", fifo_mode);
		return -ENODEV;
	}
	omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
	INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
	return 0;
}
/*
 * Probe: claim the register window, sort out the transceiver/HMC mode,
 * build the gadget (omap_udc_setup), grab IRQs, and register the UDC.
 * On failure every acquired resource is released on the cleanup paths.
 */
static int omap_udc_probe(struct platform_device *pdev)
{
	int status = -ENODEV;
	int hmc;
	struct usb_phy *xceiv = NULL;
	const char *type = NULL;
	struct omap_usb_config *config = dev_get_platdata(&pdev->dev);
	struct clk *dc_clk = NULL;
	struct clk *hhc_clk = NULL;

	/* NOTE: "knows" the order of the resources! */
	if (!request_mem_region(pdev->resource[0].start,
			resource_size(&pdev->resource[0]),
			driver_name)) {
		DBG("request_mem_region failed\n");
		return -EBUSY;
	}

	if (cpu_is_omap16xx()) {
		dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
		hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
		/* can't use omap_udc_enable_clock yet */
		clk_prepare_enable(dc_clk);
		clk_prepare_enable(hhc_clk);
		udelay(100);
	}

	INFO("OMAP UDC rev %d.%d%s\n",
		omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
		config->otg ? ", Mini-AB" : "");

	/* use the mode given to us by board init code */
	if (cpu_is_omap15xx()) {
		hmc = HMC_1510;
		type = "(unknown)";

		if (machine_without_vbus_sense()) {
			/* just set up software VBUS detect, and then
			 * later rig it so we always report VBUS.
			 * FIXME without really sensing VBUS, we can't
			 * know when to turn PULLUP_EN on/off; and that
			 * means we always "need" the 48MHz clock.
			 */
			u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
			tmp &= ~VBUS_CTRL_1510;
			omap_writel(tmp, FUNC_MUX_CTRL_0);
			tmp |= VBUS_MODE_1510;
			tmp &= ~VBUS_CTRL_1510;
			omap_writel(tmp, FUNC_MUX_CTRL_0);
		}
	} else {
		/* The transceiver may package some GPIO logic or handle
		 * loopback and/or transceiverless setup; if we find one,
		 * use it.  Except for OTG, we don't _need_ to talk to one;
		 * but not having one probably means no VBUS detection.
		 */
		xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
		if (!IS_ERR_OR_NULL(xceiv))
			type = xceiv->label;
		else if (config->otg) {
			DBG("OTG requires external transceiver!\n");
			goto cleanup0;
		}

		hmc = HMC_1610;

		/* classify the HMC pin-mux mode to pick a transceiver label */
		switch (hmc) {
		case 0: /* POWERUP DEFAULT == 0 */
		case 4:
		case 12:
		case 20:
			if (!cpu_is_omap1710()) {
				type = "integrated";
				break;
			}
			fallthrough;
		case 3:
		case 11:
		case 16:
		case 19:
		case 25:
			if (IS_ERR_OR_NULL(xceiv)) {
				DBG("external transceiver not registered!\n");
				type = "unknown";
			}
			break;
		case 21: /* internal loopback */
			type = "loopback";
			break;
		case 14: /* transceiverless */
			if (cpu_is_omap1710())
				goto bad_on_1710;
			fallthrough;
		case 13:
		case 15:
			type = "no";
			break;

		default:
bad_on_1710:
			ERR("unrecognized UDC HMC mode %d\n", hmc);
			goto cleanup0;
		}
	}

	INFO("hmc mode %d, %s transceiver\n", hmc, type);

	/* a "gadget" abstracts/virtualizes the controller */
	status = omap_udc_setup(pdev, xceiv);
	if (status)
		goto cleanup0;
	/* ownership of the phy reference moved into udc->transceiver */
	xceiv = NULL;
	/* "udc" is now valid */
	pullup_disable(udc);
#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
	udc->gadget.is_otg = (config->otg != 0);
#endif

	/* starting with omap1710 es2.0, clear toggle is a separate bit */
	if (omap_readw(UDC_REV) >= 0x61)
		udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
	else
		udc->clr_halt = UDC_RESET_EP;

	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
	status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
				  omap_udc_irq, 0, driver_name, udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[1].start, status);
		goto cleanup1;
	}

	/* USB "non-iso" IRQ (PIO for all but ep0) */
	status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
				  omap_udc_pio_irq, 0, "omap_udc pio", udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[2].start, status);
		goto cleanup1;
	}
#ifdef USE_ISO
	status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
				  omap_udc_iso_irq, 0, "omap_udc iso", udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[3].start, status);
		goto cleanup1;
	}
#endif
	if (cpu_is_omap16xx()) {
		udc->dc_clk = dc_clk;
		udc->hhc_clk = hhc_clk;
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
	}

	create_proc_file();
	return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
					  omap_udc_release);

cleanup1:
	/*
	 * NOTE(review): udc->transceiver may still hold a phy reference
	 * here (xceiv was NULLed after omap_udc_setup) and it is not put
	 * before kfree(udc) — verify whether this path leaks a phy ref.
	 */
	kfree(udc);
	udc = NULL;

cleanup0:
	if (!IS_ERR_OR_NULL(xceiv))
		usb_put_phy(xceiv);

	if (cpu_is_omap16xx()) {
		clk_disable_unprepare(hhc_clk);
		clk_disable_unprepare(dc_clk);
		clk_put(hhc_clk);
		clk_put(dc_clk);
	}

	release_mem_region(pdev->resource[0].start,
			   resource_size(&pdev->resource[0]));

	return status;
}
/*
 * Unregister the gadget and block until omap_udc_release() has finished
 * tearing down (it signals udc->done), then release the register window.
 */
static void omap_udc_remove(struct platform_device *pdev)
{
	DECLARE_COMPLETION_ONSTACK(done);

	udc->done = &done;

	usb_del_gadget_udc(&udc->gadget);

	wait_for_completion(&done);

	release_mem_region(pdev->resource[0].start,
			   resource_size(&pdev->resource[0]));
}
/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
* system is forced into deep sleep
*
* REVISIT we should probably reject suspend requests when there's a host
* session active, rather than disconnecting, at least on boards that can
* report VBUS irqs (UDC_DEVSTAT.UDC_ATT). And in any case, we need to
* make host resumes and VBUS detection trigger OMAP wakeup events; that
* may involve talking to an external transceiver (e.g. isp1301).
*/
/*
 * System suspend hook.  An attached, non-suspended session keeps the
 * 48 MHz clock requested and would block deep sleep, so force a
 * disconnect (drop the pullup) in that case.  Always succeeds.
 */
static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
{
	u32 devstat = omap_readw(UDC_DEVSTAT);

	/* attached to a host and not already bus-suspended? */
	if ((devstat & UDC_ATT) && !(devstat & UDC_SUS)) {
		WARNING("session active; suspend requires disconnect\n");
		omap_pullup(&udc->gadget, 0);
	}

	return 0;
}
/*
 * System resume hook: re-enable the pullup, give the host time to notice
 * the connect, then attempt remote wakeup / SRP.
 */
static int omap_udc_resume(struct platform_device *dev)
{
	DBG("resume + wakeup/SRP\n");
	omap_pullup(&udc->gadget, 1);

	/* maybe the host would enumerate us if we nudged it */
	msleep(100);
	return omap_wakeup(&udc->gadget);
}
/*-------------------------------------------------------------------------*/
/* Platform driver glue and module metadata for the OMAP UDC */
static struct platform_driver udc_driver = {
	.probe		= omap_udc_probe,
	.remove_new	= omap_udc_remove,
	.suspend	= omap_udc_suspend,
	.resume		= omap_udc_resume,
	.driver		= {
		.name	= driver_name,
	},
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_udc");
/* end of drivers/usb/gadget/udc/omap_udc.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
*
* 2013 (c) Aeroflex Gaisler AB
*
* This driver supports GRUSBDC USB Device Controller cores available in the
* GRLIB VHDL IP core library.
*
* Full documentation of the GRUSBDC core can be found here:
* https://www.gaisler.com/products/grlib/grip.pdf
*
* Contributors:
* - Andreas Larsson <[email protected]>
* - Marko Isomaki
*/
/*
* A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
* individually configurable to any of the four USB transfer types. This driver
* only supports cores in DMA mode.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/byteorder.h>
#include "gr_udc.h"
#define DRIVER_NAME "gr_udc"
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;

/* Big-endian register accessors (the core's registers are big-endian) */
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

/* Indexed by the GR_EPCTRL_TT transfer-type field */
static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
/* Map an ep0 state to its human-readable name, or "UNKNOWN" if out of range. */
static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const state_names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state >= 0 && state < ARRAY_SIZE(state_names))
		return state_names[state];

	return "UNKNOWN";
}
#ifdef VERBOSE_DEBUG
/* Dump (up to one 32-byte row of) a request's data buffer to the debug log */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);	/* print at most one row */

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

/* Log a decoded SETUP request */
static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

/* Stubs so callers need no #ifdefs of their own */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */
/* ---------------------------------------------------------------------- */
/* Debugfs functionality */
#ifdef CONFIG_USB_GADGET_DEBUG_FS

/* Print one endpoint's register state, config, and request queue to debugfs */
static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	/* NT (number of transactions) only applies to iso and int modes */
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		/* walk the descriptor chain; 'c' marks the current one */
		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}
/* debugfs show: global controller state followed by every endpoint */
static int gr_dfs_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);

/* Create <debugfs>/usb/<dev>/gr_udc_state for this controller */
static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";
	struct dentry *root;

	root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
	debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
}

/* Remove the per-device debugfs directory and everything in it */
static void gr_dfs_delete(struct gr_udc *dev)
{
	debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
/* ---------------------------------------------------------------------- */
/* DMA and request handling */
/*
 * Allocate a zeroed struct gr_dma_desc from the device's DMA pool and
 * record its bus address in ->paddr.  Returns NULL on pool exhaustion.
 */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;
	dma_addr_t paddr;

	desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (desc)
		desc->paddr = paddr;
	else
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");

	return desc;
}
/* Return one descriptor to the DMA pool (paddr is its bus address). */
static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}
/*
 * Free every descriptor in a request's chain (first_desc .. last_desc)
 * and clear the request's descriptor pointers.  No-op for an empty chain.
 */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc = req->first_desc;

	if (!desc)
		return;

	for (;;) {
		struct gr_dma_desc *following = desc->next_desc;
		int was_last = (desc == req->last_desc);

		gr_free_dma_desc(dev, desc);
		if (was_last)
			break;
		desc = following;
	}

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	/* don't overwrite a status already set by e.g. a dequeue */
	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		/* internal ep0 OUT request: dispatch the SETUP packet */
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		/* drop the lock around the gadget driver's callback */
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}
/* usb_ep_ops.alloc_request: allocate an empty, unqueued gr_request. */
static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
	 * long packet are received. Therefore an internal bounce buffer gets
	 * used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}
/*
 * Complete the request at the head of the endpoint's queue with the given
 * status, then (re)start DMA for whatever is queued next.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *head;

	head = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, head, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}
/*
 * Cancel any ongoing DMA on the endpoint by setting the abort (AD) bit,
 * which also clears GR_DMACTRL_DA in hardware.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	gr_write32(&ep->regs->dmactrl,
		   gr_read32(&ep->regs->dmactrl) | GR_DMACTRL_AD);
}
/*
 * Allocates and sets up a struct gr_dma_desc and putting it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		/*
		 * Link the previous tail to this descriptor.  NOTE(review):
		 * GR_DESC_OUT_CTRL_NX is used for IN chains as well —
		 * presumably the NX bit shares the same position in both
		 * descriptor layouts; confirm against the GRIP manual.
		 */
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	/* only the head of the chain starts out enabled */
	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers)
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/*
 * Validate and enqueue a request on an endpoint, building its DMA
 * descriptor chain and kicking off DMA if the endpoint is idle.
 *
 * Returns 0 on success; -EINVAL for a bad endpoint/request, -ESHUTDOWN
 * when no driver is bound or the bus speed is unknown, -EBUSY while
 * suspended, or a mapping/allocation error.
 *
 * Fix vs. original: if building the descriptor chain fails, undo the
 * usb_gadget_map_request() done just above — the original returned with
 * the request still DMA-mapped, leaking the mapping.
 *
 * Must be called with dev->lock held.
 */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret) {
		/* don't leak the DMA mapping established above */
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
		return ret;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}
/*
 * Queue a request from within the driver (internal responses), logging
 * IN-direction payloads when verbose debugging is enabled.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}
/* ---------------------------------------------------------------------- */
/* General helper functions */
/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	/* stop the endpoint and abort DMA before completing requests */
	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}
/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	/* back to the unconfigured defaults */
	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}
/*
 * Generate a protocol STALL on both ep0 directions by setting the CS bit,
 * and record the stalled state.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	gr_write32(&dev->epo[0].regs->epctrl,
		   gr_read32(&dev->epo[0].regs->epctrl) | GR_EPCTRL_CS);
	gr_write32(&dev->epi[0].regs->epctrl,
		   gr_read32(&dev->epi[0].regs->epctrl) | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}
/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	/* halt is not defined for isochronous endpoints */
	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		/* clear HALT and resume the endpoint */
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}
/*
 * Record a new ep0 state, logging the transition when it actually changes.
 *
 * Must be called with dev->lock held.
 */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (value != dev->ep0state)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));

	dev->ep0state = value;
}
/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Clears the whole control register, which turns off both interrupt
 * enables and the data line pullup.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	/* The barrier orders the register write before clearing irq_enabled */
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}
/*
 * Stop all device activity and disable data line pullup.
 *
 * Nukes every endpoint, masks interrupts and drops the pullup, then marks
 * the gadget as disconnected/not attached.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}
/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */
/*
 * Completion callback for the test-mode ep0 response: once the status stage
 * has gone out, enable the previously stored USB test mode in hardware.
 *
 * Runs without dev->lock held, hence the explicit locking here.
 */
static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}
/* No-op completion callback for internal ep0 responses */
static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Intentionally empty */
}
/*
 * Queue a response of @length bytes from @buf on ep0in, invoking @complete
 * when the transfer finishes. The data is copied into the driver-owned
 * ep0in request buffer first.
 *
 * Returns the gr_queue_int() status (negative errno on failure).
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *dst = dev->ep0reqi->req.buf;
	int ret;
	int i;

	/* byte-copy so a NULL buf with length == 0 (ZLP) is harmless */
	for (i = 0; i < length; i++)
		dst[i] = buf[i];

	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	ret = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (ret < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", ret);

	return ret;
}
/*
 * Queue a 2 byte (little-endian) response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 wire = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&wire, sizeof(wire),
			      gr_ep0_dummy_complete);
}
/*
 * Queue a zero-length-packet (ZLP) response on ep0in, typically to complete
 * a status stage.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}
/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	/* Replace the UA (USB address) field, keep everything else */
	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	/* SU tells the core to latch the new address after the next IN ACK */
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}
/*
 * Handle standard requests with the device as recipient.
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation (to the gadget driver).
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		/* Address 0 means back to the default (unaddressed) state */
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support USB_TEST_FORCE_ENABLE */
			test = index >> 8;
			if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
				/*
				 * Mode is armed here but only enabled by the
				 * completion callback after the status stage.
				 */
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
/*
 * Handle standard requests with an interface as recipient.
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation (to the gadget driver).
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */
	if (request == USB_REQ_GET_STATUS)
		return gr_ep0_respond_u16(dev, 0x0000);

	/*
	 * SET_FEATURE/CLEAR_FEATURE have no valid standard interface
	 * requests, but gadget drivers still get a go at them - as they do
	 * at every other request.
	 */
	return 1; /* Delegate the rest */
}
/*
 * Handle standard requests with an endpoint as recipient.
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation (to the gadget driver).
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	/* Reject endpoints this core instance does not have */
	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		/* Report the EH (endpoint halt) bit as the halt feature */
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			/* A wedged endpoint refuses host-initiated unhalt */
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}
/*
 * Re-arm ep0out with the driver-owned request so the next SETUP packet
 * can be received.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int status = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (status)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			status);
}
/*
 * The main function dealing with setup requests on ep0.
 *
 * Decodes the SETUP packet, dispatches standard requests to the recipient
 * handlers or delegates to the gadget driver, and advances the ep0 state
 * machine. Always re-arms ep0out before returning.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/*
	 * Handle SETUP packet. Clamp the copy to sizeof(u.raw): req->req.actual
	 * is reported by hardware and the ep0out buffer is larger than 8
	 * bytes, so an unclamped loop could overflow the on-stack union.
	 */
	for (i = 0; i < req->req.actual && i < sizeof(u.raw); i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		/* Drop the lock while the gadget driver handles the request */
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */
/*
 * Handle VBUS becoming valid: record the negotiated speed, mark the gadget
 * powered and turn on full interrupt enables plus the data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}
/*
 * Enable only the VBUS interrupt so host attachment can be detected, and
 * handle the case where VBUS is already valid.
 *
 * Must be called with dev->lock held.
 */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	/* Barrier orders the flag write before unmasking the interrupt */
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}
/*
 * Handle VBUS going away: stop all activity, notify the gadget driver and
 * re-arm VBUS detection for the next connect.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect (lock dropped around the gadget callback) */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}
/*
 * Handle a USB bus reset: return to address 0 and the default state, flush
 * both ep0 halves and re-arm ep0out for the next SETUP packet.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	/* gr_ep_nuke() stops the endpoints, so un-stop ep0 afterwards */
	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* Irq handling */
/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	/* READ_ONCE: the descriptor is concurrently updated by the DMA */
	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}
/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	/* READ_ONCE: the descriptor is concurrently updated by the DMA */
	ctrl = READ_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}
/*
 * Handle state changes (VBUS, reset, speed, suspend/resume).
 * Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	/* "Powered" here means anything beyond NOTATTACHED/ATTACHED */
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		/* Write-one-to-clear the reset status bit */
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		/* Remember where we came from so resume can restore it */
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}
/*
 * Non-interrupt context irq handler (threaded half of the shared irq).
 * Services endpoint completions and device state changes.
 */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Interrupt context irq handler: just kicks the threaded handler when the
 * driver has interrupts enabled.
 */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	return dev->irq_enabled ? IRQ_WAKE_THREAD : IRQ_NONE;
}
/* ---------------------------------------------------------------------- */
/* USB ep ops */
/*
 * Enable endpoint. Not for ep0in and ep0out that are handled separately.
 *
 * Validates the descriptor against the hardware configuration (transfer
 * type, max payload, high-bandwidth multiplier, buffer size), then programs
 * epctrl/dmactrl and sets up software buffer accounting.
 *
 * Returns 0 on success or a negative errno.
 */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	/* Hardware transfer-type encoding: 0=ctrl 1=isoc 2=bulk 3=int */
	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = usb_endpoint_maxp(desc);
	nt = usb_endpoint_maxp_mult(desc) - 1;
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;


	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed the buffers in the OUT
		 * direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	/* Registers must not be touched while the core is suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	/* Complete all outstanding requests, then reset the hardware state */
	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct gr_request, req);

	/* Freeing a request that is still queued would leak its descriptors */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}
/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	/* GFP_ATOMIC: we hold a spinlock here, regardless of gfp_flags */
	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req = NULL, *iter;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;

	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			/* Endpoint stopped: just complete the request */
			gr_finish_request(ep, req, -ECONNRESET);
		else
			/* Complete and start DMA on the next queued request */
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct gr_ep *ep;
	int ret;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Halt endpoint (usb_ep_ops.set_halt) */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}
/* Halt and wedge endpoint (usb_ep_ops.set_wedge) */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}
/*
 * Return the total number of bytes currently stored in the internal buffers
 * of the endpoint (sum of the two hardware buffers), or -ENODEV.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 count = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	/* Each valid buffer contributes its byte count */
	if (epstat & GR_EPSTAT_B0)
		count += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		count += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return count;
}
/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	/* Setting the CB bit tells the core to clear its buffers */
	epctrl = gr_read32(&ep->regs->epctrl) | GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}
/* Endpoint operations exposed to the gadget framework */
static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};
/* ---------------------------------------------------------------------- */
/* USB Gadget ops */
/* Return the current USB frame number from the status register */
static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct gr_udc, gadget);

	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}
static int gr_wakeup(struct usb_gadget *_gadget)
{
struct gr_udc *dev;
if (!_gadget)
return -ENODEV;
dev = container_of(_gadget, struct gr_udc, gadget);
/* Remote wakeup feature not enabled by host*/
if (!dev->remote_wakeup)
return -EINVAL;
spin_lock(&dev->lock);
gr_write32(&dev->regs->control,
gr_read32(&dev->regs->control) | GR_CONTROL_RW);
spin_unlock(&dev->lock);
return 0;
}
/* Enable or disable the data line pullup via the EP bit */
static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control = is_on ? (control | GR_CONTROL_EP)
			: (control & ~GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}
/* Bind a gadget driver and start looking for a host (usb_gadget_ops) */
static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}
/* Unbind the gadget driver and shut down all activity (usb_gadget_ops) */
static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* Gadget-level operations exposed to the UDC framework */
static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup         = gr_wakeup,
	.pullup         = gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */
/* Endpoint names, indexed by endpoint number (OUT then IN directions) */
static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/*
 * Initialize one endpoint's software state: name, registers, queue, ops and
 * the DMA bounce buffer for trailing bytes. For endpoint 0, also allocate
 * the driver-owned setup/response request and its buffer.
 *
 * Returns 0 on success or -ENOMEM.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		/* ep0 gets a permanent driver-owned request + DMA-able buffer */
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		if (!_req)
			return -ENOMEM;

		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!buf) {
			gr_free_request(&ep->ep, _req);
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	/* Coherent bounce buffer used for non-maxpacket-multiple tails */
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
/*
 * Initialize the whole controller: reset the address, set up every IN and
 * OUT endpoint (buffer sizes from the "epobufsizes"/"epibufsizes" DT
 * properties, defaulting to 1024), and enable ep0 in hardware.
 *
 * Returns 0 on success or a negative errno from gr_ep_init().
 *
 * Must be called with dev->lock held.
 */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
/* Free the per-endpoint coherent tail buffer allocated by gr_ep_init() */
static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep = is_in ? &dev->epi[num] : &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}
/* Platform driver remove: tear down in the reverse order of probe */
static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	/* A still-bound gadget driver means teardown is unsafe */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	/* Free the driver-owned ep0 requests allocated in gr_ep_init() */
	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}
/* Request a shared, threaded interrupt line for the controller;
 * gr_irq is the hard handler and gr_irq_handler runs in the thread.
 */
static int gr_request_irq(struct gr_udc *dev, int irq)
{
	unsigned long flags = IRQF_SHARED;

	return devm_request_threaded_irq(dev->dev, irq, gr_irq,
					 gr_irq_handler, flags,
					 driver_name, dev);
}
static int gr_probe(struct platform_device *pdev)
{
struct gr_udc *dev;
struct gr_regs __iomem *regs;
int retval;
u32 status;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->dev = &pdev->dev;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
return dev->irq;
/* Some core configurations has separate irqs for IN and OUT events */
dev->irqi = platform_get_irq(pdev, 1);
if (dev->irqi > 0) {
dev->irqo = platform_get_irq(pdev, 2);
if (dev->irqo < 0)
return dev->irqo;
} else {
dev->irqi = 0;
}
dev->gadget.name = driver_name;
dev->gadget.max_speed = USB_SPEED_HIGH;
dev->gadget.ops = &gr_ops;
spin_lock_init(&dev->lock);
dev->regs = regs;
platform_set_drvdata(pdev, dev);
/* Determine number of endpoints and data interface mode */
status = gr_read32(&dev->regs->status);
dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
if (!(status & GR_STATUS_DM)) {
dev_err(dev->dev, "Slave mode cores are not supported\n");
return -ENODEV;
}
/* --- Effects of the following calls might need explicit cleanup --- */
/* Create DMA pool for descriptors */
dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
sizeof(struct gr_dma_desc), 4, 0);
if (!dev->desc_pool) {
dev_err(dev->dev, "Could not allocate DMA pool");
return -ENOMEM;
}
/* Inside lock so that no gadget can use this udc until probe is done */
retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
if (retval) {
dev_err(dev->dev, "Could not add gadget udc");
goto out;
}
dev->added = 1;
spin_lock(&dev->lock);
retval = gr_udc_init(dev);
if (retval) {
spin_unlock(&dev->lock);
goto out;
}
/* Clear all interrupt enables that might be left on since last boot */
gr_disable_interrupts_and_pullup(dev);
spin_unlock(&dev->lock);
gr_dfs_create(dev);
retval = gr_request_irq(dev, dev->irq);
if (retval) {
dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
goto out;
}
if (dev->irqi) {
retval = gr_request_irq(dev, dev->irqi);
if (retval) {
dev_err(dev->dev, "Failed to request irqi %d\n",
dev->irqi);
goto out;
}
retval = gr_request_irq(dev, dev->irqo);
if (retval) {
dev_err(dev->dev, "Failed to request irqo %d\n",
dev->irqo);
goto out;
}
}
if (dev->irqi)
dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
dev->irq, dev->irqi, dev->irqo);
else
dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
out:
if (retval)
gr_remove(pdev);
return retval;
}
static const struct of_device_id gr_match[] = {
{.name = "GAISLER_USBDC"},
{.name = "01_021"},
{},
};
MODULE_DEVICE_TABLE(of, gr_match);
static struct platform_driver gr_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = gr_match,
},
.probe = gr_probe,
.remove = gr_remove,
};
module_platform_driver(gr_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/gr_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* trace.c - USB Gadget Framework Trace Support
*
* Copyright (C) 2016 Intel Corporation
* Author: Felipe Balbi <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/usb/gadget/udc/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* snps_udc_plat.c - Synopsys UDC Platform Driver
*
* Copyright (C) 2016 Broadcom
*/
#include <linux/extcon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include "amd5536udc.h"
/* description */
#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver"
/* Bring the UDC online. Does nothing unless a gadget driver is bound. */
static void start_udc(struct udc *udc)
{
	if (!udc->driver)
		return;

	dev_info(udc->dev, "Connecting...\n");
	udc_enable_dev_setup_interrupts(udc);
	udc_basic_init(udc);
	udc->connected = 1;
}
/*
 * Take the UDC offline: flush the receive FIFO, mask interrupts, notify
 * any bound gadget driver of the disconnect, and drain all endpoint
 * request queues. Called from the DRD work handler and system suspend.
 */
static void stop_udc(struct udc *udc)
{
	int tmp;
	u32 reg;

	spin_lock(&udc->lock);

	/* Flush the receive fifo: pulse the SRX_FLUSH bit in devctl */
	reg = readl(&udc->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SRX_FLUSH);
	writel(reg, &udc->regs->ctl);

	reg = readl(&udc->regs->ctl);
	reg &= ~(AMD_BIT(UDC_DEVCTL_SRX_FLUSH));
	writel(reg, &udc->regs->ctl);
	dev_dbg(udc->dev, "ep rx queue flushed\n");

	/* Mask interrupts. Required more so when the
	 * UDC is connected to a DRD phy.
	 */
	udc_mask_unused_interrupts(udc);

	/* Disconnect gadget driver */
	if (udc->driver) {
		/* drop the lock around the callback: the gadget driver's
		 * disconnect handler may re-enter this driver or sleep
		 */
		spin_unlock(&udc->lock);
		udc->driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&udc->ep[tmp]);
	}
	udc->connected = 0;

	spin_unlock(&udc->lock);
	dev_info(udc->dev, "Device disconnected\n");
}
/* Delayed-work handler that moves the controller between idle and
 * device mode, based on the connection type last recorded by the
 * extcon notifier.
 */
static void udc_drd_work(struct work_struct *work)
{
	struct udc *udc = container_of(to_delayed_work(work),
				       struct udc, drd_work);

	if (!udc->conn_type) {
		dev_dbg(udc->dev, "device -> idle\n");
		stop_udc(udc);
		return;
	}

	dev_dbg(udc->dev, "idle -> device\n");
	start_udc(udc);
}
/*
 * Extcon notifier callback for cable/VBUS events. Records the new
 * connection state and defers the actual start/stop of the UDC to
 * udc_drd_work() via the workqueue.
 */
static int usbd_connect_notify(struct notifier_block *self,
			       unsigned long event, void *ptr)
{
	struct udc *udc = container_of(self, struct udc, nb);

	dev_dbg(udc->dev, "%s: event: %lu\n", __func__, event);

	udc->conn_type = event;

	/* no delay: run the state transition as soon as possible */
	schedule_delayed_work(&udc->drd_work, 0);

	return NOTIFY_OK;
}
/*
 * Probe the Synopsys UDC platform device: map registers, parse the IRQ,
 * initialize and power on the PHY, optionally register an extcon notifier
 * for VBUS detection, set up DMA pools, request the interrupt, and hand
 * off to the common udc_probe().
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths.
 *
 * Fix vs. original: the -EPROBE_DEFER path out of
 * extcon_get_edev_by_phandle() used to return directly, leaking the
 * powered-on PHY; it now goes through exit_phy so the PHY is released
 * and re-acquired on the next probe attempt. Two dev_err() messages
 * also gained the conventional trailing '\n'.
 */
static int udc_plat_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct udc *udc;
	int ret;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	spin_lock_init(&udc->lock);
	udc->dev = dev;

	udc->virt_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(udc->virt_addr))
		return PTR_ERR(udc->virt_addr);

	/* udc csr registers base */
	udc->csr = udc->virt_addr + UDC_CSR_ADDR;

	/* dev registers base */
	udc->regs = udc->virt_addr + UDC_DEVCFG_ADDR;

	/* ep registers base */
	udc->ep_regs = udc->virt_addr + UDC_EPREGS_ADDR;

	/* fifo's base */
	udc->rxfifo = (u32 __iomem *)(udc->virt_addr + UDC_RXFIFO_ADDR);
	udc->txfifo = (u32 __iomem *)(udc->virt_addr + UDC_TXFIFO_ADDR);

	udc->phys_addr = (unsigned long)res->start;

	udc->irq = irq_of_parse_and_map(dev->of_node, 0);
	if (udc->irq <= 0) {
		dev_err(dev, "Can't parse and map interrupt\n");
		return -EINVAL;
	}

	udc->udc_phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
	if (IS_ERR(udc->udc_phy)) {
		dev_err(dev, "Failed to obtain phy from device tree\n");
		return PTR_ERR(udc->udc_phy);
	}

	ret = phy_init(udc->udc_phy);
	if (ret) {
		dev_err(dev, "UDC phy init failed\n");
		return ret;
	}

	ret = phy_power_on(udc->udc_phy);
	if (ret) {
		dev_err(dev, "UDC phy power on failed\n");
		phy_exit(udc->udc_phy);
		return ret;
	}

	/* Register for extcon if supported */
	if (of_property_present(dev->of_node, "extcon")) {
		udc->edev = extcon_get_edev_by_phandle(dev, 0);
		if (IS_ERR(udc->edev)) {
			ret = PTR_ERR(udc->edev);
			/* release the PHY even on deferral; it is
			 * re-acquired on the next probe attempt
			 */
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Invalid or missing extcon\n");
			udc->edev = NULL;
			goto exit_phy;
		}

		udc->nb.notifier_call = usbd_connect_notify;
		ret = extcon_register_notifier(udc->edev, EXTCON_USB,
					       &udc->nb);
		if (ret < 0) {
			dev_err(dev, "Can't register extcon device\n");
			goto exit_phy;
		}

		ret = extcon_get_state(udc->edev, EXTCON_USB);
		if (ret < 0) {
			dev_err(dev, "Can't get cable state\n");
			goto exit_extcon;
		} else if (ret) {
			udc->conn_type = ret;
		}
		INIT_DELAYED_WORK(&udc->drd_work, udc_drd_work);
	}

	/* init dma pools */
	if (use_dma) {
		ret = init_dma_pools(udc);
		if (ret != 0)
			goto exit_extcon;
	}

	ret = devm_request_irq(dev, udc->irq, udc_irq, IRQF_SHARED,
			       "snps-udc", udc);
	if (ret < 0) {
		dev_err(dev, "Request irq %d failed for UDC\n", udc->irq);
		goto exit_dma;
	}

	platform_set_drvdata(pdev, udc);
	udc->chiprev = UDC_BCM_REV;

	if (udc_probe(udc)) {
		ret = -ENODEV;
		goto exit_dma;
	}
	dev_info(dev, "Synopsys UDC platform driver probe successful\n");

	return 0;

exit_dma:
	if (use_dma)
		free_dma_pools(udc);
exit_extcon:
	if (udc->edev)
		extcon_unregister_notifier(udc->edev, EXTCON_USB, &udc->nb);
exit_phy:
	if (udc->udc_phy) {
		phy_power_off(udc->udc_phy);
		phy_exit(udc->udc_phy);
	}
	return ret;
}
/*
 * Platform removal: unregister the gadget UDC, free DMA pools, undo
 * udc_probe(), power down/exit the PHY, and unregister the extcon
 * notifier.
 *
 * NOTE(review): udc->edev may be NULL on platforms without an "extcon"
 * DT property; extcon_unregister_notifier() is then called with a NULL
 * device — confirm it tolerates that.
 */
static void udc_plat_remove(struct platform_device *pdev)
{
	struct udc *dev;

	dev = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (WARN_ON(dev->driver))
		return;

	/* dma pool cleanup */
	free_dma_pools(dev);

	udc_remove(dev);

	platform_set_drvdata(pdev, NULL);

	phy_power_off(dev->udc_phy);
	phy_exit(dev->udc_phy);
	extcon_unregister_notifier(dev->edev, EXTCON_USB, &dev->nb);

	dev_info(&pdev->dev, "Synopsys UDC platform driver removed\n");
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: take the UDC offline and power down the PHY.
 *
 * NOTE(review): stop_udc() is called unconditionally and then a second
 * time when the extcon cable state reports "connected", so a bound
 * gadget driver may receive two disconnect callbacks in a row — confirm
 * this is intentional. Also assumes udc->edev is valid, i.e. an
 * extcon-enabled platform — TODO confirm for non-extcon boards.
 */
static int udc_plat_suspend(struct device *dev)
{
	struct udc *udc;

	udc = dev_get_drvdata(dev);
	stop_udc(udc);

	if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
		dev_dbg(udc->dev, "device -> idle\n");
		stop_udc(udc);
	}
	phy_power_off(udc->udc_phy);
	phy_exit(udc->udc_phy);

	return 0;
}
/*
 * System resume: re-initialize and power on the PHY, then restart the
 * UDC if the extcon cable state reports a connected host.
 *
 * NOTE(review): assumes udc->edev is valid (extcon-enabled platform) —
 * TODO confirm for non-extcon boards.
 */
static int udc_plat_resume(struct device *dev)
{
	struct udc *udc;
	int ret;

	udc = dev_get_drvdata(dev);

	ret = phy_init(udc->udc_phy);
	if (ret) {
		dev_err(udc->dev, "UDC phy init failure");
		return ret;
	}

	ret = phy_power_on(udc->udc_phy);
	if (ret) {
		dev_err(udc->dev, "UDC phy power on failure");
		phy_exit(udc->udc_phy);
		return ret;
	}

	if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
		dev_dbg(udc->dev, "idle -> device\n");
		start_udc(udc);
	}

	return 0;
}
static const struct dev_pm_ops udc_plat_pm_ops = {
.suspend = udc_plat_suspend,
.resume = udc_plat_resume,
};
#endif
static const struct of_device_id of_udc_match[] = {
{ .compatible = "brcm,ns2-udc", },
{ .compatible = "brcm,cygnus-udc", },
{ .compatible = "brcm,iproc-udc", },
{ }
};
MODULE_DEVICE_TABLE(of, of_udc_match);
static struct platform_driver udc_plat_driver = {
.probe = udc_plat_probe,
.remove_new = udc_plat_remove,
.driver = {
.name = "snps-udc-plat",
.of_match_table = of_udc_match,
#ifdef CONFIG_PM_SLEEP
.pm = &udc_plat_pm_ops,
#endif
},
};
module_platform_driver(udc_plat_driver);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Broadcom");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/gadget/udc/snps_udc_plat.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
*
* Copyright (C) 2012 Kevin Cernekee <[email protected]>
* Copyright (C) 2012 Broadcom Corporation
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#define DRV_MODULE_NAME "bcm63xx_udc"
static const char bcm63xx_ep0name[] = "ep0";
static const struct {
const char *name;
const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
{ \
.name = _name, \
.caps = _caps, \
}
EP_INFO(bcm63xx_ep0name,
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
EP_INFO("ep1in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep2out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep3in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep4out-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
};
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
/*
* RX IRQ coalescing options:
*
* false (default) - one IRQ per DATAx packet. Slow but reliable. The
* driver is able to pass the "testusb" suite and recover from conditions like:
*
* 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
* 2) Host sends 512 bytes of data
* 3) Host decides to reconfigure the device and sends SET_INTERFACE
* 4) Device shuts down the endpoint and cancels the RX transaction
*
* true - one IRQ per transfer, for transfers <= 2048B. Generates
* considerably fewer IRQs, but error recovery is less robust. Does not
* reliably pass "testusb".
*
* TX always uses coalescing, because we can cancel partially complete TX
* transfers by repeatedly flushing the FIFO. The hardware doesn't allow
* this on RX.
*/
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
#define BCM63XX_NUM_EP 5
#define BCM63XX_NUM_IUDMA 6
#define BCM63XX_NUM_FIFO_PAIRS 3
#define IUDMA_RESET_TIMEOUT_US 10000
#define IUDMA_EP0_RXCHAN 0
#define IUDMA_EP0_TXCHAN 1
#define IUDMA_MAX_FRAGMENT 2048
#define BCM63XX_MAX_CTRL_PKT 64
#define BCMEP_CTRL 0x00
#define BCMEP_ISOC 0x01
#define BCMEP_BULK 0x02
#define BCMEP_INTR 0x03
#define BCMEP_OUT 0x00
#define BCMEP_IN 0x01
#define BCM63XX_SPD_FULL 1
#define BCM63XX_SPD_HIGH 0
#define IUDMA_DMAC_OFFSET 0x200
#define IUDMA_DMAS_OFFSET 0x400
enum bcm63xx_ep0_state {
EP0_REQUEUE,
EP0_IDLE,
EP0_IN_DATA_PHASE_SETUP,
EP0_IN_DATA_PHASE_COMPLETE,
EP0_OUT_DATA_PHASE_SETUP,
EP0_OUT_DATA_PHASE_COMPLETE,
EP0_OUT_STATUS_PHASE,
EP0_IN_FAKE_STATUS_PHASE,
EP0_SHUTDOWN,
};
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
"REQUEUE",
"IDLE",
"IN_DATA_PHASE_SETUP",
"IN_DATA_PHASE_COMPLETE",
"OUT_DATA_PHASE_SETUP",
"OUT_DATA_PHASE_COMPLETE",
"OUT_STATUS_PHASE",
"IN_FAKE_STATUS_PHASE",
"SHUTDOWN",
};
/**
* struct iudma_ch_cfg - Static configuration for an IUDMA channel.
* @ep_num: USB endpoint number.
* @n_bds: Number of buffer descriptors in the ring.
* @ep_type: Endpoint type (control, bulk, interrupt).
* @dir: Direction (in, out).
* @n_fifo_slots: Number of FIFO entries to allocate for this channel.
* @max_pkt_hs: Maximum packet size in high speed mode.
* @max_pkt_fs: Maximum packet size in full speed mode.
*/
struct iudma_ch_cfg {
int ep_num;
int n_bds;
int ep_type;
int dir;
int n_fifo_slots;
int max_pkt_hs;
int max_pkt_fs;
};
static const struct iudma_ch_cfg iudma_defaults[] = {
/* This controller was designed to support a CDC/RNDIS application.
It may be possible to reconfigure some of the endpoints, but
the hardware limitations (FIFO sizing and number of DMA channels)
may significantly impact flexibility and/or stability. Change
these values at your own risk.
ep_num ep_type n_fifo_slots max_pkt_fs
idx | n_bds | dir | max_pkt_hs |
| | | | | | | | */
[0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
[1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
[2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
[3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
[4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
[5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
};
struct bcm63xx_udc;
/**
* struct iudma_ch - Represents the current state of a single IUDMA channel.
* @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
* @ep_num: USB endpoint number. -1 for ep0 RX.
* @enabled: Whether bcm63xx_ep_enable() has been called.
* @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
* @is_tx: true for TX, false for RX.
* @bep: Pointer to the associated endpoint. NULL for ep0 RX.
* @udc: Reference to the device controller.
* @read_bd: Next buffer descriptor to reap from the hardware.
* @write_bd: Next BD available for a new packet.
* @end_bd: Points to the final BD in the ring.
* @n_bds_used: Number of BD entries currently occupied.
* @bd_ring: Base pointer to the BD ring.
* @bd_ring_dma: Physical (DMA) address of bd_ring.
* @n_bds: Total number of BDs in the ring.
*
* ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
* bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
* only.
*
* Each bulk/intr endpoint has a single IUDMA channel and a single
* struct usb_ep.
*/
struct iudma_ch {
unsigned int ch_idx;
int ep_num;
bool enabled;
int max_pkt;
bool is_tx;
struct bcm63xx_ep *bep;
struct bcm63xx_udc *udc;
struct bcm_enet_desc *read_bd;
struct bcm_enet_desc *write_bd;
struct bcm_enet_desc *end_bd;
int n_bds_used;
struct bcm_enet_desc *bd_ring;
dma_addr_t bd_ring_dma;
unsigned int n_bds;
};
/**
* struct bcm63xx_ep - Internal (driver) state of a single endpoint.
* @ep_num: USB endpoint number.
* @iudma: Pointer to IUDMA channel state.
* @ep: USB gadget layer representation of the EP.
* @udc: Reference to the device controller.
* @queue: Linked list of outstanding requests for this EP.
* @halted: 1 if the EP is stalled; 0 otherwise.
*/
struct bcm63xx_ep {
unsigned int ep_num;
struct iudma_ch *iudma;
struct usb_ep ep;
struct bcm63xx_udc *udc;
struct list_head queue;
unsigned halted:1;
};
/**
* struct bcm63xx_req - Internal (driver) state of a single request.
* @queue: Links back to the EP's request list.
* @req: USB gadget layer representation of the request.
* @offset: Current byte offset into the data buffer (next byte to queue).
* @bd_bytes: Number of data bytes in outstanding BD entries.
* @iudma: IUDMA channel used for the request.
*/
struct bcm63xx_req {
struct list_head queue; /* ep's requests */
struct usb_request req;
unsigned int offset;
unsigned int bd_bytes;
struct iudma_ch *iudma;
};
/**
* struct bcm63xx_udc - Driver/hardware private context.
* @lock: Spinlock to mediate access to this struct, and (most) HW regs.
* @dev: Generic Linux device structure.
* @pd: Platform data (board/port info).
* @usbd_clk: Clock descriptor for the USB device block.
* @usbh_clk: Clock descriptor for the USB host block.
* @gadget: USB device.
* @driver: Driver for USB device.
* @usbd_regs: Base address of the USBD/USB20D block.
* @iudma_regs: Base address of the USBD's associated IUDMA block.
* @bep: Array of endpoints, including ep0.
* @iudma: Array of all IUDMA channels used by this controller.
* @cfg: USB configuration number, from SET_CONFIGURATION wValue.
* @iface: USB interface number, from SET_INTERFACE wIndex.
* @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
* @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
* @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
* @ep0state: Current state of the ep0 state machine.
* @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
* @wedgemap: Bitmap of wedged endpoints.
* @ep0_req_reset: USB reset is pending.
* @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
* @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
* @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
* @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
* @ep0_reply: Pending reply from gadget driver.
* @ep0_request: Outstanding ep0 request.
*/
struct bcm63xx_udc {
spinlock_t lock;
struct device *dev;
struct bcm63xx_usbd_platform_data *pd;
struct clk *usbd_clk;
struct clk *usbh_clk;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
void __iomem *usbd_regs;
void __iomem *iudma_regs;
struct bcm63xx_ep bep[BCM63XX_NUM_EP];
struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
int cfg;
int iface;
int alt_iface;
struct bcm63xx_req ep0_ctrl_req;
u8 *ep0_ctrl_buf;
int ep0state;
struct work_struct ep0_wq;
unsigned long wedgemap;
unsigned ep0_req_reset:1;
unsigned ep0_req_set_cfg:1;
unsigned ep0_req_set_iface:1;
unsigned ep0_req_shutdown:1;
unsigned ep0_req_completed:1;
struct usb_request *ep0_reply;
struct usb_request *ep0_request;
};
static const struct usb_ep_ops bcm63xx_udc_ep_ops;
/***********************************************************************
 * Convenience functions
 ***********************************************************************/

/* Map a gadget-core object back to the driver's private state. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

/* usb_ep -> driver endpoint wrapper */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

/* usb_request -> driver request wrapper */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

/* Register accessors for the USBD block */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

/* Register accessors for the IUDMA block (global registers) */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

/* Per-channel accessors for the IUDMA "DMAC" register bank */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			 (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
				   int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
		   (ENETDMA_CHAN_WIDTH * chan));
}

/* Per-channel accessors for the IUDMA "DMAS" (state RAM) register bank */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			 (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
				   int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
		   (ENETDMA_CHAN_WIDTH * chan));
}

/* Gate both the USBH and USBD clocks together; short settle delay on
 * enable, reverse order on disable.
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
/***********************************************************************
* Low-level IUDMA / FIFO operations
***********************************************************************/
/**
 * bcm63xx_ep_dma_select - Program the init_sel field of the control reg.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * "init_sel" selects which endpoint or IUDMA channel subsequent
 * per-endpoint register accesses refer to. Endpoints and IUDMA channels
 * do not map 1:1, so the meaning of @idx depends on the caller's context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 ctrl = usbd_readl(udc, USBD_CONTROL_REG);

	ctrl &= ~USBD_CONTROL_INIT_SEL_MASK;
	ctrl |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, ctrl, USBD_CONTROL_REG);
}
/**
 * bcm63xx_set_stall - Set or clear the stall condition on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 cmd = USBD_STALL_UPDATE_MASK |
		  (bep->ep_num << USBD_STALL_EPNUM_SHIFT);

	if (is_stalled)
		cmd |= USBD_STALL_ENABLE_MASK;

	usbd_writel(udc, cmd, USBD_STALL_REG);
}
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed. Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* RX half of the pair: start/end FIFO slots, then the
		 * speed-dependent max packet size
		 */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		/* TX half of the pair, same layout */
		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* dummy read back — presumably to post the writes before
		 * the next iteration; TODO confirm against HW docs
		 */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
/**
 * bcm63xx_fifo_reset_ep - Flush the hardware FIFO of one endpoint.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 ctrl;

	bcm63xx_ep_dma_select(udc, ep_num);

	ctrl = usbd_readl(udc, USBD_CONTROL_REG);
	usbd_writel(udc, ctrl | USBD_CONTROL_FIFO_RESET_MASK,
		    USBD_CONTROL_REG);
	/* dummy read back of the control register */
	usbd_readl(udc, USBD_CONTROL_REG);
}
/**
 * bcm63xx_fifo_reset - Flush every hardware FIFO pair.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int pair;

	for (pair = 0; pair < BCM63XX_NUM_FIFO_PAIRS; pair++)
		bcm63xx_fifo_reset_ep(udc, pair);
}
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		/* ep_num < 0 marks the ep0 RX channel, which has no
		 * typemap entry to program
		 */
		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* map endpoint type and its IUDMA channel pair (i >> 1) */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		/* max packet size is speed-dependent (512B HS vs 64B FS
		 * for bulk, per iudma_defaults)
		 */
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* idx < 0 is the ep0 RX channel; no usb_ep to configure */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		/* CSR entry: ep number, direction, type, and the current
		 * cfg/iface/alt_iface selection from the host
		 */
		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	/* without coalescing, each RX BD is limited to one max packet */
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* a max-packet-aligned transfer with req.zero set needs a
	 * trailing zero-length packet
	 */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance the ring cursor, wrapping at the last BD */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length BD: length 1 plus the USB_ZERO flag */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* barrier: the address must be visible before len_stat
		 * hands ownership (DMADESC_OWNER_MASK) to the hardware
		 */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel to start processing the new descriptors */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled. If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	/* nothing was queued on this channel */
	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* hardware still owns this BD => transfer not complete */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		/* walk the ring, wrapping at the last BD */
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	/* all BDs reaped; reset the cursor for the next transaction */
	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* RX channels: flush the endpoint FIFO first (ep0 RX uses ep 0) */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through the timeout, try a forcible buffer halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack all pending channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Returns 0 on success or -ENOMEM if the BD ring cannot be allocated.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channel indices are TX (see iudma_defaults layout) */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	/* devm-managed, so freed automatically on driver detach */
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
/**
* iudma_init - One-time initialization of all IUDMA channels.
* @udc: Reference to the device controller.
*
* Enable DMA, flush channels, and enable global IUDMA IRQs.
*/
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;
	/* globally enable the DMA block before touching the channels */
	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}
	/* unmask the per-channel IRQs for every IUDMA channel at once */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
/**
* iudma_uninit - Uninitialize IUDMA channels.
* @udc: Reference to the device controller.
*
* Kill global IUDMA IRQs, flush channels, and kill DMA.
*/
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;
	/* mask global IRQs first so no channel can interrupt during teardown */
	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);
	/* finally disable the DMA block itself */
	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
/***********************************************************************
* Other low-level USBD operations
***********************************************************************/
/**
* bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
* @udc: Reference to the device controller.
* @enable_irqs: true to enable, false to disable.
*/
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;
	usbd_writel(udc, 0, USBD_STATUS_REG);
	/* the set of control-path events this driver cares about */
	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* ack any of these events that are already pending */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
/**
* bcm63xx_select_phy_mode - Select between USB device and host mode.
* @udc: Reference to the device controller.
* @is_device: true for device, false for host.
*
* This should probably be reworked to use the drivers/usb/otg
* infrastructure.
*
* By default, the AFE/pullups are disabled in device mode, until
* bcm63xx_select_pullup() is called.
*/
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);
	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}
	/* device mode: take the port away from the host block (HOSTB) and
	 * leave the pullup disabled (NODRIV) until bcm63xx_select_pullup() */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
	/* route the data path to the device-mode (USBD) block */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
/**
* bcm63xx_select_pullup - Enable/disable the pullup on D+
* @udc: Reference to the device controller.
* @is_on: true to enable the pullup, false to disable.
*
* If the pullup is active, the host will sense a FS/HS device connected to
* the port. If the pullup is inactive, the host will think the USB
* device has been disconnected.
*/
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);
	/* clearing NODRIV for this port enables the pullup; setting it
	 * disables the driver so the host sees a disconnect */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
/**
* bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
* @udc: Reference to the device controller.
*
* This just masks the IUDMA IRQs and releases the clocks. It is assumed
* that bcm63xx_udc_stop() has already run, and the clocks are stopped.
*/
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	/* clocks must be running while we touch the IUDMA registers */
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);
	/* release the clock references taken in bcm63xx_init_udc_hw() */
	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
/**
* bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
* @udc: Reference to the device controller.
*/
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	/* scratch buffer shared by all internally-generated ep0 transfers */
	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	/* ep0 is special: pull it off the generic endpoint list */
	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	/* clocks must run while we program the USBD registers */
	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;
	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	/*
	 * Propagate iudma_init() failure to the caller.  The old code
	 * returned 0 unconditionally, so a failed probe would continue
	 * with torn-down hardware.
	 */
	return rc;
}
/***********************************************************************
* Standard EP gadget operations
***********************************************************************/
/**
* bcm63xx_ep_enable - Enable one endpoint.
* @ep: Endpoint to enable.
* @desc: Contains max packet, direction, etc.
*
* Most of the endpoint parameters are fixed in this controller, so there
* isn't much for this function to do.
*/
static int bcm63xx_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	unsigned long flags;

	/*
	 * Validate the arguments before touching *ep.  The old code
	 * dereferenced bep->udc / bep->iudma before the !ep check.
	 */
	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		/* endpoint was already enabled */
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	/* clear any stale stall/wedge state from a previous session */
	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/**
* bcm63xx_ep_disable - Disable one endpoint.
* @ep: Endpoint to disable.
*/
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	/*
	 * Validate the argument before touching *ep.  The old code
	 * dereferenced bep->udc / bep->iudma before the !ep check.
	 */
	if (!ep || !ep->desc)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	/* fail every queued request; completion runs with the lock dropped */
	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}

	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/**
* bcm63xx_udc_alloc_request - Allocate a new request.
* @ep: Endpoint associated with the request.
* @mem_flags: Flags to pass to kzalloc().
*/
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	/* Hand back the usb_request embedded in a zero-filled bcm63xx_req. */
	struct bcm63xx_req *breq = kzalloc(sizeof(*breq), mem_flags);

	return breq ? &breq->req : NULL;
}
/**
* bcm63xx_udc_free_request - Free a request.
* @ep: Endpoint associated with the request.
* @req: Request to free.
*/
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	/* Free the containing bcm63xx_req handed out by alloc_request(). */
	kfree(our_req(req));
}
/**
* bcm63xx_udc_queue - Queue up a new request.
* @ep: Endpoint associated with the request.
* @req: Request to add.
* @mem_flags: Unused.
*
* If the queue is empty, start this request immediately. Otherwise, add
* it to the list.
*
* ep0 replies are sent through this function from the gadget driver, but
* they are treated differently because they need to be handled by the ep0
* state machine. (Sometimes they are replies to control requests that
* were spoofed by this driver, and so they shouldn't be transmitted at all.)
*/
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;
	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;
	req->actual = 0;
	req->status = 0;
	breq->offset = 0;
	/* ep0 traffic is routed to the ep0 worker, not the IUDMA queues */
	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;
		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}
	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}
	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		/* start the transfer now only if nothing else is in flight */
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}
out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
/**
* bcm63xx_udc_dequeue - Remove a pending request from the queue.
* @ep: Endpoint associated with the request.
* @req: Request to remove.
*
* If the request is not at the head of the queue, this is easy - just nuke
* it. If the request is at the head of the queue, we'll need to stop the
* DMA transaction and then queue up the successor.
*/
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;
	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}
	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
	if (breq == cur) {
		/* in-flight request: stop DMA, then kick off the successor */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);
		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;
			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}
out:
	spin_unlock_irqrestore(&udc->lock, flags);
	/* NOTE(review): the completion below runs even when rc != 0 (queue
	 * was empty), and unconditionally reports -ESHUTDOWN — confirm this
	 * is what callers expect before changing it */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);
	return rc;
}
/**
* bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
* @ep: Endpoint to halt.
* @value: Zero to clear halt; nonzero to set halt.
*
* See comments in bcm63xx_update_wedge().
*/
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/* program the hardware stall bit and mirror it in bep->halted */
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/**
* bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
* @ep: Endpoint to wedge.
*
* See comments in bcm63xx_update_wedge().
*/
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/* remember the wedge so bcm63xx_update_wedge() can re-stall it
	 * after the hardware auto-clears the stall on CLEAR_FEATURE */
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* per-endpoint operations handed to the gadget core via bep->ep.ops */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable = bcm63xx_ep_enable,
	.disable = bcm63xx_ep_disable,
	.alloc_request = bcm63xx_udc_alloc_request,
	.free_request = bcm63xx_udc_free_request,
	.queue = bcm63xx_udc_queue,
	.dequeue = bcm63xx_udc_dequeue,
	.set_halt = bcm63xx_udc_set_halt,
	.set_wedge = bcm63xx_udc_set_wedge,
};
/***********************************************************************
* EP0 handling
***********************************************************************/
/**
* bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
* @udc: Reference to the device controller.
* @ctrl: 8-byte SETUP request.
*/
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;
	/* the gadget driver's ->setup() may sleep or re-enter this driver,
	 * so it must be called with udc->lock released */
	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
/**
* bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
* @udc: Reference to the device controller.
*
* Many standard requests are handled automatically in the hardware, but
* we still need to pass them to the gadget driver so that it can
* reconfigure the interfaces/endpoints if necessary.
*
* Unfortunately we are not able to send a STALL response if the host
* requests an invalid configuration. If this happens, we'll have to be
* content with printing a warning.
*/
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;
	/* rebuild the SET_CONFIGURATION packet the hardware consumed,
	 * using the value latched in udc->cfg */
	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;
	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		/* too late to STALL; the hardware already ACKed the request */
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}
/**
* bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
* @udc: Reference to the device controller.
*/
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;
	/* rebuild the SET_INTERFACE packet from the latched iface/alt_iface */
	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;
	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		/* too late to STALL; the hardware already ACKed the request */
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}
/**
* bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
* @udc: Reference to the device controller.
* @ch_idx: IUDMA channel number.
* @req: USB gadget layer representation of the request.
*/
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	/* only one ep0 request may be in flight at a time */
	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;
	req->actual = 0;
	breq->offset = 0;
	/* NOTE(review): usb_gadget_map_request() can fail but its return
	 * value is ignored here — confirm whether a failure is possible
	 * for ep0-sized buffers */
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
/**
* bcm63xx_ep0_complete - Set completion status and "stage" the callback.
* @udc: Reference to the device controller.
* @req: USB gadget layer representation of the request.
* @status: Status to return to the gadget driver.
*/
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		/* the gadget's completion callback runs without udc->lock */
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
/**
* bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
* reset/shutdown.
* @udc: Reference to the device controller.
* @is_tx: Nonzero for TX (IN), zero for RX (OUT).
*/
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;
	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	/* if this reply was also the in-flight ep0 request, forget it too */
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
/**
* bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
* transfer len.
* @udc: Reference to the device controller.
*/
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;
	/* clear the in-flight state before reporting the transfer length */
	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;
	return req->actual;
}
/**
* bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
* @udc: Reference to the device controller.
* @ch_idx: IUDMA channel number.
* @length: Number of bytes to TX/RX.
*
* Used for simple transfers performed by the ep0 worker. This will always
* use ep0_ctrl_req / ep0_ctrl_buf.
*/
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;
	/* driver-internal transfer: no completion callback is wanted */
	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;
	bcm63xx_ep0_map_write(udc, ch_idx, req);
}
/**
* bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
* @udc: Reference to the device controller.
*
* EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
* for the next packet. Anything else means the transaction requires multiple
* stages of handling.
*/
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
	/* rc is the number of bytes the RX channel actually received */
	rc = bcm63xx_ep0_read_complete(udc);
	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}
	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;
	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}
	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}
	/* pick the next state from the request's data-phase direction */
	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
/**
* bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
* @udc: Reference to the device controller.
*
* In state EP0_IDLE, the RX descriptor is either pending, or has been
* filled with a SETUP packet from the host. This function handles new
* SETUP packets, control IRQ events (which can generate fake SETUP packets),
* and reset/shutdown events.
*
* Returns 0 if work was done; -EAGAIN if nothing to do.
*/
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	/* event flags are checked in priority order; each is one-shot */
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);
		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}
	return 0;
}
/**
* bcm63xx_ep0_one_round - Handle the current ep0 state.
* @udc: Reference to the device controller.
*
* Returns 0 if work was done; -EAGAIN if nothing to do.
*/
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
			BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly. When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply. Just
		 * REQUEUE->IDLE. The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
				udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
				udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission. Don't bother
		 * calling the completion, because it originated from this
		 * function anyway. Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply. Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;
		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}
		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}
	/* report -EAGAIN (nothing left to do) if the state did not change */
	if (udc->ep0state == ep0state)
		return -EAGAIN;
	udc->ep0state = ep0state;
	return 0;
}
/**
* bcm63xx_ep0_process - ep0 worker thread / state machine.
* @w: Workqueue struct.
*
* bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
* is used to synchronize ep0 events and ensure that both HW and SW events
* occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
* synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
* by the USBD hardware.
*
* The worker function will continue iterating around the state machine
* until there is nothing left to do. Usually "nothing left to do" means
* that we're waiting for a new event from the hardware.
*/
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	/* iterate the state machine until it reports nothing left to do */
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
/***********************************************************************
* Standard UDC gadget operations
***********************************************************************/
/**
* bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
* @gadget: USB device.
*/
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
struct bcm63xx_udc *udc = gadget_to_udc(gadget);
return (usbd_readl(udc, USBD_STATUS_REG) &
USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}
/**
* bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
* @gadget: USB device.
* @is_on: 0 to disable pullup, 1 to enable.
*
* See notes in bcm63xx_select_pullup().
*/
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller out of shutdown and connect to host */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);
		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
		/* ask the ep0 worker to shut down, then poll (unlocked)
		 * until it reaches EP0_SHUTDOWN; the worker issues mb()
		 * before setting the state (see bcm63xx_ep0_do_idle) */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
/**
* bcm63xx_udc_start - Start the controller.
* @gadget: USB device.
* @driver: Driver for USB device.
*/
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	/* this controller only supports high-speed-capable gadget drivers */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;
	spin_lock_irqsave(&udc->lock, flags);
	/* bring up clocks and program FIFOs/endpoints for device mode */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);
	udc->driver = driver;
	udc->gadget.dev.of_node = udc->dev->of_node;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/**
* bcm63xx_udc_stop - Shut down the controller.
* @gadget: USB device.
* @driver: Driver for USB device.
*/
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * msleep() must not run in atomic context, so sleep with the
	 * spinlock released (the old code slept under spin_lock_irqsave).
	 */
	msleep(100);

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* controller-level operations registered with the UDC core */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
};
/***********************************************************************
* IRQ handling
***********************************************************************/
/**
* bcm63xx_update_cfg_iface - Read current configuration/interface settings.
* @udc: Reference to the device controller.
*
* This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
* The driver never sees the raw control packets coming in on the ep0
* IUDMA channel, but at least we get an interrupt event to tell us that
* new values are waiting in the USBD_STATUS register.
*/
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	/* latch the cfg/iface/alt values the hardware accepted on our behalf */
	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
		USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}
/**
* bcm63xx_update_link_speed - Check to see if the link speed has changed.
* @udc: Reference to the device controller.
*
* The link speed update coincides with a SETUP IRQ. Returns 1 if the
* speed has changed, so that the caller can update the endpoint settings.
*/
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;
	/* translate the hardware speed field into the gadget-core enum */
	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}
	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}
/**
* bcm63xx_update_wedge - Iterate through wedged endpoints.
* @udc: Reference to the device controller.
* @new_status: true to "refresh" wedge status; false to clear it.
*
* On a SETUP interrupt, we need to manually "refresh" the wedge status
* because the controller hardware is designed to automatically clear
* stalls in response to a CLEAR_FEATURE request from the host.
*
* On a RESET interrupt, we do want to restore all wedged endpoints.
*/
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;
	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		/* re-assert (SETUP) or drop (RESET) the stall on each
		 * wedged endpoint; on RESET the wedge bit is cleared too */
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
/**
* bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
* @irq: IRQ number (unused).
* @dev_id: Reference to the device controller.
*
* This is where we handle link (VBUS) down, USB reset, speed changes,
* SET_CONFIGURATION, and SET_INTERFACE events.
*/
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;
	/* only look at events that are both pending and unmasked, and
	 * ack them immediately */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);
		/* un-wedge everything and let the ep0 worker observe reset */
		bcm63xx_update_wedge(udc, false);
		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);
	/* gadget-driver callbacks run after the lock is released */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
	return IRQ_HANDLED;
}
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread. For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;
	spin_lock(&udc->lock);
	/* ack the buffer-done interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);
	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);
		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;
			/* done when all bytes moved or BD was short */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);
				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;
		if (rc >= 0) {
			req->actual += rc;
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);
				req->actual = min(req->actual, req->length);
				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;
					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);
	/* run the completion callback outside the lock */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}
	return IRQ_HANDLED;
}
/***********************************************************************
* Debug filesystem
***********************************************************************/
/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	if (!udc->driver)
		return -ENODEV;
	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	/* seq_puts is preferred over seq_printf for constant strings */
	seq_puts(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;
	if (!udc->driver)
		return -ENODEV;
	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_puts(s, "control");
			break;
		case BCMEP_BULK:
			seq_puts(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_puts(s, "interrupt");
			break;
		}
		/*
		 * never pass a non-literal as the format string; use
		 * seq_puts for the selected constant instead
		 */
		seq_puts(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);
		if (iudma->bep)
			seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));
		else
			seq_puts(s, "\n");
		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];
			/*
			 * i * sizeof(*d) is a size_t; %x expects unsigned
			 * int, so cast to avoid a varargs type mismatch
			 * on 64-bit builds
			 */
			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   (unsigned int)(i * sizeof(*d)), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_puts(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_puts(s, "   <<WR");
			seq_puts(s, "\n");
		}
		seq_puts(s, "\n");
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * No-op unless CONFIG_USB_GADGET_DEBUG_FS is enabled.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *udc_root;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	/* one directory per controller instance, two read-only files */
	udc_root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	debugfs_create_file("usbd", 0400, udc_root, udc,
			    &bcm63xx_usbd_dbg_fops);
	debugfs_create_file("iudma", 0400, udc_root, udc,
			    &bcm63xx_iudma_dbg_fops);
}
/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * Removes the per-controller debugfs directory created by
 * bcm63xx_udc_init_debugfs(); safe even if it was never created.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
/***********************************************************************
* Driver init/exit
***********************************************************************/
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 *
 * Return: 0 on success or a negative errno.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	int rc = -ENOMEM, i, irq;
	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;
	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;
	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}
	/* MEM resource 0: USBD control regs; resource 1: IUDMA regs */
	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);
	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);
	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);
	/* full speed may be forced by platform data or module parameter */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;
	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;
	rc = -ENXIO;
	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		rc = irq;
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;
	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			rc = irq;
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}
	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;
	/* gadget registration failed: tear down in reverse order */
	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static void bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* usb_del_gadget_udc() must have unbound any gadget driver */
	BUG_ON(udc->driver);
	bcm63xx_uninit_udc_hw(udc);
}
/* Platform glue: the bcm63xx BSP registers a matching platform device */
static struct platform_driver bcm63xx_udc_driver = {
	.probe = bcm63xx_udc_probe,
	.remove_new = bcm63xx_udc_remove,
	.driver = {
		.name = DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);
MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
| linux-master | drivers/usb/gadget/udc/bcm63xx_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/irq.h>
#define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
#define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
/* Address offset of Registers */
#define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
#define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
#define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
#define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
#define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
#define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
#define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
#define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
#define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
#define UDC_DEVCTL_ADDR 0x404 /* Device control */
#define UDC_DEVSTS_ADDR 0x408 /* Device status */
#define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
#define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
#define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
#define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
#define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
#define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
#define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
#define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
/* Endpoint control register */
/* Bit position */
#define UDC_EPCTL_MRXFLUSH (1 << 12)
#define UDC_EPCTL_RRDY (1 << 9)
#define UDC_EPCTL_CNAK (1 << 8)
#define UDC_EPCTL_SNAK (1 << 7)
#define UDC_EPCTL_NAK (1 << 6)
#define UDC_EPCTL_P (1 << 3)
#define UDC_EPCTL_F (1 << 1)
#define UDC_EPCTL_S (1 << 0)
#define UDC_EPCTL_ET_SHIFT 4
/* Mask pattern */
#define UDC_EPCTL_ET_MASK 0x00000030
/* Value for ET field */
#define UDC_EPCTL_ET_CONTROL 0
#define UDC_EPCTL_ET_ISO 1
#define UDC_EPCTL_ET_BULK 2
#define UDC_EPCTL_ET_INTERRUPT 3
/* Endpoint status register */
/* Bit position */
#define UDC_EPSTS_XFERDONE (1 << 27)
#define UDC_EPSTS_RSS (1 << 26)
#define UDC_EPSTS_RCS (1 << 25)
#define UDC_EPSTS_TXEMPTY (1 << 24)
#define UDC_EPSTS_TDC (1 << 10)
#define UDC_EPSTS_HE (1 << 9)
#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
#define UDC_EPSTS_BNA (1 << 7)
#define UDC_EPSTS_IN (1 << 6)
#define UDC_EPSTS_OUT_SHIFT 4
/* Mask pattern */
#define UDC_EPSTS_OUT_MASK 0x00000030
#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
/* Value for OUT field */
#define UDC_EPSTS_OUT_SETUP 2
#define UDC_EPSTS_OUT_DATA 1
/* Device configuration register */
/* Bit position */
#define UDC_DEVCFG_CSR_PRG (1 << 17)
#define UDC_DEVCFG_SP (1 << 3)
/* SPD Value */
#define UDC_DEVCFG_SPD_HS 0x0
#define UDC_DEVCFG_SPD_FS 0x1
#define UDC_DEVCFG_SPD_LS 0x2
/* Device control register */
/* Bit position */
#define UDC_DEVCTL_THLEN_SHIFT 24
#define UDC_DEVCTL_BRLEN_SHIFT 16
#define UDC_DEVCTL_CSR_DONE (1 << 13)
#define UDC_DEVCTL_SD (1 << 10)
#define UDC_DEVCTL_MODE (1 << 9)
#define UDC_DEVCTL_BREN (1 << 8)
#define UDC_DEVCTL_THE (1 << 7)
#define UDC_DEVCTL_DU (1 << 4)
#define UDC_DEVCTL_TDE (1 << 3)
#define UDC_DEVCTL_RDE (1 << 2)
#define UDC_DEVCTL_RES (1 << 0)
/* Device status register */
/* Bit position */
#define UDC_DEVSTS_TS_SHIFT 18
#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
#define UDC_DEVSTS_ALT_SHIFT 8
#define UDC_DEVSTS_INTF_SHIFT 4
#define UDC_DEVSTS_CFG_SHIFT 0
/* Mask pattern */
#define UDC_DEVSTS_TS_MASK 0xfffc0000
#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
#define UDC_DEVSTS_ALT_MASK 0x00000f00
#define UDC_DEVSTS_INTF_MASK 0x000000f0
#define UDC_DEVSTS_CFG_MASK 0x0000000f
/* value for maximum speed for SPEED field */
#define UDC_DEVSTS_ENUM_SPEED_FULL 1
#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
#define UDC_DEVSTS_ENUM_SPEED_LOW 2
#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
/* Device irq register */
/* Bit position */
#define UDC_DEVINT_RWKP (1 << 7)
#define UDC_DEVINT_ENUM (1 << 6)
#define UDC_DEVINT_SOF (1 << 5)
#define UDC_DEVINT_US (1 << 4)
#define UDC_DEVINT_UR (1 << 3)
#define UDC_DEVINT_ES (1 << 2)
#define UDC_DEVINT_SI (1 << 1)
#define UDC_DEVINT_SC (1 << 0)
/* Mask pattern */
#define UDC_DEVINT_MSK 0x7f
/* Endpoint irq register */
/* Bit position */
#define UDC_EPINT_IN_SHIFT 0
#define UDC_EPINT_OUT_SHIFT 16
#define UDC_EPINT_IN_EP0 (1 << 0)
#define UDC_EPINT_OUT_EP0 (1 << 16)
/* Mask pattern */
#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
/* UDC_CSR_BUSY Status register */
/* Bit position */
#define UDC_CSR_BUSY (1 << 0)
/* SOFT RESET register */
/* Bit position */
#define UDC_PSRST (1 << 1)
#define UDC_SRST (1 << 0)
/* USB_DEVICE endpoint register */
/* Bit position */
#define UDC_CSR_NE_NUM_SHIFT 0
#define UDC_CSR_NE_DIR_SHIFT 4
#define UDC_CSR_NE_TYPE_SHIFT 5
#define UDC_CSR_NE_CFG_SHIFT 7
#define UDC_CSR_NE_INTF_SHIFT 11
#define UDC_CSR_NE_ALT_SHIFT 15
#define UDC_CSR_NE_MAX_PKT_SHIFT 19
/* Mask pattern */
#define UDC_CSR_NE_NUM_MASK 0x0000000f
#define UDC_CSR_NE_DIR_MASK 0x00000010
#define UDC_CSR_NE_TYPE_MASK 0x00000060
#define UDC_CSR_NE_CFG_MASK 0x00000780
#define UDC_CSR_NE_INTF_MASK 0x00007800
#define UDC_CSR_NE_ALT_MASK 0x00078000
#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
#define PCH_UDC_EPINT(in, num)\
(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
/* Index of endpoint */
#define UDC_EP0IN_IDX 0
#define UDC_EP0OUT_IDX 1
#define UDC_EPIN_IDX(ep) (ep * 2)
#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
#define PCH_UDC_EP0 0
#define PCH_UDC_EP1 1
#define PCH_UDC_EP2 2
#define PCH_UDC_EP3 3
/* Number of endpoint */
#define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
#define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */
/* Length Value */
#define PCH_UDC_BRLEN 0x0F /* Burst length */
#define PCH_UDC_THLEN 0x1F /* Threshold length */
/* Value of EP Buffer Size */
#define UDC_EP0IN_BUFF_SIZE 16
#define UDC_EPIN_BUFF_SIZE 256
#define UDC_EP0OUT_BUFF_SIZE 16
#define UDC_EPOUT_BUFF_SIZE 256
/* Value of EP maximum packet size */
#define UDC_EP0IN_MAX_PKT_SIZE 64
#define UDC_EP0OUT_MAX_PKT_SIZE 64
#define UDC_BULK_MAX_PKT_SIZE 512
/* DMA */
#define DMA_DIR_RX 1 /* DMA for data receive */
#define DMA_DIR_TX 2 /* DMA for data transmit */
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
/**
 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
 *				  for data
 * @status:	Status quadlet (buffer state, Rx/Tx status, byte count)
 * @reserved:	Reserved
 * @dataptr:	Buffer descriptor (bus address of the data buffer)
 * @next:	Next descriptor (bus address; chained descriptors)
 *
 * Layout is dictated by the hardware; do not reorder or resize fields.
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
/**
 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
 *				 for control data
 * @status:	Status
 * @reserved:	Reserved
 * @request:	Control Request (SETUP packet written by the hardware)
 *
 * Layout is dictated by the hardware, hence packed. Note: the original
 * used the nonstandard spelling "__attribute"; the documented GNU keyword
 * is "__attribute__".
 */
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __attribute__((packed));
/* DMA status definitions */
/* Buffer status */
#define PCH_UDC_BUFF_STS 0xC0000000
#define PCH_UDC_BS_HST_RDY 0x00000000
#define PCH_UDC_BS_DMA_BSY 0x40000000
#define PCH_UDC_BS_DMA_DONE 0x80000000
#define PCH_UDC_BS_HST_BSY 0xC0000000
/* Rx/Tx Status */
#define PCH_UDC_RXTX_STS 0x30000000
#define PCH_UDC_RTS_SUCC 0x00000000
#define PCH_UDC_RTS_DESERR 0x10000000
#define PCH_UDC_RTS_BUFERR 0x30000000
/* Last Descriptor Indication */
#define PCH_UDC_DMA_LAST 0x08000000
/* Number of Rx/Tx Bytes Mask */
#define PCH_UDC_RXTX_BYTES 0x0000ffff
/**
 * struct pch_udc_cfg_data - Structure to hold current configuration
 *			     and interface information
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alt interface in use
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
/**
 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
 * @ep:			embedded ep request
 * @td_stp_phys:	DMA address of the setup-request descriptor
 * @td_data_phys:	DMA address of the data-request descriptor
 * @td_stp:		for setup request
 * @td_data:		for data request
 * @dev:		reference to device struct
 * @offset_addr:	offset address of ep register
 * @queue:		queue for requests
 * @num:		endpoint number
 * @in:			endpoint is IN
 * @halted:		endpoint halted?
 * @epsts:		Endpoint status
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		in:1,
		halted:1;
	unsigned long epsts;
};
/**
 * struct pch_vbus_gpio_data - Structure holding GPIO information
 *			       for detecting VBUS
 * @port:		gpio descriptor for the VBUS GPIO
 * @intr:		gpio interrupt number
 * @irq_work_fall:	Structure for WorkQueue (falling edge / VBUS off)
 * @irq_work_rise:	Structure for WorkQueue (rising edge / VBUS on)
 */
struct pch_vbus_gpio_data {
	struct gpio_desc *port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
/**
 * struct pch_udc_dev - Structure holding complete information
 *			of the PCH USB device
 * @gadget:		gadget driver data
 * @driver:		reference to gadget driver bound
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		driver in suspended state
 * @connected:		gadget driver associated
 * @vbus_session:	required vbus_session state
 * @set_cfg_not_acked:	pending acknowledgement for SET_CONFIGURATION
 * @waiting_zlp_ack:	pending acknowledgement for a zero-length packet
 * @data_requests:	DMA pool for data requests
 * @stp_requests:	DMA pool for setup requests
 * @dma_addr:		DMA pool for received
 * @setup_data:		Received setup data
 * @base_addr:		for mapped device memory
 * @bar:		PCI BAR used for mapped device memory
 * @cfg_data:		current cfg, intf, and alt in use
 * @vbus_gpio:		GPIO information for detecting VBUS
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock; /* protects all state */
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct dma_pool		*data_requests;
	struct dma_pool		*stp_requests;
	dma_addr_t			dma_addr;
	struct usb_ctrlrequest		setup_data;
	void __iomem			*base_addr;
	unsigned short			bar;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
#define PCH_UDC_PCI_BAR_QUARK_X1000 0
#define PCH_UDC_PCI_BAR 1
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
/**
 * struct pch_udc_request - Structure holding a PCH USB device request packet
 * @req:		embedded ep request
 * @td_data_phys:	phys. address
 * @td_data:		first dma desc. of chain
 * @td_data_last:	last dma desc. of chain
 * @queue:		associated queue
 * @dma_going:		DMA in progress for request
 * @dma_done:		DMA completed for request
 * @chain_len:		chain length
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_done:1;
	unsigned			chain_len;
};
/* Read a 32-bit device register at @reg offset from the mapped base. */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}
/* Write @val to the 32-bit device register at @reg offset. */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
unsigned long reg,
unsigned long bitmask)
{
pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
unsigned long reg,
unsigned long bitmask)
{
pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}
/* Read a 32-bit register of this endpoint's register window. */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}
/* Write @val to a 32-bit register of this endpoint's register window. */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
unsigned long reg,
unsigned long bitmask)
{
pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
unsigned long reg,
unsigned long bitmask)
{
pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
/**
 * pch_udc_csr_busy() - Wait till idle.
 * @dev:	Reference to pch_udc_dev structure
 *
 * Polls the CSR busy flag; logs an error if it does not clear within
 * the bounded number of retries.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int retries = 200;

	while (pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY) {
		if (!--retries) {
			dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
			return;
		}
		cpu_relax();
	}
}
/**
 * pch_udc_write_csr() - Write the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written to CSR register
 * @ep:	end-point number
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);
	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
/**
 * pch_udc_read_csr() - Read the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @ep:	end-point number
 *
 * Return: content of CSR register
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);
	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
/**
 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
 * @dev:	Reference to pch_udc_dev structure
 *
 * Pulses the resume-signalling bit for ~1 ms to wake the host.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
/**
 * pch_udc_get_frame() - Get the current frame from device status register
 * @dev:	Reference to pch_udc_dev structure
 *
 * Return: the current frame number
 */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 devsts = pch_udc_readl(dev, UDC_DEVSTS_ADDR);

	return (devsts & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}
/**
 * pch_udc_clear_selfpowered() - Clear the self power control
 * @dev:	Reference to pch_udc_regs structure
 */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
/**
 * pch_udc_set_selfpowered() - Set the self power control
 * @dev:	Reference to pch_udc_regs structure
 */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
/**
 * pch_udc_set_disconnect() - Set the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 *
 * Soft-disconnects the device from the bus.
 */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}
/**
 * pch_udc_clear_disconnect() - Clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 *
 * Sequence matters here: resume is asserted while soft-disconnect is
 * cleared, held for 1 ms, then released.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
static void pch_udc_init(struct pch_udc_dev *dev);
/**
 * pch_udc_reconnect() - This API initializes usb device controller,
 *			 and clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	/* enable device interrupts */
	/* pch_udc_enable_interrupts() */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
/**
 * pch_udc_vbus_session() - set or clear the disconnect status.
 * @dev:	Reference to pch_udc_regs structure
 * @is_active:	Parameter specifying the action
 *		  0:   indicating VBUS power is ending
 *		  !0:  indicating VBUS power is starting
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					  int is_active)
{
	unsigned long		iflags;
	spin_lock_irqsave(&dev->lock, iflags);
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		/*
		 * drop the lock around the gadget callback; the driver's
		 * disconnect handler may itself take dev->lock
		 */
		if (dev->driver && dev->driver->disconnect) {
			spin_unlock_irqrestore(&dev->lock, iflags);
			dev->driver->disconnect(&dev->gadget);
			spin_lock_irqsave(&dev->lock, iflags);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
	spin_unlock_irqrestore(&dev->lock, iflags);
}
/**
 * pch_udc_ep_set_stall() - Set the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 *
 * IN endpoints flush the TX FIFO before the stall bit is set; OUT
 * endpoints only need the stall bit.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in)
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
}
/**
 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
/**
 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 * @type:	Type of endpoint (one of the UDC_EPCTL_ET_* values)
 *
 * Note: this is a plain write, so all other EPCTL bits are cleared.
 */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}
/**
 * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @buf_size:	The buffer word size
 * @ep_in:	EP is IN
 *
 * IN endpoints keep the buffer size in the low halfword of the
 * BUFIN_FRAMENUM register; OUT endpoints keep it in the high halfword
 * of the BUFOUT_MAXPKT register.
 */
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
						 u32 buf_size, u32 ep_in)
{
	unsigned long reg = ep_in ? UDC_BUFIN_FRAMENUM_ADDR :
				    UDC_BUFOUT_MAXPKT_ADDR;
	u32 val = pch_udc_ep_readl(ep, reg);

	if (ep_in)
		val = (val & 0xffff0000) | (buf_size & 0xffff);
	else
		val = (buf_size << 16) | (val & 0xffff);
	pch_udc_ep_writel(ep, val, reg);
}
/**
 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 * @pkt_size:	The packet byte size
 */
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
	/* max packet size lives in the low halfword; keep the upper half */
	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
	data = (data & 0xffff0000) | (pkt_size & 0xffff);
	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
}
/**
 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA (bus) address of the setup buffer
 */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}
/**
 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA (bus) address of the data descriptor
 */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}
/**
 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}
/**
 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
/**
 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
/**
 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
 *			register depending on the direction specified
 * @dev:	Reference to structure of type pch_udc_regs
 * @dir:	whether Tx or Rx
 *		  DMA_DIR_RX: Receive
 *		  DMA_DIR_TX: Transmit
 *
 * Any other @dir value is silently ignored.
 */
static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
{
	switch (dir) {
	case DMA_DIR_RX:
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
		break;
	case DMA_DIR_TX:
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
		break;
	}
}
/**
 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
 *			 register depending on the direction specified
 * @dev:	Reference to structure of type pch_udc_regs
 * @dir:	Whether Tx or Rx
 *		  DMA_DIR_RX: Receive
 *		  DMA_DIR_TX: Transmit
 *
 * Any other @dir value is silently ignored.
 */
static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
{
	switch (dir) {
	case DMA_DIR_RX:
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
		break;
	case DMA_DIR_TX:
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
		break;
	}
}
/**
 * pch_udc_set_csr_done() - Set the device control register
 *				CSR done field (bit 13)
 * @dev:	reference to structure of type pch_udc_regs
 */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
/**
 * pch_udc_disable_interrupts() - Disables the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts
 *
 * Setting a mask bit disables the corresponding interrupt.
 */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					    u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}
/**
 * pch_udc_enable_interrupts() - Enable the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts
 *
 * Clearing a mask bit enables the corresponding interrupt.
 */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					   u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}
/**
 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts
 */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}
/**
 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts
 */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
/**
 * pch_udc_read_device_interrupts() - Read the device interrupts
 * @dev:	Reference to structure of type pch_udc_dev
 *
 * Return: contents of the device interrupt status register
 */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}
/**
 * pch_udc_write_device_interrupts() - Write device interrupts
 * @dev:	Reference to structure of type pch_udc_dev
 * @val:	The value to be written to the interrupt status register
 *		(used by callers to acknowledge/clear pending interrupts)
 */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						 u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}
/**
 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_dev
 *
 * Return: contents of the endpoint interrupt status register
 */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}
/**
 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_dev
 * @val:	The value to be written to the endpoint interrupt status
 *		register (acknowledges the corresponding interrupts)
 */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					     u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}
/**
 * pch_udc_read_device_status() - Read the device status
 * @dev:	Reference to structure of type pch_udc_dev
 *
 * Return: contents of the device status register
 */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}
/**
 * pch_udc_read_ep_control() - Read the endpoint control register
 * @ep:	Reference to structure of type pch_udc_ep
 *
 * Return: the endpoint control register value
 */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
/**
 * pch_udc_clear_ep_control() - Zero the endpoint control register
 * @ep:	Reference to structure of type pch_udc_ep
 *
 * The function returns nothing; the previous kernel-doc wrongly claimed a
 * return value, and the body returned a void expression, which is not
 * valid standard C (and is flagged by checkpatch).
 */
static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
{
	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
}
/**
 * pch_udc_read_ep_status() - Read the endpoint status
 * @ep:	Reference to structure of type pch_udc_ep
 *
 * Return: the endpoint status register value
 */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}
/**
 * pch_udc_clear_ep_status() - Clear the endpoint status
 * @ep:	Reference to structure of type pch_udc_ep
 * @stat:	Status bits to write back (acknowledge) in UDC_EPSTS_ADDR
 *
 * Drops the invalid `return` of a void expression (not standard C for a
 * void function); behavior is unchanged.
 */
static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
					 u32 stat)
{
	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
}
/**
 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
 *				of the endpoint control register
 * @ep:	Reference to structure of type pch_udc_ep
 *
 * Makes the endpoint NAK further tokens from the host until cleared
 * again via pch_udc_ep_clear_nak().
 */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
/**
 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
 *				of the endpoint control register
 * @ep:	reference to structure of type pch_udc_ep
 *
 * Clears a pending NAK condition.  For OUT endpoints the RxFIFO is
 * drained first, and the CNAK write is repeated until the NAK bit reads
 * back clear — the hardware may not take the first write.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	/* Nothing to do if the endpoint is not currently NAKing */
	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		/* OUT endpoint: wait (up to 10000 * 5us) for RxFIFO empty */
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	/* Keep writing CNAK until the NAK bit actually clears */
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
/**
 * pch_udc_ep_fifo_flush() - Flush the endpoint FIFO
 * @ep:	reference to structure of type pch_udc_ep
 * @dir:	direction of endpoint
 *		0: endpoint is OUT
 *		!0: endpoint is IN
 *
 * Only IN endpoints are flushed (via the F bit of the endpoint control
 * register); OUT endpoints are left untouched.
 */
static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
{
	if (!dir)	/* OUT endpoint: nothing to flush here */
		return;
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
}
/**
 * pch_udc_ep_enable() - This api enables endpoint
 * @ep:	reference to structure of type pch_udc_ep
 * @cfg:	current configuration information (config/interface/alt)
 * @desc:	endpoint descriptor
 *
 * Programs transfer type, buffer size and max packet size, NAKs the
 * endpoint, flushes its FIFO and writes the per-endpoint UDC CSR entry.
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			       struct pch_udc_cfg_data *cfg,
			       const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;
	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	/* IN and OUT endpoints use different hardware buffer sizes */
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Configure the endpoint: pack number, direction, transfer type,
	 * current config/interface/alt-setting and max packet size into
	 * the CSR network-endpoint word.
	 */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	       ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	       (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	       (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	       (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	       usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
/**
 * pch_udc_ep_disable() - This api disables endpoint
 * @ep:	reference to structure of type pch_udc_ep
 *
 * IN endpoints get their FIFO flushed and the IN status bit acknowledged
 * in addition to being NAKed; both directions get the DMA descriptor
 * pointer reset.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		/* acknowledge any pending IN status */
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
/**
 * pch_udc_wait_ep_stall() - Busy-wait until the endpoint leaves the
 *			     stalled state (S bit clears)
 * @ep:	reference to structure of type pch_udc_ep
 *
 * Polls the endpoint control register with 5us delays; logs an error if
 * the endpoint is still stalled after 10000 attempts.
 */
static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
{
	unsigned int attempts = 10000;

	for (;;) {
		if (!(pch_udc_read_ep_control(ep) & UDC_EPCTL_S))
			return;		/* idle - done waiting */
		if (!--attempts)
			break;		/* timed out */
		udelay(5);
	}
	dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
}
/**
 * pch_udc_init() - This API initializes usb device controller
 * @dev:	Reference to pch_udc_dev structure
 *
 * Soft-resets the controller and PHY, masks and acknowledges all device
 * and endpoint interrupts, then programs the configuration and control
 * registers (speed, CSR programming, burst/threshold lengths).
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft Reset and Reset PHY */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
	/* mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* enable dynamic CSR programming, self powered and device speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* default: high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	/* threshold/burst lengths, DMA mode, burst and threshold enable */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
/**
 * pch_udc_exit() - This API exits usb device controller
 * @dev:	Reference to pch_udc_dev structure
 *
 * Masks all device and endpoint interrupts and soft-disconnects from
 * the host.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* put device in disconnected state */
	pch_udc_set_disconnect(dev);
}
/**
* pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
* @gadget: Reference to the gadget driver
*
* Return codes:
* 0: Success
* -EINVAL: If the gadget passed is NULL
*/
static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
{
struct pch_udc_dev *dev;
if (!gadget)
return -EINVAL;
dev = container_of(gadget, struct pch_udc_dev, gadget);
return pch_udc_get_frame(dev);
}
/**
* pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
* @gadget: Reference to the gadget driver
*
* Return codes:
* 0: Success
* -EINVAL: If the gadget passed is NULL
*/
static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
{
struct pch_udc_dev *dev;
unsigned long flags;
if (!gadget)
return -EINVAL;
dev = container_of(gadget, struct pch_udc_dev, gadget);
spin_lock_irqsave(&dev->lock, flags);
pch_udc_rmt_wakeup(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/**
* pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
* is self powered or not
* @gadget: Reference to the gadget driver
* @value: Specifies self powered or not
*
* Return codes:
* 0: Success
* -EINVAL: If the gadget passed is NULL
*/
static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
{
struct pch_udc_dev *dev;
if (!gadget)
return -EINVAL;
gadget->is_selfpowered = (value != 0);
dev = container_of(gadget, struct pch_udc_dev, gadget);
if (value)
pch_udc_set_selfpowered(dev);
else
pch_udc_clear_selfpowered(dev);
return 0;
}
/**
 * pch_udc_pcd_pullup() - This API is invoked to make the device
 *				visible/invisible to the host
 * @gadget:	Reference to the gadget driver
 * @is_on:	Specifies whether the pull up is made active or inactive
 *
 * On disconnect the gadget driver's disconnect callback is invoked with
 * the device lock temporarily dropped (callbacks may sleep or re-enter
 * the driver), then the soft-disconnect bit is set.
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	If the gadget passed is NULL
 */
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	spin_lock_irqsave(&dev->lock, iflags);
	if (is_on) {
		pch_udc_reconnect(dev);
	} else {
		if (dev->driver && dev->driver->disconnect) {
			/* drop the lock around the gadget callback */
			spin_unlock_irqrestore(&dev->lock, iflags);
			dev->driver->disconnect(&dev->gadget);
			spin_lock_irqsave(&dev->lock, iflags);
		}
		pch_udc_set_disconnect(dev);
	}
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
/**
* pch_udc_pcd_vbus_session() - This API is used by a driver for an external
* transceiver (or GPIO) that
* detects a VBUS power session starting/ending
* @gadget: Reference to the gadget driver
* @is_active: specifies whether the session is starting or ending
*
* Return codes:
* 0: Success
* -EINVAL: If the gadget passed is NULL
*/
static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
{
struct pch_udc_dev *dev;
if (!gadget)
return -EINVAL;
dev = container_of(gadget, struct pch_udc_dev, gadget);
pch_udc_vbus_session(dev, is_active);
return 0;
}
/**
 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
 *				SET_CONFIGURATION calls to
 *				specify how much power the device can consume
 * @gadget:	Reference to the gadget driver (unused)
 * @mA:		specifies the current limit in 2mA unit (unused)
 *
 * Limiting VBUS current draw is not supported by this controller.
 *
 * Return codes:
 *	-EOPNOTSUPP:	always
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
/* Forward declarations: defined after the ops table they appear in */
static int pch_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int pch_udc_stop(struct usb_gadget *g);

/* Gadget-level callbacks registered with the UDC core */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
/**
 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS
 *			       status.
 * @dev:	Reference to the driver structure
 *
 * Return value:
 *	1: VBUS is high
 *	0: VBUS is low
 *	-1: VBUS detection via GPIO is not available
 */
static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
{
	if (!dev->vbus_gpio.port)
		return -1;	/* no VBUS-sense GPIO configured */
	return gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
}
/**
 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
 *			       If VBUS is Low, disconnect is processed
 * @irq_work:	Structure for WorkQueue
 *
 * Samples the VBUS GPIO every PCH_VBUS_INTERVAL ms for up to
 * PCH_VBUS_PERIOD ms; a disconnect is only processed once two
 * consecutive samples both read low (simple debounce).
 */
static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
{
	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
		struct pch_vbus_gpio_data, irq_work_fall);
	struct pch_udc_dev *dev =
		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
	int vbus_saved = -1;	/* previous sample; -1 = none yet */
	int vbus;
	int count;

	if (!dev->vbus_gpio.port)
		return;
	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
		count++) {
		vbus = pch_vbus_gpio_get_value(dev);
		/* two consecutive low samples -> treat as real fall */
		if ((vbus_saved == vbus) && (vbus == 0)) {
			dev_dbg(&dev->pdev->dev, "VBUS fell");
			if (dev->driver
				&& dev->driver->disconnect) {
				dev->driver->disconnect(
					&dev->gadget);
			}
			if (dev->vbus_gpio.intr)
				pch_udc_init(dev);
			else
				pch_udc_reconnect(dev);
			return;
		}
		vbus_saved = vbus;
		mdelay(PCH_VBUS_INTERVAL);
	}
}
/**
 * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
 *			       If VBUS is High, connect is processed
 * @irq_work:	Structure for WorkQueue
 */
static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
{
	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
		struct pch_vbus_gpio_data, irq_work_rise);
	struct pch_udc_dev *dev =
		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);

	if (!dev->vbus_gpio.port)
		return;
	/* Let the line settle, then sample once */
	mdelay(PCH_VBUS_INTERVAL);
	if (pch_vbus_gpio_get_value(dev) == 1) {
		dev_dbg(&dev->pdev->dev, "VBUS rose");
		pch_udc_reconnect(dev);
	}
}
/**
 * pch_vbus_gpio_irq() - IRQ handler for the VBUS-change GPIO interrupt
 * @irq:	Interrupt request number
 * @data:	Reference to the device structure
 *
 * Return: IRQ_HANDLED when the work was scheduled, IRQ_NONE when VBUS
 * detection by GPIO interrupt is not configured.
 */
static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
{
	struct pch_udc_dev *dev = data;

	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
		return IRQ_NONE;
	/* Defer (dis)connect handling to process context */
	if (pch_vbus_gpio_get_value(dev))
		schedule_work(&dev->vbus_gpio.irq_work_rise);
	else
		schedule_work(&dev->vbus_gpio.irq_work_fall);
	return IRQ_HANDLED;
}
/**
 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
 * @dev:	Reference to the driver structure
 *
 * Looks up an optional VBUS-sense GPIO and, when it maps to an IRQ,
 * installs an edge-triggered interrupt handler.  If the IRQ request
 * fails, the error is only logged and 0 is still returned: the driver
 * falls back to the polling work (irq_work_fall) alone.
 *
 * Return codes:
 *	0:		Success (including "no GPIO configured")
 *	negative:	gpiod lookup failed
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
{
	struct device *d = &dev->pdev->dev;
	int err;
	int irq_num = 0;
	struct gpio_desc *gpiod;

	dev->vbus_gpio.port = NULL;
	dev->vbus_gpio.intr = 0;
	/* Retrieve the GPIO line from the USB gadget device */
	gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);
	gpiod_set_consumer_name(gpiod, "pch_vbus");
	dev->vbus_gpio.port = gpiod;
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
	irq_num = gpiod_to_irq(gpiod);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
				  "vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				  pch_vbus_gpio_work_rise);
		} else {
			/* non-fatal: fall back to polling-only detection */
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}
	return 0;
}
/**
 * pch_vbus_gpio_free() - This API frees resources of GPIO port
 * @dev:	Reference to the driver structure
 *
 * The GPIO itself is devm-managed; only the IRQ (if one was requested
 * in pch_vbus_gpio_init()) needs explicit release.
 */
static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
	if (dev->vbus_gpio.intr)
		free_irq(dev->vbus_gpio.intr, dev);
}
/**
 * complete_req() - This API is invoked from the driver when processing
 *			of a request is complete
 * @ep:	Reference to the endpoint structure
 * @req:	Reference to the request structure
 * @status:	Indicates the success/failure of completion
 *
 * Removes @req from the endpoint queue, unmaps its DMA buffer and hands
 * it back to the gadget driver.  The device lock is dropped around the
 * completion callback (hence the __releases/__acquires annotations);
 * ep->halted is forced to 1 meanwhile so no new transfer starts, then
 * restored.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
								 int status)
__releases(&dev->lock)
__acquires(&dev->lock)
{
	struct pch_udc_dev *dev;
	unsigned halted = ep->halted;	/* remember caller's halt state */

	list_del_init(&req->queue);
	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
	ep->halted = 1;
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
/**
 * empty_req_queue() - This API empties the request queue of an endpoint
 * @ep:	Reference to the endpoint structure
 *
 * Halts the endpoint and completes every queued request with -ESHUTDOWN;
 * complete_req() unlinks each request as it goes.
 */
static void empty_req_queue(struct pch_udc_ep *ep)
{
	struct pch_udc_request *pending;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		pending = list_first_entry(&ep->queue,
					   struct pch_udc_request, queue);
		complete_req(ep, pending, -ESHUTDOWN);	/* Remove from list */
	}
}
/**
 * pch_udc_free_dma_chain() - This function frees the DMA chain created
 *				for the request
 * @dev:	Reference to the driver structure
 * @req:	Reference to the request to be freed
 *
 * Walks the singly-linked descriptor chain starting after the first
 * descriptor and returns each one to the DMA pool.  The first descriptor
 * is deliberately kept: it is owned by the request itself and freed in
 * pch_udc_free_request().  On exit req->chain_len is reset to 1.
 */
static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
				   struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td = req->td_data;
	unsigned i = req->chain_len;

	dma_addr_t addr2;
	dma_addr_t addr = (dma_addr_t)td->next;
	td->next = 0x00;
	for (; i > 1; --i) {
		/* do not free first desc., will be done by free for request */
		td = phys_to_virt(addr);
		addr2 = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr2;
	}
	req->chain_len = 1;
}
/**
 * pch_udc_create_dma_chain() - This function creates or reinitializes
 *				a DMA chain
 * @ep:	Reference to the endpoint structure
 * @req:	Reference to the request
 * @buf_len:	The per-descriptor buffer length (max packet size)
 * @gfp_flags:	Flags to be used while allocating descriptors
 *
 * Splits req->req.length bytes into descriptors of at most @buf_len
 * bytes each.  The first descriptor is the one embedded in the request;
 * extra ones come from the DMA pool.  The last descriptor is flagged
 * PCH_UDC_DMA_LAST and its next pointer wraps back to the head.
 * All descriptors are left in the HOST BUSY state.
 *
 * Return codes:
 *	0:		success,
 *	-ENOMEM:	dma_pool_alloc invocation fails
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	/* drop any chain from a previous use of this request */
	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);
	req->td_data->dataptr = req->req.dma;
	td->status = PCH_UDC_BS_HST_BSY;
	for (; ; bytes -= buf_len, ++len) {
		/* each descriptor covers at most buf_len bytes */
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	td->next = req->td_data_phys;	/* circular: tail points at head */
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
/**
 * prepare_dma() - This function creates and initializes the DMA chain
 *			for the request
 * @ep:	Reference to the endpoint structure
 * @req:	Reference to the request
 * @gfp:	Flag to be used while allocating descriptors
 *
 * Builds the descriptor chain (one descriptor per max-packet) and, for
 * IN endpoints, marks the head descriptor host-ready so the hardware may
 * start fetching as soon as the transfer is kicked off.
 *
 * Return codes:
 *	0:		Success
 *	Other:		linux error number on failure
 */
static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
			  gfp_t gfp)
{
	int retval;

	/* Allocate and create a DMA chain */
	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
	if (retval) {
		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
		return retval;
	}
	if (ep->in)
		req->td_data->status = (req->td_data->status &
					~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
	return 0;
}
/**
 * process_zlp() - This function process zero length packets
 *			from the gadget driver
 * @ep:	Reference to the endpoint structure
 * @req:	Reference to the request
 *
 * Completes the request immediately (IN ZLPs are generated by hardware)
 * and performs the control-transfer bookkeeping that a ZLP acknowledges:
 * CSR_DONE for a pending SET_CONFIG/SET_INTF, and un-NAKing ep0-in when
 * a setup transaction was waiting for its ZLP ack.
 */
static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
{
	struct pch_udc_dev *dev = ep->dev;

	/* IN zlp's are handled by hardware */
	complete_req(ep, req, 0);

	/* if set_config or set_intf is waiting for ack by zlp
	 * then set CSR_DONE
	 */
	if (dev->set_cfg_not_acked) {
		pch_udc_set_csr_done(dev);
		dev->set_cfg_not_acked = 0;
	}
	/* setup command is ACK'ed now by zlp */
	if (!dev->stall && dev->waiting_zlp_ack) {
		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
		dev->waiting_zlp_ack = 0;
	}
}
/**
 * pch_udc_start_rxrequest() - This function starts the receive requirement.
 * @ep:	Reference to the endpoint structure
 * @req:	Reference to the request structure
 *
 * Pauses RX DMA, marks every descriptor of the request's chain
 * host-ready, programs the descriptor pointer, then re-enables RX DMA,
 * un-NAKs the endpoint and sets receive-ready.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
					 struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	/* quiesce RX DMA while the chain is being handed over */
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	/* Set the status bits for all descriptors */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				    PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* Write the descriptor pointer */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
/**
 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
 *				from gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @desc:	Reference to the USB endpoint descriptor structure
 *
 * Rejects ep0 (managed internally), malformed descriptors, and calls
 * made before a gadget driver is bound / speed is known.
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	invalid arguments
 *	-ESHUTDOWN:	no driver bound or speed unknown
 */
static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
				    const struct usb_endpoint_descriptor *desc)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep || (usbep->name == ep0_string) || !desc ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;
	ep->halted = 0;
	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
/**
 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
 *				from gadget driver
 * @usbep:	Reference to the USB endpoint structure
 *
 * Flushes all queued requests (completed with -ESHUTDOWN), disables the
 * hardware endpoint and its interrupts, and clears the descriptor.
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	ep0 or endpoint not enabled
 */
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	if ((usbep->name == ep0_string) || !ep->ep.desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, iflags);
	empty_req_queue(ep);
	ep->halted = 1;
	pch_udc_ep_disable(ep);
	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	ep->ep.desc = NULL;
	INIT_LIST_HEAD(&ep->queue);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
/**
 * pch_udc_alloc_request() - This function allocates request structure.
 *				It is called by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @gfp:	Flag to be used while allocating memory
 *
 * Besides the request itself, one DMA descriptor is allocated from the
 * pool and parked in the HOST BUSY state so the hardware cannot touch it
 * until a transfer is actually queued.
 *
 * Return codes:
 *	NULL:			Failure
 *	Allocated address:	Success
 */
static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
						  gfp_t gfp)
{
	struct pch_udc_request *req;
	struct pch_udc_ep *ep;
	struct pch_udc_data_dma_desc *dma_desc;

	if (!usbep)
		return NULL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	req = kzalloc(sizeof *req, gfp);
	if (!req)
		return NULL;
	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);
	if (!ep->dev->dma_addr)
		return &req->req;	/* controller not using DMA */
	/* ep0 in requests are allocated from data pool here */
	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
				  &req->td_data_phys);
	if (NULL == dma_desc) {
		kfree(req);
		return NULL;
	}
	/* prevent from using desc. - set HOST BUSY */
	dma_desc->status |= PCH_UDC_BS_HST_BSY;
	dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
	req->td_data = dma_desc;
	req->td_data_last = dma_desc;
	req->chain_len = 1;
	return &req->req;
}
/**
 * pch_udc_free_request() - This function frees request structure.
 *				It is called by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 *
 * Frees the descriptor chain (if any), the head descriptor and the
 * request itself.  Freeing a request that is still queued is a caller
 * bug and is logged.
 */
static void pch_udc_free_request(struct usb_ep *usbep,
				  struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	struct pch_udc_dev *dev;

	if (!usbep || !usbreq)
		return;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	req = container_of(usbreq, struct pch_udc_request, req);
	dev = ep->dev;
	if (!list_empty(&req->queue))
		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
			__func__, usbep->name, req);
	if (req->td_data != NULL) {
		if (req->chain_len > 1)
			pch_udc_free_dma_chain(ep->dev, req);
		dma_pool_free(ep->dev->data_requests, req->td_data,
			      req->td_data_phys);
	}
	kfree(req);
}
/**
 * pch_udc_pcd_queue() - This function queues a request packet. It is called
 *			 by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 * @gfp:	Flag to be used while mapping the data buffer
 *
 * Maps the buffer for DMA and builds the descriptor chain.  If the
 * endpoint is idle the transfer is started immediately: zero-length
 * requests complete via process_zlp() without queueing, OUT requests
 * start RX DMA, IN requests only un-NAK the endpoint (descriptors are
 * programmed when the IN token arrives).
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
								 gfp_t gfp)
{
	int retval = 0;
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	struct pch_udc_request *req;
	unsigned long iflags;

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && ep->num)
		return -EINVAL;
	req = container_of(usbreq, struct pch_udc_request, req);
	if (!list_empty(&req->queue))
		return -EINVAL;		/* already queued */
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	/* map the buffer for dma */
	retval = usb_gadget_map_request(&dev->gadget, usbreq, ep->in);
	if (retval)
		goto probe_end;
	if (usbreq->length > 0) {
		retval = prepare_dma(ep, req, GFP_ATOMIC);
		if (retval)
			goto probe_end;
	}
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;
	if (list_empty(&ep->queue) && !ep->halted) {
		/* no pending transfer, so start this req */
		if (!usbreq->length) {
			process_zlp(ep, req);
			retval = 0;
			goto probe_end;
		}
		if (!ep->in) {
			pch_udc_start_rxrequest(ep, req);
		} else {
			/*
			* For IN trfr the descriptors will be programmed and
			* P bit will be set when
			* we get an IN token
			*/
			pch_udc_wait_ep_stall(ep);
			pch_udc_ep_clear_nak(ep);
			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
		}
	}
	/* Now add this request to the ep's pending requests */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

probe_end:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
/**
 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
 *				It is called by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 *
 * Fixes: the original derived @ep from @usbep via container_of() and the
 * validity check then dereferenced it before @usbep had been verified
 * non-NULL (pointer arithmetic/deref on a potentially NULL pointer); it
 * also assigned @req from container_of(@usbreq) only to overwrite it
 * immediately in list_for_each_entry() (dead store).  Arguments are now
 * validated first and the dead assignment is gone.
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	invalid arguments, or request not queued here
 */
static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
				struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	unsigned long flags;
	int ret = -EINVAL;

	if (!usbep || !usbreq)
		return ret;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && ep->num)
		return ret;
	spin_lock_irqsave(&ep->dev->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == usbreq) {
			pch_udc_ep_set_nak(ep);
			if (!list_empty(&req->queue))
				complete_req(ep, req, -ECONNRESET);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return ret;
}
/**
 * pch_udc_pcd_set_halt() - This function Sets or clear the endpoint halt
 *			    feature
 * @usbep:	Reference to the USB endpoint structure
 * @halt:	Specifies whether to set or clear the feature
 *
 * Only acts while the endpoint queue is empty; otherwise -EAGAIN.  Uses
 * the global udc_stall_spinlock (not the device lock) to serialize stall
 * state changes.
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (list_empty(&ep->queue)) {
		if (halt) {
			/* ep0 stall is tracked separately in dev->stall */
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(
				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;	/* transfers pending; cannot change halt */
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
/**
 * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
 *			     (a halt that the host cannot clear)
 * @usbep:	Reference to the USB endpoint structure
 *
 * Like set_halt(1), but additionally raises dev->prot_stall so the stall
 * persists against CLEAR_FEATURE from the host.
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;	/* transfers pending; cannot wedge now */
	} else {
		if (ep->num == PCH_UDC_EP0)
			ep->dev->stall = 1;
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		ep->dev->prot_stall = 1;	/* wedge: host can't clear */
		ret = 0;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
/**
* pch_udc_pcd_fifo_flush() - This function Flush the FIFO of specified endpoint
* @usbep: Reference to the USB endpoint structure
*/
static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
{
struct pch_udc_ep *ep;
if (!usbep)
return;
ep = container_of(usbep, struct pch_udc_ep, ep);
if (ep->ep.desc || !ep->num)
pch_udc_ep_fifo_flush(ep, ep->in);
}
/* Per-endpoint callbacks registered with the gadget core.
 * fifo_status is not supported by this controller.
 */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable		= pch_udc_pcd_ep_enable,
	.disable	= pch_udc_pcd_ep_disable,
	.alloc_request	= pch_udc_alloc_request,
	.free_request	= pch_udc_free_request,
	.queue		= pch_udc_pcd_queue,
	.dequeue	= pch_udc_pcd_dequeue,
	.set_halt	= pch_udc_pcd_set_halt,
	.set_wedge	= pch_udc_pcd_set_wedge,
	.fifo_status	= NULL,
	.fifo_flush	= pch_udc_pcd_fifo_flush,
};
/**
 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
 * @td_stp:	Reference to the SETUP buffer structure
 *
 * Poisons the request area with 0xFF and marks the descriptor host-ready
 * so the hardware may write the next SETUP packet into it.  The
 * incrementing value stored in the reserved field appears to be a debug
 * marker only — TODO confirm it has no hardware meaning.
 */
static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
{
	static u32 pky_marker;

	if (!td_stp)
		return;
	td_stp->reserved = ++pky_marker;
	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
	td_stp->status = PCH_UDC_BS_HST_RDY;
}
/**
 * pch_udc_start_next_txrequest() - This function starts
 *					the next transmission requirement
 * @ep:	Reference to the endpoint structure
 *
 * If no transfer is in flight (P bit clear) and the head request of the
 * queue has not been started yet, marks its whole descriptor chain
 * host-ready, programs the descriptor pointer and kicks off TX DMA.
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;		/* poll-demand still pending */

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;		/* already started */
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	/* hand every descriptor of the chain over to the hardware */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				   PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
/**
 * pch_udc_complete_transfer() - This function completes a transfer
 * @ep:	Reference to the endpoint structure
 *
 * Completes the head (IN) request once its last descriptor reports
 * DMA-done with a successful RX/TX status, then either restarts the
 * endpoint for the next queued request or quiets its interrupts.
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;		/* hardware not finished yet */
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
		       (int)(ep->epsts));
		return;
	}

	req->req.actual = req->req.length;
	/* reclaim the descriptors: back to host-busy, single "last" desc */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
/**
 * pch_udc_complete_receiver() - This function completes a receiver
 * @ep:	Reference to the endpoint structure
 *
 * Stops RX DMA, walks the request's descriptor chain to find the
 * DMA_LAST descriptor, validates each descriptor's RX/TX status and
 * extracts the received byte count, then completes the request and
 * starts the next queued RX request if any.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;
	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* short-cut: if the tail is already done, start the walk there */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				/* byte count lives in the last descriptor */
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}
	/* on 64k packets the RXBYTES field is zero */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;	/* reclaim from hardware */

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);
	/* If there is a new/failed requests try that now */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
/**
 * pch_udc_svc_data_in() - This function process endpoint interrupts
 *				for IN endpoints
 * @dev:	Reference to the device structure
 * @ep_num:	Endpoint that generated the interrupt
 *
 * Consumes the status latched into ep->epsts by the ISR: ignores BNA and
 * hardware errors, mirrors host stall requests (RSS/RCS), completes a
 * finished transfer on TDC and starts the next queued TX request on a
 * plain IN token.
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	/* consume the status snapshot taken in pch_udc_read_all_epstatus() */
	epsts = ep->epsts;
	ep->epsts = 0;
	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and hardware errors: nothing to service */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host asked for a stall on this endpoint */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			/* protocol stall still pending: keep EP stalled */
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
					PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
/**
 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
 * @dev:	Reference to the device structure
 * @ep_num:	Endpoint that generated the interrupt
 *
 * Consumes the status latched into ep->epsts by the ISR: restarts the
 * head request on buffer-not-available, mirrors host stall requests
 * (RSS/RCS), completes received data, and re-enables RX DMA once the
 * queue has drained.
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;
	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	/* consume the status snapshot taken in pch_udc_read_all_epstatus() */
	epsts = ep->epsts;
	ep->epsts = 0;
	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* next request */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		/* (re)start the head request unless its DMA already ran */
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		     PCH_UDC_BS_DMA_DONE) {
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	/* hardware error: nothing sensible to do */
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host asked for a stall on this endpoint */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			/* protocol stall still pending: keep EP stalled */
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
					PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
					PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			/* data received: complete the head request */
			pch_udc_complete_receiver(ep);
		}
	}
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
/**
 * pch_udc_gadget_setup() - Forward the received SETUP packet to the gadget
 * @dev:	Reference to the driver structure
 *
 * Drops dev->lock around the gadget driver's setup() callback and returns
 * the callback's result, or -ESHUTDOWN when no gadget driver is bound yet
 * (an interrupt can arrive before binding completes).
 */
static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
	__must_hold(&dev->lock)
{
	int ret = -ESHUTDOWN;

	/* An interrupt may fire before a gadget driver is bound. */
	if (dev->driver) {
		spin_unlock(&dev->lock);
		ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
		spin_lock(&dev->lock);
	}
	return ret;
}
/**
 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
 * @dev:	Reference to the device structure
 *
 * Consumes the status latched for EP0-IN: on transfer-done (TDC) the
 * pending request is completed and EP0-OUT is re-armed for the following
 * stage; on a plain IN token the next queued TX request is started.
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;
	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	/* consume the status snapshot taken in pch_udc_read_all_epstatus() */
	epsts = ep->epsts;
	ep->epsts = 0;
	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and hardware errors: nothing to service */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		pch_udc_complete_transfer(ep);
		/* re-arm EP0-OUT: hand its descriptor back to the DMA
		 * engine and make the endpoint receive-ready again
		 */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					  PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
/**
 * pch_udc_svc_control_out() - Routine that handle Control
 *					OUT endpoint interrupts
 * @dev:	Reference to the device structure
 *
 * Handles the two EP0-OUT events: a SETUP packet (snapshot it, select the
 * data direction for gadget.ep0, forward it to the gadget driver and prime
 * the following stage) or OUT data (dispatch to the generic OUT service
 * routine). dev->lock is dropped/re-acquired inside pch_udc_gadget_setup().
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;
	ep = &dev->ep[UDC_EP0OUT_IDX];
	stat = ep->epsts;
	ep->epsts = 0;
	/* If setup data */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		/* snapshot the SETUP packet, then recycle the setup buffer */
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		/* point gadget.ep0 at the endpoint of the data direction */
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else /* OUT */
			dev->gadget.ep0 = &ep->ep;
		/* If Mass storage Reset */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* call gadget with setup data received */
		setup_supported = pch_udc_gadget_setup(dev);
		if (dev->setup_data.bRequestType & USB_DIR_IN) {
			/* IN data stage: re-arm the OUT descriptor for the
			 * status stage
			 */
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					      PCH_UDC_BS_HST_RDY;
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
		}
		/* ep0 in returns data on IN phase */
		if (setup_supported >= 0 && setup_supported <
		    UDC_EP0IN_MAX_PKT_SIZE) {
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
			/* Gadget would have queued a request when
			 * we called the setup */
			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
				pch_udc_set_dma(dev, DMA_DIR_RX);
				pch_udc_ep_clear_nak(ep);
			}
		} else if (setup_supported < 0) {
			/* if unsupported request, then stall */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			/* defer: remember that an ack is still pending */
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		    UDC_EPSTS_OUT_DATA) && !dev->stall) {
		/* OUT data stage: let the generic OUT handler consume it */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_ddptr(ep, 0);
		if (!list_empty(&ep->queue)) {
			ep->epsts = stat;
			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
		}
		pch_udc_set_dma(dev, DMA_DIR_RX);
	}
	pch_udc_ep_set_rrdy(ep);
}
/**
 * pch_udc_postsvc_epinters() - Re-arm an IN endpoint after servicing it
 * @dev:	Reference to the device structure
 * @ep_num:	End point number
 *
 * If requests are still queued on the IN endpoint, re-enable its
 * interrupt and clear the NAK so the transfer can continue; otherwise
 * leave the endpoint alone.
 */
static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
{
	struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];

	if (!list_empty(&ep->queue)) {
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		pch_udc_ep_clear_nak(ep);
	}
}
/**
 * pch_udc_read_all_epstatus() - Latch and clear the status of all endpoints
 * @dev:	Reference to the device structure
 * @ep_intr:	Endpoint interrupt word (IN EPs in the low 16 bits, OUT EPs
 *		in the high 16 bits)
 *
 * For every endpoint flagged in @ep_intr, snapshot its status register into
 * ep->epsts (consumed later by the service routines) and acknowledge the
 * status in hardware.
 */
static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
{
	struct pch_udc_ep *ep;
	int n;

	for (n = 0; n < PCH_UDC_USED_EP_NUM; n++) {
		/* IN endpoint n: bit n */
		if (ep_intr & (0x1 << n)) {
			ep = &dev->ep[UDC_EPIN_IDX(n)];
			ep->epsts = pch_udc_read_ep_status(ep);
			pch_udc_clear_ep_status(ep, ep->epsts);
		}
		/* OUT endpoint n: bit 16+n */
		if (ep_intr & (0x10000 << n)) {
			ep = &dev->ep[UDC_EPOUT_IDX(n)];
			ep->epsts = pch_udc_read_ep_status(ep);
			pch_udc_clear_ep_status(ep, ep->epsts);
		}
	}
}
/**
 * pch_udc_activate_control_ep() - This function enables the control endpoints
 *					for traffic after a reset
 * @dev:	Reference to the device structure
 *
 * Reprograms EP0-IN and EP0-OUT: flushes the FIFOs, restores buffer and
 * packet sizes, and rewires the EP0-OUT SETUP and data DMA descriptors
 * (allocated in init_dma_pools()) so the next SETUP packet can be received.
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;
	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* Initialize the IN EP Descriptor: EP0-IN uses no DMA descriptors */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;
	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of dma descriptor */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Setup descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
	/* Initialize the dma descriptor: a single self-linked descriptor
	 * pointing at the ep0out bounce buffer mapped in init_dma_pools()
	 */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;
	pch_udc_ep_clear_nak(ep);
}
/**
 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
 * @dev:	Reference to driver structure
 *
 * Stops all DMA, masks and clears every endpoint interrupt, resets all
 * endpoint registers and software state, empties the request queues and
 * finally notifies the gadget core of the bus reset (with dev->lock
 * dropped around the callback).
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;
	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* clear all endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	/* reset software bookkeeping for the new bus session */
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;
	/* disable ep to empty req queue. Skip the control EP's */
	/* NOTE(review): the loop below starts at index 0, so the control
	 * endpoints are in fact included despite the comment above - confirm
	 * which behavior is intended.
	 */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	if (dev->driver) {
		/* the reset callback may sleep; drop the lock around it */
		spin_unlock(&dev->lock);
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);
	}
}
/**
 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
 *				done interrupt
 * @dev:	Reference to driver structure
 *
 * Reads the negotiated bus speed from the device status register, records
 * it in the gadget, re-activates the control endpoints, re-enables DMA in
 * both directions and unmasks the interrupt sources needed for normal
 * operation.
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;
	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case  UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case  UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		/* reserved speed encoding: treat as fatal */
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
/**
 * pch_udc_svc_intf_interrupt() - This function handles a set interface
 *				  interrupt
 * @dev:	Reference to driver structure
 *
 * The controller decodes SET_INTERFACE itself; this handler reads the new
 * interface/alternate setting from the status register, programs the
 * endpoint config register, clears all stalls and synthesizes an
 * equivalent SET_INTERFACE control request for the gadget driver.
 */
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
	u32 reg, dev_stat = 0;
	int i;
	dev_stat = pch_udc_read_device_status(dev);
	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
							 UDC_DEVSTS_INTF_SHIFT;
	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
							 UDC_DEVSTS_ALT_SHIFT;
	dev->set_cfg_not_acked = 1;
	/* Construct the usb request for gadget driver and inform it */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
	/* programm the Endpoint Cfg registers */
	/* Only one end point cfg register */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* clear stall bits */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	/* forward the synthesized request to the gadget driver */
	pch_udc_gadget_setup(dev);
}
/**
 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
 *				 interrupt
 * @dev:	Reference to driver structure
 *
 * The controller decodes SET_CONFIGURATION itself; this handler reads the
 * new configuration number from the status register, programs the endpoint
 * config register, clears all stalls and synthesizes an equivalent
 * SET_CONFIGURATION control request for the gadget driver.
 */
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
	int i;
	u32 reg, dev_stat = 0;
	dev_stat = pch_udc_read_device_status(dev);
	dev->set_cfg_not_acked = 1;
	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
				UDC_DEVSTS_CFG_SHIFT;
	/* make usb request for gadget driver */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
	/* program the NE registers */
	/* Only one end point cfg register */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* clear stall bits */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	/* call gadget zero with setup data received */
	pch_udc_gadget_setup(dev);
}
/**
 * pch_udc_dev_isr() - This function services device interrupts
 *			by invoking appropriate routines.
 * @dev:	Reference to the device structure
 * @dev_intr:	The Device interrupt status.
 *
 * Dispatches each pending device-level event (reset, enumeration done,
 * set-interface, set-configuration, suspend, SOF, idle, remote wakeup)
 * to its handler. Gadget callbacks are invoked with dev->lock dropped.
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;
	/* USB Reset Interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done Interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface Interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Config Interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			/* callback may sleep; drop the lock around it */
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
		/* distinguish a real cable disconnect from a bus suspend
		 * by sampling VBUS
		 */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			schedule_work(&dev->vbus_gpio.irq_work_fall);
		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Clear the SOF interrupt, if enabled */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* ES interrupt, IDLE > 3ms on the USB */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* RWKP interrupt */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
/**
 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
 * @irq:	Interrupt request number
 * @pdev:	Reference to the device structure
 *
 * Reads and acknowledges the device- and endpoint-interrupt registers,
 * detects a hung controller (both interrupt registers and the DEVCFG
 * register reading the same value after a hot plug) and soft-resets it,
 * then dispatches under dev->lock: device events first, then EP0 IN/OUT,
 * then the data endpoints.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;
	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);
	/* For a hot plug, this find that the controller is hung up. */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;	/* not ours (IRQ line is shared) */
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		/* latch every flagged endpoint's status before dispatching */
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control In interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control Out interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data in end point interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 <<  i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data out end point interrupts */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
/**
* pch_udc_setup_ep0() - This function enables control endpoint for traffic
* @dev: Reference to the device structure
*/
static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
{
/* enable ep0 interrupts */
pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
UDC_EPINT_OUT_EP0);
/* enable device interrupts */
pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
UDC_DEVINT_ES | UDC_DEVINT_ENUM |
UDC_DEVINT_SI | UDC_DEVINT_SC);
}
/**
 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
 * @dev:	Reference to the driver structure
 *
 * Rebuilds every endpoint structure (name, ops, capabilities, register
 * offset, max packet size) and the gadget's endpoint list. EP0 IN/OUT are
 * removed from the generic list and exposed through gadget.ep0 instead.
 */
static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
{
	const char *const ep_string[] = {
		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
		"ep15in", "ep15out",
	};
	int i;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	/* Initialize the endpoints structures */
	memset(dev->ep, 0, sizeof dev->ep);
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		struct pch_udc_ep *ep = &dev->ep[i];
		ep->dev = dev;
		ep->halted = 1;
		ep->num = i / 2;
		/* even array indices are IN endpoints, odd are OUT */
		ep->in = ~i & 1;
		ep->ep.name = ep_string[i];
		ep->ep.ops = &pch_udc_ep_ops;
		if (ep->in) {
			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_in = true;
		} else {
			/* OUT endpoint registers sit above the IN bank */
			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
					  UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_out = true;
		}
		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		/* need to set ep->ep.maxpacket and set Default Configuration?*/
		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	/* EP0 has smaller, fixed packet limits */
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
	/* remove ep0 in and out from the list. They have own pointer */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
/**
 * pch_udc_pcd_init() - This API initializes the driver structure
 * @dev:	Reference to the driver structure
 *
 * Initializes the hardware and the endpoint structures, then sets up the
 * optional VBUS sense GPIO. On GPIO failure the hardware is torn down
 * again.
 *
 * Return codes:
 *	0:		Success
 *	-ERRNO:		All kind of errors when retrieving VBUS GPIO
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	int err;

	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);

	err = pch_vbus_gpio_init(dev);
	if (err)
		pch_udc_exit(dev);	/* undo pch_udc_init() */
	return err;
}
/**
 * init_dma_pools() - create dma pools during initialization
 * @dev:	reference to struct pci_dev
 *
 * Creates the data/setup descriptor DMA pools, allocates the EP0-OUT
 * SETUP and data descriptors, and maps the shared ep0out bounce buffer.
 * Returns 0 on success or a negative errno. Resources allocated here are
 * released by pch_udc_remove(), which the probe error path also invokes.
 */
static int init_dma_pools(struct pch_udc_dev *dev)
{
	struct pch_udc_stp_dma_desc	*td_stp;
	struct pch_udc_data_dma_desc	*td_data;
	void				*ep0out_buf;
	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
		sizeof(struct pch_udc_data_dma_desc), 0, 0);
	if (!dev->data_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
			__func__);
		return -ENOMEM;
	}
	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
	if (!dev->stp_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
			__func__);
		return -ENOMEM;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
	if (!td_stp) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate setup dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
				 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
	if (!td_data) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate data dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
	/* EP0-IN does not use DMA descriptors */
	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
	/* bounce buffer referenced by the EP0-OUT data descriptor
	 * (see pch_udc_activate_control_ep())
	 */
	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
				  GFP_KERNEL);
	if (!ep0out_buf)
		return -ENOMEM;
	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
				       UDC_EP0OUT_BUFF_SIZE * 4,
				       DMA_FROM_DEVICE);
	return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
}
/**
 * pch_udc_start() - usb_gadget_ops start hook: bind a gadget driver
 * @g:		the gadget being started
 * @driver:	the gadget driver to bind
 *
 * Records the driver, arms EP0 traffic and clears the soft-disconnect bit
 * when VBUS is present (or when no VBUS interrupt line is configured).
 * Always returns 0.
 */
static int pch_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct pch_udc_dev	*dev = to_pch_udc(g);
	int			vbus;

	dev->driver = driver;

	/* get ready for ep0 traffic */
	pch_udc_setup_ep0(dev);

	/* clear SD (soft disconnect) so the host can see us */
	vbus = pch_vbus_gpio_get_value(dev);
	if (vbus != 0 || !dev->vbus_gpio.intr)
		pch_udc_clear_disconnect(dev);

	dev->connected = 1;
	return 0;
}
/*
 * pch_udc_stop() - usb_gadget_ops stop hook: unbind the gadget driver.
 * Masks all device interrupts, drops the driver reference and
 * soft-disconnects from the host. Always returns 0.
 */
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev *dev = to_pch_udc(g);
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	/* Assures that there are no pending requests with this driver */
	dev->driver = NULL;
	dev->connected = 0;
	/* set SD */
	pch_udc_set_disconnect(dev);
	return 0;
}
/* devm action callback: undo pch_vbus_gpio_add_table() on device teardown */
static void pch_vbus_gpio_remove_table(void *table)
{
	gpiod_remove_lookup_table(table);
}
/*
 * Register a GPIO lookup table and arrange for its automatic removal when
 * @d is released. Returns 0 or the devm action registration error.
 */
static int pch_vbus_gpio_add_table(struct device *d, void *table)
{
	gpiod_add_lookup_table(table);
	return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, table);
}
/* VBUS sense GPIO for the MinnowBoard, keyed to its fixed PCI address */
static struct gpiod_lookup_table pch_udc_minnow_vbus_gpio_table = {
	.dev_id		= "0000:02:02.4",
	.table		= {
		GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
		{}
	},
};
/* MinnowBoard platform hook: install the VBUS GPIO lookup table */
static int pch_udc_minnow_platform_init(struct device *d)
{
	return pch_vbus_gpio_add_table(d, &pch_udc_minnow_vbus_gpio_table);
}
/* Quark X1000 platform hook: its UDC registers live in a different BAR */
static int pch_udc_quark_platform_init(struct device *d)
{
	struct pch_udc_dev *dev = dev_get_drvdata(d);
	dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	return 0;
}
/* PCI shutdown hook: silence all interrupts and soft-disconnect */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* disable the pullup so the host will think we're gone */
	pch_udc_set_disconnect(dev);
}
/*
 * PCI remove hook (also reached from the probe error path): unregister the
 * gadget, release the EP0-OUT DMA descriptors and pools, unmap the bounce
 * buffer, and shut the hardware down. Every release step is guarded so a
 * partially initialized device tears down cleanly.
 */
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);
	/* dma pool cleanup */
	dma_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
			dma_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_stp,
				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		}
		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
			dma_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_data,
				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
		}
		dma_pool_destroy(dev->stp_requests);
	}

	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);
}
/* PM suspend hook: mask all device- and endpoint-level interrupts */
static int __maybe_unused pch_udc_suspend(struct device *d)
{
	struct pch_udc_dev *dev = dev_get_drvdata(d);
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	return 0;
}
/* PM resume hook: intentionally a no-op; nothing is restored here */
static int __maybe_unused pch_udc_resume(struct device *d)
{
	return 0;
}
static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);

/* optional platform-specific hook carried in pci_device_id.driver_data */
typedef int (*platform_init_fn)(struct device *);
/*
 * PCI probe: enables the device, runs the optional platform hook, maps the
 * register BAR, initializes the hardware, requests the (possibly MSI) IRQ,
 * creates the DMA pools and registers the gadget. Post-IRQ failures funnel
 * through pch_udc_remove() for cleanup; earlier failures rely on devm/pcim
 * managed resources.
 */
static int pch_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	platform_init_fn platform_init = (platform_init_fn)id->driver_data;
	int			retval;
	struct pch_udc_dev	*dev;

	/* init */
	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* pci setup */
	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	dev->bar = PCH_UDC_PCI_BAR;	/* default; hook below may override */
	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	/* Platform specific hook */
	if (platform_init) {
		retval = platform_init(&pdev->dev);
		if (retval)
			return retval;
	}

	/* PCI resource allocation */
	retval = pcim_iomap_regions(pdev, BIT(dev->bar), pci_name(pdev));
	if (retval)
		return retval;

	dev->base_addr = pcim_iomap_table(pdev)[dev->bar];

	/* initialize the hardware */
	retval = pch_udc_pcd_init(dev);
	if (retval)
		return retval;

	pci_enable_msi(pdev);

	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
				  IRQF_SHARED, KBUILD_MODNAME, dev);
	if (retval) {
		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
			pdev->irq);
		goto finished;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* device struct setup */
	spin_lock_init(&dev->lock);
	dev->gadget.ops = &pch_udc_ops;

	retval = init_dma_pools(dev);
	if (retval)
		goto finished;

	dev->gadget.name = KBUILD_MODNAME;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* Put the device in disconnected state till a driver is bound */
	pch_udc_set_disconnect(dev);
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto finished;
	return 0;

finished:
	pch_udc_remove(pdev);
	return retval;
}
/*
 * PCI IDs this driver binds to. driver_data optionally carries a
 * platform-specific init hook consumed by pch_udc_probe().
 */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		/* Quark X1000: registers live in a different PCI BAR */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
		.driver_data = (kernel_ulong_t)&pch_udc_quark_platform_init,
	},
	{
		/* MinnowBoard (EG20T subsystem): needs the VBUS GPIO table */
		PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC,
			       PCI_VENDOR_ID_CIRCUITCO, PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
		.driver_data = (kernel_ulong_t)&pch_udc_minnow_platform_init,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
/* PCI driver glue: probe/remove/shutdown hooks plus PM ops */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.shutdown =	pch_udc_shutdown,
	.driver = {
		.pm = &pch_udc_pm,
	},
};

module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/pch_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USBF USB Function driver
*
* Copyright 2022 Schneider Electric
* Author: Herve Codina <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/role.h>
#define USBF_NUM_ENDPOINTS 16
#define USBF_EP0_MAX_PCKT_SIZE 64
/* EPC registers */
#define USBF_REG_USB_CONTROL 0x000
#define USBF_USB_PUE2 BIT(2)
#define USBF_USB_CONNECTB BIT(3)
#define USBF_USB_DEFAULT BIT(4)
#define USBF_USB_CONF BIT(5)
#define USBF_USB_SUSPEND BIT(6)
#define USBF_USB_RSUM_IN BIT(7)
#define USBF_USB_SOF_RCV BIT(8)
#define USBF_USB_FORCEFS BIT(9)
#define USBF_USB_INT_SEL BIT(10)
#define USBF_USB_SOF_CLK_MODE BIT(11)
#define USBF_REG_USB_STATUS 0x004
#define USBF_USB_RSUM_OUT BIT(1)
#define USBF_USB_SPND_OUT BIT(2)
#define USBF_USB_USB_RST BIT(3)
#define USBF_USB_DEFAULT_ST BIT(4)
#define USBF_USB_CONF_ST BIT(5)
#define USBF_USB_SPEED_MODE BIT(6)
#define USBF_USB_SOF_DELAY_STATUS BIT(31)
#define USBF_REG_USB_ADDRESS 0x008
#define USBF_USB_SOF_STATUS BIT(15)
#define USBF_USB_SET_USB_ADDR(_a) ((_a) << 16)
#define USBF_USB_GET_FRAME(_r) ((_r) & 0x7FF)
#define USBF_REG_SETUP_DATA0 0x018
#define USBF_REG_SETUP_DATA1 0x01C
#define USBF_REG_USB_INT_STA 0x020
#define USBF_USB_RSUM_INT BIT(1)
#define USBF_USB_SPND_INT BIT(2)
#define USBF_USB_USB_RST_INT BIT(3)
#define USBF_USB_SOF_INT BIT(4)
#define USBF_USB_SOF_ERROR_INT BIT(5)
#define USBF_USB_SPEED_MODE_INT	BIT(6)
#define USBF_USB_EPN_INT(_n)	(BIT(8) << (_n))	/* n=0..15 */
#define USBF_REG_USB_INT_ENA	0x024
#define USBF_USB_RSUM_EN	BIT(1)
#define USBF_USB_SPND_EN	BIT(2)
#define USBF_USB_USB_RST_EN	BIT(3)
#define USBF_USB_SOF_EN		BIT(4)
#define USBF_USB_SOF_ERROR_EN	BIT(5)
#define USBF_USB_SPEED_MODE_EN	BIT(6)
#define USBF_USB_EPN_EN(_n)	(BIT(8) << (_n))	/* n=0..15 */
#define USBF_BASE_EP0		0x028
/* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
#define USBF_REG_EP0_CONTROL	0x00
#define USBF_EP0_ONAK		BIT(0)
#define USBF_EP0_INAK		BIT(1)
#define USBF_EP0_STL		BIT(2)
#define USBF_EP0_PERR_NAK_CLR	BIT(3)
#define USBF_EP0_INAK_EN	BIT(4)
#define USBF_EP0_DW_MASK	(0x3 << 5)
#define USBF_EP0_DW(_s)		((_s) << 5)
#define USBF_EP0_DEND		BIT(7)
#define USBF_EP0_BCLR		BIT(8)
#define USBF_EP0_PIDCLR		BIT(9)
#define USBF_EP0_AUTO		BIT(16)
#define USBF_EP0_OVERSEL	BIT(17)
#define USBF_EP0_STGSEL		BIT(18)
#define USBF_REG_EP0_STATUS	0x04
#define USBF_EP0_SETUP_INT	BIT(0)
#define USBF_EP0_STG_START_INT	BIT(1)
#define USBF_EP0_STG_END_INT	BIT(2)
#define USBF_EP0_STALL_INT	BIT(3)
#define USBF_EP0_IN_INT		BIT(4)
#define USBF_EP0_OUT_INT	BIT(5)
#define USBF_EP0_OUT_OR_INT	BIT(6)
#define USBF_EP0_OUT_NULL_INT	BIT(7)
#define USBF_EP0_IN_EMPTY	BIT(8)
#define USBF_EP0_IN_FULL	BIT(9)
#define USBF_EP0_IN_DATA	BIT(10)
#define USBF_EP0_IN_NAK_INT	BIT(11)
#define USBF_EP0_OUT_EMPTY	BIT(12)
#define USBF_EP0_OUT_FULL	BIT(13)
#define USBF_EP0_OUT_NULL	BIT(14)
#define USBF_EP0_OUT_NAK_INT	BIT(15)
#define USBF_EP0_PERR_NAK_INT	BIT(16)
#define USBF_EP0_PERR_NAK	BIT(17)
#define USBF_EP0_PID		BIT(18)
#define USBF_REG_EP0_INT_ENA	0x08
#define USBF_EP0_SETUP_EN	BIT(0)
#define USBF_EP0_STG_START_EN	BIT(1)
#define USBF_EP0_STG_END_EN	BIT(2)
#define USBF_EP0_STALL_EN	BIT(3)
#define USBF_EP0_IN_EN		BIT(4)
#define USBF_EP0_OUT_EN		BIT(5)
#define USBF_EP0_OUT_OR_EN	BIT(6)
#define USBF_EP0_OUT_NULL_EN	BIT(7)
#define USBF_EP0_IN_NAK_EN	BIT(11)
#define USBF_EP0_OUT_NAK_EN	BIT(15)
#define USBF_EP0_PERR_NAK_EN	BIT(16)
#define USBF_REG_EP0_LENGTH	0x0C
#define USBF_EP0_LDATA		(0x7FF << 0)
#define USBF_REG_EP0_READ	0x10
#define USBF_REG_EP0_WRITE	0x14
#define USBF_BASE_EPN(_n)	(0x040 + (_n) * 0x020)
/* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
#define USBF_REG_EPN_CONTROL	0x000
#define USBF_EPN_ONAK		BIT(0)
#define USBF_EPN_OSTL		BIT(2)
#define USBF_EPN_ISTL		BIT(3)
#define USBF_EPN_OSTL_EN	BIT(4)
#define USBF_EPN_DW_MASK	(0x3 << 5)
#define USBF_EPN_DW(_s)		((_s) << 5)
#define USBF_EPN_DEND		BIT(7)
#define USBF_EPN_CBCLR		BIT(8)
#define USBF_EPN_BCLR		BIT(9)
#define USBF_EPN_OPIDCLR	BIT(10)
#define USBF_EPN_IPIDCLR	BIT(11)
#define USBF_EPN_AUTO		BIT(16)
#define USBF_EPN_OVERSEL	BIT(17)
#define USBF_EPN_MODE_MASK	(0x3 << 24)
#define USBF_EPN_MODE_BULK	(0x0 << 24)
#define USBF_EPN_MODE_INTR	(0x1 << 24)
#define USBF_EPN_MODE_ISO	(0x2 << 24)
#define USBF_EPN_DIR0		BIT(26)
#define USBF_EPN_BUF_TYPE_DOUBLE	BIT(30)
#define USBF_EPN_EN		BIT(31)
#define USBF_REG_EPN_STATUS	0x004
#define USBF_EPN_IN_EMPTY	BIT(0)
#define USBF_EPN_IN_FULL	BIT(1)
#define USBF_EPN_IN_DATA	BIT(2)
#define USBF_EPN_IN_INT		BIT(3)
#define USBF_EPN_IN_STALL_INT	BIT(4)
#define USBF_EPN_IN_NAK_ERR_INT	BIT(5)
#define USBF_EPN_IN_END_INT	BIT(7)
#define USBF_EPN_IPID		BIT(10)
#define USBF_EPN_OUT_EMPTY	BIT(16)
#define USBF_EPN_OUT_FULL	BIT(17)
#define USBF_EPN_OUT_NULL_INT	BIT(18)
#define USBF_EPN_OUT_INT	BIT(19)
#define USBF_EPN_OUT_STALL_INT	BIT(20)
#define USBF_EPN_OUT_NAK_ERR_INT	BIT(21)
#define USBF_EPN_OUT_OR_INT	BIT(22)
#define USBF_EPN_OUT_END_INT	BIT(23)
#define USBF_EPN_ISO_CRC	BIT(24)
#define USBF_EPN_ISO_OR		BIT(26)
#define USBF_EPN_OUT_NOTKN	BIT(27)
#define USBF_EPN_ISO_OPID	BIT(28)
#define USBF_EPN_ISO_PIDERR	BIT(29)
#define USBF_REG_EPN_INT_ENA	0x008
#define USBF_EPN_IN_EN		BIT(3)
#define USBF_EPN_IN_STALL_EN	BIT(4)
#define USBF_EPN_IN_NAK_ERR_EN	BIT(5)
#define USBF_EPN_IN_END_EN	BIT(7)
#define USBF_EPN_OUT_NULL_EN	BIT(18)
#define USBF_EPN_OUT_EN		BIT(19)
#define USBF_EPN_OUT_STALL_EN	BIT(20)
#define USBF_EPN_OUT_NAK_ERR_EN	BIT(21)
#define USBF_EPN_OUT_OR_EN	BIT(22)
#define USBF_EPN_OUT_END_EN	BIT(23)
#define USBF_REG_EPN_DMA_CTRL	0x00C
#define USBF_EPN_DMAMODE0	BIT(0)
#define USBF_EPN_DMA_EN		BIT(4)
#define USBF_EPN_STOP_SET	BIT(8)
#define USBF_EPN_BURST_SET	BIT(9)
#define USBF_EPN_DEND_SET	BIT(10)
#define USBF_EPN_STOP_MODE	BIT(11)
#define USBF_REG_EPN_PCKT_ADRS	0x010
#define USBF_EPN_MPKT(_l)	((_l) << 0)
#define USBF_EPN_BASEAD(_a)	((_a) << 16)
#define USBF_REG_EPN_LEN_DCNT	0x014
#define USBF_EPN_GET_LDATA(_r)	((_r) & 0x7FF)
/* NOTE: the EPC-side DMA packet counter is a 9-bit field ... */
#define USBF_EPN_SET_DMACNT(_c)	((_c) << 16)
#define USBF_EPN_GET_DMACNT(_r)	(((_r) >> 16) & 0x1ff)
#define USBF_REG_EPN_READ	0x018
#define USBF_REG_EPN_WRITE	0x01C
/* AHB-EPC Bridge registers */
#define USBF_REG_AHBSCTR	0x1000
#define USBF_REG_AHBMCTR	0x1004
#define USBF_SYS_WBURST_TYPE	BIT(2)
#define USBF_SYS_ARBITER_CTR	BIT(31)
#define USBF_REG_AHBBINT	0x1008
#define USBF_SYS_ERR_MASTER	(0x0F << 0)
#define USBF_SYS_SBUS_ERRINT0	BIT(4)
#define USBF_SYS_SBUS_ERRINT1	BIT(5)
#define USBF_SYS_MBUS_ERRINT	BIT(6)
#define USBF_SYS_VBUS_INT	BIT(13)
#define USBF_SYS_DMA_ENDINT_EPN(_n)	(BIT(16) << (_n))	/* _n=1..15 */
#define USBF_REG_AHBBINTEN	0x100C
#define USBF_SYS_SBUS_ERRINT0EN	BIT(4)
#define USBF_SYS_SBUS_ERRINT1EN	BIT(5)
#define USBF_SYS_MBUS_ERRINTEN	BIT(6)
#define USBF_SYS_VBUS_INTEN	BIT(13)
#define USBF_SYS_DMA_ENDINTEN_EPN(_n)	(BIT(16) << (_n))	/* _n=1..15 */
#define USBF_REG_EPCTR		0x1010
#define USBF_SYS_EPC_RST	BIT(0)
#define USBF_SYS_PLL_RST	BIT(2)
#define USBF_SYS_PLL_LOCK	BIT(4)
#define USBF_SYS_PLL_RESUME	BIT(5)
#define USBF_SYS_VBUS_LEVEL	BIT(8)
#define USBF_SYS_DIRPD		BIT(12)
#define USBF_REG_USBSSVER	0x1020
#define USBF_REG_USBSSCONF	0x1024
#define USBF_SYS_DMA_AVAILABLE(_n)	(BIT(0) << (_n))	/* _n=0..15 */
#define USBF_SYS_EP_AVAILABLE(_n)	(BIT(16) << (_n))	/* _n=0..15 */
#define USBF_BASE_DMA_EPN(_n)	(0x1110 + (_n) * 0x010)
/* EPn DMA registers offsets from Base USBF_BASE_DMA_EPN(n-1). n=1..15*/
#define USBF_REG_DMA_EPN_DCR1	0x00
#define USBF_SYS_EPN_REQEN	BIT(0)
#define USBF_SYS_EPN_DIR0	BIT(1)
/* ... while the bridge-side DMA packet counter is only 8 bits wide */
#define USBF_SYS_EPN_SET_DMACNT(_c)	((_c) << 16)
#define USBF_SYS_EPN_GET_DMACNT(_r)	(((_r) >> 16) & 0x0FF)
#define USBF_REG_DMA_EPN_DCR2	0x04
#define USBF_SYS_EPN_MPKT(_s)	((_s) << 0)
#define USBF_SYS_EPN_LMPKT(_l)	((_l) << 16)
#define USBF_REG_DMA_EPN_TADR	0x08
/* USB request */
struct usbf_req {
	struct usb_request req;		/* Gadget-layer request (embedded) */
	struct list_head queue;		/* Link in the endpoint request queue */
	unsigned int is_zero_sent : 1;	/* Trailing zero-length packet was sent */
	unsigned int is_mapped : 1;	/* req.buf is DMA-mapped (usb_gadget_map_request) */
	/* Current step of the per-request transfer state machine used by the
	 * DMA paths (usbf_epn_dma_in/usbf_epn_dma_out).
	 */
	enum {
		USBF_XFER_START,
		USBF_XFER_WAIT_DMA,
		USBF_XFER_SEND_NULL,
		USBF_XFER_WAIT_END,
		USBF_XFER_WAIT_DMA_SHORT,
		USBF_XFER_WAIT_BRIDGE,
	} xfer_step;
	size_t dma_size;		/* Byte count handled by the DMA in flight */
};
/* USB Endpoint */
struct usbf_ep {
	struct usb_ep ep;		/* Gadget-layer endpoint (embedded) */
	char name[32];			/* Endpoint name (e.g. "ep1-bulk") */
	struct list_head queue;		/* Pending usbf_req list */
	unsigned int is_processing : 1;	/* Queue processing in progress */
	unsigned int is_in : 1;		/* IN (device-to-host) direction */
	struct usbf_udc *udc;		/* Parent controller */
	void __iomem *regs;		/* EPn register area (USBF_REG_EPN_*) */
	void __iomem *dma_regs;		/* EPn bridge DMA regs (USBF_REG_DMA_EPN_*) */
	unsigned int id : 8;		/* Endpoint number (0..15) */
	unsigned int disabled : 1;
	unsigned int is_wedged : 1;	/* Halted and protected against clear */
	unsigned int delayed_status : 1;
	u32 status;			/* Snapshot of USBF_REG_EPN_STATUS bits */
	/* Deferred action invoked when the AHB-EPC bridge signals the end of
	 * a DMA transfer for this endpoint.
	 */
	void (*bridge_on_dma_end)(struct usbf_ep *ep);
};
/* EP0 control-transfer state machine states */
enum usbf_ep0state {
	EP0_IDLE,
	EP0_IN_DATA_PHASE,
	EP0_OUT_DATA_PHASE,
	EP0_OUT_STATUS_START_PHASE,
	EP0_OUT_STATUS_PHASE,
	EP0_OUT_STATUS_END_PHASE,
	EP0_IN_STATUS_START_PHASE,
	EP0_IN_STATUS_PHASE,
	EP0_IN_STATUS_END_PHASE,
};
/* Device controller instance */
struct usbf_udc {
	struct usb_gadget gadget;	/* Gadget-layer device (embedded) */
	struct usb_gadget_driver *driver;	/* Bound function driver */
	struct device *dev;
	void __iomem *regs;		/* Controller register base */
	spinlock_t lock;		/* Protects controller/endpoint state */
	bool is_remote_wakeup;		/* Remote wakeup enabled by the host */
	bool is_usb_suspended;
	struct usbf_ep ep[USBF_NUM_ENDPOINTS];
	/* for EP0 control messages */
	enum usbf_ep0state ep0state;
	struct usbf_req setup_reply;	/* Internal request used to answer setup */
	u8 ep0_buf[USBF_EP0_MAX_PCKT_SIZE];	/* Backing buffer for setup_reply */
};
/* Static per-endpoint hardware description (see usbf_ep_info[] table) */
struct usbf_ep_info {
	const char *name;		/* Endpoint name exposed to the gadget layer */
	struct usb_ep_caps caps;	/* Supported type/direction capabilities */
	u16 base_addr;			/* Buffer base address in internal RAM (words) */
	unsigned int is_double : 1;	/* Double-buffered endpoint */
	u16 maxpacket_limit;		/* Maximum packet size supported */
};
#define USBF_SINGLE_BUFFER 0
#define USBF_DOUBLE_BUFFER 1
/* Convenience initializer for one usbf_ep_info entry */
#define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit)  \
	{                                                                     \
		.name            = _name,                                     \
		.caps            = _caps,                                     \
		.base_addr       = _base_addr,                                \
		.is_double       = _is_double,                                \
		.maxpacket_limit = _maxpacket_limit,                          \
	}
/* This table is computed from the recommended values provided in the SOC
 * datasheet. The buffer type (single/double) and the endpoint type cannot
 * be changed. The mapping in internal RAM (base_addr and number of words)
 * for each endpoints depends on the max packet size and the buffer type.
 */
static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
	/* ep0: buf @0x0000 64 bytes, fixed 32 words */
	[0] = USBF_EP_INFO("ep0-ctrl",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
				       USB_EP_CAPS_DIR_ALL),
			   0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
	/* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
	[1] = USBF_EP_INFO("ep1-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0020, USBF_DOUBLE_BUFFER, 512),
	/* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
	[2] = USBF_EP_INFO("ep2-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0120, USBF_DOUBLE_BUFFER, 512),
	/* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[3] = USBF_EP_INFO("ep3-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0220, USBF_SINGLE_BUFFER, 512),
	/* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[4] = USBF_EP_INFO("ep4-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x02A0, USBF_SINGLE_BUFFER, 512),
	/* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[5] = USBF_EP_INFO("ep5-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0320, USBF_SINGLE_BUFFER, 512),
	/* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[6] = USBF_EP_INFO("ep6-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x03A0, USBF_SINGLE_BUFFER, 1024),
	/* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[7] = USBF_EP_INFO("ep7-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x04A0, USBF_SINGLE_BUFFER, 1024),
	/* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[8] = USBF_EP_INFO("ep8-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x0520, USBF_SINGLE_BUFFER, 1024),
	/* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[9] = USBF_EP_INFO("ep9-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x0620, USBF_SINGLE_BUFFER, 1024),
	/* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[10] = USBF_EP_INFO("ep10-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0720, USBF_DOUBLE_BUFFER, 1024),
	/* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[11] = USBF_EP_INFO("ep11-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0920, USBF_DOUBLE_BUFFER, 1024),
	/* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[12] = USBF_EP_INFO("ep12-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0B20, USBF_DOUBLE_BUFFER, 1024),
	/* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[13] = USBF_EP_INFO("ep13-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0D20, USBF_DOUBLE_BUFFER, 1024),
	/* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[14] = USBF_EP_INFO("ep14-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0F20, USBF_DOUBLE_BUFFER, 1024),
	/* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[15] = USBF_EP_INFO("ep15-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x1120, USBF_DOUBLE_BUFFER, 1024),
};
/* Read a 32-bit controller global register */
static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
{
	void __iomem *addr = udc->regs + offset;

	return readl(addr);
}
/* Write a 32-bit controller global register */
static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
{
	void __iomem *addr = udc->regs + offset;

	writel(val, addr);
}
/* Read-modify-write: set bits in a controller global register */
static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
{
	usbf_reg_writel(udc, offset, usbf_reg_readl(udc, offset) | set);
}
/* Read-modify-write: clear bits in a controller global register */
static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
{
	usbf_reg_writel(udc, offset, usbf_reg_readl(udc, offset) & ~clr);
}
/* Read-modify-write: clear then set bits in a controller global register */
static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
				   u32 clr, u32 set)
{
	u32 val = usbf_reg_readl(udc, offset);

	val = (val & ~clr) | set;
	usbf_reg_writel(udc, offset, val);
}
/* Read a 32-bit endpoint register */
static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
{
	void __iomem *addr = ep->regs + offset;

	return readl(addr);
}
/* Burst-read 'count' 32-bit words from an endpoint FIFO register */
static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
					void *dst, uint count)
{
	void __iomem *addr = ep->regs + offset;

	readsl(addr, dst, count);
}
/* Write a 32-bit endpoint register */
static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
{
	void __iomem *addr = ep->regs + offset;

	writel(val, addr);
}
/* Burst-write 'count' 32-bit words to an endpoint FIFO register */
static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
					 const void *src, uint count)
{
	void __iomem *addr = ep->regs + offset;

	writesl(addr, src, count);
}
/* Read-modify-write: set bits in an endpoint register */
static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
{
	usbf_ep_reg_writel(ep, offset, usbf_ep_reg_readl(ep, offset) | set);
}
/* Read-modify-write: clear bits in an endpoint register */
static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
{
	usbf_ep_reg_writel(ep, offset, usbf_ep_reg_readl(ep, offset) & ~clr);
}
/* Read-modify-write: clear then set bits in an endpoint register */
static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
				      u32 clr, u32 set)
{
	u32 val = usbf_ep_reg_readl(ep, offset);

	val = (val & ~clr) | set;
	usbf_ep_reg_writel(ep, offset, val);
}
/* Read a 32-bit endpoint DMA (bridge) register */
static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
{
	void __iomem *addr = ep->dma_regs + offset;

	return readl(addr);
}
/* Write a 32-bit endpoint DMA (bridge) register */
static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
					  u32 val)
{
	void __iomem *addr = ep->dma_regs + offset;

	writel(val, addr);
}
/* Read-modify-write: set bits in an endpoint DMA (bridge) register */
static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
					  u32 set)
{
	usbf_ep_dma_reg_writel(ep, offset,
			       usbf_ep_dma_reg_readl(ep, offset) | set);
}
/* Read-modify-write: clear bits in an endpoint DMA (bridge) register */
static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
					  u32 clr)
{
	usbf_ep_dma_reg_writel(ep, offset,
			       usbf_ep_dma_reg_readl(ep, offset) & ~clr);
}
/* Queue a zero-length packet on EP0. When is_data1 is set, the data PID
 * is also reset (PIDCLR) so the packet goes out as DATA1.
 */
static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
{
	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL,
			   is_data1 ? (USBF_EP0_DEND | USBF_EP0_PIDCLR)
				    : USBF_EP0_DEND);
}
/* Push the next chunk of an EP0 IN request into the FIFO (PIO).
 *
 * Returns 0 once the request is fully transmitted, -EINPROGRESS while more
 * FIFO interrupts are needed to finish it.
 */
static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
{
	unsigned int left;
	unsigned int nb;
	const void *buf;
	u32 ctrl;
	u32 last;

	left = req->req.length - req->req.actual;

	if (left == 0) {
		if (!req->is_zero_sent) {
			if (req->req.length == 0) {
				/* Zero-length request: send one null packet */
				dev_dbg(ep0->udc->dev, "ep0 send null\n");
				usbf_ep0_send_null(ep0, false);
				req->is_zero_sent = 1;
				return -EINPROGRESS;
			}
			if ((req->req.actual % ep0->ep.maxpacket) == 0) {
				/* Transfer ended on a full packet and the
				 * caller asked for a terminating ZLP.
				 */
				if (req->req.zero) {
					dev_dbg(ep0->udc->dev, "ep0 send null\n");
					usbf_ep0_send_null(ep0, false);
					req->is_zero_sent = 1;
					return -EINPROGRESS;
				}
			}
		}
		return 0;
	}

	/* At most one packet per call */
	if (left > ep0->ep.maxpacket)
		left = ep0->ep.maxpacket;

	buf = req->req.buf;
	buf += req->req.actual;

	/* Write the full 32-bit words first */
	nb = left / sizeof(u32);
	if (nb) {
		usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
		buf += (nb * sizeof(u32));
		req->req.actual += (nb * sizeof(u32));
		left -= (nb * sizeof(u32));
	}
	ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
	ctrl &= ~USBF_EP0_DW_MASK;
	if (left) {
		/* Write the residue (1..3 bytes); DW tells the hardware how
		 * many bytes of the last word are valid.
		 */
		memcpy(&last, buf, left);
		usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
		ctrl |= USBF_EP0_DW(left);
		req->req.actual += left;
	}
	/* DEND triggers transmission of the packet just written */
	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);

	dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
		req->req.actual, req->req.length);

	return -EINPROGRESS;
}
/* Drain the EP0 OUT FIFO into the current request (PIO).
 *
 * Called with ep0->status holding the latched EP0 interrupt bits.
 * Returns 0 when the request is complete (req->req.status is then set),
 * -EINPROGRESS when more data is expected.
 */
static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
{
	int req_status = 0;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	unsigned int nb;
	void *buf;
	u32 last;

	if (ep0->status & USBF_EP0_OUT_INT) {
		/* Number of bytes the hardware received in this packet */
		recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
		count = recv;

		buf = req->req.buf;
		buf += req->req.actual;

		left = req->req.length - req->req.actual;

		dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);

		if (left > ep0->ep.maxpacket)
			left = ep0->ep.maxpacket;

		if (count > left) {
			/* More data arrived than the request can hold */
			req_status = -EOVERFLOW;
			count = left;
		}

		if (count) {
			/* Read full words, then the 1..3 residual bytes */
			nb = count / sizeof(u32);
			if (nb) {
				usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
					buf, nb);
				buf += (nb * sizeof(u32));
				req->req.actual += (nb * sizeof(u32));
				count -= (nb * sizeof(u32));
			}
			if (count) {
				last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
				memcpy(buf, &last, count);
				req->req.actual += count;
			}
		}

		dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
			req->req.actual, req->req.length);

		if (req_status) {
			dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
			req->req.status = req_status;
			return 0;
		}

		if (recv < ep0->ep.maxpacket) {
			dev_dbg(ep0->udc->dev, "ep0 short packet\n");
			/* This is a short packet -> It is the end */
			req->req.status = 0;
			return 0;
		}

		/* The Data stage of a control transfer from an endpoint to the
		 * host is complete when the endpoint does one of the following:
		 * - Has transferred exactly the expected amount of data
		 * - Transfers a packet with a payload size less than
		 *   wMaxPacketSize or transfers a zero-length packet
		 */
		if (req->req.actual == req->req.length) {
			req->req.status = 0;
			return 0;
		}
	}

	if (ep0->status & USBF_EP0_OUT_NULL_INT) {
		/* NULL packet received */
		dev_dbg(ep0->udc->dev, "ep0 null packet\n");
		if (req->req.actual != req->req.length) {
			req->req.status = req->req.short_not_ok ?
					  -EREMOTEIO : 0;
		} else {
			req->req.status = 0;
		}
		return 0;
	}

	return -EINPROGRESS;
}
/* Flush the EP0 FIFO (BCLR) and wait until the IN side reports empty.
 * Polls atomically for up to 10ms; only logs on timeout.
 */
static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
{
	u32 sts;
	int ret;

	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);

	ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
		(sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
		0,  10000);
	if (ret)
		dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
}
/* Queue a zero-length packet on endpoint n (DEND with an empty FIFO) */
static void usbf_epn_send_null(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
}
/* Write the last, partial 32-bit word (size = 1..3 bytes) of an IN
 * transfer and trigger its transmission. DW tells the hardware how many
 * bytes of the word are valid; DEND sends the packet.
 */
static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
				  unsigned int size)
{
	u32 tmp;

	memcpy(&tmp, buf, size);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);

	usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_DW_MASK,
				USBF_EPN_DW(size) | USBF_EPN_DEND);
}
/* Push the next packet of an EPn IN request into the FIFO (PIO).
 *
 * Returns 0 once the request is fully transmitted, -EINPROGRESS while
 * more IN interrupts are needed.
 */
static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int left;
	unsigned int nb;
	const void *buf;

	left = req->req.length - req->req.actual;

	if (left == 0) {
		if (!req->is_zero_sent) {
			if (req->req.length == 0) {
				/* Zero-length request: send one null packet */
				dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
				usbf_epn_send_null(epn);
				req->is_zero_sent = 1;
				return -EINPROGRESS;
			}
			if ((req->req.actual % epn->ep.maxpacket) == 0) {
				/* Terminating ZLP requested after a transfer
				 * that ended on a full packet.
				 */
				if (req->req.zero) {
					dev_dbg(epn->udc->dev, "ep%u send_null\n",
						epn->id);
					usbf_epn_send_null(epn);
					req->is_zero_sent = 1;
					return -EINPROGRESS;
				}
			}
		}
		return 0;
	}

	/* At most one packet per call */
	if (left > epn->ep.maxpacket)
		left = epn->ep.maxpacket;

	buf = req->req.buf;
	buf += req->req.actual;

	/* Full 32-bit words first, then the residue */
	nb = left / sizeof(u32);
	if (nb) {
		usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
		buf += (nb * sizeof(u32));
		req->req.actual += (nb * sizeof(u32));
		left -= (nb * sizeof(u32));
	}

	if (left) {
		usbf_epn_send_residue(epn, buf, left);
		req->req.actual += left;
	} else {
		/* No residue: just clear DW and trigger the packet */
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
					USBF_EPN_DW_MASK,
					USBF_EPN_DEND);
	}

	dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
		req->req.length);

	return -EINPROGRESS;
}
/* Re-arm the IN_END interrupt; used as bridge_on_dma_end callback so the
 * USBF-level end-of-DMA is only handled after the bridge-level one.
 */
static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
}
/* DMA state machine for an EPn IN request.
 *
 * Falls back to PIO for unaligned buffers. Driven by successive interrupts;
 * req->xfer_step tracks progress. Returns 0 when the request is done,
 * -EINPROGRESS while in flight, or a negative error.
 */
static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int left;
	u32 npkt;
	u32 lastpkt;
	int ret;

	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
			epn->id);
		return usbf_epn_pio_in(epn, req);
	}

	left = req->req.length - req->req.actual;

	switch (req->xfer_step) {
	default:
	case USBF_XFER_START:
		if (left == 0) {
			/* Zero-length request: one null packet, no DMA */
			dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
			usbf_epn_send_null(epn);
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}
		if (left < 4) {
			/* Too small for a 32-bit DMA: send the bytes by PIO */
			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
				left);
			usbf_epn_send_residue(epn,
				req->req.buf + req->req.actual, left);
			req->req.actual += left;
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}

		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
		if (ret < 0) {
			dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
				ret);
			return ret;
		}
		req->is_mapped = 1;

		npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
		lastpkt = (left % epn->ep.maxpacket);
		if (lastpkt == 0)
			lastpkt = epn->ep.maxpacket;
		lastpkt &= ~0x3; /* DMA is done on 32bit units */

		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
			USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
			req->req.dma);
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_SET_DMACNT(npkt));
		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);

		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));

		usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);

		/* The end of DMA transfer at the USBF level needs to be handled
		 * after the detection of the end of DMA transfer at the bridge
		 * level.
		 * To force this sequence, EPN_IN_END_EN will be set by the
		 * detection of the end of transfer at bridge level (ie. bridge
		 * interrupt).
		 */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
		epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;

		/* Clear any pending IN_END interrupt */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);

		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_DMA_EN);

		/* Only whole 32-bit words go through DMA; any 1..3 byte tail
		 * is sent by PIO when the DMA completes.
		 */
		req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;

		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
			req->dma_size);

		req->xfer_step = USBF_XFER_WAIT_DMA;
		break;

	case USBF_XFER_WAIT_DMA:
		if (!(epn->status & USBF_EPN_IN_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);

		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
		req->is_mapped = 0;

		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);

		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_IN_END_EN,
			USBF_EPN_IN_EN);

		req->req.actual += req->dma_size;

		left = req->req.length - req->req.actual;
		if (left) {
			/* Send the remaining 1..3 bytes by PIO */
			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);

			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
				left);
			usbf_epn_send_residue(epn,
				req->req.buf + req->req.actual, left);
			req->req.actual += left;
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}

		if (req->req.actual % epn->ep.maxpacket) {
			/* last packet was a short packet. Tell the hardware to
			 * send it right now.
			 */
			dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
				~(u32)USBF_EPN_IN_INT);
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_DEND);

			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}

		/* Last packet size was a maxpacket size
		 * Send null packet if needed
		 */
		if (req->req.zero) {
			req->xfer_step = USBF_XFER_SEND_NULL;
			break;
		}

		/* No more action to do. Wait for the end of the USB transfer */
		req->xfer_step = USBF_XFER_WAIT_END;
		break;

	case USBF_XFER_SEND_NULL:
		dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
		usbf_epn_send_null(epn);
		req->xfer_step = USBF_XFER_WAIT_END;
		break;

	case USBF_XFER_WAIT_END:
		if (!(epn->status & USBF_EPN_IN_INT)) {
			dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
			req->req.actual, req->req.length);
		req->xfer_step = USBF_XFER_START;
		return 0;
	}

	return -EINPROGRESS;
}
/* Read one 32-bit word from the EPn FIFO and copy its first 'size'
 * (1..3) valid bytes into buf.
 */
static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
				  unsigned int size)
{
	u32 word;

	word = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
	memcpy(buf, &word, size);
}
/* Drain the EPn OUT FIFO into the current request (PIO).
 *
 * Called with epn->status holding the latched EPn interrupt bits.
 * Returns 0 when the request completes (req->req.status is then set),
 * -EINPROGRESS when more data is expected.
 */
static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
{
	int req_status = 0;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	unsigned int nb;
	void *buf;

	if (epn->status & USBF_EPN_OUT_INT) {
		/* Number of bytes received in this packet */
		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
		count = recv;

		buf = req->req.buf;
		buf += req->req.actual;

		left = req->req.length - req->req.actual;

		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
			recv, left, epn->ep.maxpacket);

		if (left > epn->ep.maxpacket)
			left = epn->ep.maxpacket;

		if (count > left) {
			/* More data arrived than the request can hold */
			req_status = -EOVERFLOW;
			count = left;
		}

		if (count) {
			/* Full 32-bit words first, then the residue */
			nb = count / sizeof(u32);
			if (nb) {
				usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
					buf, nb);
				buf += (nb * sizeof(u32));
				req->req.actual += (nb * sizeof(u32));
				count -= (nb * sizeof(u32));
			}
			if (count) {
				usbf_epn_recv_residue(epn, buf, count);
				req->req.actual += count;
			}
		}

		dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
			req->req.actual, req->req.length);

		if (req_status) {
			dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
				req_status);
			req->req.status = req_status;
			return 0;
		}

		if (recv < epn->ep.maxpacket) {
			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
			/* This is a short packet -> It is the end */
			req->req.status = 0;
			return 0;
		}

		/* Request full -> complete */
		if (req->req.actual == req->req.length) {
			req->req.status = 0;
			return 0;
		}
	}

	if (epn->status & USBF_EPN_OUT_NULL_INT) {
		/* NULL packet received */
		dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
		if (req->req.actual != req->req.length) {
			req->req.status = req->req.short_not_ok ?
					  -EREMOTEIO : 0;
		} else {
			req->req.status = 0;
		}
		return 0;
	}

	return -EINPROGRESS;
}
/* Re-arm the OUT_END interrupt; used as bridge_on_dma_end callback so the
 * USBF-level end-of-DMA is only handled after the bridge-level one.
 */
static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
}
static void usbf_epn_process_queue(struct usbf_ep *epn);

/* Program and start an OUT DMA transfer from the EPn FIFO to memory.
 *
 * @addr: DMA destination address (already mapped)
 * @npkt: number of max-size packets to transfer (ignored when is_short)
 * @is_short: transfer the word-aligned part of a single short packet
 *
 * The two modes differ in how end-of-transfer is signalled, hence the
 * different interrupt setup below.
 */
static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
{
	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);

	if (is_short) {
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);

		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
			USBF_EPN_SET_DMACNT(0));

		/* The end of DMA transfer at the USBF level needs to be handled
		 * after the detection of the end of DMA transfer at the bridge
		 * level.
		 * To force this sequence, enabling the OUT_END interrupt will
		 * be done by the detection of the end of transfer at bridge
		 * level (ie. bridge interrupt).
		 */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
		epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;

		/* Clear any pending OUT_END interrupt */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
			~(u32)USBF_EPN_OUT_END_INT);

		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_DMA_EN);
		return;
	}

	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
		USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
	usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
		USBF_SYS_EPN_REQEN);

	usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
		USBF_EPN_SET_DMACNT(npkt));

	/* Here, the bridge may or may not generate an interrupt to signal the
	 * end of DMA transfer.
	 * Keep only OUT_END interrupt and let handle the bridge later during
	 * the OUT_END processing.
	 */
	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
		USBF_EPN_OUT_END_EN);

	/* Disable bridge interrupt. It will be re-enabled later */
	usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));

	/* Clear any pending DMA_END interrupt at bridge level */
	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
		USBF_SYS_DMA_ENDINT_EPN(epn->id));

	/* Clear any pending OUT_END interrupt */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
		~(u32)USBF_EPN_OUT_END_INT);

	usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
		USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
		USBF_EPN_DMA_EN);
}
/* Finish an OUT DMA transfer started by usbf_epn_dma_out_send_dma().
 *
 * Returns the number of bytes that were NOT transferred by the DMA
 * (0 for short-packet transfers, which are handled entirely by the caller).
 */
static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
{
	u32 dmacnt;
	u32 tmp;
	int ret;

	/* Restore interrupt mask */
	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
		USBF_EPN_OUT_END_EN,
		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);

	if (is_short) {
		/* Nothing more to do when the DMA was for a short packet */
		return 0;
	}

	/* Enable the bridge interrupt */
	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));

	tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
	dmacnt = USBF_EPN_GET_DMACNT(tmp);

	if (dmacnt) {
		/* Some packet were not received (halted by a short or a null
		 * packet).
		 * The bridge never raises an interrupt in this case.
		 * Wait for the end of transfer at bridge level
		 */
		ret = readl_poll_timeout_atomic(
			epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
			tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
			0, 10000);
		if (ret) {
			dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
				epn->id);
		}

		usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);

		/* The dmacnt value tells how many packet were not transferred
		 * from the maximum number of packet we set for the DMA transfer.
		 * Compute the left DMA size based on this value.
		 */
		return dmacnt * epn->ep.maxpacket;
	}

	return 0;
}
/* DMA state machine for an EPn OUT request.
 *
 * Falls back to PIO for unaligned buffers. Only whole 32-bit words go
 * through DMA; the 1..3 byte residue of a packet is read by PIO.
 * Driven by successive interrupts; req->xfer_step tracks progress.
 * Returns 0 when the request is done, -EINPROGRESS while in flight, or a
 * negative error.
 */
static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int dma_left;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	u32 npkt;
	int ret;

	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
			epn->id);
		return usbf_epn_pio_out(epn, req);
	}

	switch (req->xfer_step) {
	default:
	case USBF_XFER_START:
		if (epn->status & USBF_EPN_OUT_NULL_INT) {
			dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
			if (req->req.actual != req->req.length) {
				req->req.status = req->req.short_not_ok ?
					-EREMOTEIO : 0;
			} else {
				req->req.status = 0;
			}
			return 0;
		}

		if (!(epn->status & USBF_EPN_OUT_INT)) {
			dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
				epn->id);
			break;
		}

		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
		if (!recv) {
			dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
				epn->id);
			break;
		}

		left = req->req.length - req->req.actual;

		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
			recv, left, epn->ep.maxpacket);

		if (recv > left) {
			dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
				epn->id, recv, left);
			req->req.status = -EOVERFLOW;
			return -EOVERFLOW;
		}

		if (recv < epn->ep.maxpacket) {
			/* Short packet received */
			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
			if (recv <= 3) {
				/* Fewer than one word: PIO is enough */
				usbf_epn_recv_residue(epn,
					req->req.buf + req->req.actual, recv);
				req->req.actual += recv;

				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
					epn->id, req->req.actual, req->req.length);

				req->xfer_step = USBF_XFER_START;
				return 0;
			}

			ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
			if (ret < 0) {
				dev_err(epn->udc->dev, "map request failed (%d)\n",
					ret);
				return ret;
			}
			req->is_mapped = 1;

			/* DMA the word-aligned part of the short packet */
			usbf_epn_dma_out_send_dma(epn,
				req->req.dma + req->req.actual,
				1, true);
			req->dma_size = recv & ~0x3;

			dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
				req->dma_size);

			req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
			break;
		}

		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
		if (ret < 0) {
			dev_err(epn->udc->dev, "map request failed (%d)\n",
				ret);
			return ret;
		}
		req->is_mapped = 1;

		/* Use the maximum DMA size according to the request buffer.
		 * We will adjust the received size later at the end of the DMA
		 * transfer with the left size computed from
		 * usbf_epn_dma_out_complete_dma().
		 */
		npkt = left / epn->ep.maxpacket;
		usbf_epn_dma_out_send_dma(epn,
				req->req.dma + req->req.actual,
				npkt, false);
		req->dma_size = npkt * epn->ep.maxpacket;

		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
			req->dma_size, npkt);

		req->xfer_step = USBF_XFER_WAIT_DMA;
		break;

	case USBF_XFER_WAIT_DMA_SHORT:
		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);

		usbf_epn_dma_out_complete_dma(epn, true);

		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
		req->is_mapped = 0;

		req->req.actual += req->dma_size;

		/* Read the 1..3 byte residue of the short packet by PIO */
		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));

		count = recv & 0x3;
		if (count) {
			dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
				count);
			usbf_epn_recv_residue(epn,
				req->req.buf + req->req.actual, count);
			req->req.actual += count;
		}

		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
			req->req.actual, req->req.length);

		req->xfer_step = USBF_XFER_START;
		return 0;

	case USBF_XFER_WAIT_DMA:
		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);

		dma_left = usbf_epn_dma_out_complete_dma(epn, false);
		if (dma_left) {
			/* Adjust the final DMA size with the number of bytes
			 * the bridge actually transferred.
			 */
			count = req->dma_size - dma_left;

			dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
				count);

			req->req.actual += count;

			if (epn->status & USBF_EPN_OUT_NULL_INT) {
				/* DMA was stopped by a null packet reception */
				dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
					epn->id);
				usb_gadget_unmap_request(&epn->udc->gadget,
							 &req->req, 0);
				req->is_mapped = 0;

				usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
					~(u32)USBF_EPN_OUT_NULL_INT);

				if (req->req.actual != req->req.length) {
					req->req.status = req->req.short_not_ok ?
						-EREMOTEIO : 0;
				} else {
					req->req.status = 0;
				}
				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
					epn->id, req->req.actual, req->req.length);
				req->xfer_step = USBF_XFER_START;
				return 0;
			}

			recv = USBF_EPN_GET_LDATA(
				usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
			left = req->req.length - req->req.actual;
			if (recv > left) {
				dev_err(epn->udc->dev,
					"ep%u overflow (%u/%u)\n", epn->id,
					recv, left);
				req->req.status = -EOVERFLOW;
				usb_gadget_unmap_request(&epn->udc->gadget,
							 &req->req, 0);
				req->is_mapped = 0;

				req->xfer_step = USBF_XFER_START;
				return -EOVERFLOW;
			}

			if (recv > 3) {
				/* A short packet terminated the transfer: DMA
				 * its word-aligned part as well.
				 */
				usbf_epn_dma_out_send_dma(epn,
					req->req.dma + req->req.actual,
					1, true);
				req->dma_size = recv & ~0x3;

				dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
					epn->id, req->dma_size);

				req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
				break;
			}

			usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
			req->is_mapped = 0;

			count = recv & 0x3;
			if (count) {
				dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
					epn->id, count);
				usbf_epn_recv_residue(epn,
					req->req.buf + req->req.actual, count);
				req->req.actual += count;
			}

			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
				req->req.actual, req->req.length);

			req->xfer_step = USBF_XFER_START;
			return 0;
		}

		/* Process queue at bridge interrupt only */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
		epn->status = 0;
		epn->bridge_on_dma_end = usbf_epn_process_queue;

		req->xfer_step = USBF_XFER_WAIT_BRIDGE;
		break;

	case USBF_XFER_WAIT_BRIDGE:
		dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);

		/* Restore interrupt mask */
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_END_EN,
			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);

		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
		req->is_mapped = 0;

		req->req.actual += req->dma_size;

		req->xfer_step = USBF_XFER_START;
		left = req->req.length - req->req.actual;
		if (!left) {
			/* No more data can be added to the buffer */
			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
				req->req.actual, req->req.length);
			return 0;
		}
		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
			epn->id, req->req.actual, req->req.length);
		break;
	}

	return -EINPROGRESS;
}
/* Stop an ongoing DMA transfer on endpoint @epn.
 * Clears the bridge request-enable bit first, waits for the hardware to
 * finish the in-flight packet, then disables DMA at the endpoint level.
 */
static void usbf_epn_dma_stop(struct usbf_ep *epn)
{
	usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);

	/* In the datasheet:
	 *   If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
	 *   after 1 packet transfer completed.
	 *   Therefore, wait sufficient time for ensuring DMA transfer
	 *   completion. The WAIT time depends on the system, especially AHB
	 *   bus activity
	 * So arbitrary 10ms would be sufficient.
	 */
	mdelay(10);

	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
}
/* Abort the DMA transfer bound to @req on endpoint @epn.
 * Stops the DMA engine, unmaps the request buffer, restores PIO interrupt
 * masks, clears any stale DMA-completion interrupts and resets the request
 * transfer state machine so the request can be restarted or completed.
 */
static void usbf_epn_dma_abort(struct usbf_ep *epn,  struct usbf_req *req)
{
	dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
		epn->is_in ? "in" : "out");

	/* No bridge-completion callback must run for this aborted transfer */
	epn->bridge_on_dma_end = NULL;

	usbf_epn_dma_stop(epn);
	usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
				 epn->is_in ? 1 : 0);
	req->is_mapped = 0;

	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);

	/* Switch back from DMA-end interrupts to per-packet PIO interrupts */
	if (epn->is_in) {
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
				   USBF_EPN_IN_END_EN,
				   USBF_EPN_IN_EN);
	} else {
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
				   USBF_EPN_OUT_END_EN,
				   USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
	}

	/* As dma is stopped, be sure that no DMA interrupt are pending */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
			   USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);

	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));

	/* Enable DMA interrupt the bridge level */
	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
			USBF_SYS_DMA_ENDINTEN_EPN(epn->id));

	/* Reset transfer step */
	req->xfer_step = USBF_XFER_START;
}
/* Flush the hardware FIFO of endpoint @epn via the buffer-clear (BCLR) bit.
 * For IN endpoints, additionally poll until the FIFO reports empty.
 */
static void usbf_epn_fifo_flush(struct usbf_ep *epn)
{
	u32 ctrl;
	u32 sts;
	int ret;

	dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
		epn->is_in ? "in" : "out");

	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);

	/* DIR0 is set for OUT endpoints (see usbf_epn_enable); nothing more
	 * to wait for on the OUT side.
	 */
	if (ctrl & USBF_EPN_DIR0)
		return;

	/* IN endpoint: wait until no data is left in the FIFO */
	ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
		(sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
		0,  10000);
	if (ret)
		dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
}
/* Detach @req from the endpoint queue and give it back to the gadget
 * driver with the given completion @status.
 */
static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
			     int status)
{
	list_del_init(&req->queue);

	/* A non-zero status always wins; a success status only replaces the
	 * initial -EINPROGRESS marker, never an already-recorded error.
	 */
	if (status || req->req.status == -EINPROGRESS)
		req->req.status = status;

	dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
		ep->is_in ? "in" : "out",
		req->req.actual, req->req.length, req->req.status);

	/* A request still mapped for DMA must have its transfer aborted and
	 * its buffer unmapped before completion.
	 */
	if (req->is_mapped)
		usbf_epn_dma_abort(ep, req);

	/* Drop the lock across the callback: the gadget driver is allowed to
	 * queue new requests from its complete() handler.
	 */
	spin_unlock(&ep->udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->udc->lock);
}
/* Complete every queued request on @ep with @status and flush the FIFO. */
static void usbf_ep_nuke(struct usbf_ep *ep, int status)
{
	struct usbf_req *req;

	dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
		ep->is_in ? "in" : "out",
		status);

	for (;;) {
		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
					       queue);
		if (!req)
			break;
		usbf_ep_req_done(ep, req, status);
	}

	/* ep0 has its own flush routine; all other endpoints share one */
	if (ep->id)
		usbf_epn_fifo_flush(ep);
	else
		usbf_ep0_fifo_flush(ep);
}
/* Return true when endpoint @ep currently has its stall bit set. */
static bool usbf_ep_is_stalled(struct usbf_ep *ep)
{
	u32 ctrl;

	if (ep->id == 0) {
		ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
		return !!(ctrl & USBF_EP0_STL);
	}

	/* epN uses distinct stall bits for each direction */
	ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
	return !!(ctrl & (ep->is_in ? USBF_EPN_ISTL : USBF_EPN_OSTL));
}
/* Kick transfers on endpoint @epn after a request was queued (or after an
 * unstall). IN endpoints start the first request immediately; OUT endpoints
 * only arm/disarm token acceptance and interrupts.
 * Returns 0 on success or a negative error code.
 */
static int usbf_epn_start_queue(struct usbf_ep *epn)
{
	struct usbf_req *req;
	int ret;

	/* Nothing to start while the endpoint is halted */
	if (usbf_ep_is_stalled(epn))
		return 0;

	req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);

	if (epn->is_in) {
		if (req && !epn->is_processing) {
			ret = epn->dma_regs ?
				usbf_epn_dma_in(epn, req) :
				usbf_epn_pio_in(epn, req);
			if (ret != -EINPROGRESS) {
				dev_err(epn->udc->dev,
					"queued next request not in progress\n");
				/* The request cannot be completed (ie
				 * ret == 0) on the first call.
				 * stall and nuke the endpoint
				 */
				return ret ? ret : -EIO;
			}
		}
	} else {
		if (req) {
			/* Clear ONAK to accept OUT tokens */
			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_ONAK);

			/* Enable interrupts */
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		} else {
			/* Disable incoming data and interrupt.
			 * They will be enabled on the next usb_ep_queue call
			 */
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_ONAK);
			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		}
	}
	return 0;
}
/* Run the transfer state machine over the request queue of @ep.
 * Selects the PIO/DMA handler matching the endpoint's id and direction and
 * processes queued requests until one is still in progress or the queue is
 * empty.
 * Returns:
 *   -EINPROGRESS  the current request needs more data/events
 *   0             all requests processed and the queue is empty
 *   -ENOENT       called with an empty queue
 *   other <0      transfer error (request completed with that error)
 */
static int usbf_ep_process_queue(struct usbf_ep *ep)
{
	int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
	struct usbf_req *req;
	int is_processing;
	int ret;

	/* Pick the transfer handler: ep0 is always PIO; epN uses DMA when
	 * DMA registers are available for it.
	 */
	if (ep->is_in) {
		usbf_ep_xfer = usbf_ep0_pio_in;
		if (ep->id) {
			usbf_ep_xfer = ep->dma_regs ?
					usbf_epn_dma_in : usbf_epn_pio_in;
		}
	} else {
		usbf_ep_xfer = usbf_ep0_pio_out;
		if (ep->id) {
			usbf_ep_xfer = ep->dma_regs ?
					usbf_epn_dma_out : usbf_epn_pio_out;
		}
	}

	req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
	if (!req) {
		dev_err(ep->udc->dev,
			"no request available for ep%u %s process\n", ep->id,
			ep->is_in ? "in" : "out");
		return -ENOENT;
	}

	do {
		/* We're going to read the FIFO for this current request.
		 * NAK any other incoming data to avoid a race condition if no
		 * more request are available.
		 */
		if (!ep->is_in && ep->id != 0) {
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
					USBF_EPN_ONAK);
		}

		ret = usbf_ep_xfer(ep, req);
		if (ret == -EINPROGRESS) {
			if (!ep->is_in && ep->id != 0) {
				/* The current request needs more data.
				 * Allow incoming data
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
						USBF_EPN_ONAK);
			}
			return ret;
		}

		/* Flag re-entrancy: the giveback callback inside
		 * usbf_ep_req_done() may queue a new request, and must not
		 * start it while we are still processing here.
		 */
		is_processing = ep->is_processing;
		ep->is_processing = 1;
		usbf_ep_req_done(ep, req, ret);
		ep->is_processing = is_processing;

		if (ret) {
			/* An error was detected during the request transfer.
			 * Any pending DMA transfers were aborted by the
			 * usbf_ep_req_done() call.
			 * It's time to flush the fifo
			 */
			if (ep->id == 0)
				usbf_ep0_fifo_flush(ep);
			else
				usbf_epn_fifo_flush(ep);
		}

		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
					       queue);

		if (ep->is_in)
			continue;

		if (ep->id != 0) {
			if (req) {
				/* Another request is available.
				 * Allow incoming data
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
						USBF_EPN_ONAK);
			} else {
				/* No request queued. Disable interrupts.
				 * They will be enabled on usb_ep_queue
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
					USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
			}
		}

		/* Do not recall usbf_ep_xfer() */
		return req ? -EINPROGRESS : 0;

	} while (req);

	return 0;
}
/* Set or clear the halt (stall) condition on endpoint @ep.
 * On unstall of an epN, any in-flight DMA is aborted, the FIFO is flushed,
 * the data toggle is reset (PID clear) and the queue is restarted.
 */
static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
{
	struct usbf_req *first;

	dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
		ep->is_in ? "in" : "out",
		stall ? "stall" : "unstall");

	if (ep->id == 0) {
		/* ep0 has a single stall bit for both directions */
		if (stall)
			usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
		else
			usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
		return;
	}

	if (stall) {
		if (ep->is_in)
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
					   USBF_EPN_ISTL);
		else
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
					   USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
	} else {
		first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
		if (first && first->is_mapped) {
			/* This can appear if the host halts an endpoint using
			 * SET_FEATURE and then un-halts the endpoint
			 */
			usbf_epn_dma_abort(ep, first);
		}
		usbf_epn_fifo_flush(ep);
		/* Clear the stall bit and reset the data toggle (PID) */
		if (ep->is_in) {
			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
					   USBF_EPN_ISTL,
					   USBF_EPN_IPIDCLR);
		} else {
			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
					   USBF_EPN_OSTL,
					   USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
		}
		usbf_epn_start_queue(ep);
	}
}
/* Enable endpoint zero: clear its FIFO, arm all ep0 interrupt sources,
 * reset the control-transfer state machine to idle and unmask the ep0
 * interrupt at the controller level.
 */
static void usbf_ep0_enable(struct usbf_ep *ep0)
{
	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);

	usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
		USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
		USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);

	ep0->udc->ep0state = EP0_IDLE;
	ep0->disabled = 0;

	/* enable interrupts for the ep0 */
	usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
}
/* Program the hardware for endpoint @epn: FIFO base address and max packet
 * size, direction, initial interrupt mask and control bits.
 * Returns 0 (kept as int for symmetry with other enable paths).
 */
static int usbf_epn_enable(struct usbf_ep *epn)
{
	u32 base_addr;
	u32 ctrl;

	base_addr = usbf_ep_info[epn->id].base_addr;
	usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
			   USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));

	/* OUT transfer interrupt are enabled during usb_ep_queue */
	if (epn->is_in) {
		/* Will be changed in DMA processing */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
	}

	/* Clear, set endpoint direction, stall the unused direction, and
	 * send NAK for Data out as request are not queued yet
	 */
	ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
	if (epn->is_in)
		ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
	else
		ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);

	return 0;
}
/* usb_ep_ops.enable callback: configure endpoint @_ep from @desc.
 * ep0 cannot be (re-)enabled through this path.
 * Returns 0 on success or a negative error code.
 */
static int usbf_ep_enable(struct usb_ep *_ep,
			  const struct usb_endpoint_descriptor *desc)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (ep->id == 0)
		return -EINVAL;

	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
		usb_endpoint_dir_in(desc) ? "in" : "out",
		usb_endpoint_maxp(desc));

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);

	ret = usbf_epn_enable(ep);
	if (ret)
		goto end;

	ep->disabled = 0;

	/* enable interrupts for this endpoint */
	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));

	/* enable DMA interrupt at bridge level if DMA is used */
	if (ep->dma_regs) {
		ep->bridge_on_dma_end = NULL;
		usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
	}

	ret = 0;
end:
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* Disable endpoint @epn at the hardware level and fail all queued
 * requests with -ESHUTDOWN. Always returns 0.
 */
static int usbf_epn_disable(struct usbf_ep *epn)
{
	/* Disable interrupts */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);

	/* Disable endpoint */
	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);

	/* remove anything that was pending */
	usbf_ep_nuke(epn, -ESHUTDOWN);

	return 0;
}
/* usb_ep_ops.disable callback: tear down endpoint @_ep.
 * ep0 cannot be disabled through this path.
 * Returns 0 on success or a negative error code.
 */
static int usbf_ep_disable(struct usb_ep *_ep)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (ep->id == 0)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
		ep->is_in ? "in" : "out", ep->ep.maxpacket);

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->disabled = 1;
	/* Disable DMA interrupt */
	if (ep->dma_regs) {
		usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
				USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
		ep->bridge_on_dma_end = NULL;
	}
	/* disable interrupts for this endpoint */
	usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
	/* and the endpoint itself */
	ret = usbf_epn_disable(ep);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return ret;
}
/* Queue @req on endpoint zero. IN requests are started immediately unless
 * a transfer is already being processed or the hardware has not signaled the
 * status-stage start yet.
 * Must be called with the controller lock held.
 * Returns 0 on success or a negative error code.
 */
static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
			  gfp_t gfp_flags)
{
	int ret;

	req->req.actual = 0;
	req->req.status = -EINPROGRESS;
	req->is_zero_sent = 0;

	list_add_tail(&req->queue, &ep0->queue);

	/* The IN status-start interrupt will kick the transfer */
	if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
		return 0;

	if (!ep0->is_in)
		return 0;

	if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
		/* Only a zero-length request is valid for the status stage */
		if (req->req.length) {
			dev_err(ep0->udc->dev,
				"request lng %u for ep0 in status phase\n",
				req->req.length);
			return -EINVAL;
		}
		ep0->delayed_status = 0;
	}
	if (!ep0->is_processing) {
		ret = usbf_ep0_pio_in(ep0, req);
		if (ret != -EINPROGRESS) {
			dev_err(ep0->udc->dev,
				"queued request not in progress\n");
			/* The request cannot be completed (ie
			 * ret == 0) on the first call
			 */
			return ret ? ret : -EIO;
		}
	}

	return 0;
}
/* Queue @req on endpoint @ep (id != 0) and start transfers when the queue
 * was previously empty. Must be called with the controller lock held.
 * Returns 0 on success or a negative error code.
 */
static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
			  gfp_t gfp_flags)
{
	bool kick;

	if (ep->disabled) {
		dev_err(ep->udc->dev, "ep%u request queue while disable\n",
			ep->id);
		return -ESHUTDOWN;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->is_zero_sent = 0;
	req->xfer_step = USBF_XFER_START;

	/* Only an empty queue needs an explicit kick; otherwise the
	 * in-flight transfer machinery will pick this request up.
	 */
	kick = list_empty(&ep->queue);
	list_add_tail(&req->queue, &ep->queue);

	return kick ? usbf_epn_start_queue(ep) : 0;
}
/* usb_ep_ops.queue callback: validate the request and dispatch it to the
 * ep0 or epN queueing path under the controller lock.
 * Returns 0 on success or a negative error code.
 */
static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
			 gfp_t gfp_flags)
{
	struct usbf_req *req = container_of(_req, struct usbf_req, req);
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (!_req || !_req->buf)
		return -EINVAL;

	if (!udc || !udc->driver)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
		ep->id, ep->is_in ? "in" : "out",
		req->req.length, req->req.zero, req->req.short_not_ok);

	spin_lock_irqsave(&ep->udc->lock, flags);
	ret = ep->id ? usbf_epn_queue(ep, req, gfp_flags)
		     : usbf_ep0_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return ret;
}
/* usb_ep_ops.dequeue callback: cancel @_req on endpoint @_ep.
 * The request is completed with -ECONNRESET. If it was the head of the
 * queue it may already be in the hardware, so the FIFO is flushed; on ep0
 * the whole control transfer is abandoned (stall + nuke) to stay coherent.
 * Always returns 0.
 */
static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usbf_req *req = container_of(_req, struct usbf_req, req);
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int is_processing;
	int first;
	int ret;

	spin_lock_irqsave(&ep->udc->lock, flags);

	dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
		ep->id, ep->is_in ? "in" : "out",
		req->req.actual, req->req.length);

	first = list_is_first(&req->queue, &ep->queue);

	/* Complete the request but avoid any operation that could be done
	 * if a new request is queued during the request completion
	 */
	is_processing = ep->is_processing;
	ep->is_processing = 1;
	usbf_ep_req_done(ep, req, -ECONNRESET);
	ep->is_processing = is_processing;

	if (first) {
		/* The first item in the list was dequeued.
		 * This item could already be submitted to the hardware.
		 * So, flush the fifo
		 */
		if (ep->id)
			usbf_epn_fifo_flush(ep);
		else
			usbf_ep0_fifo_flush(ep);
	}

	if (ep->id == 0) {
		/* We dequeue a request on ep0. On this endpoint, we can have
		 * 1 request related to the data stage and/or 1 request
		 * related to the status stage.
		 * We dequeue one of them and so the USB control transaction
		 * is no more coherent. The simple way to be consistent after
		 * dequeuing is to stall and nuke the endpoint and wait the
		 * next SETUP packet.
		 */
		usbf_ep_stall(ep, true);
		usbf_ep_nuke(ep, -ECONNRESET);
		ep->udc->ep0state = EP0_IDLE;
		goto end;
	}

	if (!first)
		goto end;

	/* The queue head changed: restart transfers on the new head */
	ret = usbf_epn_start_queue(ep);
	if (ret) {
		usbf_ep_stall(ep, true);
		usbf_ep_nuke(ep, -EIO);
	}

end:
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}
/* usb_ep_ops.alloc_request callback: allocate a zeroed request wrapper.
 * Returns the embedded usb_request, or NULL on bad endpoint / no memory.
 */
static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
						 gfp_t gfp_flags)
{
	struct usbf_req *req = NULL;

	if (_ep)
		req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops.free_request callback: release a request wrapper, making sure
 * it is unlinked from any endpoint queue first.
 */
static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usbf_ep *ep;
	struct usbf_req *req;
	unsigned long flags;

	if (!_ep || !_req)
		return;

	ep = container_of(_ep, struct usbf_ep, ep);
	req = container_of(_req, struct usbf_req, req);

	/* Unlink under the lock so the request cannot be freed while it is
	 * still reachable from the endpoint queue.
	 */
	spin_lock_irqsave(&ep->udc->lock, flags);
	list_del_init(&req->queue);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	kfree(req);
}
/* usb_ep_ops.set_halt callback: stall or unstall endpoint @_ep.
 * Refused on ep0 and while requests are still queued (-EAGAIN).
 * Clearing a halt also clears any wedge condition.
 */
static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int ret = 0;

	if (ep->id == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		usbf_ep_stall(ep, halt);
		if (!halt)
			ep->is_wedged = 0;
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* usb_ep_ops.set_wedge callback: stall @_ep and mark it wedged so that a
 * host CLEAR_FEATURE(HALT) will not unstall it.
 * Refused on ep0 and while requests are still queued (-EAGAIN).
 */
static int usbf_ep_set_wedge(struct usb_ep *_ep)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int ret = 0;

	if (ep->id == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		usbf_ep_stall(ep, 1);
		ep->is_wedged = 1;
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
static struct usb_ep_ops usbf_ep_ops = {
.enable = usbf_ep_enable,
.disable = usbf_ep_disable,
.queue = usbf_ep_queue,
.dequeue = usbf_ep_dequeue,
.set_halt = usbf_ep_set_halt,
.set_wedge = usbf_ep_set_wedge,
.alloc_request = usbf_ep_alloc_request,
.free_request = usbf_ep_free_request,
};
/* Default no-op completion handler for driver-internal ep0 replies built by
 * usbf_ep0_fill_req() when the caller does not supply one.
 */
static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
{
}
/* Prepare the driver-internal request @req for an ep0 reply.
 * Copies @length bytes of @buf (if any) into the dedicated ep0 bounce
 * buffer, then initializes the usb_request fields. A NULL @complete is
 * replaced by the no-op usbf_ep0_req_complete handler.
 */
static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
			      void *buf, unsigned int length,
			      void (*complete)(struct usb_ep *_ep,
					       struct usb_request *_req))
{
	if (buf && length)
		memcpy(ep0->udc->ep0_buf, buf, length);

	req->req.buf = ep0->udc->ep0_buf;
	req->req.length = length;
	req->req.dma = 0;
	req->req.zero = true;
	req->req.complete = complete ? complete : usbf_ep0_req_complete;
	req->req.status = -EINPROGRESS;
	req->req.context = NULL;
	req->req.actual = 0;
}
/* Look up the endpoint matching the USB endpoint @address (number +
 * direction bit). Endpoint number 0 always maps to ep[0]; other endpoints
 * must be configured (have a descriptor) to match.
 * Returns NULL when no configured endpoint matches.
 */
static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
{
	unsigned int i;

	if (!(address & USB_ENDPOINT_NUMBER_MASK))
		return &udc->ep[0];

	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
		struct usbf_ep *ep = &udc->ep[i];

		if (ep->ep.desc && ep->ep.desc->bEndpointAddress == address)
			return ep;
	}

	return NULL;
}
/* Forward a control request to the gadget driver's setup() callback.
 * The controller lock is dropped around the callback. A
 * USB_GADGET_DELAYED_STATUS answer is recorded on ep0 and reported as
 * success; other non-negative values are passed through.
 */
static int usbf_req_delegate(struct usbf_udc *udc,
			     const struct usb_ctrlrequest *ctrlrequest)
{
	int ret;

	spin_unlock(&udc->lock);
	ret = udc->driver->setup(&udc->gadget, ctrlrequest);
	spin_lock(&udc->lock);

	if (ret < 0) {
		dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
		return ret;
	}

	if (ret == USB_GADGET_DELAYED_STATUS) {
		dev_dbg(udc->dev, "delayed status set\n");
		udc->ep[0].delayed_status = 1;
		ret = 0;
	}

	return ret;
}
static int usbf_req_get_status(struct usbf_udc *udc,
const struct usb_ctrlrequest *ctrlrequest)
{
struct usbf_ep *ep;
u16 status_data;
u16 wLength;
u16 wValue;
u16 wIndex;
wValue = le16_to_cpu(ctrlrequest->wValue);
wLength = le16_to_cpu(ctrlrequest->wLength);
wIndex = le16_to_cpu(ctrlrequest->wIndex);
switch (ctrlrequest->bRequestType) {
case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
goto delegate;
status_data = 0;
if (udc->gadget.is_selfpowered)
status_data |= BIT(USB_DEVICE_SELF_POWERED);
if (udc->is_remote_wakeup)
status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
break;
case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
if ((wValue != 0) || (wLength != 2))
goto delegate;
ep = usbf_get_ep_by_addr(udc, wIndex);
if (!ep)
return -EINVAL;
status_data = 0;
if (usbf_ep_is_stalled(ep))
status_data |= cpu_to_le16(1);
break;
case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
if ((wValue != 0) || (wLength != 2))
goto delegate;
status_data = 0;
break;
default:
goto delegate;
}
usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
sizeof(status_data), NULL);
usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
return 0;
delegate:
return usbf_req_delegate(udc, ctrlrequest);
}
/* Handle standard CLEAR_FEATURE / SET_FEATURE (@is_set selects which) for
 * device remote-wakeup and endpoint halt; anything else is delegated to the
 * gadget driver.
 * Returns 0 on success or a negative error code.
 */
static int usbf_req_clear_set_feature(struct usbf_udc *udc,
				      const struct usb_ctrlrequest *ctrlrequest,
				      bool is_set)
{
	struct usbf_ep *ep;
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue  = le16_to_cpu(ctrlrequest->wValue);
	wLength = le16_to_cpu(ctrlrequest->wLength);
	wIndex  = le16_to_cpu(ctrlrequest->wIndex);

	switch (ctrlrequest->bRequestType) {
	case USB_DIR_OUT | USB_RECIP_DEVICE:
		if ((wIndex != 0) || (wLength != 0))
			goto delegate;

		/* wValue is already converted to CPU order above, so compare
		 * against the plain feature selector (the old code compared
		 * against cpu_to_le16(...), a double conversion that failed
		 * on big-endian systems).
		 */
		if (wValue != USB_DEVICE_REMOTE_WAKEUP)
			goto delegate;

		udc->is_remote_wakeup = is_set;
		break;

	case USB_DIR_OUT | USB_RECIP_ENDPOINT:
		if (wLength != 0)
			goto delegate;

		ep = usbf_get_ep_by_addr(udc, wIndex);
		if (!ep)
			return -EINVAL;

		if ((ep->id == 0) && is_set) {
			/* Endpoint 0 cannot be halted (stalled)
			 * Returning an error code leads to a STALL on this ep0
			 * but keep the automate in a consistent state.
			 */
			return -EINVAL;
		}

		if (ep->is_wedged && !is_set) {
			/* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
			 * endpoint is wedged
			 */
			break;
		}

		usbf_ep_stall(ep, is_set);
		break;

	default:
		goto delegate;
	}

	return 0;

delegate:
	return usbf_req_delegate(udc, ctrlrequest);
}
/* Completion handler for the SET_ADDRESS status-stage request: once the
 * status stage finished successfully, report the ADDRESS state to the
 * gadget core.
 */
static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
					      struct usb_request *_req)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);

	/* The status phase of the SET_ADDRESS request is completed ... */
	if (_req->status == 0) {
		/* ... without any errors -> Signaled the state to the core. */
		usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
	}

	/* In case of request failure, there is no need to revert the address
	 * value set to the hardware as the hardware will take care of the
	 * value only if the status stage is completed normally.
	 */
}
/* Handle a standard SET_ADDRESS request: program the new address into the
 * hardware (applied by the IP after a successful status stage) and queue
 * the zero-length status reply.
 * Returns 0 on success or a negative error code.
 */
static int usbf_req_set_address(struct usbf_udc *udc,
				const struct usb_ctrlrequest *ctrlrequest)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;
	u32 addr;

	wValue  = le16_to_cpu(ctrlrequest->wValue);
	wLength = le16_to_cpu(ctrlrequest->wLength);
	wIndex  = le16_to_cpu(ctrlrequest->wIndex);

	if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
		goto delegate;

	/* USB addresses are 7 bits wide */
	if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
		return -EINVAL;

	addr = wValue;

	/* The hardware will take care of this USB address after the status
	 * stage of the SET_ADDRESS request is completed normally.
	 * It is safe to write it now
	 */
	usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));

	/* Queued the status request */
	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
			  usbf_ep0_req_set_address_complete);
	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);

	return 0;

delegate:
	return usbf_req_delegate(udc, ctrlrequest);
}
/* Handle SET_CONFIGURATION: the request is first delegated to the gadget
 * driver; on success the hardware CONF bit is updated to match the selected
 * configuration (set for a non-zero configuration, cleared otherwise).
 * Returns 0 on success or the gadget driver's error code.
 */
static int usbf_req_set_configuration(struct usbf_udc *udc,
				      const struct usb_ctrlrequest *ctrlrequest)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;
	int ret;

	ret = usbf_req_delegate(udc, ctrlrequest);
	if (ret)
		return ret;

	wValue  = le16_to_cpu(ctrlrequest->wValue);
	wLength = le16_to_cpu(ctrlrequest->wLength);
	wIndex  = le16_to_cpu(ctrlrequest->wIndex);

	if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
	    (wIndex != 0) || (wLength != 0)) {
		/* No error detected by driver->setup() but it is not an USB2.0
		 * Ch9 SET_CONFIGURATION.
		 * Nothing more to do
		 */
		return 0;
	}

	if (wValue & 0x00FF) {
		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
	} else {
		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
		/* Go back to Address State */
		spin_unlock(&udc->lock);
		usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
		spin_lock(&udc->lock);
	}

	return 0;
}
/* Read the SETUP packet from the hardware, set the ep0 state machine for
 * the upcoming data/status stages, and dispatch the request: standard
 * requests the controller must observe are handled here, everything else is
 * delegated to the gadget driver.
 * Returns 0 on success or a negative error code (leads to an ep0 stall).
 */
static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
{
	union {
		struct usb_ctrlrequest ctrlreq;
		u32 raw[2];
	} crq;
	struct usbf_udc *udc = ep0->udc;
	int ret;

	/* Read setup data (ie the USB control request) */
	crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
	crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);

	dev_dbg(ep0->udc->dev,
		"ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
		crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
		crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);

	/* Set current EP0 state according to the received request */
	if (crq.ctrlreq.wLength) {
		if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
			udc->ep0state = EP0_IN_DATA_PHASE;
			usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
					   USBF_EP0_INAK,
					   USBF_EP0_INAK_EN);
			ep0->is_in = 1;
		} else {
			udc->ep0state = EP0_OUT_DATA_PHASE;
			usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
					   USBF_EP0_ONAK);
			ep0->is_in = 0;
		}
	} else {
		udc->ep0state = EP0_IN_STATUS_START_PHASE;
		ep0->is_in = 1;
	}

	/* We starts a new control transfer -> Clear the delayed status flag */
	ep0->delayed_status = 0;

	if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		/* This is not a USB standard request -> delegate */
		goto delegate;
	}

	switch (crq.ctrlreq.bRequest) {
	case USB_REQ_GET_STATUS:
		ret = usbf_req_get_status(udc, &crq.ctrlreq);
		break;

	case USB_REQ_CLEAR_FEATURE:
		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
		break;

	case USB_REQ_SET_FEATURE:
		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
		break;

	case USB_REQ_SET_ADDRESS:
		ret = usbf_req_set_address(udc, &crq.ctrlreq);
		break;

	case USB_REQ_SET_CONFIGURATION:
		ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
		break;

	default:
		goto delegate;
	}

	return ret;

delegate:
	return usbf_req_delegate(udc, &crq.ctrlreq);
}
/* Advance an ep0 data or status stage by processing the request queue.
 * Moves the state machine to @next_ep0state when all queued requests are
 * complete; @ep0state_name is only used for diagnostics.
 * Returns 0 on success/progress, or a negative error code.
 */
static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
				       const char *ep0state_name,
				       enum usbf_ep0state next_ep0state)
{
	struct usbf_udc *udc = ep0->udc;
	int ret;

	ret = usbf_ep_process_queue(ep0);
	if (ret == 0) {
		/* All requests in the queue are processed */
		udc->ep0state = next_ep0state;
	} else if (ret == -EINPROGRESS) {
		/* More data needs to be processed */
		ret = 0;
	} else if (ret == -ENOENT) {
		dev_err(udc->dev,
			"no request available for ep0 %s phase\n",
			ep0state_name);
	} else {
		dev_err(udc->dev,
			"process queue failed for ep0 %s phase (%d)\n",
			ep0state_name, ret);
	}

	return ret;
}
/* Enter the OUT status stage of an IN control transfer: accept OUT tokens,
 * and queue an internal zero-length request if the gadget driver has not
 * queued one for the status stage.
 * Always returns 0.
 */
static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	struct usbf_req *req;

	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
			   USBF_EP0_ONAK,
			   USBF_EP0_PIDCLR);
	ep0->is_in = 0;

	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	if (!req) {
		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
	} else {
		/* Only a zero-length request makes sense here */
		if (req->req.length) {
			dev_err(udc->dev,
				"queued request length %u for ep0 out status phase\n",
				req->req.length);
		}
	}
	udc->ep0state = EP0_OUT_STATUS_PHASE;
	return 0;
}
/* Enter the IN status stage of an OUT (or no-data) control transfer:
 * un-NAK the IN direction, queue an internal zero-length reply when needed
 * (unless a delayed status is pending) and start sending it.
 * Always returns 0.
 */
static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	struct usbf_req *req;
	int ret;

	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
			   USBF_EP0_INAK,
			   USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
	ep0->is_in = 1;

	/* Queue request for status if needed */
	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	if (!req) {
		if (ep0->delayed_status) {
			/* The gadget driver will queue the status request
			 * later (USB_GADGET_DELAYED_STATUS); just wait for it
			 */
			dev_dbg(ep0->udc->dev,
				"EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
			udc->ep0state = EP0_IN_STATUS_PHASE;
			return 0;
		}

		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
				  0, NULL);
		usbf_ep0_queue(ep0, &udc->setup_reply,
			       GFP_ATOMIC);

		req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	} else {
		/* Only a zero-length request makes sense here */
		if (req->req.length) {
			dev_err(udc->dev,
				"queued request length %u for ep0 in status phase\n",
				req->req.length);
		}
	}

	ret = usbf_ep0_pio_in(ep0, req);
	if (ret != -EINPROGRESS) {
		usbf_ep_req_done(ep0, req, ret);
		udc->ep0state = EP0_IN_STATUS_END_PHASE;
		return 0;
	}

	udc->ep0state = EP0_IN_STATUS_PHASE;

	return 0;
}
/* ep0 interrupt handler: run the control-transfer state machine.
 * Interrupt status bits are acknowledged up front and consumed one by one;
 * the loop iterates as long as either the state or the pending bits change,
 * so several stages of one control transfer can be handled in a single
 * interrupt. Any handler failure stalls ep0 and drops back to EP0_IDLE.
 */
static void usbf_ep0_interrupt(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	u32 sts, prev_sts;
	int prev_ep0state;
	int ret;

	ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
	usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);

	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
		ep0->status,
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));

	sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
			     USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
			     USBF_EP0_STG_END_INT);

	ret = 0;
	do {
		dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);

		prev_sts = sts;
		prev_ep0state = udc->ep0state;
		switch (udc->ep0state) {
		case EP0_IDLE:
			if (!(sts & USBF_EP0_SETUP_INT))
				break;

			sts &= ~USBF_EP0_SETUP_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
			ret = usbf_handle_ep0_setup(ep0);
			break;

		case EP0_IN_DATA_PHASE:
			if (!(sts & USBF_EP0_IN_INT))
				break;

			sts &= ~USBF_EP0_IN_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"in data", EP0_OUT_STATUS_START_PHASE);
			break;

		case EP0_OUT_STATUS_START_PHASE:
			if (!(sts & USBF_EP0_STG_START_INT))
				break;

			sts &= ~USBF_EP0_STG_START_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
			ret = usbf_handle_ep0_out_status_start(ep0);
			break;

		case EP0_OUT_STATUS_PHASE:
			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
				break;

			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
			dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"out status",
				EP0_OUT_STATUS_END_PHASE);
			break;

		case EP0_OUT_STATUS_END_PHASE:
			/* A new SETUP can arrive before the stage-end
			 * interrupt; in both cases the transfer is over.
			 */
			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
				break;

			sts &= ~USBF_EP0_STG_END_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
			udc->ep0state = EP0_IDLE;
			break;

		case EP0_OUT_DATA_PHASE:
			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
				break;

			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
			dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"out data", EP0_IN_STATUS_START_PHASE);
			break;

		case EP0_IN_STATUS_START_PHASE:
			if (!(sts & USBF_EP0_STG_START_INT))
				break;

			sts &= ~USBF_EP0_STG_START_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
			ret = usbf_handle_ep0_in_status_start(ep0);
			break;

		case EP0_IN_STATUS_PHASE:
			if (!(sts & USBF_EP0_IN_INT))
				break;

			sts &= ~USBF_EP0_IN_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"in status", EP0_IN_STATUS_END_PHASE);
			break;

		case EP0_IN_STATUS_END_PHASE:
			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
				break;

			sts &= ~USBF_EP0_STG_END_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
			udc->ep0state = EP0_IDLE;
			break;

		default:
			udc->ep0state = EP0_IDLE;
			break;
		}

		if (ret) {
			dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
			/* Failure -> stall.
			 * This stall state will be automatically cleared when
			 * the IP receives the next SETUP packet
			 */
			usbf_ep_stall(ep0, true);

			/* Remove anything that was pending */
			usbf_ep_nuke(ep0, -EPROTO);

			udc->ep0state = EP0_IDLE;
			break;
		}

	} while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));

	dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
		udc->ep0state, sts,
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
}
/* Process the request queue of endpoint @epn from interrupt context.
 * Any error other than "still in progress" stalls the endpoint and fails
 * all its queued requests.
 */
static void usbf_epn_process_queue(struct usbf_ep *epn)
{
	int ret;

	ret = usbf_ep_process_queue(epn);
	switch (ret) {
	case -ENOENT:
		dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
			epn->id, epn->is_in ? "in" : "out");
		break;
	case -EINPROGRESS:
		/* More data needs to be processed */
		ret = 0;
		break;
	case 0:
		/* All requests in the queue are processed */
		break;
	default:
		dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
			epn->id, epn->is_in ? "in" : "out", ret);
		break;
	}

	if (ret) {
		dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
			epn->is_in ? "in" : "out", ret);
		usbf_ep_stall(epn, true);
		usbf_ep_nuke(epn, ret);
	}
}
/* Interrupt handler for endpoint @epn (id != 0): acknowledge the enabled
 * status bits and run the request queue for the direction(s) that fired.
 */
static void usbf_epn_interrupt(struct usbf_ep *epn)
{
	u32 sts;
	u32 ena;

	epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
	ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
	/* Acknowledge only the enabled interrupt sources */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));

	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
		epn->id, epn->is_in ? "in" : "out", epn->status, ena,
		usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));

	if (epn->disabled) {
		dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
			epn->id, epn->is_in ? "in" : "out");
		return;
	}

	sts = epn->status & ena;

	if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
		sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
		dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
			epn->id, epn->is_in ? "in" : "out");
		usbf_epn_process_queue(epn);
	}

	if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
		sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
			epn->id, epn->is_in ? "in" : "out");
		usbf_epn_process_queue(epn);
	}

	dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
		epn->id, epn->is_in ? "in" : "out",
		sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
}
/* Reset endpoint @ep on USB bus reset: clear its cached interrupt status
 * and fail any queued requests with -ESHUTDOWN.
 */
static void usbf_ep_reset(struct usbf_ep *ep)
{
	ep->status = 0;
	/* Remove anything that was pending */
	usbf_ep_nuke(ep, -ESHUTDOWN);
}
/* Handle a USB bus reset: reset all enabled endpoints, latch the negotiated
 * bus speed, clear the remote-wakeup feature, re-enable ep0 and notify the
 * gadget driver.
 */
static void usbf_reset(struct usbf_udc *udc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		if (udc->ep[i].disabled)
			continue;

		usbf_ep_reset(&udc->ep[i]);
	}

	if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
		udc->gadget.speed = USB_SPEED_HIGH;
	else
		udc->gadget.speed = USB_SPEED_FULL;

	/* Remote wakeup feature must be disabled on USB bus reset */
	udc->is_remote_wakeup = false;

	/* Enable endpoint zero */
	usbf_ep0_enable(&udc->ep[0]);

	if (udc->driver) {
		/* Signal the reset */
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
		spin_lock(&udc->lock);
	}
}
/* Notify the gadget driver of a USB bus suspend, at most once until the
 * matching resume. The udc lock is dropped around the driver callback.
 */
static void usbf_driver_suspend(struct usbf_udc *udc)
{
	if (udc->is_usb_suspended) {
		dev_dbg(udc->dev, "already suspended\n");
		return;
	}

	dev_dbg(udc->dev, "do usb suspend\n");
	udc->is_usb_suspended = true;

	if (udc->driver && udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);

		/* The datasheet tells to set the USB_CONTROL register SUSPEND
		 * bit when the USB bus suspend is detected.
		 * This bit stops the clocks (clocks for EPC, SIE, USBPHY) but
		 * these clocks seems not used only by the USB device. Some
		 * UARTs can be lost ...
		 * So, do not set the USB_CONTROL register SUSPEND bit.
		 */
	}
}
/* Notify the gadget driver of a USB bus resume; no-op unless a suspend was
 * previously signalled. The udc lock is dropped around the driver callback.
 */
static void usbf_driver_resume(struct usbf_udc *udc)
{
	if (!udc->is_usb_suspended)
		return;

	dev_dbg(udc->dev, "do usb resume\n");
	udc->is_usb_suspended = false;

	if (udc->driver && udc->driver->resume) {
		spin_unlock(&udc->lock);
		udc->driver->resume(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
/* Main EPC interrupt handler: acknowledges the pending interrupt summary,
 * then dispatches resume, bus-reset, speed-change, per-endpoint and suspend
 * events. Endpoint and reset activity implies the bus is no longer
 * suspended, hence the usbf_driver_resume() calls before each dispatch.
 */
static irqreturn_t usbf_epc_irq(int irq, void *_udc)
{
	struct usbf_udc *udc = (struct usbf_udc *)_udc;
	unsigned long flags;
	struct usbf_ep *ep;
	u32 int_sts;
	u32 int_en;
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
	int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
	/* Ack the interrupts handled below */
	usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);

	dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);

	if (int_sts & USBF_USB_RSUM_INT) {
		dev_dbg(udc->dev, "handle resume\n");
		usbf_driver_resume(udc);
	}

	if (int_sts & USBF_USB_USB_RST_INT) {
		dev_dbg(udc->dev, "handle bus reset\n");
		usbf_driver_resume(udc);
		usbf_reset(udc);
	}

	if (int_sts & USBF_USB_SPEED_MODE_INT) {
		if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;
		dev_dbg(udc->dev, "handle speed change (%s)\n",
			udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
	}

	if (int_sts & USBF_USB_EPN_INT(0)) {
		usbf_driver_resume(udc);
		usbf_ep0_interrupt(&udc->ep[0]);
	}

	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
		ep = &udc->ep[i];
		if (int_sts & USBF_USB_EPN_INT(i)) {
			usbf_driver_resume(udc);
			usbf_epn_interrupt(ep);
		}
	}

	if (int_sts & USBF_USB_SPND_INT) {
		dev_dbg(udc->dev, "handle suspend\n");
		usbf_driver_suspend(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return IRQ_HANDLED;
}
/* AHB bridge interrupt handler: VBUS level changes and per-endpoint DMA-end
 * events. The udc lock is dropped around the vbus/state gadget-core calls.
 * DMA-end actions are one-shot callbacks stashed in epn->bridge_on_dma_end.
 */
static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
{
	struct usbf_udc *udc = (struct usbf_udc *)_udc;
	unsigned long flags;
	struct usbf_ep *epn;
	u32 sysbint;
	void (*ep_action)(struct usbf_ep *epn);
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	/* Read and ack interrupts */
	sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
	usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);

	if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
		if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
			dev_dbg(udc->dev, "handle vbus (1)\n");
			spin_unlock(&udc->lock);
			usb_udc_vbus_handler(&udc->gadget, true);
			usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
			spin_lock(&udc->lock);
		} else {
			dev_dbg(udc->dev, "handle vbus (0)\n");
			udc->is_usb_suspended = false;
			spin_unlock(&udc->lock);
			usb_udc_vbus_handler(&udc->gadget, false);
			usb_gadget_set_state(&udc->gadget,
					     USB_STATE_NOTATTACHED);
			spin_lock(&udc->lock);
		}
	}

	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
		if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
			epn = &udc->ep[i];
			dev_dbg(epn->udc->dev,
				"ep%u handle DMA complete. action=%ps\n",
				epn->id, epn->bridge_on_dma_end);
			/* Consume the one-shot action before calling it */
			ep_action = epn->bridge_on_dma_end;
			if (ep_action) {
				epn->bridge_on_dma_end = NULL;
				ep_action(epn);
			}
		}
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return IRQ_HANDLED;
}
/* usb_gadget_ops.udc_start: bind the gadget driver and enable the VBUS
 * interrupt so connection events are reported from now on. Always
 * returns 0.
 */
static int usbf_udc_start(struct usb_gadget *gadget,
			  struct usb_gadget_driver *driver)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver */
	udc->driver = driver;

	/* Enable VBUS interrupt */
	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.udc_stop: disable the VBUS interrupt and unbind the gadget
 * driver. Always returns 0.
 */
static int usbf_udc_stop(struct usb_gadget *gadget)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* Disable VBUS interrupt */
	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);

	udc->driver = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);

	dev_info(udc->dev, "stopped\n");

	return 0;
}
static int usbf_get_frame(struct usb_gadget *gadget)
{
struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
}
/* Present the device on the bus (drive the D+ pull-up) and enable the
 * bus-event interrupts. Called with the udc lock held (see usbf_pullup).
 */
static void usbf_attach(struct usbf_udc *udc)
{
	/* Enable USB signal to Function PHY
	 * D+ signal Pull-up
	 * Disable endpoint 0, it will be automatically enable when a USB reset
	 * is received.
	 * Disable the other endpoints
	 */
	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
		USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
		USBF_USB_PUE2);

	/* Enable reset and mode change interrupts */
	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
		USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
}
/* Remove the device from the bus: disable interrupts, reset every enabled
 * endpoint and release the D+ pull-up. Called with the udc lock held
 * (see usbf_pullup).
 */
static void usbf_detach(struct usbf_udc *udc)
{
	int i;

	/* Disable interrupts */
	usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		if (udc->ep[i].disabled)
			continue;
		usbf_ep_reset(&udc->ep[i]);
	}

	/* Disable USB signal to Function PHY
	 * Do not Pull-up D+ signal
	 * Disable endpoint 0
	 * Disable the other endpoints
	 */
	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
		USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
		USBF_USB_CONNECTB);
}
static int usbf_pullup(struct usb_gadget *gadget, int is_on)
{
struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
unsigned long flags;
dev_dbg(udc->dev, "pullup %d\n", is_on);
spin_lock_irqsave(&udc->lock, flags);
if (is_on)
usbf_attach(udc);
else
usbf_detach(udc);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
/* usb_gadget_ops.set_selfpowered: record whether the device is
 * self-powered. Always returns 0.
 */
static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
				    int is_selfpowered)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	gadget->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.wakeup: drive remote-wakeup resume signalling on the bus.
 * Fails with -EINVAL unless remote wakeup is currently allowed
 * (udc->is_remote_wakeup; cleared on every bus reset).
 */
static int usbf_udc_wakeup(struct usb_gadget *gadget)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	if (udc->is_remote_wakeup) {
		dev_dbg(udc->dev, "do wakeup\n");
		/* Send the resume signal: pulse the RSUM_IN control bit */
		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
		ret = 0;
	} else {
		dev_dbg(udc->dev, "remote wakeup not allowed\n");
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return ret;
}
/* Gadget operations exposed to the UDC core. */
static struct usb_gadget_ops usbf_gadget_ops = {
	.get_frame = usbf_get_frame,
	.pullup = usbf_pullup,
	.udc_start = usbf_udc_start,
	.udc_stop = usbf_udc_stop,
	.set_selfpowered = usbf_udc_set_selfpowered,
	.wakeup = usbf_udc_wakeup,
};
/* Sanity-check that the capabilities declared in usbf_ep_info for this
 * endpoint match the hardware configuration latched in its EPN_CONTROL
 * register (transfer type and single/double buffering).
 * Returns 0 on match, -EINVAL on any mismatch.
 */
static int usbf_epn_check(struct usbf_ep *epn)
{
	const char *type_txt;
	const char *buf_txt;
	int ret = 0;
	u32 ctrl;

	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);

	/* Transfer type check */
	switch (ctrl & USBF_EPN_MODE_MASK) {
	case USBF_EPN_MODE_BULK:
		type_txt = "bulk";
		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
		    !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, bulk expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	case USBF_EPN_MODE_INTR:
		type_txt = "intr";
		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
		    epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, int expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	case USBF_EPN_MODE_ISO:
		type_txt = "iso";
		if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
		    epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, iso expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	default:
		type_txt = "unknown";
		dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
		ret = -EINVAL;
		break;
	}

	/* Single vs double buffering check */
	if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
		buf_txt = "double";
		if (!usbf_ep_info[epn->id].is_double) {
			dev_err(epn->udc->dev,
				"ep%u buffer mismatch, double expected\n",
				epn->id);
			ret = -EINVAL;
		}
	} else {
		buf_txt = "single";
		if (usbf_ep_info[epn->id].is_double) {
			dev_err(epn->udc->dev,
				"ep%u buffer mismatch, single expected\n",
				epn->id);
			ret = -EINVAL;
		}
	}

	dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
		epn->id, epn->ep.name, type_txt, buf_txt,
		epn->ep.maxpacket_limit, ret ? "failed" : "ok");

	return ret;
}
/* Platform probe: map the controller registers, take the EPC out of reset,
 * initialize the gadget and every endpoint the hardware reports available,
 * request the two interrupt lines and register the UDC with the gadget
 * core. All resources are devm-managed.
 */
static int usbf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usbf_udc *udc;
	struct usbf_ep *ep;
	unsigned int i;
	int irq;
	int ret;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;
	platform_set_drvdata(pdev, udc);

	udc->dev = dev;
	spin_lock_init(&udc->lock);

	udc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->regs))
		return PTR_ERR(udc->regs);

	devm_pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "USBF version: %08x\n",
		usbf_reg_readl(udc, USBF_REG_USBSSVER));

	/* Resetting the PLL is handled via the clock driver as it has common
	 * registers with USB Host
	 */
	usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);

	/* modify in register gadget process */
	udc->gadget.speed = USB_SPEED_FULL;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->gadget.ops = &usbf_gadget_ops;

	udc->gadget.name = dev->driver->name;
	udc->gadget.dev.parent = dev;
	udc->gadget.ep0 = &udc->ep[0].ep;

	/* The hardware DMA controller needs dma addresses aligned on 32bit.
	 * A fallback to pio is done if DMA addresses are not aligned.
	 */
	udc->gadget.quirk_avoids_skb_reserve = 1;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	/* we have a canned request structure to allow sending packets as reply
	 * to get_status requests
	 */
	INIT_LIST_HEAD(&udc->setup_reply.queue);

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		ep = &udc->ep[i];

		/* Skip endpoints the hardware does not implement */
		if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
		      USBF_SYS_EP_AVAILABLE(i))) {
			continue;
		}

		INIT_LIST_HEAD(&ep->queue);

		ep->id = i;
		ep->disabled = 1;
		ep->udc = udc;
		ep->ep.ops = &usbf_ep_ops;
		ep->ep.name = usbf_ep_info[i].name;
		ep->ep.caps = usbf_ep_info[i].caps;
		usb_ep_set_maxpacket_limit(&ep->ep,
					   usbf_ep_info[i].maxpacket_limit);

		if (ep->id == 0) {
			/* ep0 has its own register block and is not listed */
			ep->regs = ep->udc->regs + USBF_BASE_EP0;
		} else {
			ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
			ret = usbf_epn_check(ep);
			if (ret)
				return ret;
			if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
			    USBF_SYS_DMA_AVAILABLE(i)) {
				ep->dma_regs = ep->udc->regs +
					USBF_BASE_DMA_EPN(ep->id - 1);
			}
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		}
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
	if (ret) {
		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
	if (ret) {
		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
		return ret;
	}

	usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);

	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
		USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);

	ret = usb_add_gadget_udc(dev, &udc->gadget);
	if (ret)
		return ret;

	return 0;
}
/* Platform remove: unregister the UDC and drop the runtime-PM reference
 * taken in probe.
 */
static void usbf_remove(struct platform_device *pdev)
{
	struct usbf_udc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	pm_runtime_put(&pdev->dev);
}
/* Device-tree match table */
static const struct of_device_id usbf_match[] = {
	{ .compatible = "renesas,rzn1-usbf" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, usbf_match);

/* Platform driver glue */
static struct platform_driver udc_driver = {
	.driver = {
		.name = "usbf_renesas",
		.of_match_table = usbf_match,
	},
	.probe          = usbf_probe,
	.remove_new     = usbf_remove,
};

module_platform_driver(udc_driver);

MODULE_AUTHOR("Herve Codina <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/renesas_usbf.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
*
* Maintainer: Alan Stern <[email protected]>
*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
*/
/*
* This exposes a device side "USB gadget" API, driven by requests to a
* Linux-USB host controller driver. USB traffic is simulated; there's
* no need for USB hardware. Use this with two other drivers:
*
* - Gadget driver, responding to requests (device);
* - Host-side device driver, as already familiar in Linux.
*
* Having this all in one kernel can help some stages of development,
* bypassing some hardware (and driver) issues. UML could help too.
*
* Note: The emulation does not include isochronous transfers!
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#define DRIVER_DESC "USB Host+Gadget Emulator"
#define DRIVER_VERSION "02 May 2005"
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
#define POWER_BUDGET_3 900 /* in mA */
static const char driver_name[] = "dummy_hcd";
static const char driver_desc[] = "USB Host+Gadget Emulator";
static const char gadget_name[] = "dummy_udc";
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
/* Module parameters: how many emulated controllers to create and which
 * connection speed(s) they simulate.
 */
struct dummy_hcd_module_parameters {
	bool is_super_speed;
	bool is_high_speed;
	unsigned int num;
};

static struct dummy_hcd_module_parameters mod_data = {
	.is_super_speed = false,
	.is_high_speed = true,
	.num = 1,
};
module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO);
MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection");
module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO);
MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection");
module_param_named(num, mod_data.num, uint, S_IRUGO);
MODULE_PARM_DESC(num, "number of emulated controllers");
/*-------------------------------------------------------------------------*/
/* gadget side driver data structures */
/* Per-endpoint state of the emulated device side. */
struct dummy_ep {
	struct list_head		queue;		/* queued dummy_requests */
	unsigned long			last_io;	/* jiffies timestamp */
	struct usb_gadget		*gadget;
	const struct usb_endpoint_descriptor *desc;
	struct usb_ep			ep;
	unsigned			halted:1;
	unsigned			wedged:1;
	unsigned			already_seen:1;
	unsigned			setup_stage:1;
	unsigned			stream_en:1;
};

/* A gadget request plus its linkage on a dummy_ep's queue. */
struct dummy_request {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
};

/* Recover the containing dummy_ep from a generic usb_ep. */
static inline struct dummy_ep *usb_ep_to_dummy_ep(struct usb_ep *_ep)
{
	return container_of(_ep, struct dummy_ep, ep);
}

/* Recover the containing dummy_request from a generic usb_request. */
static inline struct dummy_request *usb_request_to_dummy_request
		(struct usb_request *_req)
{
	return container_of(_req, struct dummy_request, req);
}
/*-------------------------------------------------------------------------*/
/*
* Every device has ep0 for control requests, plus up to 30 more endpoints,
* in one of two types:
*
* - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint
* number can be changed. Names like "ep-a" are used for this type.
*
* - Fixed Function: in other cases. some characteristics may be mutable;
* that'd be hardware-specific. Names like "ep12out-bulk" are used.
*
* Gadget drivers are responsible for not setting up conflicting endpoint
* configurations, illegal or unsupported packet lengths, and so on.
*/
static const char ep0name[] = "ep0";

/* Endpoint name/capability table: ep0 plus fixed-function endpoints
 * mimicking real controllers, plus generic configurable endpoints.
 * Iso endpoints are commented out since iso transfers are not emulated.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

/* we don't provide isochronous endpoints since we don't support them */
#define TYPE_BULK_OR_INT	(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)

	/* everyone has ep0 */
	EP_INFO(ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	/* act like a pxa250: fifteen fixed function endpoints */
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
/*
	EP_INFO("ep3in-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
*/
	EP_INFO("ep5in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
/*
	EP_INFO("ep8in-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9out-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
*/
	EP_INFO("ep10in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
/*
	EP_INFO("ep13in-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14out-iso",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
*/
	EP_INFO("ep15in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	/* or like sa1100: two fixed function endpoints */
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	/* and now some generic EPs so we have enough in multi config */
	EP_INFO("ep-aout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-bin",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep-cout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-dout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-ein",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep-fout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-gin",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep-hout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-iout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-jin",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep-kout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep-lin",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep-mout",
		USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
};

#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_info)
/*-------------------------------------------------------------------------*/
/* Size (bytes) of the single-request emulated FIFO in struct dummy. */
#define FIFO_SIZE		64

/* Host-side bookkeeping wrapped around each URB in flight. */
struct urbp {
	struct urb		*urb;
	struct list_head	urbp_list;
	struct sg_mapping_iter	miter;
	u32			miter_started;
};

/* Root-hub state of one emulated host controller. */
enum dummy_rh_state {
	DUMMY_RH_RESET,
	DUMMY_RH_SUSPENDED,
	DUMMY_RH_RUNNING
};

/* Host-controller side state (one per emulated HCD: HS and/or SS). */
struct dummy_hcd {
	struct dummy			*dum;
	enum dummy_rh_state		rh_state;
	struct timer_list		timer;
	u32				port_status;
	u32				old_status;	/* for change detection */
	unsigned long			re_timeout;
	struct usb_device		*udev;
	struct list_head		urbp_list;
	struct urbp			*next_frame_urbp;
	u32				stream_en_ep;
	u8				num_stream[30 / 2];
	unsigned			active:1;
	unsigned			old_active:1;
	unsigned			resuming:1;
};

/* One emulated controller: gadget side plus the two possible HCDs. */
struct dummy {
	spinlock_t			lock;
	/*
	 * DEVICE/GADGET side support
	 */
	struct dummy_ep			ep[DUMMY_ENDPOINTS];
	int				address;
	int				callback_usage;
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct dummy_request		fifo_req;
	u8				fifo_buf[FIFO_SIZE];
	u16				devstatus;
	unsigned			ints_enabled:1;
	unsigned			udc_suspended:1;
	unsigned			pullup:1;
	/*
	 * HOST side support
	 */
	struct dummy_hcd		*hs_hcd;
	struct dummy_hcd		*ss_hcd;
};
/* The dummy_hcd lives in usb_hcd's hcd_priv area. */
static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
{
	return (struct dummy_hcd *) (hcd->hcd_priv);
}

/* Inverse of hcd_to_dummy_hcd(): recover the usb_hcd wrapper. */
static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
{
	return container_of((void *) dum, struct usb_hcd, hcd_priv);
}

/* Device used for host-side messages. */
static inline struct device *dummy_dev(struct dummy_hcd *dum)
{
	return dummy_hcd_to_hcd(dum)->self.controller;
}

/* Device used for gadget-side messages. */
static inline struct device *udc_dev(struct dummy *dum)
{
	return dum->gadget.dev.parent;
}

/* Containing dummy of a gadget-side endpoint. */
static inline struct dummy *ep_to_dummy(struct dummy_ep *ep)
{
	return container_of(ep->gadget, struct dummy, gadget);
}

/* HCD the gadget is "plugged into", selected by the connection speed. */
static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
{
	struct dummy *dum = container_of(gadget, struct dummy, gadget);

	if (dum->gadget.speed == USB_SPEED_SUPER)
		return dum->ss_hcd;
	else
		return dum->hs_hcd;
}

/* Containing dummy of the gadget's struct device. */
static inline struct dummy *gadget_dev_to_dummy(struct device *dev)
{
	return container_of(dev, struct dummy, gadget.dev);
}
/*-------------------------------------------------------------------------*/
/* DEVICE/GADGET SIDE UTILITY ROUTINES */
/* called with spinlock held */
/* Complete every request queued on @ep with -ESHUTDOWN. The lock is
 * dropped around each completion callback; called with the lock held.
 */
static void nuke(struct dummy *dum, struct dummy_ep *ep)
{
	while (!list_empty(&ep->queue)) {
		struct dummy_request	*req;

		req = list_entry(ep->queue.next, struct dummy_request, queue);
		list_del_init(&req->queue);
		req->req.status = -ESHUTDOWN;

		spin_unlock(&dum->lock);
		usb_gadget_giveback_request(&ep->ep, &req->req);
		spin_lock(&dum->lock);
	}
}
/* caller must hold lock */
/* Quiesce gadget-side i/o: clear the device address and fail all queued
 * requests on every endpoint. Caller must hold the lock.
 */
static void stop_activity(struct dummy *dum)
{
	int i;

	/* prevent any more requests */
	dum->address = 0;

	/* The timer is left running so that outstanding URBs can fail */

	/* nuke any pending requests first, so driver i/o is quiesced */
	for (i = 0; i < DUMMY_ENDPOINTS; ++i)
		nuke(dum, &dum->ep[i]);

	/* driver now does any non-usb quiescing necessary */
}
/**
* set_link_state_by_speed() - Sets the current state of the link according to
* the hcd speed
* @dum_hcd: pointer to the dummy_hcd structure to update the link state for
*
* This function updates the port_status according to the link state and the
* speed of the hcd.
*/
static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
{
	struct dummy *dum = dum_hcd->dum;

	if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
		/* SuperSpeed port status bits */
		if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			/* device is connected and not suspended */
			dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
						 USB_PORT_STAT_SPEED_5GBPS) ;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			/* active only when enabled, link in U0 and hub awake */
			if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
			    (dum_hcd->port_status &
			     USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
			    dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	} else {
		/* USB 2.0 port status bits */
		if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE |
						USB_PORT_STAT_LOW_SPEED |
						USB_PORT_STAT_HIGH_SPEED |
						USB_PORT_STAT_SUSPEND);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0)
				dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
			else if ((dum_hcd->port_status &
				  USB_PORT_STAT_SUSPEND) == 0 &&
				 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	}
}
/* caller must hold lock */
/* Recompute the port status and report the resulting disconnect/reset or
 * suspend/resume transitions to the gadget driver. The lock is dropped
 * around every driver callback; callback_usage tracks in-flight callbacks.
 */
static void set_link_state(struct dummy_hcd *dum_hcd)
	__must_hold(&dum->lock)
{
	struct dummy *dum = dum_hcd->dum;
	unsigned int power_bit;

	dum_hcd->active = 0;
	/* Ignore speed-mismatched HCD while the gadget is pulled up */
	if (dum->pullup)
		if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
		     dum->gadget.speed != USB_SPEED_SUPER) ||
		    (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
		     dum->gadget.speed == USB_SPEED_SUPER))
			return;

	set_link_state_by_speed(dum_hcd);
	power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
			USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);

	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
	     dum_hcd->active)
		dum_hcd->resuming = 0;

	/* Currently !connected or in reset */
	if ((dum_hcd->port_status & power_bit) == 0 ||
			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
		unsigned int disconnect = power_bit &
				dum_hcd->old_status & (~dum_hcd->port_status);
		unsigned int reset = USB_PORT_STAT_RESET &
				(~dum_hcd->old_status) & dum_hcd->port_status;

		/* Report reset and disconnect events to the driver */
		if (dum->ints_enabled && (disconnect || reset)) {
			stop_activity(dum);
			++dum->callback_usage;
			spin_unlock(&dum->lock);
			if (reset)
				usb_gadget_udc_reset(&dum->gadget, dum->driver);
			else
				dum->driver->disconnect(&dum->gadget);
			spin_lock(&dum->lock);
			--dum->callback_usage;
		}
	} else if (dum_hcd->active != dum_hcd->old_active &&
			dum->ints_enabled) {
		/* active edge => suspend/resume notification */
		++dum->callback_usage;
		spin_unlock(&dum->lock);
		if (dum_hcd->old_active && dum->driver->suspend)
			dum->driver->suspend(&dum->gadget);
		else if (!dum_hcd->old_active && dum->driver->resume)
			dum->driver->resume(&dum->gadget);
		spin_lock(&dum->lock);
		--dum->callback_usage;
	}

	dum_hcd->old_status = dum_hcd->port_status;
	dum_hcd->old_active = dum_hcd->active;
}
/*-------------------------------------------------------------------------*/
/* DEVICE/GADGET SIDE DRIVER
*
* This only tracks gadget state. All the work is done when the host
* side tries some (emulated) i/o operation. Real device controller
* drivers would do real i/o using dma, fifos, irqs, timers, etc.
*/
/* Is the emulated port enabled? (argument is a struct dummy_hcd *) */
#define is_enabled(dum) \
	(dum->port_status & USB_PORT_STAT_ENABLE)
/* usb_ep_ops.enable: validate the descriptor against this endpoint's
 * fixed-function name and the current connection speed, then mark the
 * endpoint enabled (and optionally stream-capable for SS bulk).
 * Returns 0 on success, -EINVAL on bad settings, -ESHUTDOWN when no
 * driver is bound or the port is not enabled.
 */
static int dummy_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	struct dummy_ep		*ep;
	unsigned		max;
	int			retval;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;

	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!is_enabled(dum_hcd))
		return -ESHUTDOWN;

	/*
	 * For HS/FS devices only bits 0..10 of the wMaxPacketSize represent the
	 * maximum packet size.
	 * For SS devices the wMaxPacketSize is limited by 1024.
	 */
	max = usb_endpoint_maxp(desc);

	/* drivers must not request bad settings, since lower levels
	 * (hardware or its drivers) may not check. some endpoints
	 * can't do iso, many have maxpacket limitations, etc.
	 *
	 * since this "hardware" driver is here to help debugging, we
	 * have some extra sanity checks. (there could be more though,
	 * especially for "ep9out" style fixed function ones.)
	 */
	retval = -EINVAL;
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
		if (strstr(ep->ep.name, "-iso")
				|| strstr(ep->ep.name, "-int")) {
			goto done;
		}
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
			if (max == 1024)
				break;
			goto done;
		case USB_SPEED_HIGH:
			if (max == 512)
				break;
			goto done;
		case USB_SPEED_FULL:
			if (max == 8 || max == 16 || max == 32 || max == 64)
				/* we'll fake any legal size */
				break;
			/* save a return statement */
			fallthrough;
		default:
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
			fallthrough;
		case USB_SPEED_FULL:
			if (max <= 64)
				break;
			/* save a return statement */
			fallthrough;
		default:
			if (max <= 8)
				break;
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (strstr(ep->ep.name, "-bulk")
				|| strstr(ep->ep.name, "-int"))
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
			fallthrough;
		case USB_SPEED_FULL:
			if (max <= 1023)
				break;
			/* save a return statement */
			fallthrough;
		default:
			goto done;
		}
		break;
	default:
		/* few chips support control except on ep0 */
		goto done;
	}

	_ep->maxpacket = max;
	if (usb_ss_max_streams(_ep->comp_desc)) {
		/* Streams are only valid on (SuperSpeed) bulk endpoints */
		if (!usb_endpoint_xfer_bulk(desc)) {
			dev_err(udc_dev(dum), "Can't enable stream support on "
					"non-bulk ep %s\n", _ep->name);
			return -EINVAL;
		}
		ep->stream_en = 1;
	}
	ep->desc = desc;

	dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n",
		_ep->name,
		desc->bEndpointAddress & 0x0f,
		(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
		usb_ep_type_string(usb_endpoint_type(desc)),
		max, ep->stream_en ? "enabled" : "disabled");

	/* at this point real hardware should be NAKing transfers
	 * to that endpoint, until a buffer is queued to it.
	 */
	ep->halted = ep->wedged = 0;
	retval = 0;
done:
	return retval;
}
/* usb_ep_ops.disable: mark the endpoint unconfigured and fail all queued
 * requests. ep0 cannot be disabled this way.
 */
static int dummy_disable(struct usb_ep *_ep)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;
	unsigned long		flags;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;
	dum = ep_to_dummy(ep);

	spin_lock_irqsave(&dum->lock, flags);
	ep->desc = NULL;
	ep->stream_en = 0;
	nuke(dum, ep);
	spin_unlock_irqrestore(&dum->lock, flags);

	dev_dbg(udc_dev(dum), "disabled %s\n", _ep->name);
	return 0;
}
/* usb_ep_ops.alloc_request: allocate a zeroed request wrapper with an
 * initialized queue link; returns NULL on bad endpoint or OOM.
 */
static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
		gfp_t mem_flags)
{
	struct dummy_request *dreq;

	if (!_ep)
		return NULL;

	dreq = kzalloc(sizeof(*dreq), mem_flags);
	if (dreq)
		INIT_LIST_HEAD(&dreq->queue);

	return dreq ? &dreq->req : NULL;
}
/* usb_ep_ops.free_request: release a request obtained from
 * dummy_alloc_request(); it must no longer be queued.
 */
static void dummy_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	if (_ep && _req) {
		struct dummy_request *dreq =
				usb_request_to_dummy_request(_req);

		WARN_ON(!list_empty(&dreq->queue));
		kfree(dreq);
	} else {
		WARN_ON(1);
	}
}
/* Completion callback for the internal single-request FIFO; nothing to do
 * because the caller's original request was already completed.
 */
static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
{
}
/* usb_ep_ops.queue: submit a request. Small IN requests may be absorbed
 * immediately into the emulated single-request FIFO and completed right
 * away (lock dropped around the completion); otherwise the request is
 * simply appended to the endpoint queue for later host-side processing.
 */
static int dummy_queue(struct usb_ep *_ep, struct usb_request *_req,
		gfp_t mem_flags)
{
	struct dummy_ep		*ep;
	struct dummy_request	*req;
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	unsigned long		flags;

	req = usb_request_to_dummy_request(_req);
	if (!_req || !list_empty(&req->queue) || !_req->complete)
		return -EINVAL;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || (!ep->desc && _ep->name != ep0name))
		return -EINVAL;

	dum = ep_to_dummy(ep);
	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!dum->driver || !is_enabled(dum_hcd))
		return -ESHUTDOWN;

#if 0
	dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
			ep, _req, _ep->name, _req->length, _req->buf);
#endif
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	spin_lock_irqsave(&dum->lock, flags);

	/* implement an emulated single-request FIFO */
	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			list_empty(&dum->fifo_req.queue) &&
			list_empty(&ep->queue) &&
			_req->length <= FIFO_SIZE) {
		req = &dum->fifo_req;
		req->req = *_req;
		req->req.buf = dum->fifo_buf;
		memcpy(dum->fifo_buf, _req->buf, _req->length);
		req->req.context = dum;
		req->req.complete = fifo_complete;

		list_add_tail(&req->queue, &ep->queue);
		spin_unlock(&dum->lock);
		_req->actual = _req->length;
		_req->status = 0;
		usb_gadget_giveback_request(_ep, _req);
		spin_lock(&dum->lock);
	}  else
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&dum->lock, flags);

	/* real hardware would likely enable transfers here, in case
	 * it'd been left NAKing.
	 */
	return 0;
}
/* usb_ep_ops.dequeue: remove a not-yet-processed request from the endpoint
 * queue and complete it with -ECONNRESET. Returns -EINVAL when the request
 * is not found on the queue.
 */
static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;
	int			retval = -EINVAL;
	unsigned long		flags;
	struct dummy_request	*req = NULL, *iter;

	if (!_ep || !_req)
		return retval;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);

	if (!dum->driver)
		return -ESHUTDOWN;

	local_irq_save(flags);
	spin_lock(&dum->lock);
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		list_del_init(&iter->queue);
		_req->status = -ECONNRESET;
		req = iter;
		retval = 0;
		break;
	}
	spin_unlock(&dum->lock);

	if (retval == 0) {
		dev_dbg(udc_dev(dum),
				"dequeued req %p from %s, len %d buf %p\n",
				req, _ep->name, _req->length, _req->buf);
		usb_gadget_giveback_request(_ep, _req);
	}
	local_irq_restore(flags);
	return retval;
}
static int
dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct dummy_ep *ep;
	struct dummy *dum;

	if (!_ep)
		return -EINVAL;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;

	if (!value) {
		/* clearing the halt also clears any wedge */
		ep->halted = 0;
		ep->wedged = 0;
		return 0;
	}

	/* refuse to halt an IN endpoint that still has queued i/o */
	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			!list_empty(&ep->queue))
		return -EAGAIN;

	ep->halted = 1;
	if (wedged)
		ep->wedged = 1;
	/* FIXME clear emulated data toggle too */
	return 0;
}
/* usb_ep_ops.set_halt: halt (value != 0) or un-halt, never wedging */
static int
dummy_set_halt(struct usb_ep *_ep, int value)
{
	return dummy_set_halt_and_wedge(_ep, value, 0);
}
static int dummy_set_wedge(struct usb_ep *_ep)
{
if (!_ep || _ep->name == ep0name)
return -EINVAL;
return dummy_set_halt_and_wedge(_ep, 1, 1);
}
/* endpoint operations handed to the gadget layer for every emulated ep */
static const struct usb_ep_ops dummy_ep_ops = {
	.enable = dummy_enable,
	.disable = dummy_disable,
	.alloc_request = dummy_alloc_request,
	.free_request = dummy_free_request,
	.queue = dummy_queue,
	.dequeue = dummy_dequeue,
	.set_halt = dummy_set_halt,
	.set_wedge = dummy_set_wedge,
};
/*-------------------------------------------------------------------------*/
/* there are both host and device side versions of this call ... */
/*
 * Gadget-side get_frame.  There is no real (micro)frame counter, so a
 * number is derived from the monotonic clock: milliseconds within the
 * current second (0..999).
 */
static int dummy_g_get_frame(struct usb_gadget *_gadget)
{
	struct timespec64 ts64;
	ktime_get_ts64(&ts64);
	return ts64.tv_nsec / NSEC_PER_MSEC;
}
/*
 * Gadget-side remote wakeup.  Only valid when the host enabled remote
 * wakeup (or B-HNP), the port is connected, and the link is suspended.
 */
static int dummy_wakeup(struct usb_gadget *_gadget)
{
	struct dummy_hcd *dum_hcd;
	dum_hcd = gadget_to_dummy_hcd(_gadget);
	if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
				| (1 << USB_DEVICE_REMOTE_WAKEUP))))
		return -EINVAL;
	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
		return -ENOLINK;
	if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
			 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
		return -EIO;
	/* FIXME: What if the root hub is suspended but the port isn't? */
	/* hub notices our request, issues downstream resume, etc */
	dum_hcd->resuming = 1;
	/* emulate ~20 msec of resume signaling, completed by the rh timer */
	dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
	mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
	return 0;
}
/* usb_gadget_ops.set_selfpowered: mirror the flag into GET_STATUS state */
static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct dummy *dum = gadget_to_dummy_hcd(_gadget)->dum;

	_gadget->is_selfpowered = (value != 0);
	if (value)
		dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
	else
		dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	return 0;
}
/* refresh ep0's maxpacket for the current speed (SS encodes 2^9 as "9") */
static void dummy_udc_update_ep0(struct dummy *dum)
{
	dum->ep[0].ep.maxpacket =
		(dum->gadget.speed == USB_SPEED_SUPER) ? 9 : 64;
}
/*
 * usb_gadget_ops.pullup: (dis)connect the emulated D+ pull-up.  On
 * disconnect, also wait until no gadget callbacks are in flight, which
 * emulates synchronize_irq() for the gadget-removal path.
 */
static int dummy_pullup(struct usb_gadget *_gadget, int value)
{
	struct dummy_hcd *dum_hcd;
	struct dummy *dum;
	unsigned long flags;
	dum = gadget_dev_to_dummy(&_gadget->dev);
	dum_hcd = gadget_to_dummy_hcd(_gadget);
	spin_lock_irqsave(&dum->lock, flags);
	dum->pullup = (value != 0);
	set_link_state(dum_hcd);
	if (value == 0) {
		/*
		 * Emulate synchronize_irq(): wait for callbacks to finish.
		 * This seems to be the best place to emulate the call to
		 * synchronize_irq() that's in usb_gadget_remove_driver().
		 * Doing it in dummy_udc_stop() would be too late since it
		 * is called after the unbind callback and unbind shouldn't
		 * be invoked until all the other callbacks are finished.
		 */
		while (dum->callback_usage > 0) {
			/* drop the lock so the callbacks can make progress */
			spin_unlock_irqrestore(&dum->lock, flags);
			usleep_range(1000, 2000);
			spin_lock_irqsave(&dum->lock, flags);
		}
	}
	spin_unlock_irqrestore(&dum->lock, flags);
	/* let the host side notice the (dis)connect */
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
/* usb_gadget_ops.udc_set_speed: record the speed and refit ep0 */
static void dummy_udc_set_speed(struct usb_gadget *_gadget,
		enum usb_device_speed speed)
{
	struct dummy *dum = gadget_dev_to_dummy(&_gadget->dev);

	dum->gadget.speed = speed;
	dummy_udc_update_ep0(dum);
}
/*
 * usb_gadget_ops.udc_async_callbacks: gate gadget-driver callbacks.
 * ints_enabled is read by the transfer paths under dum->lock.
 */
static void dummy_udc_async_callbacks(struct usb_gadget *_gadget, bool enable)
{
	struct dummy *dum = gadget_dev_to_dummy(&_gadget->dev);
	spin_lock_irq(&dum->lock);
	dum->ints_enabled = enable;
	spin_unlock_irq(&dum->lock);
}
/* defined below, after the registration comments */
static int dummy_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int dummy_udc_stop(struct usb_gadget *g);
/* gadget (device-side) operations table registered with udc-core */
static const struct usb_gadget_ops dummy_ops = {
	.get_frame = dummy_g_get_frame,
	.wakeup = dummy_wakeup,
	.set_selfpowered = dummy_set_selfpowered,
	.pullup = dummy_pullup,
	.udc_start = dummy_udc_start,
	.udc_stop = dummy_udc_stop,
	.udc_set_speed = dummy_udc_set_speed,
	.udc_async_callbacks = dummy_udc_async_callbacks,
};
/*-------------------------------------------------------------------------*/
/* "function" sysfs attribute */
/* sysfs "function" attribute: name of the bound gadget driver, if any */
static ssize_t function_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dummy *dum = gadget_dev_to_dummy(dev);
	/* empty read when no driver is bound or it has no function name */
	if (!dum->driver || !dum->driver->function)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
}
static DEVICE_ATTR_RO(function);
/*-------------------------------------------------------------------------*/
/*
* Driver registration/unregistration.
*
* This is basically hardware-specific; there's usually only one real USB
* device (not host) controller since that's how USB devices are intended
* to work. So most implementations of these api calls will rely on the
* fact that only one driver will ever bind to the hardware. But curious
* hardware can be built with discrete components, so the gadget API doesn't
* require that assumption.
*
* For this emulator, it might be convenient to create a usb device
* for each driver that registers: just add to a big root hub.
*/
/*
 * usb_gadget_ops.udc_start: bind a gadget driver to the emulated UDC.
 * Rejects speeds this emulator cannot handle, then records the driver
 * under the lock so the transfer paths see a consistent state.
 */
static int dummy_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
	struct dummy *dum = dum_hcd->dum;
	switch (g->speed) {
	/* All the speeds we support */
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		/*
		 * Report the value actually tested (g->speed); the old
		 * message printed driver->max_speed, which is not what
		 * the switch above rejects on.
		 */
		dev_err(dummy_dev(dum_hcd), "Unsupported device speed %d\n",
				g->speed);
		return -EINVAL;
	}
	/*
	 * DEVICE side init ... the layer above hardware, which
	 * can't enumerate without help from the driver we're binding.
	 */
	spin_lock_irq(&dum->lock);
	dum->devstatus = 0;
	dum->driver = driver;
	spin_unlock_irq(&dum->lock);
	return 0;
}
static int dummy_udc_stop(struct usb_gadget *g)
{
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
dum->ints_enabled = 0;
stop_activity(dum);
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
return 0;
}
#undef is_enabled
/* The gadget structure is stored inside the hcd structure and will be
* released along with it. */
/* one-time setup of the emulated endpoint "hardware" for this gadget */
static void init_dummy_udc_hw(struct dummy *dum)
{
	int idx;

	INIT_LIST_HEAD(&dum->gadget.ep_list);
	for (idx = 0; idx < DUMMY_ENDPOINTS; idx++) {
		struct dummy_ep *ep = &dum->ep[idx];

		/* the ep_info table is terminated by a NULL name */
		if (!ep_info[idx].name)
			break;
		ep->ep.name = ep_info[idx].name;
		ep->ep.caps = ep_info[idx].caps;
		ep->ep.ops = &dummy_ep_ops;
		list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
		ep->halted = 0;
		ep->wedged = 0;
		ep->already_seen = 0;
		ep->setup_stage = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, ~0);
		ep->ep.max_streams = 16;
		ep->last_io = jiffies;
		ep->gadget = &dum->gadget;
		ep->desc = NULL;
		INIT_LIST_HEAD(&ep->queue);
	}
	/* ep0 is special: reachable via gadget.ep0, not the generic list */
	dum->gadget.ep0 = &dum->ep[0].ep;
	list_del_init(&dum->ep[0].ep.ep_list);
	INIT_LIST_HEAD(&dum->fifo_req.queue);
#ifdef CONFIG_USB_OTG
	dum->gadget.is_otg = 1;
#endif
}
/*
 * Platform probe for the UDC half: (re)initialize the gadget object,
 * register it with udc-core, and expose the "function" sysfs attribute.
 * Uses goto-based unwind on partial failure.
 */
static int dummy_udc_probe(struct platform_device *pdev)
{
	struct dummy *dum;
	int rc;
	/* platform data is a pointer to the shared struct dummy */
	dum = *((void **)dev_get_platdata(&pdev->dev));
	/* Clear usb_gadget region for new registration to udc-core */
	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
	dum->gadget.name = gadget_name;
	dum->gadget.ops = &dummy_ops;
	if (mod_data.is_super_speed)
		dum->gadget.max_speed = USB_SPEED_SUPER;
	else if (mod_data.is_high_speed)
		dum->gadget.max_speed = USB_SPEED_HIGH;
	else
		dum->gadget.max_speed = USB_SPEED_FULL;
	dum->gadget.dev.parent = &pdev->dev;
	init_dummy_udc_hw(dum);
	rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
	if (rc < 0)
		goto err_udc;
	rc = device_create_file(&dum->gadget.dev, &dev_attr_function);
	if (rc < 0)
		goto err_dev;
	platform_set_drvdata(pdev, dum);
	return rc;
err_dev:
	usb_del_gadget_udc(&dum->gadget);
err_udc:
	return rc;
}
/* Platform remove for the UDC half: undo dummy_udc_probe() */
static void dummy_udc_remove(struct platform_device *pdev)
{
	struct dummy *dum = platform_get_drvdata(pdev);
	device_remove_file(&dum->gadget.dev, &dev_attr_function);
	usb_del_gadget_udc(&dum->gadget);
}
/* common suspend/resume helper: record the state and re-evaluate the link */
static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
		int suspend)
{
	spin_lock_irq(&dum->lock);
	dum->udc_suspended = suspend;
	set_link_state(dum_hcd);
	spin_unlock_irq(&dum->lock);
}
/* platform suspend for the UDC half; notifies the root hub of the change */
static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct dummy *dum = platform_get_drvdata(pdev);
	struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	dev_dbg(&pdev->dev, "%s\n", __func__);
	dummy_udc_pm(dum, dum_hcd, 1);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
/* platform resume for the UDC half; notifies the root hub of the change */
static int dummy_udc_resume(struct platform_device *pdev)
{
	struct dummy *dum = platform_get_drvdata(pdev);
	struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	dev_dbg(&pdev->dev, "%s\n", __func__);
	dummy_udc_pm(dum, dum_hcd, 0);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
/* platform driver glue for the device-side (UDC) half of the emulator */
static struct platform_driver dummy_udc_driver = {
	.probe = dummy_udc_probe,
	.remove_new = dummy_udc_remove,
	.suspend = dummy_udc_suspend,
	.resume = dummy_udc_resume,
	.driver = {
		.name = gadget_name,
	},
};
/*-------------------------------------------------------------------------*/
/* encode an endpoint as a bit index: bit 0 = direction (IN), bits 1.. = num */
static unsigned int dummy_get_ep_idx(const struct usb_endpoint_descriptor *desc)
{
	return (usb_endpoint_num(desc) << 1) |
		(usb_endpoint_dir_in(desc) ? 1 : 0);
}
/* HOST SIDE DRIVER
*
* this uses the hcd framework to hook up to host side drivers.
* its root hub will only have one device, otherwise it acts like
* a normal host controller.
*
* when urbs are queued, they're just stuck on a list that we
* scan in a timer callback. that callback connects writes from
* the host with reads from the device, and so on, based on the
* usb 2.0 rules.
*/
/* nonzero iff streams were enabled for this urb's (bulk) endpoint */
static int dummy_ep_stream_en(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	const struct usb_endpoint_descriptor *desc = &urb->ep->desc;

	/* streams only exist on bulk endpoints */
	if (!usb_endpoint_xfer_bulk(desc))
		return 0;
	return dum_hcd->stream_en_ep & (1 << dummy_get_ep_idx(desc));
}
/*
 * The max stream number is saved as a nibble, so for the 30 possible
 * endpoints we only need 15 bytes of memory. Therefore we are limited to a
 * max of 16 streams (0 means we use only 1 stream). The maximum according to
 * the spec is 16 bit, so if the 16-stream limit is ever lifted, the array
 * size should be increased to 30 elements of type u16.
 */
/* number of streams enabled on this pipe's endpoint (at least 1) */
static int get_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe)
{
	int nibble = dum_hcd->num_stream[usb_pipeendpoint(pipe)];

	/* OUT endpoints live in the high nibble, IN in the low nibble */
	nibble = usb_pipeout(pipe) ? (nibble >> 4) : (nibble & 0xf);
	/* stored value 0 means "one stream" */
	return nibble + 1;
}
/* record (streams - 1) in the per-endpoint nibble for this pipe */
static void set_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe, unsigned int streams)
{
	int cur = dum_hcd->num_stream[usb_pipeendpoint(pipe)];

	streams--;
	if (usb_pipeout(pipe)) {
		/* OUT uses the high nibble; preserve the IN (low) nibble */
		cur = (cur & 0xf) | (streams << 4);
	} else {
		/* IN uses the low nibble; preserve the OUT (high) nibble */
		cur = (cur & 0xf0) | streams;
	}
	dum_hcd->num_stream[usb_pipeendpoint(pipe)] = cur;
}
/*
 * Check that an urb's stream_id is consistent with the endpoint's stream
 * configuration: no id when streams are off, a valid in-range id when on.
 */
static int dummy_validate_stream(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	unsigned int max_streams;
	int enabled;
	enabled = dummy_ep_stream_en(dum_hcd, urb);
	if (!urb->stream_id) {
		/* id 0 is only legal when streams are disabled */
		if (enabled)
			return -EINVAL;
		return 0;
	}
	if (!enabled)
		return -EINVAL;
	max_streams = get_max_streams_for_pipe(dum_hcd,
			usb_pipeendpoint(urb->pipe));
	if (urb->stream_id > max_streams) {
		dev_err(dummy_dev(dum_hcd), "Stream id %d is out of range.\n",
				urb->stream_id);
		/* deliberate hard stop: this indicates a core/driver bug */
		BUG();
		return -EINVAL;
	}
	return 0;
}
/*
 * hc_driver.urb_enqueue: validate and queue an urb, then kick the frame
 * timer which performs the actual "transfer" work.  The per-urb wrapper
 * (struct urbp) carries sg-iterator state across timer ticks.
 */
static int dummy_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags
) {
	struct dummy_hcd *dum_hcd;
	struct urbp *urbp;
	unsigned long flags;
	int rc;
	urbp = kmalloc(sizeof *urbp, mem_flags);
	if (!urbp)
		return -ENOMEM;
	urbp->urb = urb;
	urbp->miter_started = 0;
	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	rc = dummy_validate_stream(dum_hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}
	/* first urb pins the usb_device; all later urbs must match it */
	if (!dum_hcd->udev) {
		dum_hcd->udev = urb->dev;
		usb_get_dev(dum_hcd->udev);
	} else if (unlikely(dum_hcd->udev != urb->dev))
		dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");
	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
	urb->hcpriv = urbp;
	/* remember where the current "frame" ends, for the timer's scan */
	if (!dum_hcd->next_frame_urbp)
		dum_hcd->next_frame_urbp = urbp;
	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
		urb->error_count = 1;		/* mark as a new urb */
	/* kick the scheduler, it'll do the rest */
	if (!timer_pending(&dum_hcd->timer))
		mod_timer(&dum_hcd->timer, jiffies + 1);
 done:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}
/* hc_driver.urb_dequeue: mark the urb unlinked; the timer gives it back */
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long flags;
	int rc;

	/* giveback happens automatically in timer callback,
	 * so make sure the callback happens */
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc == 0 && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
			!list_empty(&dum_hcd->urbp_list))
		mod_timer(&dum_hcd->timer, jiffies);
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}
/*
 * Copy up to @len bytes between the host urb's buffer and the gadget
 * request's buffer, in the direction given by the urb.  Scatter-gather
 * urbs use a persistent sg_mapping_iter stored in the urbp so a transfer
 * can resume mid-sg on a later call.  Returns bytes copied or -EINVAL
 * if the sg list runs out early (which would be a caller bug).
 */
static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
		u32 len)
{
	void *ubuf, *rbuf;
	struct urbp *urbp = urb->hcpriv;
	int to_host;
	struct sg_mapping_iter *miter = &urbp->miter;
	u32 trans = 0;
	u32 this_sg;
	bool next_sg;
	to_host = usb_urb_dir_in(urb);
	rbuf = req->req.buf + req->req.actual;
	/* simple case: linear transfer buffer, one memcpy suffices */
	if (!urb->num_sgs) {
		ubuf = urb->transfer_buffer + urb->actual_length;
		if (to_host)
			memcpy(ubuf, rbuf, len);
		else
			memcpy(rbuf, ubuf, len);
		return len;
	}
	/* lazily start the sg iterator on the first sg transfer */
	if (!urbp->miter_started) {
		u32 flags = SG_MITER_ATOMIC;
		if (to_host)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(miter, urb->sg, urb->num_sgs, flags);
		urbp->miter_started = 1;
	}
	next_sg = sg_miter_next(miter);
	if (next_sg == false) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	/* walk sg entries, copying min(remaining len, entry length) each */
	do {
		ubuf = miter->addr;
		this_sg = min_t(u32, len, miter->length);
		miter->consumed = this_sg;
		trans += this_sg;
		if (to_host)
			memcpy(ubuf, rbuf, this_sg);
		else
			memcpy(rbuf, ubuf, this_sg);
		len -= this_sg;
		if (!len)
			break;
		next_sg = sg_miter_next(miter);
		if (next_sg == false) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		rbuf += this_sg;
	} while (1);
	sg_miter_stop(miter);
	return trans;
}
/*
 * transfer up to a frame's worth; caller must own lock.
 *
 * Matches the host urb against the gadget endpoint's request queue and
 * moves data between them, honoring maxpacket/short-packet/zlp rules for
 * both sides.  Returns the number of bytes moved (counted against the
 * frame's bandwidth budget); *status is updated when the urb completes.
 */
static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
		struct dummy_ep *ep, int limit, int *status)
{
	struct dummy *dum = dum_hcd->dum;
	struct dummy_request *req;
	int sent = 0;
top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->queue, queue) {
		unsigned host_len, dev_len, len;
		int is_short, to_host;
		int rescan = 0;
		/* with streams enabled, only match the urb's stream id */
		if (dummy_ep_stream_en(dum_hcd, urb)) {
			if ((urb->stream_id != req->req.stream_id))
				continue;
		}
		/* 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);
		/* FIXME update emulated data toggle too */
		to_host = usb_urb_dir_in(urb);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* not enough bandwidth left? */
			if (limit < ep->ep.maxpacket && limit < len)
				break;
			len = min_t(unsigned, len, limit);
			if (len == 0)
				break;
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}
			len = dummy_perform_transfer(urb, req, len);
			ep->last_io = jiffies;
			if ((int)len < 0) {
				req->req.status = len;
			} else {
				limit -= len;
				sent += len;
				urb->actual_length += len;
				req->req.actual += len;
			}
		}
		/* short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				*status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					*status = -EOVERFLOW;
				else
					*status = 0;
			} else {
				*status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}
		/*
		 * many requests terminate without a short packet.
		 * send a zlp if demanded by flags.
		 */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					*status = 0;
			}
		}
		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {
			list_del_init(&req->queue);
			/* giveback may queue more requests; drop the lock */
			spin_unlock(&dum->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&dum->lock);
			/* requests might have been unlinked... */
			rescan = 1;
		}
		/* host side completion --> terminate */
		if (*status != -EINPROGRESS)
			break;
		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return sent;
}
/* per-frame byte budget for a periodic endpoint at the current speed */
static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
{
	int limit = ep->ep.maxpacket;

	if (dum->gadget.speed == USB_SPEED_HIGH) {
		/* high bandwidth mode: extra transactions per microframe,
		 * and 8 microframes per frame */
		int extra = usb_endpoint_maxp_mult(ep->desc) * 8;

		limit += limit * extra;
	} else if (dum->gadget.speed == USB_SPEED_SUPER) {
		switch (usb_endpoint_type(ep->desc)) {
		case USB_ENDPOINT_XFER_ISOC:
			/* Sec. 4.4.8.2 USB3.0 Spec */
			limit = 3 * 16 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_INT:
			/* Sec. 4.4.7.2 USB3.0 Spec */
			limit = 3 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_BULK:
		default:
			break;
		}
	}
	return limit;
}
/* port is usable only when connected + enabled and not suspended */
#define is_active(dum_hcd)	((dum_hcd->port_status & \
		(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
			USB_PORT_STAT_SUSPEND)) \
		== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
/*
 * Map a host-side endpoint address (number | direction bit) onto the
 * gadget endpoint currently configured with that address, or NULL when
 * the link is down, callbacks are disabled, or nothing matches.
 */
static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
{
	int i;
	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
			dum->ss_hcd : dum->hs_hcd)))
		return NULL;
	if (!dum->ints_enabled)
		return NULL;
	/* address 0 (either direction) is always ep0 */
	if ((address & ~USB_DIR_IN) == 0)
		return &dum->ep[0];
	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
		struct dummy_ep *ep = &dum->ep[i];
		if (!ep->desc)
			continue;
		if (ep->desc->bEndpointAddress == address)
			return ep;
	}
	return NULL;
}
#undef is_active
#define Dev_Request (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define Dev_InRequest (Dev_Request | USB_DIR_IN)
#define Intf_Request (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define Intf_InRequest (Intf_Request | USB_DIR_IN)
#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define Ep_InRequest (Ep_Request | USB_DIR_IN)
/**
* handle_control_request() - handles all control transfers
* @dum_hcd: pointer to dummy (the_controller)
* @urb: the urb request to handle
* @setup: pointer to the setup data for a USB device control
* request
* @status: pointer to request handling status
*
* Return 0 - if the request was handled
 * 1 - if the request wasn't handled
* error code on error
*/
static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct dummy_ep *ep2;
	struct dummy *dum = dum_hcd->dum;
	int ret_val = 1;	/* default: let the gadget driver handle it */
	unsigned w_index;
	unsigned w_value;
	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != Dev_Request)
			break;
		dum->address = w_value;
		*status = 0;
		dev_dbg(udc_dev(dum), "set_address = %d\n",
				w_value);
		ret_val = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == Dev_Request) {
			ret_val = 0;
			/* translate the feature selector into a devstatus bit */
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				dum->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				dum->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				dum->gadget.a_alt_hnp_support = 1;
				break;
			case USB_DEVICE_U1_ENABLE:
				/* U1/U2/LTM only exist on the USB3 root hub */
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U1_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_U2_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U2_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_LTM_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_LTM_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				dum->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == Ep_Request) {
			/* endpoint halt */
			ep2 = find_endpoint(dum, w_index);
			if (!ep2 || ep2->ep.name == ep0name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == Dev_Request) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;
			case USB_DEVICE_U1_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U1_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_U2_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U2_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_LTM_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_LTM_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				dum->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == Ep_Request) {
			/* endpoint halt */
			ep2 = find_endpoint(dum, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			/* a wedged endpoint stays halted until set_wedge is cleared */
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == Dev_InRequest
				|| setup->bRequestType == Intf_InRequest
				|| setup->bRequestType == Ep_InRequest) {
			char *buf;
			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == Ep_InRequest) {
					ep2 = find_endpoint(dum, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   Dev_InRequest) {
					buf[0] = (u8)dum->devstatus;
				} else
					buf[0] = 0;
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
				urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}
/*
* Drive both sides of the transfers; looks like irq handlers to both
* drivers except that the callbacks are invoked from soft interrupt
* context.
*/
/*
 * Per-"frame" scheduler.  Computes a bandwidth budget for the current
 * speed, walks all queued urbs, pairs them with gadget endpoint requests
 * via transfer(), handles ep0 setup packets inline, and gives completed
 * urbs back to the core.  Re-arms itself while work remains.
 */
static void dummy_timer(struct timer_list *t)
{
	struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
	struct dummy *dum = dum_hcd->dum;
	struct urbp *urbp, *tmp;
	unsigned long flags;
	int limit, total;
	int i;
	/* simplistic model for one frame's bandwidth */
	/* FIXME: account for transaction and packet overhead */
	switch (dum->gadget.speed) {
	case USB_SPEED_LOW:
		total = 8/*bytes*/ * 12/*packets*/;
		break;
	case USB_SPEED_FULL:
		total = 64/*bytes*/ * 19/*packets*/;
		break;
	case USB_SPEED_HIGH:
		total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
		break;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		total = 490000;
		break;
	default:	/* Can't happen */
		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
		total = 0;
		break;
	}
	/* FIXME if HZ != 1000 this will probably misbehave ... */
	/* look at each urb queued by the host side driver */
	spin_lock_irqsave(&dum->lock, flags);
	if (!dum_hcd->udev) {
		dev_err(dummy_dev(dum_hcd),
				"timer fired with no URBs pending?\n");
		spin_unlock_irqrestore(&dum->lock, flags);
		return;
	}
	dum_hcd->next_frame_urbp = NULL;
	/* reset per-frame endpoint bookkeeping */
	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
		if (!ep_info[i].name)
			break;
		dum->ep[i].already_seen = 0;
	}
restart:
	list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) {
		struct urb *urb;
		struct dummy_request *req;
		u8 address;
		struct dummy_ep *ep = NULL;
		int status = -EINPROGRESS;
		/* stop when we reach URBs queued after the timer interrupt */
		if (urbp == dum_hcd->next_frame_urbp)
			break;
		urb = urbp->urb;
		if (urb->unlinked)
			goto return_urb;
		else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
			continue;
		/* Used up this frame's bandwidth? */
		if (total <= 0)
			continue;
		/* find the gadget's ep for this request (if configured) */
		address = usb_pipeendpoint (urb->pipe);
		if (usb_urb_dir_in(urb))
			address |= USB_DIR_IN;
		ep = find_endpoint(dum, address);
		if (!ep) {
			/* set_configuration() disagreement */
			dev_dbg(dummy_dev(dum_hcd),
				"no ep configured for urb %p\n",
				urb);
			status = -EPROTO;
			goto return_urb;
		}
		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &dum->ep[0] && urb->error_count) {
			ep->setup_stage = 1;	/* a new urb */
			urb->error_count = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			/* NOTE: must not be iso! */
			dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n",
					ep->ep.name, urb);
			status = -EPIPE;
			goto return_urb;
		}
		/* FIXME make sure both ends agree on maxpacket */
		/* handle control requests */
		if (ep == &dum->ep[0] && ep->setup_stage) {
			struct usb_ctrlrequest setup;
			int value;
			setup = *(struct usb_ctrlrequest *) urb->setup_packet;
			/* paranoia, in case of stale queued data */
			list_for_each_entry(req, &ep->queue, queue) {
				list_del_init(&req->queue);
				req->req.status = -EOVERFLOW;
				dev_dbg(udc_dev(dum), "stale req = %p\n",
						req);
				spin_unlock(&dum->lock);
				usb_gadget_giveback_request(&ep->ep, &req->req);
				spin_lock(&dum->lock);
				ep->already_seen = 0;
				goto restart;
			}
			/* gadget driver never sees set_address or operations
			 * on standard feature flags.  some hardware doesn't
			 * even expose them.
			 */
			ep->last_io = jiffies;
			ep->setup_stage = 0;
			ep->halted = 0;
			value = handle_control_request(dum_hcd, urb, &setup,
						       &status);
			/* gadget driver handles all other requests.  block
			 * until setup() returns; no reentrancy issues etc.
			 */
			if (value > 0) {
				/* callback_usage lets dummy_pullup() wait for us */
				++dum->callback_usage;
				spin_unlock(&dum->lock);
				value = dum->driver->setup(&dum->gadget,
						&setup);
				spin_lock(&dum->lock);
				--dum->callback_usage;
				if (value >= 0) {
					/* no delays (max 64KB data stage) */
					limit = 64*1024;
					goto treat_control_like_bulk;
				}
				/* error, see below */
			}
			if (value < 0) {
				if (value != -EOPNOTSUPP)
					dev_dbg(udc_dev(dum),
						"setup --> %d\n",
						value);
				status = -EPIPE;
				urb->actual_length = 0;
			}
			goto return_urb;
		}
		/* non-control requests */
		limit = total;
		switch (usb_pipetype(urb->pipe)) {
		case PIPE_ISOCHRONOUS:
			/*
			 * We don't support isochronous.  But if we did,
			 * here are some of the issues we'd have to face:
			 *
			 * Is it urb->interval since the last xfer?
			 * Use urb->iso_frame_desc[i].
			 * Complete whether or not ep has requests queued.
			 * Report random errors, to debug drivers.
			 */
			limit = max(limit, periodic_bytes(dum, ep));
			status = -EINVAL;	/* fail all xfers */
			break;
		case PIPE_INTERRUPT:
			/* FIXME is it urb->interval since the last xfer?
			 * this almost certainly polls too fast.
			 */
			limit = max(limit, periodic_bytes(dum, ep));
			fallthrough;
		default:
treat_control_like_bulk:
			ep->last_io = jiffies;
			total -= transfer(dum_hcd, urb, ep, limit, &status);
			break;
		}
		/* incomplete transfer? */
		if (status == -EINPROGRESS)
			continue;
return_urb:
		list_del(&urbp->urbp_list);
		kfree(urbp);
		if (ep)
			ep->already_seen = ep->setup_stage = 0;
		usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb);
		spin_unlock(&dum->lock);
		usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status);
		spin_lock(&dum->lock);
		goto restart;
	}
	if (list_empty(&dum_hcd->urbp_list)) {
		/* last urb gone; drop the usb_device reference taken at enqueue */
		usb_put_dev(dum_hcd->udev);
		dum_hcd->udev = NULL;
	} else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
		/* want a 1 msec delay here */
		mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
	}
	spin_unlock_irqrestore(&dum->lock, flags);
}
/*-------------------------------------------------------------------------*/
/* all "status changed" bits, shifted into the change (upper) half-word */
#define PORT_C_MASK \
	((USB_PORT_STAT_C_CONNECTION \
	| USB_PORT_STAT_C_ENABLE \
	| USB_PORT_STAT_C_SUSPEND \
	| USB_PORT_STAT_C_OVERCURRENT \
	| USB_PORT_STAT_C_RESET) << 16)
/*
 * hc_driver.hub_status_data: report whether the single root-hub port has
 * pending changes.  Also completes an in-progress resume once its 20 ms
 * emulated signaling period elapses.
 */
static int dummy_hub_status(struct usb_hcd *hcd, char *buf)
{
	struct dummy_hcd *dum_hcd;
	unsigned long flags;
	int retval = 0;
	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd))
		goto done;
	if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) {
		dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
		dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
		set_link_state(dum_hcd);
	}
	if ((dum_hcd->port_status & PORT_C_MASK) != 0) {
		/* bit 1 of the bitmap = port 1 changed */
		*buf = (1 << 1);
		dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n",
				dum_hcd->port_status);
		retval = 1;
		if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED)
			usb_hcd_resume_root_hub(hcd);
	}
done:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return retval;
}
/* usb 3.0 root hub device descriptor */
/* BOS + SuperSpeed capability returned for GET_DESCRIPTOR(BOS) on USB3 */
static struct {
	struct usb_bos_descriptor bos;
	struct usb_ss_cap_descriptor ss_cap;
} __packed usb3_bos_desc = {
	.bos = {
		.bLength = USB_DT_BOS_SIZE,
		.bDescriptorType = USB_DT_BOS,
		.wTotalLength = cpu_to_le16(sizeof(usb3_bos_desc)),
		.bNumDeviceCaps = 1,
	},
	.ss_cap = {
		.bLength = USB_DT_USB_SS_CAP_SIZE,
		.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
		.bDevCapabilityType = USB_SS_CAP_TYPE,
		.wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION),
		.bFunctionalitySupport = ilog2(USB_5GBPS_OPERATION),
	},
};
/* fill in the SuperSpeed root-hub descriptor (one non-removable port) */
static inline void
ss_hub_descriptor(struct usb_hub_descriptor *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->bDescriptorType = USB_DT_SS_HUB;
	desc->bDescLength = 12;
	desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
						HUB_CHAR_COMMON_OCPM);
	desc->bNbrPorts = 1;
	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
	desc->u.ss.DeviceRemovable = 0;
}
/* fill in the USB 2.0 root-hub descriptor (one non-removable port) */
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->bDescriptorType = USB_DT_HUB;
	desc->bDescLength = 9;
	desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
						HUB_CHAR_COMMON_OCPM);
	desc->bNbrPorts = 1;
	desc->u.hs.DeviceRemovable[0] = 0;
	desc->u.hs.DeviceRemovable[1] = 0xff;	/* PortPwrCtrlMask */
}
/*
 * Hub-class control requests for the emulated root hub.  This single
 * handler serves both the USB 2 hcd and (when configured) the shared
 * USB 3 hcd; requests that are invalid for the roothub's speed answer
 * with a "protocol stall" (-EPIPE).  Returns -ETIMEDOUT when hardware
 * is flagged inaccessible, a positive byte count for GET_DESCRIPTOR(BOS),
 * 0 otherwise.  Runs under dum->lock with IRQs disabled.
 */
static int dummy_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
) {
	struct dummy_hcd *dum_hcd;
	int		retval = 0;
	unsigned long	flags;
	if (!HCD_HW_ACCESSIBLE(hcd))
		return -ETIMEDOUT;
	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		break;
	case ClearPortFeature:
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/* Suspend signaling only exists on USB 2 ports */
			if (hcd->speed == HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					"USB_PORT_FEAT_SUSPEND req not "
					"supported for USB 3.0 roothub\n");
				goto error;
			}
			if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) {
				/* 20msec resume signaling */
				dum_hcd->resuming = 1;
				dum_hcd->re_timeout = jiffies +
						msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(dummy_dev(dum_hcd), "power-off\n");
			if (hcd->speed == HCD_USB3)
				dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
			else
				dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
			set_link_state(dum_hcd);
			break;
		case USB_PORT_FEAT_ENABLE:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_SUSPEND:
			/* Not allowed for USB-3 */
			if (hcd->speed == HCD_USB3)
				goto error;
			fallthrough;
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_RESET:
			/* feature bits map 1:1 onto port_status bits */
			dum_hcd->port_status &= ~(1 << wValue);
			set_link_state(dum_hcd);
			break;
		default:
			/* Disallow INDICATOR and C_OVER_CURRENT */
			goto error;
		}
		break;
	case GetHubDescriptor:
		/* the USB 3 hub needs the SS hub descriptor explicitly */
		if (hcd->speed == HCD_USB3 &&
				(wLength < USB_DT_SS_HUB_SIZE ||
				 wValue != (USB_DT_SS_HUB << 8))) {
			dev_dbg(dummy_dev(dum_hcd),
				"Wrong hub descriptor type for "
				"USB 3.0 roothub.\n");
			goto error;
		}
		if (hcd->speed == HCD_USB3)
			ss_hub_descriptor((struct usb_hub_descriptor *) buf);
		else
			hub_descriptor((struct usb_hub_descriptor *) buf);
		break;
	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
		/* only the BOS descriptor of the USB 3 roothub is served */
		if (hcd->speed != HCD_USB3)
			goto error;
		if ((wValue >> 8) != USB_DT_BOS)
			goto error;
		memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc));
		retval = sizeof(usb3_bos_desc);
		break;
	case GetHubStatus:
		*(__le32 *) buf = cpu_to_le32(0);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			retval = -EPIPE;
		/* whoever resets or resumes must GetPortStatus to
		 * complete it!!
		 */
		if (dum_hcd->resuming &&
				time_after_eq(jiffies, dum_hcd->re_timeout)) {
			dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
			dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
		}
		/* a finished reset enables the port and latches the
		 * negotiated speed from the gadget side
		 */
		if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 &&
				time_after_eq(jiffies, dum_hcd->re_timeout)) {
			dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16);
			dum_hcd->port_status &= ~USB_PORT_STAT_RESET;
			if (dum_hcd->dum->pullup) {
				dum_hcd->port_status |= USB_PORT_STAT_ENABLE;
				if (hcd->speed < HCD_USB3) {
					switch (dum_hcd->dum->gadget.speed) {
					case USB_SPEED_HIGH:
						dum_hcd->port_status |=
						      USB_PORT_STAT_HIGH_SPEED;
						break;
					case USB_SPEED_LOW:
						dum_hcd->dum->gadget.ep0->
							maxpacket = 8;
						dum_hcd->port_status |=
							USB_PORT_STAT_LOW_SPEED;
						break;
					default:
						break;
					}
				}
			}
		}
		set_link_state(dum_hcd);
		((__le16 *) buf)[0] = cpu_to_le16(dum_hcd->port_status);
		((__le16 *) buf)[1] = cpu_to_le16(dum_hcd->port_status >> 16);
		break;
	case SetHubFeature:
		retval = -EPIPE;
		break;
	case SetPortFeature:
		switch (wValue) {
		case USB_PORT_FEAT_LINK_STATE:
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					"USB_PORT_FEAT_LINK_STATE req not "
					"supported for USB 2.0 roothub\n");
				goto error;
			}
			/*
			 * Since this is dummy we don't have an actual link so
			 * there is nothing to do for the SET_LINK_STATE cmd
			 */
			break;
		case USB_PORT_FEAT_U1_TIMEOUT:
		case USB_PORT_FEAT_U2_TIMEOUT:
			/* TODO: add suspend/resume support! */
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					"USB_PORT_FEAT_U1/2_TIMEOUT req not "
					"supported for USB 2.0 roothub\n");
				goto error;
			}
			break;
		case USB_PORT_FEAT_SUSPEND:
			/* Applicable only for USB2.0 hub */
			if (hcd->speed == HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					"USB_PORT_FEAT_SUSPEND req not "
					"supported for USB 3.0 roothub\n");
				goto error;
			}
			if (dum_hcd->active) {
				dum_hcd->port_status |= USB_PORT_STAT_SUSPEND;
				/* HNP would happen here; for now we
				 * assume b_bus_req is always true.
				 */
				set_link_state(dum_hcd);
				if (((1 << USB_DEVICE_B_HNP_ENABLE)
						& dum_hcd->dum->devstatus) != 0)
					dev_dbg(dummy_dev(dum_hcd),
							"no HNP yet!\n");
			}
			break;
		case USB_PORT_FEAT_POWER:
			if (hcd->speed == HCD_USB3)
				dum_hcd->port_status |= USB_SS_PORT_STAT_POWER;
			else
				dum_hcd->port_status |= USB_PORT_STAT_POWER;
			set_link_state(dum_hcd);
			break;
		case USB_PORT_FEAT_BH_PORT_RESET:
			/* Applicable only for USB3.0 hub */
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					"USB_PORT_FEAT_BH_PORT_RESET req not "
					"supported for USB 2.0 roothub\n");
				goto error;
			}
			fallthrough;
		case USB_PORT_FEAT_RESET:
			if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
				break;
			/* if it's already enabled, disable */
			if (hcd->speed == HCD_USB3) {
				dum_hcd->port_status =
					(USB_SS_PORT_STAT_POWER |
					 USB_PORT_STAT_CONNECTION |
					 USB_PORT_STAT_RESET);
			} else {
				dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
					| USB_PORT_STAT_LOW_SPEED
					| USB_PORT_STAT_HIGH_SPEED);
				dum_hcd->port_status |= USB_PORT_STAT_RESET;
			}
			/*
			 * We want to reset device status. All but the
			 * Self powered feature
			 */
			dum_hcd->dum->devstatus &=
				(1 << USB_DEVICE_SELF_POWERED);
			/*
			 * FIXME USB3.0: what is the correct reset signaling
			 * interval? Is it still 50msec as for HS?
			 */
			dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
			set_link_state(dum_hcd);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_SUSPEND:
			/* Not allowed for USB-3, and ignored for USB-2 */
			if (hcd->speed == HCD_USB3)
				goto error;
			break;
		default:
			/* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
			goto error;
		}
		break;
	case GetPortErrorCount:
		if (hcd->speed != HCD_USB3) {
			dev_dbg(dummy_dev(dum_hcd),
				"GetPortErrorCount req not "
				"supported for USB 2.0 roothub\n");
			goto error;
		}
		/* We'll always return 0 since this is a dummy hub */
		*(__le32 *) buf = cpu_to_le32(0);
		break;
	case SetHubDepth:
		if (hcd->speed != HCD_USB3) {
			dev_dbg(dummy_dev(dum_hcd),
				"SetHubDepth req not supported for "
				"USB 2.0 roothub\n");
			goto error;
		}
		break;
	default:
		dev_dbg(dummy_dev(dum_hcd),
			"hub control req%04x v%04x i%04x l%d\n",
			typeReq, wValue, wIndex, wLength);
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	/* any latched change bits make the core re-poll the roothub */
	if ((dum_hcd->port_status & PORT_C_MASK) != 0)
		usb_hcd_poll_rh_status(hcd);
	return retval;
}
static int dummy_bus_suspend(struct usb_hcd *hcd)
{
struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irq(&dum_hcd->dum->lock);
dum_hcd->rh_state = DUMMY_RH_SUSPENDED;
set_link_state(dum_hcd);
hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irq(&dum_hcd->dum->lock);
return 0;
}
/*
 * Root-hub bus resume: restart the root hub and kick the transfer
 * timer if URBs are still queued.  Fails with -ESHUTDOWN when the
 * controller is not accessible.
 */
static int dummy_bus_resume(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	int rc = 0;

	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);

	spin_lock_irq(&dum_hcd->dum->lock);
	if (HCD_HW_ACCESSIBLE(hcd)) {
		dum_hcd->rh_state = DUMMY_RH_RUNNING;
		set_link_state(dum_hcd);
		if (!list_empty(&dum_hcd->urbp_list))
			mod_timer(&dum_hcd->timer, jiffies);
		hcd->state = HC_STATE_RUNNING;
	} else {
		rc = -ESHUTDOWN;
	}
	spin_unlock_irq(&dum_hcd->dum->lock);

	return rc;
}
/*-------------------------------------------------------------------------*/
/*
 * Format one queued URB for the sysfs 'urbs' attribute:
 * "urb/<ptr> <speed> ep<N><dir><type> len <actual>/<total>".
 */
static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
{
	int ep = usb_pipeendpoint(urb->pipe);
	const char *speed;
	const char *type;

	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		speed = "ls";
		break;
	case USB_SPEED_FULL:
		speed = "fs";
		break;
	case USB_SPEED_HIGH:
		speed = "hs";
		break;
	case USB_SPEED_SUPER:
		speed = "ss";
		break;
	default:
		speed = "?";
		break;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		type = "";
		break;
	case PIPE_BULK:
		type = "-bulk";
		break;
	case PIPE_INTERRUPT:
		type = "-int";
		break;
	default:
		type = "-iso";
		break;
	}

	return scnprintf(buf, size,
			"urb/%p %s ep%d%s%s len %d/%d\n",
			urb, speed,
			ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
			type,
			urb->actual_length, urb->transfer_buffer_length);
}
/*
 * sysfs 'urbs' attribute: dump every URB currently queued on this hcd,
 * one line per URB, bounded by PAGE_SIZE.  Holds dum->lock while
 * walking the list.
 */
static ssize_t urbs_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	struct urbp *urbp;
	size_t len = 0;
	unsigned long flags;

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) {
		size_t written = show_urb(buf, PAGE_SIZE - len, urbp->urb);

		buf += written;
		len += written;
	}
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);

	return len;
}
static DEVICE_ATTR_RO(urbs);
/*
 * Start the shared (SuperSpeed) hcd instance: initialize the transfer
 * timer, URB list and stream state, and mark the hcd running.
 *
 * Note: the unreachable device_create_file() call that used to sit
 * after "return 0" was dead code and has been removed; the 'urbs'
 * sysfs attribute is created only once, by dummy_start() on the
 * primary (USB 2) hcd.
 */
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
	timer_setup(&dum_hcd->timer, dummy_timer, 0);
	dum_hcd->rh_state = DUMMY_RH_RUNNING;
	dum_hcd->stream_en_ep = 0;
	INIT_LIST_HEAD(&dum_hcd->urbp_list);
	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
	dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
	dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
	dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1;
#endif
	return 0;
}
/*
 * hc_driver .start for the primary (USB 2) hcd; the shared USB 3 hcd
 * is delegated to dummy_start_ss().  Initializes the lock, timer and
 * URB list, marks the hcd running, and creates the debug 'urbs' sysfs
 * attribute.  Returns 0 or the device_create_file() error.
 */
static int dummy_start(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	/*
	 * HOST side init ... we emulate a root hub that'll only ever
	 * talk to one device (the gadget side). Also appears in sysfs,
	 * just like more familiar pci-based HCDs.
	 */
	if (!usb_hcd_is_primary_hcd(hcd))
		return dummy_start_ss(dum_hcd);
	/* the primary hcd owns the shared lock's initialization */
	spin_lock_init(&dum_hcd->dum->lock);
	timer_setup(&dum_hcd->timer, dummy_timer, 0);
	dum_hcd->rh_state = DUMMY_RH_RUNNING;
	INIT_LIST_HEAD(&dum_hcd->urbp_list);
	hcd->power_budget = POWER_BUDGET;
	hcd->state = HC_STATE_RUNNING;
	hcd->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
	hcd->self.otg_port = 1;
#endif
	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
	return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
}
/* hc_driver .stop: remove the debug sysfs attribute and log shutdown. */
static void dummy_stop(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);

	device_remove_file(dummy_dev(dum_hcd), &dev_attr_urbs);
	dev_info(dummy_dev(dum_hcd), "stopped\n");
}
/*-------------------------------------------------------------------------*/
/*
 * hc_driver .get_frame_number: the host side shares the emulated frame
 * counter with the gadget side, so just forward to dummy_g_get_frame()
 * (which ignores its argument).
 */
static int dummy_h_get_frame(struct usb_hcd *hcd)
{
	return dummy_g_get_frame(NULL);
}
/*
 * hc_driver .reset: one-time hcd setup.  Links the hcd to its struct
 * dummy (passed via platform data) and labels the primary instance as
 * the USB 2 roothub, the shared one as USB 3.
 */
static int dummy_setup(struct usb_hcd *hcd)
{
	struct dummy *dum = *((void **)dev_get_platdata(hcd->self.controller));
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);

	hcd->self.sg_tablesize = ~0;

	if (!usb_hcd_is_primary_hcd(hcd)) {
		dum->ss_hcd = dum_hcd;
		dum->ss_hcd->dum = dum;
		hcd->speed = HCD_USB3;
		hcd->self.root_hub->speed = USB_SPEED_SUPER;
		return 0;
	}

	/*
	 * Mark the first roothub as being USB 2.0.
	 * The USB 3.0 roothub will be registered later by
	 * dummy_hcd_probe()
	 */
	dum->hs_hcd = dum_hcd;
	dum->hs_hcd->dum = dum;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	return 0;
}
/* Change a group of bulk endpoints to support multiple stream IDs */
static int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
	struct usb_host_endpoint **eps, unsigned int num_eps,
	unsigned int num_streams, gfp_t mem_flags)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long flags;
	int granted = num_streams;
	unsigned int idx;
	unsigned int i;

	if (!num_eps)
		return -EINVAL;

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	/* Pass 1: validate all endpoints before touching any state */
	for (i = 0; i < num_eps; i++) {
		int ep_max;

		idx = dummy_get_ep_idx(&eps[i]->desc);
		if (dum_hcd->stream_en_ep & (1 << idx)) {
			/* streams already enabled on this endpoint */
			granted = -EINVAL;
			goto out;
		}
		ep_max = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (!ep_max) {
			granted = -EINVAL;
			goto out;
		}
		if (ep_max < granted) {
			dev_dbg(dummy_dev(dum_hcd), "Ep 0x%x only supports %u "
					"stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					ep_max);
			granted = ep_max;
		}
	}

	/* Pass 2: commit - enable streams on every endpoint */
	for (i = 0; i < num_eps; i++) {
		idx = dummy_get_ep_idx(&eps[i]->desc);
		dum_hcd->stream_en_ep |= 1 << idx;
		set_max_streams_for_pipe(dum_hcd,
				usb_endpoint_num(&eps[i]->desc), granted);
	}
out:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return granted;
}
/* Reverts a group of bulk endpoints back to not using stream IDs. */
static int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
	struct usb_host_endpoint **eps, unsigned int num_eps,
	gfp_t mem_flags)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long flags;
	unsigned int idx;
	unsigned int i;
	int rc = 0;

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	/* Pass 1: every endpoint must currently have streams enabled */
	for (i = 0; i < num_eps; i++) {
		idx = dummy_get_ep_idx(&eps[i]->desc);
		if (!(dum_hcd->stream_en_ep & (1 << idx))) {
			rc = -EINVAL;
			goto out;
		}
	}

	/* Pass 2: commit - disable streams on every endpoint */
	for (i = 0; i < num_eps; i++) {
		idx = dummy_get_ep_idx(&eps[i]->desc);
		dum_hcd->stream_en_ep &= ~(1 << idx);
		set_max_streams_for_pipe(dum_hcd,
				usb_endpoint_num(&eps[i]->desc), 0);
	}
out:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}
/*
 * Host controller driver ops for the emulated controller.  .flags is
 * filled in at probe time (dummy_hcd_probe) according to the requested
 * speed, which is why it is absent here.
 */
static struct hc_driver dummy_hcd = {
	.description =		(char *) driver_name,
	.product_desc =		"Dummy host controller",
	.hcd_priv_size =	sizeof(struct dummy_hcd),
	.reset =		dummy_setup,
	.start =		dummy_start,
	.stop =			dummy_stop,
	.urb_enqueue =		dummy_urb_enqueue,
	.urb_dequeue =		dummy_urb_dequeue,
	.get_frame_number =	dummy_h_get_frame,
	.hub_status_data =	dummy_hub_status,
	.hub_control =		dummy_hub_control,
	.bus_suspend =		dummy_bus_suspend,
	.bus_resume =		dummy_bus_resume,
	.alloc_streams =	dummy_alloc_streams,
	.free_streams =		dummy_free_streams,
};
/*
 * Platform probe: create and register the primary (HS) hcd and, when
 * super-speed emulation was requested, a shared SS hcd on top of it.
 * The goto labels unwind in exact reverse order of setup; on failure
 * both hcd back-pointers in struct dummy are cleared so dummy_hcd_init()
 * can detect the failed probe.
 */
static int dummy_hcd_probe(struct platform_device *pdev)
{
	struct dummy		*dum;
	struct usb_hcd		*hs_hcd;
	struct usb_hcd		*ss_hcd;
	int			retval;
	dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
	dum = *((void **)dev_get_platdata(&pdev->dev));
	/* select the hcd flags to match the requested emulated speed */
	if (mod_data.is_super_speed)
		dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
	else if (mod_data.is_high_speed)
		dummy_hcd.flags = HCD_USB2;
	else
		dummy_hcd.flags = HCD_USB11;
	hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
	if (!hs_hcd)
		return -ENOMEM;
	hs_hcd->has_tt = 1;
	retval = usb_add_hcd(hs_hcd, 0, 0);
	if (retval)
		goto put_usb2_hcd;
	if (mod_data.is_super_speed) {
		ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
					dev_name(&pdev->dev), hs_hcd);
		if (!ss_hcd) {
			retval = -ENOMEM;
			goto dealloc_usb2_hcd;
		}
		retval = usb_add_hcd(ss_hcd, 0, 0);
		if (retval)
			goto put_usb3_hcd;
	}
	return 0;
put_usb3_hcd:
	usb_put_hcd(ss_hcd);
dealloc_usb2_hcd:
	usb_remove_hcd(hs_hcd);
put_usb2_hcd:
	usb_put_hcd(hs_hcd);
	dum->hs_hcd = dum->ss_hcd = NULL;
	return retval;
}
/*
 * Platform remove: tear down the SS hcd (if one was created) before
 * the HS hcd it shares, then clear the back-pointers used by
 * dummy_hcd_init()/cleanup.
 */
static void dummy_hcd_remove(struct platform_device *pdev)
{
	struct dummy *dum = hcd_to_dummy_hcd(platform_get_drvdata(pdev))->dum;

	if (dum->ss_hcd) {
		struct usb_hcd *ss = dummy_hcd_to_hcd(dum->ss_hcd);

		usb_remove_hcd(ss);
		usb_put_hcd(ss);
	}

	usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
	usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd));

	dum->hs_hcd = NULL;
	dum->ss_hcd = NULL;
}
/*
 * Platform suspend: refuse (-EBUSY) while the root hub is still
 * running; otherwise mark the hardware inaccessible.
 */
static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	int rc = 0;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
		dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
		rc = -EBUSY;
	} else {
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	}
	return rc;
}
/*
 * Platform resume: mark the hardware accessible again and let the core
 * re-poll the root hub for any status changes that piled up.
 */
static int dummy_hcd_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);
	return 0;
}
/* Platform driver glue for the emulated host controller side. */
static struct platform_driver dummy_hcd_driver = {
	.probe =	dummy_hcd_probe,
	.remove_new =	dummy_hcd_remove,
	.suspend =	dummy_hcd_suspend,
	.resume =	dummy_hcd_resume,
	.driver	= {
		.name = driver_name,
	},
};
/*-------------------------------------------------------------------------*/
/* upper bound on the number of emulated UDC/HCD pairs (mod_data.num) */
#define MAX_NUM_UDC	32
/* platform devices allocated at module init, one hcd+udc pair per UDC */
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
/*
 * Module init.  Setup order: allocate hcd pdevs, allocate udc pdevs,
 * allocate per-pair struct dummy and attach it as platform data to
 * both, register the hcd then udc platform drivers, add the hcd pdevs,
 * verify their probes succeeded, add the udc pdevs, verify their
 * probes succeeded.  Each error label unwinds exactly the steps that
 * completed before it, in reverse order.
 */
static int __init dummy_hcd_init(void)
{
	int	retval = -ENOMEM;
	int	i;
	struct	dummy *dum[MAX_NUM_UDC] = {};
	if (usb_disabled())
		return -ENODEV;
	/* super-speed emulation requires the high-speed layer underneath */
	if (!mod_data.is_high_speed && mod_data.is_super_speed)
		return -EINVAL;
	if (mod_data.num < 1 || mod_data.num > MAX_NUM_UDC) {
		pr_err("Number of emulated UDC must be in range of 1...%d\n",
				MAX_NUM_UDC);
		return -EINVAL;
	}
	/* allocate the host-side platform devices */
	for (i = 0; i < mod_data.num; i++) {
		the_hcd_pdev[i] = platform_device_alloc(driver_name, i);
		if (!the_hcd_pdev[i]) {
			i--;
			while (i >= 0)
				platform_device_put(the_hcd_pdev[i--]);
			return retval;
		}
	}
	/* allocate the gadget-side platform devices */
	for (i = 0; i < mod_data.num; i++) {
		the_udc_pdev[i] = platform_device_alloc(gadget_name, i);
		if (!the_udc_pdev[i]) {
			i--;
			while (i >= 0)
				platform_device_put(the_udc_pdev[i--]);
			goto err_alloc_udc;
		}
	}
	/* one shared struct dummy per pair, handed to both via platdata */
	for (i = 0; i < mod_data.num; i++) {
		dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
		if (!dum[i]) {
			retval = -ENOMEM;
			goto err_add_pdata;
		}
		retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
				sizeof(void *));
		if (retval)
			goto err_add_pdata;
		retval = platform_device_add_data(the_udc_pdev[i], &dum[i],
				sizeof(void *));
		if (retval)
			goto err_add_pdata;
	}
	retval = platform_driver_register(&dummy_hcd_driver);
	if (retval < 0)
		goto err_add_pdata;
	retval = platform_driver_register(&dummy_udc_driver);
	if (retval < 0)
		goto err_register_udc_driver;
	for (i = 0; i < mod_data.num; i++) {
		retval = platform_device_add(the_hcd_pdev[i]);
		if (retval < 0) {
			i--;
			while (i >= 0)
				platform_device_del(the_hcd_pdev[i--]);
			goto err_add_hcd;
		}
	}
	for (i = 0; i < mod_data.num; i++) {
		if (!dum[i]->hs_hcd ||
				(!dum[i]->ss_hcd && mod_data.is_super_speed)) {
			/*
			 * The hcd was added successfully but its probe
			 * function failed for some reason.
			 */
			retval = -EINVAL;
			goto err_add_udc;
		}
	}
	for (i = 0; i < mod_data.num; i++) {
		retval = platform_device_add(the_udc_pdev[i]);
		if (retval < 0) {
			i--;
			while (i >= 0)
				platform_device_del(the_udc_pdev[i--]);
			goto err_add_udc;
		}
	}
	for (i = 0; i < mod_data.num; i++) {
		if (!platform_get_drvdata(the_udc_pdev[i])) {
			/*
			 * The udc was added successfully but its probe
			 * function failed for some reason.
			 */
			retval = -EINVAL;
			goto err_probe_udc;
		}
	}
	return retval;
err_probe_udc:
	for (i = 0; i < mod_data.num; i++)
		platform_device_del(the_udc_pdev[i]);
err_add_udc:
	for (i = 0; i < mod_data.num; i++)
		platform_device_del(the_hcd_pdev[i]);
err_add_hcd:
	platform_driver_unregister(&dummy_udc_driver);
err_register_udc_driver:
	platform_driver_unregister(&dummy_hcd_driver);
err_add_pdata:
	for (i = 0; i < mod_data.num; i++)
		kfree(dum[i]);
	for (i = 0; i < mod_data.num; i++)
		platform_device_put(the_udc_pdev[i]);
err_alloc_udc:
	for (i = 0; i < mod_data.num; i++)
		platform_device_put(the_hcd_pdev[i]);
	return retval;
}
module_init(dummy_hcd_init);
/*
 * Module exit: unregister every udc/hcd device pair (udc first, so the
 * gadget side detaches before its host goes away), free the shared
 * struct dummy for each pair, then drop the two platform drivers.
 */
static void __exit dummy_hcd_cleanup(void)
{
	int i;
	for (i = 0; i < mod_data.num; i++) {
		struct dummy *dum;
		/* grab the shared state before the pdev (its owner) is gone */
		dum = *((void **)dev_get_platdata(&the_udc_pdev[i]->dev));
		platform_device_unregister(the_udc_pdev[i]);
		platform_device_unregister(the_hcd_pdev[i]);
		kfree(dum);
	}
	platform_driver_unregister(&dummy_udc_driver);
	platform_driver_unregister(&dummy_hcd_driver);
}
module_exit(dummy_hcd_cleanup);
| linux-master | drivers/usb/gadget/udc/dummy_hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/V2M USB3DRD driver
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb/rzv2m_usb3drd.h>
#define USB_PERI_DRD_CON 0x000
#define USB_PERI_DRD_CON_PERI_RST BIT(31)
#define USB_PERI_DRD_CON_HOST_RST BIT(30)
#define USB_PERI_DRD_CON_PERI_CON BIT(24)
/* Read-modify-write: set @bits in the register at offset @offs. */
static void rzv2m_usb3drd_set_bit(struct rzv2m_usb3drd *usb3, u32 bits,
				  u32 offs)
{
	writel(readl(usb3->reg + offs) | bits, usb3->reg + offs);
}
/* Read-modify-write: clear @bits in the register at offset @offs. */
static void rzv2m_usb3drd_clear_bit(struct rzv2m_usb3drd *usb3, u32 bits,
				    u32 offs)
{
	writel(readl(usb3->reg + offs) & ~bits, usb3->reg + offs);
}
/*
 * Switch the dual-role controller between host and peripheral mode.
 * For @host: drop peripheral mode, release the host block from reset
 * and hold the peripheral block in reset; for peripheral the inverse.
 * The bit operations are ordered so the newly selected block only
 * leaves reset after the other one has been parked.
 */
void rzv2m_usb3drd_reset(struct device *dev, bool host)
{
	struct rzv2m_usb3drd *usb3 = dev_get_drvdata(dev);
	if (host) {
		rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
					USB_PERI_DRD_CON);
		rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
					USB_PERI_DRD_CON);
		rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
				      USB_PERI_DRD_CON);
	} else {
		rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
				      USB_PERI_DRD_CON);
		rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
				      USB_PERI_DRD_CON);
		rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
					USB_PERI_DRD_CON);
	}
}
EXPORT_SYMBOL_GPL(rzv2m_usb3drd_reset);
/*
 * Platform remove: tear down in reverse probe order - remove the child
 * devices populated from DT, drop the runtime-PM reference, disable
 * runtime PM and finally put the block back into reset.
 */
static void rzv2m_usb3drd_remove(struct platform_device *pdev)
{
	struct rzv2m_usb3drd *usb3 = platform_get_drvdata(pdev);
	of_platform_depopulate(usb3->dev);
	pm_runtime_put(usb3->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(usb3->drd_rstc);
}
/*
 * Platform probe: map registers, grab the "drd" IRQ and the exclusive
 * reset line, release the block from reset, power it up via runtime PM
 * and populate the host/peripheral child nodes from DT.  The error
 * labels unwind in exact reverse order of setup.
 */
static int rzv2m_usb3drd_probe(struct platform_device *pdev)
{
	struct rzv2m_usb3drd *usb3;
	int ret;
	usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
	if (!usb3)
		return -ENOMEM;
	usb3->dev = &pdev->dev;
	usb3->drd_irq = platform_get_irq_byname(pdev, "drd");
	if (usb3->drd_irq < 0)
		return usb3->drd_irq;
	usb3->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(usb3->reg))
		return PTR_ERR(usb3->reg);
	platform_set_drvdata(pdev, usb3);
	usb3->drd_rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(usb3->drd_rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(usb3->drd_rstc),
				     "failed to get drd reset");
	reset_control_deassert(usb3->drd_rstc);
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(usb3->dev);
	if (ret)
		goto err_rst;
	/* create the child host/peripheral platform devices from DT */
	ret = of_platform_populate(usb3->dev->of_node, NULL, NULL, usb3->dev);
	if (ret)
		goto err_pm;
	return 0;
err_pm:
	pm_runtime_put(usb3->dev);
err_rst:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(usb3->drd_rstc);
	return ret;
}
/* Devicetree match table: binds on the RZ/V2M USB3 DRD glue node. */
static const struct of_device_id rzv2m_usb3drd_of_match[] = {
	{ .compatible = "renesas,rzv2m-usb3drd", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzv2m_usb3drd_of_match);
static struct platform_driver rzv2m_usb3drd_driver = {
	.driver = {
		.name = "rzv2m-usb3drd",
		.of_match_table = rzv2m_usb3drd_of_match,
	},
	.probe = rzv2m_usb3drd_probe,
	.remove_new = rzv2m_usb3drd_remove,
};
module_platform_driver(rzv2m_usb3drd_driver);
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_DESCRIPTION("Renesas RZ/V2M USB3DRD driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rzv2m_usb3drd");
| linux-master | drivers/usb/gadget/udc/rzv2m_usb3drd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2021 Aspeed Technology Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/slab.h>
#define AST_UDC_NUM_ENDPOINTS (1 + 4)
#define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */
#define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
#define AST_UDC_DESCS_COUNT 256 /* Use 256 stages descriptor mode (32/256) */
#define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */
#define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT)
/*****************************
* *
* UDC register definitions *
* *
*****************************/
#define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */
#define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */
#define AST_UDC_IER 0x08 /* Interrupt Control Register */
#define AST_UDC_ISR 0x0C /* Interrupt Status Register */
#define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */
#define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */
#define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */
#define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */
#define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */
#define AST_UDC_STS 0x24 /* USB Status Register */
#define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */
#define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */
#define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */
#define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */
#define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
#define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
/* Main control reg */
#define USB_PHY_CLK_EN BIT(31)
#define USB_FIFO_DYN_PWRD_EN BIT(19)
#define USB_EP_LONG_DESC BIT(18)
#define USB_BIST_TEST_PASS BIT(13)
#define USB_BIST_TURN_ON BIT(12)
#define USB_PHY_RESET_DIS BIT(11)
#define USB_TEST_MODE(x) ((x) << 8)
#define USB_FORCE_TIMER_HS BIT(7)
#define USB_FORCE_HS BIT(6)
#define USB_REMOTE_WAKEUP_12MS BIT(5)
#define USB_REMOTE_WAKEUP_EN BIT(4)
#define USB_AUTO_REMOTE_WAKEUP_EN BIT(3)
#define USB_STOP_CLK_IN_SUPEND BIT(2)
#define USB_UPSTREAM_FS BIT(1)
#define USB_UPSTREAM_EN BIT(0)
/* Main config reg */
#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
#define UDC_CFG_ADDR_MASK (0x3f)
/* Interrupt ctrl & status reg */
#define UDC_IRQ_EP_POOL_NAK BIT(17)
#define UDC_IRQ_EP_POOL_ACK_STALL BIT(16)
#define UDC_IRQ_BUS_RESUME BIT(8)
#define UDC_IRQ_BUS_SUSPEND BIT(7)
#define UDC_IRQ_BUS_RESET BIT(6)
#define UDC_IRQ_EP0_IN_DATA_NAK BIT(4)
#define UDC_IRQ_EP0_IN_ACK_STALL BIT(3)
#define UDC_IRQ_EP0_OUT_NAK BIT(2)
#define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1)
#define UDC_IRQ_EP0_SETUP BIT(0)
#define UDC_IRQ_ACK_ALL (0x1ff)
/* EP isr reg */
#define USB_EP3_ISR BIT(3)
#define USB_EP2_ISR BIT(2)
#define USB_EP1_ISR BIT(1)
#define USB_EP0_ISR BIT(0)
#define UDC_IRQ_EP_ACK_ALL (0xf)
/*Soft reset reg */
#define ROOT_UDC_SOFT_RESET BIT(0)
/* USB status reg */
#define UDC_STS_HIGHSPEED BIT(27)
/* Programmable EP data toggle */
#define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3)
/* EP0 ctrl reg */
#define EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f)
#define EP0_TX_LEN(x) ((x & 0x7f) << 8)
#define EP0_RX_BUFF_RDY BIT(2)
#define EP0_TX_BUFF_RDY BIT(1)
#define EP0_STALL BIT(0)
/*************************************
* *
* per-endpoint register definitions *
* *
*************************************/
#define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */
#define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */
#define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */
#define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */
#define AST_UDC_EP_BASE 0x200
#define AST_UDC_EP_OFFSET 0x10
/* EP config reg */
#define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16)
#define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14)
#define EP_AUTO_DATA_DISABLE (0x1 << 13)
#define EP_SET_EP_STALL (0x1 << 12)
#define EP_SET_EP_NUM(x) ((x & 0xf) << 8)
#define EP_SET_TYPE_MASK(x) ((x) << 5)
#define EP_TYPE_BULK (0x1)
#define EP_TYPE_INT (0x2)
#define EP_TYPE_ISO (0x3)
#define EP_DIR_OUT (0x1 << 4)
#define EP_ALLOCATED_MASK (0x7 << 1)
#define EP_ENABLE BIT(0)
/* EP DMA ctrl reg */
#define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf)
#define EP_DMA_CTRL_STS_RX_IDLE 0x0
#define EP_DMA_CTRL_STS_TX_IDLE 0x8
#define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3)
#define EP_DMA_CTRL_RESET (0x1 << 2)
#define EP_DMA_SINGLE_STAGE (0x1 << 1)
#define EP_DMA_DESC_MODE (0x1 << 0)
/* EP DMA status reg */
#define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16)
#define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff)
#define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff)
#define EP_DMA_GET_WPTR(x) ((x) & 0xff)
#define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
/* EP desc reg */
#define AST_EP_DMA_DESC_INTR_ENABLE BIT(31)
#define AST_EP_DMA_DESC_PID_DATA0 (0 << 14)
#define AST_EP_DMA_DESC_PID_DATA2 BIT(14)
#define AST_EP_DMA_DESC_PID_DATA1 (2 << 14)
#define AST_EP_DMA_DESC_PID_MDATA (3 << 14)
#define EP_DESC1_IN_LEN(x) ((x) & 0x1fff)
#define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */
/* Per-request state wrapped around the generic usb_request. */
struct ast_udc_request {
	struct usb_request req;
	/* link in the owning endpoint's ast_udc_ep.queue */
	struct list_head queue;
	/* buffer was DMA-mapped by this driver (must be unmapped on done) */
	unsigned mapped:1;
	/* bytes actually handed to the DMA engine so far */
	unsigned int actual_dma_length;
	/* descriptor-list write pointer saved when the request was queued */
	u32 saved_dma_wptr;
};
#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req)
/* One hardware DMA descriptor: two 32-bit words (see AST_EP_DMA_DESC_*). */
struct ast_dma_desc {
	u32 des_0;
	u32 des_1;
};
/* Per-endpoint state for EP0 and the generic endpoint pool. */
struct ast_udc_ep {
	struct usb_ep ep;
	/* Request queue */
	struct list_head queue;
	struct ast_udc_dev *udc;
	/* this endpoint's register window (AST_UDC_EP_BASE + n*OFFSET) */
	void __iomem *ep_reg;
	/* bounce buffer for single-stage DMA, and its bus address */
	void *epn_buf;
	dma_addr_t epn_buf_dma;
	const struct usb_endpoint_descriptor *desc;
	/* DMA Descriptors */
	struct ast_dma_desc *descs;
	dma_addr_t descs_dma;
	/* software write pointer into the descriptor ring */
	u32 descs_wptr;
	/* largest chunk a single descriptor may carry */
	u32 chunk_max;
	bool dir_in:1;
	unsigned stopped:1;
	/* true when this EP uses descriptor-list DMA mode */
	bool desc_mode:1;
};
#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep)
/* Per-controller state for the Aspeed UDC. */
struct ast_udc_dev {
	struct platform_device *pdev;
	void __iomem *reg;
	int irq;
	/* guards endpoint queues and register access */
	spinlock_t lock;
	struct clk *clk;
	struct work_struct wake_work;
	/* EP0 DMA buffers allocated in one chunk */
	void *ep0_buf;
	dma_addr_t ep0_buf_dma;
	struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS];
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	void __iomem *creq;
	/* device state saved when a bus suspend arrives */
	enum usb_device_state suspended_from;
	/* non-zero enables descriptor-list DMA mode for IN endpoints */
	int desc_mode;
	/* Force full speed only */
	bool force_usb1:1;
	unsigned is_control_tx:1;
	bool wakeup_en:1;
};
#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget)
/* endpoint names, indexed like ast_udc_dev.ep[] */
static const char * const ast_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4"
};
#ifdef AST_UDC_DEBUG_ALL
#define AST_UDC_DEBUG
#define AST_SETUP_DEBUG
#define AST_EP_DEBUG
#define AST_ISR_DEBUG
#endif
#ifdef AST_SETUP_DEBUG
#define SETUP_DBG(u, fmt, ...) \
dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define SETUP_DBG(u, fmt, ...)
#endif
#ifdef AST_EP_DEBUG
#define EP_DBG(e, fmt, ...) \
dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \
(e)->ep.name, ##__VA_ARGS__)
#else
#define EP_DBG(ep, fmt, ...) ((void)(ep))
#endif
#ifdef AST_UDC_DEBUG
#define UDC_DBG(u, fmt, ...) \
dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define UDC_DBG(u, fmt, ...)
#endif
#ifdef AST_ISR_DEBUG
#define ISR_DBG(u, fmt, ...) \
dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ISR_DBG(u, fmt, ...)
#endif
/*-------------------------------------------------------------------------*/
#define ast_udc_read(udc, offset) \
readl((udc)->reg + (offset))
#define ast_udc_write(udc, val, offset) \
writel((val), (udc)->reg + (offset))
#define ast_ep_read(ep, reg) \
readl((ep)->ep_reg + (reg))
#define ast_ep_write(ep, val, reg) \
writel((val), (ep)->ep_reg + (reg))
/*-------------------------------------------------------------------------*/
/*
 * Complete @req on @ep with @status and give it back to the gadget
 * driver.  Must be called with udc->lock held; the lock is dropped
 * around usb_gadget_giveback_request() (the callback may re-queue) and
 * re-acquired before returning.
 */
static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
			 int status)
{
	struct ast_udc_dev *udc = ep->udc;
	EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n",
	       req, req->req.actual, req->req.length,
	       (u32)req->req.buf, ep->dir_in);
	list_del(&req->queue);
	/* don't overwrite a status already set by an earlier failure */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	if (status && status != -ESHUTDOWN)
		EP_DBG(ep, "done req:%p, status:%d\n", req, status);
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
/*
 * Complete every request still queued on @ep with @status.
 * Caller holds udc->lock (ast_udc_done() drops it per request).
 */
static void ast_udc_nuke(struct ast_udc_ep *ep, int status)
{
	int nuked = 0;

	while (!list_empty(&ep->queue)) {
		struct ast_udc_request *req =
			list_first_entry(&ep->queue, struct ast_udc_request,
					 queue);

		ast_udc_done(ep, req, status);
		nuked++;
	}

	if (nuked)
		EP_DBG(ep, "Nuked %d request(s)\n", nuked);
}
/*
* Stop activity on all endpoints.
* Device controller for which EP activity is to be stopped.
*
* All the endpoints are stopped and any pending transfer requests if any on
* the endpoint are terminated.
*/
static void ast_udc_stop_activity(struct ast_udc_dev *udc)
{
	int i;

	/* mark every endpoint stopped, then fail its pending requests */
	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		struct ast_udc_ep *ep = &udc->ep[i];

		ep->stopped = 1;
		ast_udc_nuke(ep, -ESHUTDOWN);
	}
}
/*
 * Gadget ep_enable: configure one generic endpoint from @desc.
 * Programs max packet size, endpoint number, direction and transfer
 * type into the EP config register, selects descriptor-list DMA mode
 * for IN endpoints when available (single-stage DMA otherwise), resets
 * the data toggle and finally enables the endpoint.  Returns -EINVAL
 * on bad arguments, -ESHUTDOWN when no gadget driver is bound.
 */
static int ast_udc_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	u16 maxpacket = usb_endpoint_maxp(desc);
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	u8 epnum = usb_endpoint_num(desc);
	unsigned long flags;
	u32 ep_conf = 0;
	u8 dir_in;
	u8 type;
	if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EP_DBG(ep, "Failed, invalid EP enable param\n");
		return -EINVAL;
	}
	if (!udc->driver) {
		EP_DBG(ep, "bogus device state\n");
		return -ESHUTDOWN;
	}
	EP_DBG(ep, "maxpacket:0x%x\n", maxpacket);
	spin_lock_irqsave(&udc->lock, flags);
	ep->desc = desc;
	ep->stopped = 0;
	ep->ep.maxpacket = maxpacket;
	ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN;
	/* max-packet field of 0 means the hardware maximum (1024) */
	if (maxpacket < AST_UDC_EPn_MAX_PACKET)
		ep_conf = EP_SET_MAX_PKT(maxpacket);
	ep_conf |= EP_SET_EP_NUM(epnum);
	type = usb_endpoint_type(desc);
	dir_in = usb_endpoint_dir_in(desc);
	ep->dir_in = dir_in;
	if (!ep->dir_in)
		ep_conf |= EP_DIR_OUT;
	EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in);
	switch (type) {
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_INT:
		ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT);
		break;
	}
	/* descriptor-list DMA is only used for IN EPs with a desc ring */
	ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in;
	if (ep->desc_mode) {
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
		ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF);
		/* Enable Long Descriptor Mode */
		ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE,
			     AST_UDC_EP_DMA_CTRL);
		ep->descs_wptr = 0;
	} else {
		ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL);
		ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
	}
	/* Cleanup data toggle just in case */
	ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA);
	/* Enable EP */
	ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG);
	EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG));
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* usb_ep_ops.disable handler: drain the queue and turn the EP off */
static int ast_udc_ep_disable(struct usb_ep *_ep)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* Mark the endpoint unusable before failing its requests */
	ep->ep.desc = NULL;
	ep->stopped = 1;

	/*
	 * Complete everything still queued with -ESHUTDOWN, then clear
	 * the hardware configuration to disable the endpoint.
	 */
	ast_udc_nuke(ep, -ESHUTDOWN);
	ast_ep_write(ep, 0, AST_UDC_EP_CONFIG);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_ep_ops.alloc_request handler: allocate one driver request */
static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		EP_DBG(ep, "request allocation failed\n");
		return NULL;
	}

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops.free_request handler: release a request from alloc_request */
static void ast_udc_ep_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	kfree(to_ast_req(_req));
}
/*
 * Fill the endpoint's DMA descriptor ring for one transfer of @tx_len
 * bytes starting at bus address @dma_buf, split into chunks of at most
 * ep->chunk_max bytes each.
 *
 * Records the first descriptor index used in req->saved_dma_wptr so the
 * completion handler can walk the same descriptors and total the actual
 * lengths.  Returns 0 on success, -EINVAL if the ring was never set up.
 */
static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
				    u16 tx_len, struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	struct device *dev = &udc->pdev->dev;
	bool last = false;
	int chunk, count;
	u32 offset;

	if (!ep->descs) {
		dev_warn(dev, "%s: Empty DMA descs list failure\n",
			 ep->ep.name);
		return -EINVAL;
	}

	chunk = tx_len;
	offset = count = 0;

	EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
	       "wptr", ep->descs_wptr, "dma_buf", dma_buf,
	       "tx_len", tx_len);

	/* Create Descriptor Lists */
	while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {

		ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;

		/* Last descriptor carries the remainder (may be 0 bytes) */
		if (chunk > ep->chunk_max) {
			ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
		} else {
			ep->descs[ep->descs_wptr].des_1 = chunk;
			last = true;
		}

		chunk -= ep->chunk_max;

		EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n",
		       ep->descs_wptr,
		       ep->descs[ep->descs_wptr].des_0,
		       ep->descs[ep->descs_wptr].des_1);

		/* Remember where this request's descriptors begin */
		if (count == 0)
			req->saved_dma_wptr = ep->descs_wptr;

		ep->descs_wptr++;
		count++;

		/* Ring wrap-around */
		if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
			ep->descs_wptr = 0;

		offset = ep->chunk_max * count;
	}

	return 0;
}
/*
 * Start single-stage DMA for the next chunk (at most one max-packet)
 * of @req on endpoint @ep.
 */
static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
{
	u32 remaining;
	u32 chunk;

	remaining = req->req.length - req->req.actual;
	chunk = (remaining > ep->ep.maxpacket) ? ep->ep.maxpacket : remaining;

	EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
	       req, chunk, ep->dir_in);

	/* Point the EP DMA engine at the next unsent part of the buffer */
	ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);

	/* Arm the transfer size, then set the kick bit to start DMA */
	ast_ep_write(ep, EP_DMA_SET_TX_SIZE(chunk), AST_UDC_EP_DMA_STS);
	ast_ep_write(ep, EP_DMA_SET_TX_SIZE(chunk) | EP_DMA_SINGLE_KICK,
		     AST_UDC_EP_DMA_STS);
}
/*
 * Queue the next part of @req (up to a full descriptor ring worth of
 * data) into the descriptor ring and start DMA by advancing the
 * hardware write pointer.
 */
static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
				  struct ast_udc_request *req)
{
	u32 descs_max_size;
	u32 tx_len;
	u32 last;

	/* Upper bound for one kick: the whole descriptor ring */
	descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;

	last = req->req.length - req->req.actual;
	tx_len = last > descs_max_size ? descs_max_size : last;

	EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n",
	       req, "tx_len", tx_len, "dir_in", ep->dir_in,
	       "dma", req->req.dma + req->req.actual,
	       req->req.actual, req->req.length,
	       "descs_max_size", descs_max_size);

	/* Track how much of the request already has descriptors set up */
	if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual,
				      tx_len, req))
		req->actual_dma_length += tx_len;

	/* make sure CPU done everything before triggering DMA */
	mb();

	/* Writing the new write pointer starts the transfer */
	ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS);

	EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n",
	       ep->descs_wptr,
	       ast_ep_read(ep, AST_UDC_EP_DMA_STS),
	       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
}
/*
 * Start (or continue) the EP0 data stage for @req.
 *
 * Moves at most one max-packet per call; the EP0 IN/OUT ACK interrupt
 * handlers call back in here until the request is finished.
 * Caller holds udc->lock.
 */
static void ast_udc_ep0_queue(struct ast_udc_ep *ep,
			      struct ast_udc_request *req)
{
	struct ast_udc_dev *udc = ep->udc;
	u32 tx_len;
	u32 last;

	last = req->req.length - req->req.actual;
	tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;

	/* Point EP0 DMA at the next un-transferred part of the buffer */
	ast_udc_write(udc, req->req.dma + req->req.actual,
		      AST_UDC_EP0_DATA_BUFF);

	if (ep->dir_in) {
		/* IN requests, send data */
		SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "tx_len", tx_len,
			  req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		/* actual is advanced now; completion is signalled by IRQ */
		req->req.actual += tx_len;
		/* Arm length, then set the ready bit to start transmission */
		ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL);
		ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY,
			      AST_UDC_EP0_CTRL);

	} else {
		/* OUT requests, receive data */
		SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n",
			  "buf", (u32)req->req.buf,
			  "dma", req->req.dma + req->req.actual,
			  "len", req->req.actual, req->req.length,
			  "dir_in", ep->dir_in);

		if (!req->req.length) {
			/* 0 len request, send tx as completion */
			ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
			ep->dir_in = 0x1;
		} else
			ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
	}
}
/*
 * usb_ep_ops.queue handler: map @_req for DMA, put it on the endpoint
 * queue, and start it immediately if the endpoint was idle (EP0
 * requests are always started right away).
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ESHUTDOWN if the
 * endpoint is stopped or an EP0 buffer is misaligned, or the mapping
 * error code.
 */
static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
			    gfp_t gfp_flags)
{
	struct ast_udc_request *req = to_ast_req(_req);
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	struct device *dev = &udc->pdev->dev;
	unsigned long flags;
	int rc;

	if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) {
		dev_warn(dev, "Invalid EP request !\n");
		return -EINVAL;
	}

	if (ep->stopped) {
		dev_warn(dev, "%s is already stopped !\n", _ep->name);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&udc->lock, flags);

	list_add_tail(&req->queue, &ep->queue);

	req->req.actual = 0;
	req->req.status = -EINPROGRESS;
	req->actual_dma_length = 0;

	rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in);
	if (rc) {
		EP_DBG(ep, "Request mapping failure %d\n", rc);
		dev_warn(dev, "Request mapping failure %d\n", rc);
		/* NOTE(review): the request is left on ep->queue when
		 * mapping fails here - confirm callers tolerate this.
		 */
		goto end;
	}

	EP_DBG(ep, "enqueue req @%p\n", req);
	EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n",
	       _req->length, _req->dma, _req->zero, ep->dir_in);

	/* EP0 request enqueue (ep.desc is only set for EPn by ep_enable) */
	if (ep->ep.desc == NULL) {
		if ((req->req.dma % 4) != 0) {
			dev_warn(dev, "EP0 req dma alignment error\n");
			rc = -ESHUTDOWN;
			goto end;
		}

		ast_udc_ep0_queue(ep, req);
		goto end;
	}

	/* EPn request enqueue: only kick if no transfer is in flight */
	if (list_is_singular(&ep->queue)) {
		if (ep->desc_mode)
			ast_udc_epn_kick_desc(ep, req);
		else
			ast_udc_epn_kick(ep, req);
	}

end:
	spin_unlock_irqrestore(&udc->lock, flags);

	return rc;
}
static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct ast_udc_ep *ep = to_ast_ep(_ep);
struct ast_udc_dev *udc = ep->udc;
struct ast_udc_request *req;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&udc->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == _req) {
list_del_init(&req->queue);
ast_udc_done(ep, req, -ESHUTDOWN);
_req->status = -ECONNRESET;
break;
}
}
/* dequeue request not found */
if (&req->req != _req)
rc = -EINVAL;
spin_unlock_irqrestore(&udc->lock, flags);
return rc;
}
/*
 * usb_ep_ops.set_halt handler: set or clear the stall condition on an
 * endpoint.
 *
 * Fix: for EPn the original read AST_UDC_EP_CONFIG through the global
 * register window (ast_udc_read) while writing the modified value back
 * through the per-endpoint window (ast_ep_write), so the read-modify-
 * write operated on two different registers.  Both sides now use the
 * per-endpoint window.
 */
static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct ast_udc_ep *ep = to_ast_ep(_ep);
	struct ast_udc_dev *udc = ep->udc;
	unsigned long flags;
	int epnum;
	u32 ctrl;

	EP_DBG(ep, "val:%d\n", value);

	spin_lock_irqsave(&udc->lock, flags);

	/* NOTE(review): ep->desc is only set by ep_enable - confirm this
	 * is never reached for an endpoint that was not enabled.
	 */
	epnum = usb_endpoint_num(ep->desc);

	/* EP0 */
	if (epnum == 0) {
		ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL);
		if (value)
			ctrl |= EP0_STALL;
		else
			ctrl &= ~EP0_STALL;

		ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL);

	} else {
		/* EPn: read-modify-write the per-endpoint config register */
		ctrl = ast_ep_read(ep, AST_UDC_EP_CONFIG);
		if (value)
			ctrl |= EP_SET_EP_STALL;
		else
			ctrl &= ~EP_SET_EP_STALL;

		ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG);

		/* only epn is stopped and waits for clear */
		ep->stopped = value ? 1 : 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* Endpoint operations exposed to the gadget core */
static const struct usb_ep_ops ast_udc_ep_ops = {
	.enable = ast_udc_ep_enable,
	.disable = ast_udc_ep_disable,
	.alloc_request = ast_udc_ep_alloc_request,
	.free_request = ast_udc_ep_free_request,
	.queue = ast_udc_ep_queue,
	.dequeue = ast_udc_ep_dequeue,
	.set_halt = ast_udc_ep_set_halt,
	/* there's only imprecise fifo status reporting */
};
/* Arm EP0 to receive the next OUT packet into the shared EP0 buffer */
static void ast_udc_ep0_rx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/*
 * Trigger an EP0 IN from the shared EP0 buffer.  No TX length is
 * programmed here, so this is used as a status/ack stage.
 */
static void ast_udc_ep0_tx(struct ast_udc_dev *udc)
{
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/* Handle an EP0 OUT ACK interrupt: data stage bytes have arrived */
static void ast_udc_ep0_out(struct ast_udc_dev *udc)
{
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	u16 rx_len;

	if (list_empty(&ep->queue))
		return;

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL));
	req->req.actual += rx_len;

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	/* Short packet or full request received: finish with status IN */
	if ((rx_len < ep->ep.maxpacket) ||
	    (req->req.actual == req->req.length)) {
		ast_udc_ep0_tx(udc);
		if (!ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		/*
		 * More bytes arrived than the request asked for: complete
		 * the request now rather than overrunning its buffer.
		 */
		if (rx_len > req->req.length) {
			dev_warn(dev, "Something wrong (%d/%d)\n",
				 req->req.actual, req->req.length);

			ast_udc_ep0_tx(udc);
			ast_udc_done(ep, req, 0);
			return;
		}

		ep->dir_in = 0;

		/* More works */
		ast_udc_ep0_queue(ep, req);
	}
}
/* Handle an EP0 IN ACK interrupt: the previous IN packet was accepted */
static void ast_udc_ep0_in(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;

	if (list_empty(&ep->queue)) {
		/* Status stage of a driver-internal control TX finished */
		if (udc->is_control_tx) {
			ast_udc_ep0_rx(udc);
			udc->is_control_tx = 0;
		}

		return;
	}

	req = list_entry(ep->queue.next, struct ast_udc_request, queue);

	SETUP_DBG(udc, "req %p (%d/%d)\n", req,
		  req->req.actual, req->req.length);

	if (req->req.length == req->req.actual) {
		/* Non-zero-length transfer done: arm the OUT status stage */
		if (req->req.length)
			ast_udc_ep0_rx(udc);

		if (ep->dir_in)
			ast_udc_done(ep, req, 0);

	} else {
		/* More works */
		ast_udc_ep0_queue(ep, req);
	}
}
/*
 * Completion handler for an EPn transfer in single-stage DMA mode.
 * Advances the head request and kicks the next pending transfer,
 * if any.  Caller (the ISR) holds udc->lock.
 */
static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num)
{
	struct ast_udc_ep *ep = &udc->ep[ep_num];
	struct ast_udc_request *req;
	u16 len = 0;

	if (list_empty(&ep->queue))
		return;

	req = list_first_entry(&ep->queue, struct ast_udc_request, queue);

	/* Bytes moved by the DMA engine in this stage */
	len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS));
	req->req.actual += len;

	EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
	       req->req.actual, req->req.length, "len", len);

	/* Done this request */
	if (req->req.length == req->req.actual) {
		ast_udc_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_udc_request,
					       queue);
	} else {
		/* Check for short packet */
		if (len < ep->ep.maxpacket) {
			ast_udc_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_udc_request,
						       queue);
		}
	}

	/* More requests (or the remainder of the current one) */
	if (req)
		ast_udc_epn_kick(ep, req);
}
/*
 * Completion handler for an EPn transfer in descriptor-list DMA mode.
 * Sums the per-descriptor completion lengths, finishes the request on
 * completion or a short packet, and sets up descriptors for the next
 * pending chunk/request.  Caller (the ISR) holds udc->lock.
 */
static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num)
{
	struct ast_udc_ep *ep = &udc->ep[ep_num];
	struct device *dev = &udc->pdev->dev;
	struct ast_udc_request *req;
	u32 proc_sts, wr_ptr, rd_ptr;
	u32 len_in_desc, ctrl;
	u16 total_len = 0;
	int i;

	if (list_empty(&ep->queue)) {
		dev_warn(dev, "%s request queue empty!\n", ep->ep.name);
		return;
	}

	req = list_first_entry(&ep->queue, struct ast_udc_request, queue);

	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL);
	proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl);

	/* Check processing status is idle */
	if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE &&
	    proc_sts != EP_DMA_CTRL_STS_TX_IDLE) {
		dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n",
			 ast_ep_read(ep, AST_UDC_EP_DMA_CTRL),
			 proc_sts);
		return;
	}

	ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS);
	rd_ptr = EP_DMA_GET_RPTR(ctrl);
	wr_ptr = EP_DMA_GET_WPTR(ctrl);

	/* All queued descriptors must have been consumed by the engine */
	if (rd_ptr != wr_ptr) {
		dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n",
			 "rptr", rd_ptr, "wptr", wr_ptr);
		return;
	}

	EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr);
	i = req->saved_dma_wptr;

	/* Walk this request's descriptors and total the completed bytes */
	do {
		len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1);
		EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc);
		total_len += len_in_desc;
		i++;
		/* Ring wrap-around */
		if (i >= AST_UDC_DESCS_COUNT)
			i = 0;

	} while (i != wr_ptr);

	req->req.actual += total_len;

	EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
	       req->req.actual, req->req.length, "len", total_len);

	/* Done this request */
	if (req->req.length == req->req.actual) {
		ast_udc_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_udc_request,
					       queue);
	} else {
		/* Check for short packet */
		if (total_len < ep->ep.maxpacket) {
			ast_udc_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_udc_request,
						       queue);
		}
	}

	/* More requests & dma descs not setup yet */
	if (req && (req->actual_dma_length == req->req.actual)) {
		EP_DBG(ep, "More requests\n");
		ast_udc_epn_kick_desc(ep, req);
	}
}
/*
 * Send @len bytes of driver-generated data (e.g. a GET_STATUS reply)
 * on EP0 from the shared EP0 buffer; a zero length sends a bare ack.
 */
static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len)
{
	if (!len) {
		ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
		return;
	}

	memcpy(udc->ep0_buf, tx_data, len);
	ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
	ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL);
	ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY,
		      AST_UDC_EP0_CTRL);

	/* Flag so the IN-ACK handler arms the OUT status stage */
	udc->is_control_tx = 1;
}
static void ast_udc_getstatus(struct ast_udc_dev *udc)
{
struct usb_ctrlrequest crq;
struct ast_udc_ep *ep;
u16 status = 0;
u16 epnum = 0;
memcpy_fromio(&crq, udc->creq, sizeof(crq));
switch (crq.bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
/* Get device status */
status = 1 << USB_DEVICE_SELF_POWERED;
break;
case USB_RECIP_INTERFACE:
break;
case USB_RECIP_ENDPOINT:
epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
status = udc->ep[epnum].stopped;
break;
default:
goto stall;
}
ep = &udc->ep[epnum];
EP_DBG(ep, "status: 0x%x\n", status);
ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status));
return;
stall:
EP_DBG(ep, "Can't respond request\n");
ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
AST_UDC_EP0_CTRL);
}
/*
 * Decode a SETUP packet from the hardware mailbox.  SET_ADDRESS and
 * GET_STATUS are handled here in the driver; everything else is
 * forwarded to the gadget driver, with EP0 stalled on failure.
 * Caller (the ISR) holds udc->lock; it is dropped around driver->setup().
 */
static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep = &udc->ep[0];
	struct ast_udc_request *req;
	struct usb_ctrlrequest crq;
	int req_num = 0;
	int rc = 0;
	u32 reg;

	memcpy_fromio(&crq, udc->creq, sizeof(crq));

	SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n",
		  crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue),
		  le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength));

	/*
	 * Cleanup ep0 request(s) in queue because
	 * there is a new control setup comes.
	 */
	list_for_each_entry(req, &udc->ep[0].queue, queue) {
		req_num++;
		EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
	}

	if (req_num)
		ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);

	udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;

	if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (crq.bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* Address assignment implies enumeration finished;
			 * latch the negotiated speed from the status reg.
			 */
			if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
				udc->gadget.speed = USB_SPEED_HIGH;
			else
				udc->gadget.speed = USB_SPEED_FULL;

			/* NOTE(review): crq.wValue is __le16 and is used
			 * without le16_to_cpu() below - fine on LE ARM,
			 * worth confirming if ever built big-endian.
			 */
			SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
			reg = ast_udc_read(udc, AST_UDC_CONFIG);
			reg &= ~UDC_CFG_ADDR_MASK;
			reg |= UDC_CFG_SET_ADDR(crq.wValue);
			ast_udc_write(udc, reg, AST_UDC_CONFIG);
			goto req_complete;

		case USB_REQ_CLEAR_FEATURE:
			SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
			goto req_driver;

		case USB_REQ_SET_FEATURE:
			SETUP_DBG(udc, "ep0: SET FEATURE\n");
			goto req_driver;

		case USB_REQ_GET_STATUS:
			ast_udc_getstatus(udc);
			return;

		default:
			goto req_driver;
		}

	}

req_driver:
	if (udc->driver) {
		SETUP_DBG(udc, "Forwarding %s to gadget...\n",
			  udc->gadget.name);

		/* Gadget callbacks may sleep/re-enter; drop the lock */
		spin_unlock(&udc->lock);
		rc = udc->driver->setup(&udc->gadget, &crq);
		spin_lock(&udc->lock);

	} else {
		SETUP_DBG(udc, "No gadget for request !\n");
	}

	if (rc >= 0)
		return;

	/* Stall if gadget failed */
	SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
	ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
		      AST_UDC_EP0_CTRL);

	return;

req_complete:
	SETUP_DBG(udc, "ep0: Sending IN status without data\n");
	ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
}
/*
 * Top-level interrupt handler: acknowledges and dispatches bus events,
 * EP0 events, and EPn pool ACK/STALL events.
 */
static irqreturn_t ast_udc_isr(int irq, void *data)
{
	struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
	struct ast_udc_ep *ep;
	u32 isr, ep_isr;
	int i;

	spin_lock(&udc->lock);

	isr = ast_udc_read(udc, AST_UDC_ISR);
	if (!isr)
		goto done;

	/* Ack interrupts */
	ast_udc_write(udc, isr, AST_UDC_ISR);

	if (isr & UDC_IRQ_BUS_RESET) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;

		ep = &udc->ep[1];
		EP_DBG(ep, "dctrl:0x%x\n",
		       ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));

		/* Gadget callbacks run without the lock held */
		if (udc->driver && udc->driver->reset) {
			spin_unlock(&udc->lock);
			udc->driver->reset(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_SUSPEND) {
		ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
		/* Remember the pre-suspend state so resume can restore it */
		udc->suspended_from = udc->gadget.state;
		usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);

		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_BUS_RESUME) {
		ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
		usb_gadget_set_state(&udc->gadget, udc->suspended_from);

		if (udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (isr & UDC_IRQ_EP0_IN_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n");
		ast_udc_ep0_in(udc);
	}

	if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n");
		ast_udc_ep0_out(udc);
	}

	if (isr & UDC_IRQ_EP0_SETUP) {
		ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n");
		ast_udc_ep0_handle_setup(udc);
	}

	if (isr & UDC_IRQ_EP_POOL_ACK_STALL) {
		ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n");
		ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR);

		/* Ack EP interrupts */
		ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR);

		/* Handle each EP (ACK bit i maps to endpoint i + 1) */
		for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) {
			if (ep_isr & (0x1 << i)) {
				ep = &udc->ep[i + 1];
				if (ep->desc_mode)
					ast_udc_epn_handle_desc(udc, i + 1);
				else
					ast_udc_epn_handle(udc, i + 1);
			}
		}
	}

done:
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
static int ast_udc_gadget_getframe(struct usb_gadget *gadget)
{
struct ast_udc_dev *udc = to_ast_dev(gadget);
return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff;
}
/* Deferred remote-wakeup: set the wakeup enable bit from process context */
static void ast_udc_wake_work(struct work_struct *work)
{
	struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
					       wake_work);
	unsigned long flags;
	u32 func_ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "Wakeup Host !\n");

	func_ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
	ast_udc_write(udc, func_ctrl | USB_REMOTE_WAKEUP_EN,
		      AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);
}
/* Kick off a remote wakeup via the work queue (see comment below) */
static void ast_udc_wakeup_all(struct ast_udc_dev *udc)
{
	/*
	 * A device is trying to wake the world, because this
	 * can recurse into the device, we break the call chain
	 * using a work queue
	 */
	schedule_work(&udc->wake_work);
}
/*
 * usb_gadget_ops.wakeup: initiate remote wakeup if the host enabled it.
 * Returns 0 on success, -EINVAL when remote wakeup is disabled.
 */
static int ast_udc_wakeup(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&udc->lock, flags);

	if (udc->wakeup_en) {
		UDC_DBG(udc, "Device initiated wakeup\n");
		ast_udc_wakeup_all(udc);
		rc = 0;
	} else {
		UDC_DBG(udc, "Remote Wakeup is disabled\n");
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return rc;
}
/*
* Activate/Deactivate link with host
*/
static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 func_ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "is_on: %d\n", is_on);

	/* Toggle the upstream (host-facing) connection bit */
	func_ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
	if (is_on)
		func_ctrl |= USB_UPSTREAM_EN;
	else
		func_ctrl &= ~USB_UPSTREAM_EN;

	ast_udc_write(udc, func_ctrl, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.udc_start: bind a gadget driver and unblock the EPs */
static int ast_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");
	udc->driver = driver;
	udc->gadget.dev.of_node = udc->pdev->dev.of_node;

	/* Mark every endpoint ready to accept requests */
	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++)
		udc->ep[i].stopped = 0;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.udc_stop: disconnect from the host and tear down state */
static int ast_udc_stop(struct usb_gadget *gadget)
{
	struct ast_udc_dev *udc = to_ast_dev(gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);

	UDC_DBG(udc, "\n");

	/* Drop the upstream (host-facing) connection */
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->driver = NULL;

	/* Fail all outstanding requests on every endpoint */
	ast_udc_stop_activity(udc);
	usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* Gadget-level operations exposed to the UDC core */
static const struct usb_gadget_ops ast_udc_ops = {
	.get_frame = ast_udc_gadget_getframe,
	.wakeup = ast_udc_wakeup,
	.pullup = ast_udc_pullup,
	.udc_start = ast_udc_start,
	.udc_stop = ast_udc_stop,
};
/*
* Support 1 Control Endpoint.
* Support multiple programmable endpoints that can be configured to
* Bulk IN/OUT, Interrupt IN/OUT, and Isochronous IN/OUT type endpoint.
*/
/*
 * Populate software endpoint state: names, capabilities, register
 * windows, and per-EP DMA buffers / descriptor rings carved out of the
 * single coherent allocation made in probe.
 */
static void ast_udc_init_ep(struct ast_udc_dev *udc)
{
	struct ast_udc_ep *ep;
	int i;

	for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
		ep = &udc->ep[i];
		ep->ep.name = ast_ep_name[i];
		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;

		ep->ep.ops = &ast_udc_ep_ops;
		ep->udc = udc;

		INIT_LIST_HEAD(&ep->queue);

		/* EP0 uses the global register window and shared buffer */
		if (i == 0) {
			usb_ep_set_maxpacket_limit(&ep->ep,
						   AST_UDC_EP0_MAX_PACKET);
			continue;
		}

		/* Per-endpoint register window for EPn */
		ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
				(AST_UDC_EP_OFFSET * (i - 1));

		ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
		ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
		usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);

		/* The descriptor ring follows the packet buffer */
		ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
		ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
		ep->descs_wptr = 0;

		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}
}
/* One-time software state init (work item for remote wakeup) */
static void ast_udc_init_dev(struct ast_udc_dev *udc)
{
	INIT_WORK(&udc->wake_work, ast_udc_wake_work);
}
/* Bring the controller out of reset and program the interrupt masks */
static void ast_udc_init_hw(struct ast_udc_dev *udc)
{
	u32 ctrl;

	/* Enable PHY */
	ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	udelay(1);
	ast_udc_write(udc, 0, AST_UDC_DEV_RESET);

	/* Set descriptor ring size */
	if (AST_UDC_DESCS_COUNT == 256) {
		ctrl |= USB_EP_LONG_DESC;
		ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
	}

	/* Mask & ack all interrupts before installing the handler */
	ast_udc_write(udc, 0, AST_UDC_IER);
	ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR);

	/* Enable some interrupts */
	ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME |
	       UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET |
	       UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL |
	       UDC_IRQ_EP0_SETUP;
	ast_udc_write(udc, ctrl, AST_UDC_IER);

	/* Cleanup and enable ep ACK interrupts */
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER);
	ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR);

	ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
}
/*
 * Platform remove: unregister from the gadget core, disconnect from
 * the host and release the DMA buffer.
 *
 * Fix: clk_disable_unprepare() was called with the spinlock held;
 * clk_unprepare() may sleep, so it must run outside the atomic
 * section.  It is now called after the lock is dropped.
 */
static int ast_udc_remove(struct platform_device *pdev)
{
	struct ast_udc_dev *udc = platform_get_drvdata(pdev);
	unsigned long flags;
	u32 ctrl;

	usb_del_gadget_udc(&udc->gadget);
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* Disable upstream port connection */
	ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
	ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* Must not be called under the spinlock: unprepare may sleep */
	clk_disable_unprepare(udc->clk);

	if (udc->ep0_buf)
		dma_free_coherent(&pdev->dev,
				  AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS,
				  udc->ep0_buf,
				  udc->ep0_buf_dma);

	udc->ep0_buf = NULL;

	return 0;
}
/*
 * Platform probe: map registers, enable the clock, allocate one
 * coherent DMA chunk shared by all endpoints, initialize software and
 * hardware state, then register with the gadget core.
 *
 * Fix: the dma_alloc_coherent() result was never checked; a failed
 * allocation now bails out with -ENOMEM instead of letting later code
 * dereference a NULL buffer.
 */
static int ast_udc_probe(struct platform_device *pdev)
{
	enum usb_device_speed max_speed;
	struct device *dev = &pdev->dev;
	struct ast_udc_dev *udc;
	int rc;

	udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	udc->gadget.dev.parent = dev;
	udc->pdev = pdev;
	spin_lock_init(&udc->lock);

	udc->gadget.ops = &ast_udc_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	udc->gadget.name = "aspeed-udc";
	udc->gadget.dev.init_name = "gadget";

	udc->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->reg)) {
		dev_err(&pdev->dev, "Failed to map resources\n");
		return PTR_ERR(udc->reg);
	}

	platform_set_drvdata(pdev, udc);

	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk)) {
		rc = PTR_ERR(udc->clk);
		goto err;
	}
	rc = clk_prepare_enable(udc->clk);
	if (rc) {
		dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc);
		goto err;
	}

	/* Check if we need to limit the HW to USB1 */
	max_speed = usb_get_maximum_speed(&pdev->dev);
	if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
		udc->force_usb1 = true;

	/*
	 * Allocate DMA buffers for all EPs in one chunk
	 */
	udc->ep0_buf = dma_alloc_coherent(&pdev->dev,
					  AST_UDC_EP_DMA_SIZE *
					  AST_UDC_NUM_ENDPOINTS,
					  &udc->ep0_buf_dma, GFP_KERNEL);
	if (!udc->ep0_buf) {
		rc = -ENOMEM;
		goto err;
	}

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->creq = udc->reg + AST_UDC_SETUP0;

	/*
	 * Support single stage mode or 32/256 stages descriptor mode.
	 * Set default as Descriptor Mode.
	 */
	udc->desc_mode = AST_UDC_DESC_MODE;

	dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ?
		 "descriptor mode" : "single mode");

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);

	/* Initialized udc ep */
	ast_udc_init_ep(udc);

	/* Initialized udc device */
	ast_udc_init_dev(udc);

	/* Initialized udc hardware */
	ast_udc_init_hw(udc);

	/* Find interrupt and install handler */
	udc->irq = platform_get_irq(pdev, 0);
	if (udc->irq < 0) {
		rc = udc->irq;
		goto err;
	}

	rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0,
			      KBUILD_MODNAME, udc);
	if (rc) {
		dev_err(&pdev->dev, "Failed to request interrupt\n");
		goto err;
	}

	rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (rc) {
		dev_err(&pdev->dev, "Failed to add gadget udc\n");
		goto err;
	}

	dev_info(&pdev->dev, "Initialized udc in USB%s mode\n",
		 udc->force_usb1 ? "1" : "2");

	return 0;

err:
	dev_err(&pdev->dev, "Failed to udc probe, rc:0x%x\n", rc);
	ast_udc_remove(pdev);

	return rc;
}
/* Device-tree match table for the ASPEED AST2600 UDC */
static const struct of_device_id ast_udc_of_dt_ids[] = {
	{ .compatible = "aspeed,ast2600-udc", },
	{}
};

MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);

static struct platform_driver ast_udc_driver = {
	.probe = ast_udc_probe,
	.remove = ast_udc_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = ast_udc_of_dt_ids,
	},
};

module_platform_driver(ast_udc_driver);

MODULE_DESCRIPTION("ASPEED UDC driver");
MODULE_AUTHOR("Neal Liu <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/aspeed_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (https://www.amd.com)
* Author: Thomas Dahlmann
*/
/*
* This file does the core driver implementation for the UDC that is based
* on Synopsys device controller IP (different than HS OTG IP) that is either
* connected through PCI bus or integrated to SoC platforms.
*/
/* Driver strings */
#define UDC_MOD_DESCRIPTION "Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING "01.00.0206"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "amd5536udc.h"
/* Forward declarations of helpers defined later in this file */
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
* slave mode: pending bytes in rx fifo after nyet,
* used if EPIN irq came but no req was available
*/
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by letting the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde 0 == do not touch RDE, do not start the RDE timer
 * set_rde 1 == timer function will look whether FIFO has data
 * set_rde 2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
const char *name;
const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
{ \
.name = _name, \
.caps = _caps, \
}
EP_INFO(ep0_string,
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep1in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep2in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep3in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep4in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep5in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep6in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep7in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep8in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep9in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep10in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep11in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep12in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep13in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep14in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep15in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep0out",
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep1out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep2out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep3out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep4out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep5out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep6out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep7out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep8out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep9out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep10out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep11out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep12out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep13out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep14out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep15out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
};
/* DMA buffer fill mode (module option; 0 = packet-per-buffer modes) */
static int use_dma_bufferfill_mode;
/* tx buffer size for high speed (in fifo entries, see udc_ep_enable) */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
/*---------------------------------------------------------------------------*/
/*
 * Dump the device-level registers and the configured DMA mode to the
 * debug log (register contents are read live from the hardware).
 */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);

	if (!use_dma) {
		dev_info(dev->dev, "FIFO mode\n");
	} else {
		/* packet-per-buffer, with or without descriptor update */
		if (use_dma_ppb && !use_dma_ppb_du) {
			DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
				"WITHOUT desc. update)\n");
			dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
		} else if (use_dma_ppb && use_dma_ppb_du) {
			DBG(dev, "DMA mode       = PPBDU (packet per buffer "
				"WITH desc. update)\n");
			dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
		}
		/* buffer fill mode */
		if (use_dma_bufferfill_mode) {
			DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
			dev_info(dev->dev, "DMA mode (%s)\n", "BF");
		}
	}
	DBG(dev, "-------------------------------------------------------\n");
}
/*
 * Mask all device-level interrupt sources and all endpoint interrupts.
 * Returns 0 (never fails).
 */
int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 mask;

	/* silence every device-level interrupt source */
	mask = AMD_BIT(UDC_DEVINT_SVC);
	mask |= AMD_BIT(UDC_DEVINT_ENUM);
	mask |= AMD_BIT(UDC_DEVINT_US);
	mask |= AMD_BIT(UDC_DEVINT_UR);
	mask |= AMD_BIT(UDC_DEVINT_ES);
	mask |= AMD_BIT(UDC_DEVINT_SI);
	mask |= AMD_BIT(UDC_DEVINT_SOF);
	mask |= AMD_BIT(UDC_DEVINT_SC);
	writel(mask, &dev->regs->irqmsk);

	/* silence all endpoint interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
/* Unmask the IN and OUT interrupts for endpoint 0. Returns 0. */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 msk;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read-modify-write the ep interrupt mask */
	msk = readl(&dev->regs->ep_irqmsk);
	msk &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0);
	msk &= AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(msk, &dev->regs->ep_irqmsk);

	return 0;
}
/*
 * Unmask the device-level interrupts needed for setup handling:
 * SET_INTERFACE, SET_CONFIG, USB reset, session valid change and
 * speed enumeration done. Returns 0.
 */
int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 msk;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read-modify-write the device interrupt mask */
	msk = readl(&dev->regs->irqmsk);
	msk &= AMD_UNMASK_BIT(UDC_DEVINT_SI);
	msk &= AMD_UNMASK_BIT(UDC_DEVINT_SC);
	msk &= AMD_UNMASK_BIT(UDC_DEVINT_UR);
	msk &= AMD_UNMASK_BIT(UDC_DEVINT_SVC);
	msk &= AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(msk, &dev->regs->irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);
/*
 * Compute the TX fifo base address of an IN endpoint as the controller
 * fifo base plus the fifo sizes of all lower-numbered IN endpoints.
 * Returns 0 on success, -EINVAL for a NULL or OUT endpoint.
 */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	int ix;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* accumulate fifo sizes of the preceding endpoints */
	for (ix = 0; ix < ep->num; ix++) {
		u32 sz;

		if (!dev->ep[ix].regs)
			continue;
		sz = readl(&dev->ep[ix].regs->bufin_framenum);
		ep->txfifo += AMD_GETBITS(sz, UDC_EPIN_BUFF_SIZE);
	}
	return 0;
}
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

/*
 * After writing CNAK, check whether the NAK bit actually cleared; if
 * not, remember the endpoint in cnak_pending so the CNAK can be retried
 * later, and keep ep->naking set.
 */
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	u32 bit = 1 << (num);

	if (!(readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK))) {
		/* NAK cleared as requested */
		cnak_pending &= ~bit;
		return;
	}
	DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
	cnak_pending |= bit;
	ep->naking = 1;
}
/*
 * Enables endpoint, is called by gadget driver.
 *
 * Programs traffic type, max packet size, fifo size/base (IN), the UDC
 * CSR "ne" entry for the endpoint, unmasks its interrupt and clears NAK
 * (for OUT-DMA endpoints NAK is kept until a DMA descriptor is written,
 * to avoid BNA). Returns 0, -EINVAL for bad arguments or ep0, or
 * -ESHUTDOWN when no gadget driver is bound.
 */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	/* ep0 is driver-internal and must not be enabled by the gadget */
	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
/*
 * Resets endpoint to its power-on software state: detaches the
 * descriptor, sets NAK, masks the endpoint interrupt, and for IN
 * endpoints clears leftover DMA state and flushes the TX fifo.
 * Caller context: used from disable/reset paths; no locking done here.
 */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		/* write-1-to-clear the IN status bit */
		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}
/*
 * Disables endpoint, is called by gadget driver.
 *
 * Frees the BNA dummy request, completes all queued requests with
 * -ESHUTDOWN and re-initializes the endpoint hardware state.
 * Returns 0, or -EINVAL for a NULL/ep0/not-enabled endpoint.
 *
 * NOTE(review): bna_dummy_req is only allocated for OUT endpoints in
 * udc_ep_enable(); for IN endpoints this relies on udc_free_request()
 * tolerating the resulting request pointer — confirm.
 */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}
/*
 * Allocate a request for an endpoint (gadget driver callback).
 * For DMA-capable endpoints a descriptor from the data pool is attached
 * and parked in HOST BUSY state so the hardware ignores it until armed.
 * Returns the embedded usb_request, or NULL on allocation failure.
 */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *desc;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);
	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);

	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (!ep->dma)
		return &req->req;

	/* ep0 in requests are allocated from data pool here */
	desc = dma_pool_alloc(ep->dev->data_requests, gfp, &req->td_phys);
	if (!desc) {
		kfree(req);
		return NULL;
	}
	VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
			"td_phys = %lx\n",
			req, desc,
			(unsigned long)req->td_phys);
	/* prevent from using desc. - set HOST BUSY */
	desc->status = AMD_ADDBITS(desc->status,
			UDC_DMA_STP_STS_BS_HOST_BUSY,
			UDC_DMA_STP_STS_BS);
	desc->bufptr = cpu_to_le32(DMA_DONT_USE);
	req->td_data = desc;
	req->td_data_last = NULL;
	req->chain_len = 1;

	return &req->req;
}
/* frees pci pool descriptors of a DMA chain */
static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
struct udc_data_dma *td = req->td_data;
unsigned int i;
dma_addr_t addr_next = 0x00;
dma_addr_t addr = (dma_addr_t)td->next;
DBG(dev, "free chain req = %p\n", req);
/* do not free first desc., will be done by free for request */
for (i = 1; i < req->chain_len; i++) {
td = phys_to_virt(addr);
addr_next = (dma_addr_t)td->next;
dma_pool_free(dev->data_requests, td, addr);
addr = addr_next;
}
}
/*
 * Free a request previously allocated by udc_alloc_request (gadget
 * driver callback). Releases any DMA descriptor chain first, then the
 * head descriptor, then the request itself. Request must be dequeued.
 */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));

	if (!req->td_data) {
		kfree(req);
		return;
	}

	VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

	/* chained descriptors go back to the pool first */
	if (req->chain_len > 1)
		udc_free_dma_chain(ep->dev, req);

	dma_pool_free(ep->dev->data_requests, req->td_data,
		      req->td_phys);
	kfree(req);
}
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
if (req) {
/* set last bit */
req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
/* set next pointer to itself */
req->td_data->next = req->td_phys;
/* set HOST BUSY */
req->td_data->status
= AMD_ADDBITS(req->td_data->status,
UDC_DMA_STP_STS_BS_DMA_DONE,
UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
pr_debug("bna desc = %p, sts = %08x\n",
req->td_data, req->td_data->status);
#endif
}
}
/*
 * Allocate and initialize the per-endpoint BNA dummy request.
 * Stores it in ep->bna_dummy_req and returns it (NULL on failure).
 */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct usb_request *_req;
	struct udc_request *req;

	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (!_req)
		return NULL;

	req = container_of(_req, struct udc_request, req);
	ep->bna_dummy_req = req;
	udc_init_bna_dummy(req);
	return req;
}
/*
 * Write one packet (up to maxpacket) of request data to the TX fifo.
 * Whole dwords are written with writel; the remaining 1-3 bytes are
 * shifted out of the last partial dword with byte writes. A final dummy
 * write to the confirm register commits the packet.
 */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
/* Read @dwords 32-bit words from the RX fifo into @buf. Returns 0. */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int n;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (n = 0; n < dwords; n++)
		buf[n] = readl(dev->rxfifo);
	return 0;
}
/*
 * Read @bytes bytes from the RX fifo into @buf: whole dwords first,
 * then the trailing 1-3 bytes unpacked from one more fifo dword.
 * Returns 0.
 */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int dw, j, rem;
	u32 last;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* whole dwords first */
	for (dw = 0; dw < bytes / UDC_DWORD_BYTES; dw++)
		*((u32 *)(buf + (dw << 2))) = readl(dev->rxfifo);

	/* trailing bytes come from one more fifo dword */
	rem = bytes % UDC_DWORD_BYTES;
	if (rem) {
		last = readl(dev->rxfifo);
		for (j = 0; j < rem; j++) {
			buf[(dw << 2) + j] = (u8)(last & UDC_BYTE_MASK);
			last = last >> UDC_BITS_PER_BYTE;
		}
	}
	return 0;
}
/*
 * Read one received packet from the RX fifo into @req's buffer.
 *
 * Returns 1 when the request is finished (short packet, zero-length
 * packet, or buffer exactly filled with no zlp expected), 0 otherwise.
 * If the packet exceeds the remaining buffer space, the read is
 * truncated; req->req.status is set to -EOVERFLOW only when the
 * remaining space is not maxpacket-aligned.
 *
 * Fix: debug message said "bytesn\n" (garbled) — corrected to "bytes\n".
 */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}
/*
 * Creates or re-inits a DMA chain.
 *
 * Builds one descriptor per @buf_len chunk of the request (packet per
 * buffer mode). If the request needs more descriptors than currently
 * allocated (req->chain_len), the old chain is freed and a new one is
 * allocated from the data pool; otherwise the existing descriptors are
 * walked via their next pointers and re-initialized. For IN endpoints
 * the per-descriptor TX byte counts are programmed. Returns 0 or
 * -ENOMEM.
 */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
	     bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					    gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			/* reuse the next descriptor of the existing chain */
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						    ep->ep.maxpacket,
						    UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
/*
 * create/re-init a DMA descriptor or a DMA descriptor chain.
 *
 * Programs the head descriptor (buffer pointer, L bit), builds the
 * per-packet chain when packet-per-buffer mode is enabled, writes the
 * TX byte count for IN transfers, and sets the buffer state: HOST BUSY
 * for IN (armed later in udc_queue), HOST READY + CNAK for OUT.
 * Returns 0 or a negative errno from udc_create_dma_chain().
 */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}
/*
 * Completes request packet ... caller MUST hold lock.
 *
 * Unmaps DMA, removes the request from the endpoint queue, sets its
 * final status (unless one was already set) and gives it back to the
 * gadget driver with dev->lock dropped around the callback; ep->halted
 * is forced to 1 across the callback and restored afterwards.
 */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
/*
 * Walk a request's DMA descriptor chain and return the descriptor
 * carrying the L (last) bit, or NULL if the chain is empty.
 */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *desc;

	for (desc = req->td_data;
	     desc && !(desc->status & AMD_BIT(UDC_DMA_IN_STS_L));
	     desc = phys_to_virt(desc->next))
		;

	return desc;
}
/*
 * Walk a request's DMA descriptor chain (PPBDU mode) and sum the
 * RXBYTES field of every descriptor up to and including the one with
 * the L bit. Returns the total number of received bytes.
 */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *desc = req->td_data;
	u32 total;

	/* head descriptor */
	total = AMD_GETBITS(desc->status, UDC_DMA_OUT_STS_RXBYTES);

	/* follow the chain until the last-bit descriptor */
	while (desc && !(desc->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		desc = phys_to_virt(desc->next);
		if (desc)
			total += AMD_GETBITS(desc->status,
					     UDC_DMA_OUT_STS_RXBYTES);
	}

	return total;
}
/*
 * Enable RX DMA immediately: cancel a pending RDE timer (RX DMA is
 * being turned on right now) and set the RDE bit in the device control
 * register.
 */
static void udc_set_rde(struct udc *dev)
{
	u32 ctl;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	ctl = readl(&dev->regs->ctl);
	ctl |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(ctl, &dev->regs->ctl);
}
/*
 * Queues a request packet, called by gadget driver.
 *
 * Maps the request for DMA, handles zero-length packets specially (they
 * are completed immediately; IN zlp's are generated by hardware and zlp
 * acks for SET_CONFIG/SET_INTERFACE are signalled here), arms the DMA
 * descriptor for the first request on an empty queue, and appends the
 * request to the endpoint queue. RX DMA (RDE) is disabled while the OUT
 * descriptor pointer is being updated to avoid BNA, and re-enabled once
 * the request is queued. Returns 0 or a negative errno.
 */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
/*
 * Complete every queued request of an endpoint with -ESHUTDOWN.
 * Caller holds the device spinlock; the endpoint is marked halted.
 */
void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct udc_request, queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
EXPORT_SYMBOL_GPL(empty_req_queue);
/*
 * Dequeues a request packet, called by gadget driver.
 *
 * If the request is currently at the head of the queue and its DMA is
 * running: IN transfers are flagged for cancellation in the ISR; for
 * OUT transfers RX DMA is briefly disabled, and either the ISR cancels
 * it (descriptor already touched by hardware) or the BNA dummy
 * descriptor is substituted. The request is then completed with
 * -ECONNRESET. Returns 0 or -EINVAL.
 */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
/*
 * Halt or clear halt of endpoint (gadget driver callback).
 *
 * Setting halt on ep0 only records stall_ep0in for the control path;
 * on other endpoints the STALL bit is set and a poll timer is started.
 * Clearing halt clears the STALL bit and re-arms CNAK. Uses its own
 * udc_stall_spinlock rather than the device lock. Returns 0, -EINVAL
 * or -ESHUTDOWN.
 */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
/* gadget interface: endpoint operations table */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};
/*-------------------------------------------------------------------------*/
/* Get frame counter (not implemented) — always reports -EOPNOTSUPP. */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}
/*
 * Initiate a remote wakeup by pulsing the RES (resume) bit in the
 * device control register under the device lock. Returns 0.
 */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 ctl;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	/* set RES ... */
	ctl = readl(&dev->regs->ctl);
	ctl |= AMD_BIT(UDC_DEVCTL_RES);
	writel(ctl, &dev->regs->ctl);
	/* ... then clear it again */
	ctl &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(ctl, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
/* Remote wakeup gadget interface: validate and forward to the device. */
static int udc_wakeup(struct usb_gadget *gadget)
{
	if (!gadget)
		return -EINVAL;

	udc_remote_wakeup(container_of(gadget, struct udc, gadget));
	return 0;
}
/* udc_start/udc_stop are defined further down in this file */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

/* gadget operations table */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};
/*
 * Build the gadget's endpoint list (status-IN, data-IN, data-OUT — in
 * that order) and set the fifo depths; the data-IN depth depends on the
 * enumerated speed.
 */
static void make_ep_lists(struct udc *dev)
{
	struct udc_ep *eps = dev->ep;

	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&eps[UDC_EPIN_STATUS_IX].ep.ep_list,
		      &dev->gadget.ep_list);
	list_add_tail(&eps[UDC_EPIN_IX].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&eps[UDC_EPOUT_IX].ep.ep_list, &dev->gadget.ep_list);

	/* fifo config */
	eps[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		eps[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		eps[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	eps[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
/*
 * Inits UDC context: stops the RDE and poll-stall timers, disables RX
 * and TX DMA, enables dynamic CSR programming, marks the device
 * self-powered and remote-wakeup capable, and rebuilds the endpoint
 * lists.
 */
void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
EXPORT_SYMBOL_GPL(udc_basic_init);
/*
 * startup_registers - one-time register init at driver load.
 *
 * Soft-resets the core, masks unused interrupts, applies the initial
 * configuration, links up all endpoints, and programs the configured
 * speed (full speed when the use_fullspeed module parameter is set,
 * high speed otherwise).  Always returns 0.
 */
static int startup_registers(struct udc *dev)
{
	u32 tmp;
	/* init controller by soft reset */
	udc_soft_reset(dev);
	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);
	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);
	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);
	return 0;
}
/*
 * udc_setup_endpoints - (re)initialize per-endpoint software state.
 *
 * Reads the enumerated speed from the device status register, fills in
 * name/caps/fifo/regs for every endpoint, resets endpoints that the
 * gadget driver has not enabled yet, NAKs the OUT data endpoints when
 * DMA is in use, sets the speed-dependent ep0 max-packet limits and
 * resets the current config/interface/alt-setting bookkeeping.
 * Called at init and on every ENUM (speed change).
 */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;
	DBG(dev, "udc_setup_endpoints()\n");
	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;
	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;
		/* fifo size: first UDC_EPIN_NUM entries are the IN eps */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);
		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;
			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}
	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
/*
 * usb_connect - bring up the device after a VBUS/connect event.
 *
 * Idempotent: returns immediately if already connected.  Otherwise it
 * marks the device connected, reapplies the initial configuration and
 * enables the device/setup interrupts so ep0 traffic can be handled.
 */
static void usb_connect(struct udc *dev)
{
	/* Return if already connected */
	if (dev->connected)
		return;
	dev_info(dev->dev, "USB Connect\n");
	dev->connected = 1;
	/* put into initial config */
	udc_basic_init(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}
/*
 * usb_disconnect - handle a disconnect event.
 *
 * Idempotent: returns immediately if already disconnected.  Masks the
 * interrupts, notifies the gadget driver (dropping dev->lock around the
 * callback, which may queue/complete requests), empties all request
 * queues, resets ep0, performs a one-time soft reset, re-enables the
 * setup interrupts and optionally forces the core back to full speed.
 *
 * NOTE(review): caller is expected to hold dev->lock (it is dropped and
 * retaken around the disconnect callback) -- confirm at call sites.
 */
static void usb_disconnect(struct udc *dev)
{
	u32 tmp;
	/* Return if already disconnected */
	if (!dev->connected)
		return;
	dev_info(dev->dev, "USB Disconnect\n");
	dev->connected = 0;
	/* mask interrupts */
	udc_mask_unused_interrupts(dev);
	if (dev->driver) {
		/* drop the lock for the gadget driver callback */
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}
	/* disable ep0 */
	ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
	if (!soft_reset_occured) {
		/* init controller by soft reset - done only once */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}
	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}
}
/*
 * udc_soft_reset - soft-reset the UDC core.
 *
 * Clears all pending endpoint and device interrupt status first (their
 * state would be lost across the reset anyway), then writes the
 * SOFTRESET bit under the IRQ spinlock.  The SOFTRESET write is skipped
 * on the Broadcom revision, where that bit is reserved.  The readl()
 * after the write flushes the posted write.
 */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;
	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
	/* Don't do this for Broadcom UDC since this is a reserved
	 * bit.
	 */
	if (dev->chiprev != UDC_BCM_REV) {
		spin_lock_irqsave(&udc_irq_spinlock, flags);
		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
		/* posted-write flush */
		readl(&dev->regs->cfg);
		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
	}
}
/*
 * udc_timer_function - RDE timer callback.
 *
 * Drives the deferred enabling of RX DMA (the RDE bit) via the global
 * set_rde state:
 *   set_rde > 1: the fifo held data on the previous expiry; open it now
 *                by setting RDE, and mark the state consumed (-1).
 *   set_rde == 1 and RX fifo empty: keep polling with a short period.
 *   set_rde == 1 and fifo has data: re-arm with a long period; the fifo
 *                will be opened on that expiry (or earlier by udc_queue()).
 *   set_rde <= 0: RDE was already handled by udc_queue(); just reset.
 * Signals on_exit when the driver is tearing down (stop_timer set).
 */
static void udc_timer_function(struct timer_list *unused)
{
	u32 tmp;
	spin_lock_irq(&udc_irq_spinlock);
	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}
	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);
}
/*
 * udc_handle_halt_state - poll-stall helper for one endpoint.
 *
 * If the endpoint is logically halted but the hardware STALL (S) bit
 * has been cleared (host sent CLEAR_FEATURE HALT), clear the software
 * halt: write CNAK, mark the ep un-halted and queue a CNAK retry via
 * UDC_QUEUE_CNAK.  See the retained FIXME below on why the stall is
 * not re-asserted despite the MSC spec.
 */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* set stall as long not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receivng of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}
/*
 * udc_pollstall_timer_function - periodic stall-poll timer callback.
 *
 * Runs udc_handle_halt_state() on the single handled IN and OUT data
 * endpoints; while either is still halted (and teardown has not been
 * requested) the timer is re-armed.  Signals on_pollstall_exit when
 * stop_pollstall_timer is set so teardown can wait for the last run.
 */
static void udc_pollstall_timer_function(struct timer_list *unused)
{
	struct udc_ep *ep;
	int halted = 0;
	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);
	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}
/*
 * activate_control_endpoints - program ep0 so SETUP packets flow.
 *
 * Flushes the ep0-IN fifo, sets ep0 directions, programs speed-
 * dependent buffer and max-packet sizes for ep0 IN/OUT (both in the
 * endpoint registers and in the UDC CSR NE entry), and, in DMA mode,
 * installs the ep0-OUT setup/data descriptors, cancels the RDE and
 * poll-stall timers and enables RX/TX DMA.  Finally un-NAKs both ep0
 * directions (CNAK), queueing retries via UDC_QUEUE_CNAK.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;
	DBG(dev, "activate_control_endpoints\n");
	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;
	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);
	if (use_dma) {
		/* L bit: last descriptor of the ep0-OUT data chain */
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}
	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
/*
 * setup_ep0 - make endpoint 0 ready for control traffic.
 * Activates the control endpoints, then unmasks ep0 and device setup
 * interrupts.  Always returns 0.
 */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
	return 0;
}
/*
 * amd5536_udc_start - gadget ops .udc_start: bind a gadget driver.
 *
 * Stores the driver, shares ep0 driver_data between the IN and OUT
 * halves (gadget drivers see ep0 as one endpoint), prepares ep0,
 * clears the soft-disconnect (SD) bit so the host can see the device,
 * and runs the connect bring-up.  Always returns 0.
 */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;
	dev->driver = driver;
	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;
	/* get ready for ep0 traffic */
	setup_ep0(dev);
	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);
	usb_connect(dev);
	return 0;
}
/*
 * shutdown - abort all outstanding requests and reset the controller.
 * The @driver argument is unused; kept for the historical call signature.
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int ep_ix;

	/* put the hardware back into its initial configuration */
	udc_basic_init(dev);

	/* fail every request still queued on any endpoint */
	for (ep_ix = 0; ep_ix < UDC_EP_NUM; ep_ix++)
		empty_req_queue(&dev->ep[ep_ix]);

	/* re-program endpoint state for the next bind */
	udc_setup_endpoints(dev);
}
/*
 * amd5536_udc_stop - gadget ops .udc_stop: unbind the gadget driver.
 *
 * Under the device lock: masks interrupts and runs the full shutdown
 * (empty queues, reinit).  Then drops the driver reference and sets the
 * soft-disconnect (SD) bit so the host sees the device disappear.
 * Always returns 0.
 */
static int amd5536_udc_stop(struct usb_gadget *g)
{
	struct udc *dev = to_amd5536_udc(g);
	unsigned long flags;
	u32 tmp;
	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);
	dev->driver = NULL;
	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);
	return 0;
}
/*
 * udc_process_cnak_queue - retry pending CNAK writes.
 *
 * For every endpoint flagged in the global cnak_pending bitmask (the
 * used IN endpoints plus ep0-OUT), write the CNAK bit to its control
 * register, clear the software naking flag and re-queue via
 * UDC_QUEUE_CNAK in case the hardware NAK persists.
 */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 ep_ix;
	u32 ctl;

	DBG(dev, "CNAK pending queue processing\n");

	/* IN endpoints first */
	for (ep_ix = 0; ep_ix < UDC_EPIN_NUM_USED; ep_ix++) {
		if (!(cnak_pending & (1 << ep_ix)))
			continue;
		DBG(dev, "CNAK pending for ep%d\n", ep_ix);
		/* clear NAK by writing CNAK */
		ctl = readl(&dev->ep[ep_ix].regs->ctl);
		ctl |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(ctl, &dev->ep[ep_ix].regs->ctl);
		dev->ep[ep_ix].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[ep_ix], dev->ep[ep_ix].num);
	}

	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		ctl = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		ctl |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(ctl, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}
/*
 * udc_ep0_set_rde - enable RX DMA after a setup packet (DMA mode only).
 *
 * RDE is set immediately when no data endpoint is enabled or data is
 * already queued; otherwise the RDE timer is armed so the RX fifo is
 * not opened for data endpoints too early (which would misroute data
 * meant for a not-yet-queued request).
 */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints to early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}
/*
 * udc_data_out_isr - interrupt handler for data OUT traffic on ep_ix.
 *
 * Handles, in order: BNA (buffer-not-available) and HE (host error)
 * status events; then either reads the RX fifo (PIO mode) or evaluates
 * DMA completion.  DMA completion supports buffer-fill mode (one
 * descriptor) and packet-per-buffer mode (descriptor chain, with or
 * without descriptor-update); on completion the request is finished
 * and the next request's DMA is started, or a BNA dummy descriptor is
 * implanted and the RDE timer armed when the queue is empty.  Pending
 * CNAKs are retried when the RX fifo is empty.  Returns IRQ_HANDLED
 * when any event was consumed, IRQ_NONE otherwise.
 * Called with dev->lock held from the main IRQ dispatcher (NOTE(review):
 * inferred from the lock-free register access pattern -- confirm).
 */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;
	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];
	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}
	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		/* no request queued - remember that data is waiting */
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;
			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}
	/* DMA */
	} else if (!ep->cancel_transfer && req) {
		ret_val = IRQ_HANDLED;
		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}
			/* clamp to remaining space; flag short-packet overflow */
			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}
				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}
		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}
	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}
	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}
	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}
/*
 * udc_data_in_isr - interrupt handler for data IN traffic on ep_ix.
 *
 * Handles BNA and HE error events, TDC (transmit DMA complete)
 * completion, and the IN-token event.  On TDC the head request is
 * completed and the endpoint interrupt is masked once the queue runs
 * empty.  On an IN token (only when TDC was not also set, see comment
 * below) the next request is either written to the TX fifo (PIO) or its
 * descriptor chain is armed and the poll-demand bit set (DMA).  Returns
 * IRQ_HANDLED when any event was consumed, IRQ_NONE otherwise.
 */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned len;
	ep = &dev->ep[ep_ix];
	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(dev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));
			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev,
			"HE ep%dn occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));
		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}
	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td)
					req->req.actual = req->req.length;
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}
			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;
	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;
					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}
		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(
				&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp,
				&dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);
finished:
	return ret_val;
}
/*
 * udc_control_out_isr - interrupt handler for control (ep0) OUT traffic.
 *
 * Distinguishes SETUP packets from 0-byte/data OUT packets.  For SETUP:
 * NAKs ep0-IN, fetches the 8 setup bytes (from the DMA setup descriptor
 * or the fifo), picks the ep0 direction for the data stage, handles the
 * mass-storage-reset special case, and dispatches to the gadget
 * driver's setup() with dev->lock dropped; the driver's return value
 * selects CNAK (data to send), STALL (unsupported), or wait-for-ZLP-ack.
 * For DATA: a zero-length packet just re-arms the ep0-OUT descriptor
 * and RDE; real data is forwarded to udc_data_out_isr().  Pending CNAKs
 * are retried when the RX fifo is empty.
 */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep *ep;
	struct udc_ep *ep_tmp;
	ep = &dev->ep[UDC_EP0OUT_IX];
	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}
	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);
	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;
		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;
		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}
		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}
			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data to early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and OUT endpoints are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}
		/* call gadget with setup data received; lock dropped */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;
		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}
		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}
	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");
				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;
			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {
			/* received number bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* out data for fifo mode not working */
			count = 0;
			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}
	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}
finished:
	return ret_val;
}
/*
 * udc_control_in_isr - interrupt handler for control (ep0) IN traffic.
 *
 * On TDC just acknowledges the DMA completion.  On an IN token: stalls
 * ep0-IN when a protocol stall is pending; otherwise feeds the head
 * request either via DMA (arm descriptor, set HOST READY and the poll
 * demand bit, then complete the request immediately) or via the TX
 * fifo (PIO, completing on the final short/exact packet).  Clears the
 * halt/stall bookkeeping on the way out.  Returns IRQ_HANDLED when an
 * event was consumed, IRQ_NONE otherwise.
 */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;
	ep = &dev->ep[UDC_EP0IN_IX];
	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;
		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* status reg has IN bit set ? */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;
		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);
				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);
					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);
					/* all bytes will be transferred */
					req->req.actual = req->req.length;
					/* complete req */
					complete_req(ep, req, 0);
				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);
					/* lengh bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;
					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}
			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}
	return ret_val;
}
/*
 * Interrupt handler for global device events: SET_CONFIG, SET_INTERFACE,
 * USB reset, suspend, enumeration-done (speed) and session-valid change.
 *
 * Called from udc_irq() with dev->lock held.  The lock is dropped around
 * every call into the gadget driver (setup/suspend/resume callbacks),
 * hence the sparse __releases/__acquires annotations.
 *
 * @dev:     controller state
 * @dev_irq: snapshot of the device interrupt status register
 * Returns IRQ_HANDLED if any handled bit was set in @dev_irq, else IRQ_NONE.
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		/* pulse the DMARST bit: set it, then restore the old cfg */
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(dev->dev, "Connect: %s\n",
			 usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}
	}

	return ret_val;
}
/*
 * Interrupt Service Routine, see Linux Kernel Doc for parameters.
 *
 * Top-level IRQ handler: dispatches endpoint interrupts (ep0 control
 * in/out, then data endpoints) and global device interrupts under
 * dev->lock.  Per-endpoint status bits are cleared one at a time before
 * dispatch; device status bits are cleared as a whole before udc_dev_isr().
 */
irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			/* skip ep0-out; its bit was handled above */
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}
	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL_GPL(udc_irq);
/*
 * Final teardown for the gadget device: release the controller state
 * that was stored in the device's drvdata at probe time.
 */
void gadget_release(struct device *pdev)
{
	kfree(dev_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(gadget_release);
/*
 * Cleanup on device remove.
 *
 * Stops the two driver timers: the stop_* counters signal the timer
 * callbacks to exit, and if a timer is still pending we wait on the
 * matching completion before del_timer_sync().  Finally the module-wide
 * udc pointer is cleared.
 */
void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	del_timer_sync(&udc_timer);

	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	del_timer_sync(&udc_pollstall_timer);

	udc = NULL;
}
EXPORT_SYMBOL_GPL(udc_remove);
/* free all the dma pools */
void free_dma_pools(struct udc *dev)
{
dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
dev->ep[UDC_EP0OUT_IX].td_phys);
dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
dev->ep[UDC_EP0OUT_IX].td_stp_dma);
dma_pool_destroy(dev->stp_requests);
dma_pool_destroy(dev->data_requests);
}
EXPORT_SYMBOL_GPL(free_dma_pools);
/*
 * Create the driver's DMA pools and allocate ep0-out's descriptors.
 *
 * Also reconciles the module DMA-mode parameters (packet-per-buffer vs.
 * buffer-fill).  On any failure, unwinds via goto labels in reverse
 * allocation order and returns a negative errno; returns 0 on success.
 *
 * Note: both the setup descriptor (td_stp) and the ep0-out data
 * descriptor (td) are allocated from the stp_requests pool — see the
 * matching dma_pool_free() calls in free_dma_pools().
 */
int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma *td_stp;
	struct udc_data_dma *td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", dev->dev,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		return -ENOMEM;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto err_create_dma_pool;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (!td_stp) {
		retval = -ENOMEM;
		goto err_alloc_dma;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (!td_data) {
		retval = -ENOMEM;
		goto err_alloc_phys;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

err_alloc_phys:
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
err_alloc_dma:
	dma_pool_destroy(dev->stp_requests);
	dev->stp_requests = NULL;
err_create_dma_pool:
	dma_pool_destroy(dev->data_requests);
	dev->data_requests = NULL;
	return retval;
}
EXPORT_SYMBOL_GPL(init_dma_pools);
/*
 * General probe: fill in the usb_gadget, bring up the controller
 * registers, register the UDC with the gadget core, arm the driver
 * timers, and set the soft-disconnect (SD) bit so the device stays
 * disconnected until a gadget driver binds.
 *
 * Returns 0 on success or a negative errno (rejects Geode5536 rev A0).
 */
int udc_probe(struct udc *dev)
{
	char tmp[128];
	u32 reg;
	int retval;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(dev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof(tmp), "%d", dev->irq);

	/* Print this device info for AMD chips only*/
	if (dev->chiprev == UDC_HSA0_REV ||
	    dev->chiprev == UDC_HSB1_REV) {
		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
			tmp, dev->phys_addr, dev->chiprev,
			(dev->chiprev == UDC_HSA0_REV) ?
			"A0" : "B1");
		/* tmp is reused to carry the version string from here on */
		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
		if (dev->chiprev == UDC_HSA0_REV) {
			dev_err(dev->dev, "chip revision is A0; too old\n");
			retval = -ENODEV;
			goto finished;
		}
		dev_info(dev->dev,
			"driver version: %s(for Geode5536 B1)\n", tmp);
	}

	udc = dev;

	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
					    gadget_release);
	if (retval)
		goto finished;

	/* timer init */
	timer_setup(&udc_timer, udc_timer_function, 0);
	timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);

	/* set SD */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}
EXPORT_SYMBOL_GPL(udc_probe);
/* Module metadata */
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
/* ==== end of drivers/usb/gadget/udc/snps_udc_core.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004-2007,2011-2012 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Author: Li Yang <[email protected]>
* Jiang Bo <[email protected]>
*
* Description:
* Freescale high-speed USB SOC DR module device controller driver.
* This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
* The driver is previously named as mpc_udc. Based on bare board
* code from Dave Liu and Shlomi Gridish.
*/
#undef VERBOSE
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <asm/dma.h>
#include "fsl_usb2_udc.h"
#define DRIVER_DESC	"Freescale High-Speed USB SOC Device Controller driver"
#define DRIVER_AUTHOR	"Li Yang/Jiang Bo"
#define DRIVER_VERSION	"Apr 20, 2007"

/* sentinel meaning "no DMA mapping yet" in usb_request::dma */
#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char driver_name[] = "fsl-usb2-udc";

/* controller register blocks; mapped during probe */
static struct usb_dr_device __iomem *dr_regs;
static struct usb_sys_interface __iomem *usb_sys_regs;

/* it is initialized in probe()  */
static struct fsl_udc *udc_controller = NULL;

/* template descriptor for ep0 (bidirectional control endpoint) */
static const struct usb_endpoint_descriptor
fsl_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
};

static void fsl_ep_fifo_flush(struct usb_ep *_ep);
#ifdef CONFIG_PPC32
/*
* On some SoCs, the USB controller registers can be big or little endian,
* depending on the version of the chip. In order to be able to run the
* same kernel binary on 2 different versions of an SoC, the BE/LE decision
* must be made at run time. _fsl_readl and fsl_writel are pointers to the
* BE or LE readl() and writel() functions, and fsl_readl() and fsl_writel()
* call through those pointers. Platform code for SoCs that have BE USB
* registers should set pdata->big_endian_mmio flag.
*
* This also applies to controller-to-cpu accessors for the USB descriptors,
* since their endianness is also SoC dependant. Platform code for SoCs that
* have BE USB descriptors should set pdata->big_endian_desc flag.
*/
static u32 _fsl_readl_be(const unsigned __iomem *p)
{
return in_be32(p);
}
static u32 _fsl_readl_le(const unsigned __iomem *p)
{
return in_le32(p);
}
static void _fsl_writel_be(u32 v, unsigned __iomem *p)
{
out_be32(p, v);
}
static void _fsl_writel_le(u32 v, unsigned __iomem *p)
{
out_le32(p, v);
}
static u32 (*_fsl_readl)(const unsigned __iomem *p);
static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_readl(p) (*_fsl_readl)((p))
#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
{
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
} else {
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
}
static inline u32 cpu_to_hc32(const u32 x)
{
return udc_controller->pdata->big_endian_desc
? (__force u32)cpu_to_be32(x)
: (__force u32)cpu_to_le32(x);
}
static inline u32 hc32_to_cpu(const u32 x)
{
return udc_controller->pdata->big_endian_desc
? be32_to_cpu((__force __be32)x)
: le32_to_cpu((__force __le32)x);
}
#else /* !CONFIG_PPC32 */
static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
#define fsl_readl(addr) readl(addr)
#define fsl_writel(val32, addr) writel(val32, addr)
#define cpu_to_hc32(x) cpu_to_le32(x)
#define hc32_to_cpu(x) le32_to_cpu(x)
#endif /* CONFIG_PPC32 */
/********************************************************************
* Internal Used Function
********************************************************************/
/*-----------------------------------------------------------------
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 *	request is still in progress.
 *
 * Unlinks the request, frees its dTD chain back to the pool, unmaps
 * the DMA buffer, and gives the request back to the gadget driver.
 * The udc spinlock is dropped around the completion callback (the
 * gadget driver may resubmit from it), hence the sparse annotations.
 *--------------------------------------------------------------*/
static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
__releases(ep->udc->lock)
__acquires(ep->udc->lock)
{
	struct fsl_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct ep_td_struct *curr_td, *next_td;
	int j;

	udc = (struct fsl_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* the last dTD has no successor to advance to */
		if (j != req->dtd_count - 1) {
			next_td = curr_td->next_td_virt;
		}
		dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));

	if (status && (status != -ESHUTDOWN))
		VDBG("complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* block further queuing while the callback runs unlocked */
	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
/*-----------------------------------------------------------------
 * nuke(): retire every request still queued on this endpoint with
 * the given status.  Called with the udc spinlock held.
 *--------------------------------------------------------------*/
static void nuke(struct fsl_ep *ep, int status)
{
	ep->stopped = 1;

	/* Discard whatever is still sitting in the hardware FIFO */
	fsl_ep_fifo_flush(&ep->ep);

	/* Complete each pending request; done() unlinks it from the list */
	while (!list_empty(&ep->queue)) {
		struct fsl_req *victim;

		victim = list_first_entry(&ep->queue, struct fsl_req, queue);
		done(ep, victim, status);
	}
}
/*------------------------------------------------------------------
	Internal Hardware related function
 ------------------------------------------------------------------*/

/*
 * One-time controller bring-up: configure the PHY interface, reset the
 * controller, switch it to device mode, install the endpoint-QH list
 * address, and default every endpoint to bulk type.
 * Returns 0, -EINVAL for an unknown PHY mode, or -ETIMEDOUT if the
 * controller reset never completes.
 */
static int dr_controller_setup(struct fsl_udc *udc)
{
	unsigned int tmp, portctrl, ep_num;
	unsigned int max_no_of_ep;
	unsigned int ctrl;
	unsigned long timeout;

#define FSL_UDC_RESET_TIMEOUT 1000

	/* Config PHY interface */
	portctrl = fsl_readl(&dr_regs->portsc1);
	portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
	switch (udc->phy_mode) {
	case FSL_USB2_PHY_ULPI:
		if (udc->pdata->have_sysif_regs) {
			if (udc->pdata->controller_ver) {
				/* controller version 1.6 or above */
				ctrl = __raw_readl(&usb_sys_regs->control);
				ctrl &= ~USB_CTRL_UTMI_PHY_EN;
				ctrl |= USB_CTRL_USB_EN;
				__raw_writel(ctrl, &usb_sys_regs->control);
			}
		}
		portctrl |= PORTSCX_PTS_ULPI;
		break;
	case FSL_USB2_PHY_UTMI_WIDE:
		portctrl |= PORTSCX_PTW_16BIT;
		fallthrough;
	case FSL_USB2_PHY_UTMI:
	case FSL_USB2_PHY_UTMI_DUAL:
		if (udc->pdata->have_sysif_regs) {
			if (udc->pdata->controller_ver) {
				/* controller version 1.6 or above */
				ctrl = __raw_readl(&usb_sys_regs->control);
				ctrl |= (USB_CTRL_UTMI_PHY_EN |
					USB_CTRL_USB_EN);
				__raw_writel(ctrl, &usb_sys_regs->control);
				mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
					PHY CLK to become stable - 10ms*/
			}
		}
		portctrl |= PORTSCX_PTS_UTMI;
		break;
	case FSL_USB2_PHY_SERIAL:
		portctrl |= PORTSCX_PTS_FSLS;
		break;
	default:
		return -EINVAL;
	}
	fsl_writel(portctrl, &dr_regs->portsc1);

	/* Stop and reset the usb controller */
	tmp = fsl_readl(&dr_regs->usbcmd);
	tmp &= ~USB_CMD_RUN_STOP;
	fsl_writel(tmp, &dr_regs->usbcmd);

	tmp = fsl_readl(&dr_regs->usbcmd);
	tmp |= USB_CMD_CTRL_RESET;
	fsl_writel(tmp, &dr_regs->usbcmd);

	/* Wait for reset to complete */
	timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
	while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
		if (time_after(jiffies, timeout)) {
			ERR("udc reset timeout!\n");
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	/* Set the controller as device mode */
	tmp = fsl_readl(&dr_regs->usbmode);
	tmp &= ~USB_MODE_CTRL_MODE_MASK;	/* clear mode bits */
	tmp |= USB_MODE_CTRL_MODE_DEVICE;
	/* Disable Setup Lockout */
	tmp |= USB_MODE_SETUP_LOCK_OFF;
	if (udc->pdata->es)
		tmp |= USB_MODE_ES;
	fsl_writel(tmp, &dr_regs->usbmode);

	/* Clear the setup status */
	fsl_writel(0, &dr_regs->usbsts);

	tmp = udc->ep_qh_dma;
	tmp &= USB_EP_LIST_ADDRESS_MASK;
	fsl_writel(tmp, &dr_regs->endpointlistaddr);

	VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
		udc->ep_qh, (int)tmp,
		fsl_readl(&dr_regs->endpointlistaddr));

	/* default every non-ep0 endpoint to bulk in both directions */
	max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
	for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
		tmp = fsl_readl(&dr_regs->endptctrl[ep_num]);
		tmp &= ~(EPCTRL_TX_TYPE | EPCTRL_RX_TYPE);
		tmp |= (EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT)
		| (EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT);
		fsl_writel(tmp, &dr_regs->endptctrl[ep_num]);
	}
	/* Config control enable i/o output, cpu endian register */
	if (udc->pdata->have_sysif_regs) {
		ctrl = __raw_readl(&usb_sys_regs->control);
		ctrl |= USB_CTRL_IOENB;
		__raw_writel(ctrl, &usb_sys_regs->control);
	}

#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	/* Turn on cache snooping hardware, since some PowerPC platforms
	 * wholly rely on hardware to deal with cache coherent. */
	if (udc->pdata->have_sysif_regs) {
		/* Setup Snooping for all the 4GB space */
		tmp = SNOOP_SIZE_2GB;	/* starts from 0x0, size 2G */
		__raw_writel(tmp, &usb_sys_regs->snoop1);
		tmp |= 0x80000000;	/* starts from 0x8000000, size 2G */
		__raw_writel(tmp, &usb_sys_regs->snoop2);
	}
#endif

	return 0;
}
/* Enable DR irq and set controller to run state.
 * Order matters: interrupts are unmasked and the stopped flag cleared
 * before the RUN bit is set, so no event is missed once running. */
static void dr_controller_run(struct fsl_udc *udc)
{
	u32 temp;

	/* Enable DR irq reg */
	temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
		| USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
		| USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;

	fsl_writel(temp, &dr_regs->usbintr);

	/* Clear stopped bit */
	udc->stopped = 0;

	/* Set the controller as device mode */
	temp = fsl_readl(&dr_regs->usbmode);
	temp |= USB_MODE_CTRL_MODE_DEVICE;
	fsl_writel(temp, &dr_regs->usbmode);

	/* Set controller to Run */
	temp = fsl_readl(&dr_regs->usbcmd);
	temp |= USB_CMD_RUN_STOP;
	fsl_writel(temp, &dr_regs->usbcmd);
}
/* Mask all interrupts and clear the RUN bit to halt the controller.
 * In OTG mode the port may belong to the host side, in which case we
 * bail out without touching the hardware. */
static void dr_controller_stop(struct fsl_udc *udc)
{
	unsigned int tmp;

	pr_debug("%s\n", __func__);

	/* if we're in OTG mode, and the Host is currently using the port,
	 * stop now and don't rip the controller out from under the
	 * ehci driver
	 */
	if (udc->gadget.is_otg) {
		if (!(fsl_readl(&dr_regs->otgsc) & OTGSC_STS_USB_ID)) {
			pr_debug("udc: Leaving early\n");
			return;
		}
	}

	/* disable all INTR */
	fsl_writel(0, &dr_regs->usbintr);

	/* Set stopped bit for isr */
	udc->stopped = 1;

	/* disable IO output */
/*	usb_sys_regs->control = 0; */

	/* set controller to Stop */
	tmp = fsl_readl(&dr_regs->usbcmd);
	tmp &= ~USB_CMD_RUN_STOP;
	fsl_writel(tmp, &dr_regs->usbcmd);
}
/* Enable one direction of an endpoint and program its transfer type.
 * For non-zero endpoints the data toggle is reset as well. */
static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
			unsigned char ep_type)
{
	unsigned int ctl = fsl_readl(&dr_regs->endptctrl[ep_num]);

	if (dir) {
		/* TX (IN) half */
		if (ep_num)
			ctl |= EPCTRL_TX_DATA_TOGGLE_RST;
		ctl |= EPCTRL_TX_ENABLE;
		ctl &= ~EPCTRL_TX_TYPE;
		ctl |= (unsigned int)ep_type << EPCTRL_TX_EP_TYPE_SHIFT;
	} else {
		/* RX (OUT) half */
		if (ep_num)
			ctl |= EPCTRL_RX_DATA_TOGGLE_RST;
		ctl |= EPCTRL_RX_ENABLE;
		ctl &= ~EPCTRL_RX_TYPE;
		ctl |= (unsigned int)ep_type << EPCTRL_RX_EP_TYPE_SHIFT;
	}

	fsl_writel(ctl, &dr_regs->endptctrl[ep_num]);
}
/* Set (value != 0) or clear the stall bit for one direction of an
 * endpoint; clearing the stall also resets the data toggle. */
static void
dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
{
	u32 ctl = fsl_readl(&dr_regs->endptctrl[ep_num]);

	if (value) {
		/* halt the endpoint */
		ctl |= dir ? EPCTRL_TX_EP_STALL : EPCTRL_RX_EP_STALL;
	} else if (dir) {
		/* un-halt TX and restart its data toggle */
		ctl &= ~EPCTRL_TX_EP_STALL;
		ctl |= EPCTRL_TX_DATA_TOGGLE_RST;
	} else {
		/* un-halt RX and restart its data toggle */
		ctl &= ~EPCTRL_RX_EP_STALL;
		ctl |= EPCTRL_RX_DATA_TOGGLE_RST;
	}

	fsl_writel(ctl, &dr_regs->endptctrl[ep_num]);
}
/* Get stall status of a specific ep
   Return: 0: not stalled; 1:stalled */
static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
{
	u32 stall_bit = dir ? EPCTRL_TX_EP_STALL : EPCTRL_RX_EP_STALL;

	return (fsl_readl(&dr_regs->endptctrl[ep_num]) & stall_bit) ? 1 : 0;
}
/********************************************************************
Internal Structure Build up functions
********************************************************************/
/*------------------------------------------------------------------
 * struct_ep_qh_setup(): set the Endpoint Capabilites field of QH
 * @zlt: Zero Length Termination Select (1: disable; 0: enable)
 * @mult: Mult field (transactions per microframe, isoc only)
 *
 * QHs are laid out in pairs per endpoint: index 2*ep_num + dir.
 ------------------------------------------------------------------*/
static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
		unsigned char dir, unsigned char ep_type,
		unsigned int max_pkt_len,
		unsigned int zlt, unsigned char mult)
{
	struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
	unsigned int tmp = 0;

	/* set the Endpoint Capabilites in QH */
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Interrupt On Setup (IOS). for control ep  */
		tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| (mult << EP_QUEUE_HEAD_MULT_POS);
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
		break;
	default:
		VDBG("error ep type is %d", ep_type);
		return;
	}
	if (zlt)
		tmp |= EP_QUEUE_HEAD_ZLT_SEL;

	p_QH->max_pkt_length = cpu_to_hc32(tmp);
	/*
	 * NOTE(review): next_dtd_ptr is assigned the raw value 1 (the
	 * terminate bit) without cpu_to_hc32(), unlike the fields above —
	 * presumably fine because 0x1 marks "no dTD" in either byte order,
	 * but worth confirming for big_endian_desc platforms.
	 */
	p_QH->next_dtd_ptr = 1;
	p_QH->size_ioc_int_sts = 0;
}
/* Setup qh structure and ep register for ep0 (both directions). */
static void ep0_setup(struct fsl_udc *udc)
{
	static const unsigned char directions[2] = { USB_RECV, USB_SEND };
	unsigned int i;

	/* program the OUT and IN queue heads first... */
	for (i = 0; i < 2; i++)
		struct_ep_qh_setup(udc, 0, directions[i],
				USB_ENDPOINT_XFER_CONTROL,
				USB_MAX_CTRL_PAYLOAD, 0, 0);

	/* ...then enable both halves in the endpoint control register */
	for (i = 0; i < 2; i++)
		dr_ep_setup(0, directions[i], USB_ENDPOINT_XFER_CONTROL);
}
/***********************************************************************
Endpoint Management Functions
***********************************************************************/
/*-------------------------------------------------------------------------
* when configurations are set, or when interface settings change
* for example the do_set_interface() in gadget layer,
* the driver will enable or disable the relevant endpoints
* ep0 doesn't use this routine. It is always enabled.
-------------------------------------------------------------------------*/
/*
 * Enable an endpoint per the given descriptor: validate parameters,
 * compute the mult field (isoc only), then program the QH and the
 * endpoint control register under the udc lock.
 * Returns 0, -EINVAL for bad parameters, or -ESHUTDOWN when no gadget
 * driver is bound / speed is unknown.
 */
static int fsl_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct fsl_udc *udc = NULL;
	struct fsl_ep *ep = NULL;
	unsigned short max = 0;
	unsigned char mult = 0, zlt;
	int retval = -EINVAL;
	unsigned long flags;

	ep = container_of(_ep, struct fsl_ep, ep);

	/* catch various bogus parameters */
	if (!_ep || !desc
			|| (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;

	udc = ep->udc;
	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	max = usb_endpoint_maxp(desc);

	/* Disable automatic zlp generation.  Driver is responsible to indicate
	 * explicitly through req->req.zero.  This is needed to enable multi-td
	 * request. */
	zlt = 1;

	/* Assume the max packet size from gadget is always correct */
	switch (desc->bmAttributes & 0x03) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		/* mult = 0.  Execute N Transactions as demonstrated by
		 * the USB variable length packet protocol where N is
		 * computed using the Maximum Packet Length (dQH) and
		 * the Total Bytes field (dTD) */
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = usb_endpoint_maxp_mult(desc);
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;

	/* Controller related setup */
	/* Init EPx Queue Head (Ep Capabilites field in QH
	 * according to max, zlt, mult) */
	struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
					?  USB_SEND : USB_RECV),
			(unsigned char) (desc->bmAttributes
					& USB_ENDPOINT_XFERTYPE_MASK),
			max, zlt, mult);

	/* Init endpoint ctrl register */
	dr_ep_setup((unsigned char) ep_index(ep),
			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
					? USB_SEND : USB_RECV),
			(unsigned char) (desc->bmAttributes
					& USB_ENDPOINT_XFERTYPE_MASK));

	spin_unlock_irqrestore(&udc->lock, flags);
	retval = 0;

	VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
			ep->ep.desc->bEndpointAddress & 0x0f,
			(desc->bEndpointAddress & USB_DIR_IN)
				? "in" : "out", max);
en_done:
	return retval;
}
/*---------------------------------------------------------------------
* @ep : the ep being unconfigured. May not be ep0
* Any pending and uncomplete req will complete with status (-ESHUTDOWN)
*---------------------------------------------------------------------*/
static int fsl_ep_disable(struct usb_ep *_ep)
{
struct fsl_udc *udc = NULL;
struct fsl_ep *ep = NULL;
unsigned long flags;
u32 epctrl;
int ep_num;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || !ep->ep.desc) {
VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
return -EINVAL;
}
/* disable ep on controller */
ep_num = ep_index(ep);
epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
if (ep_is_in(ep)) {
epctrl &= ~(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE);
epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT;
} else {
epctrl &= ~(EPCTRL_RX_ENABLE | EPCTRL_TX_TYPE);
epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT;
}
fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
udc = (struct fsl_udc *)ep->udc;
spin_lock_irqsave(&udc->lock, flags);
/* nuke all pending requests (does flush) */
nuke(ep, -ESHUTDOWN);
ep->ep.desc = NULL;
ep->stopped = 1;
spin_unlock_irqrestore(&udc->lock, flags);
VDBG("disabled %s OK", _ep->name);
return 0;
}
/*---------------------------------------------------------------------
 * Allocate a request object for this endpoint.
 * Returns the embedded usb_request, or NULL if allocation failed.
 *---------------------------------------------------------------------*/
static struct usb_request *
fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct fsl_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	/* no DMA mapping exists for this request yet */
	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* Free a request previously allocated by fsl_alloc_request().
 * Fix: guard against a NULL _req *before* container_of() — the original
 * computed the container pointer first and only then tested _req,
 * performing pointer arithmetic on a potentially-NULL pointer. */
static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct fsl_req *req;

	if (!_req)
		return;

	req = container_of(_req, struct fsl_req, req);
	kfree(req);
}
/* Actually add a dTD chain to an empty dQH and let go.
 * Writes the chain head into the QH, clears the ACTIVE/HALT status so
 * the controller will pick it up, then sets the endpoint's bit (TX bits
 * are the upper 16) in ENDPTPRIME.  The wmb() orders the in-memory QH
 * updates before the MMIO prime write. */
static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
{
	struct ep_queue_head *qh = get_qh_by_ep(ep);

	/* Write dQH next pointer and terminate bit to 0 */
	qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
			& EP_QUEUE_HEAD_NEXT_POINTER_MASK);

	/* Clear active and halt bit */
	qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
					| EP_QUEUE_HEAD_STATUS_HALT));

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime endpoint by writing correct bit to ENDPTPRIME */
	fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
			: (1 << (ep_index(ep))), &dr_regs->endpointprime);
}
/* Add dTD chain to the dQH of an EP.
 *
 * If the (non-ep0) endpoint already has queued work, the new chain is
 * linked onto the tail of the last request.  The ATDTW tripwire loop
 * then decides whether the controller already consumed the link: set
 * ATDTW, sample the endpoint status, and repeat until ATDTW is still
 * set afterwards, which guarantees the status sample is consistent.
 * Only if the endpoint turned out idle (and for an empty queue / ep0)
 * do we prime it from scratch via fsl_prime_ep(). */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
	u32 temp, bitmask, tmp_stat;

	/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
	VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */

	bitmask = ep_is_in(ep)
		? (1 << (ep_index(ep) + 16))
		: (1 << (ep_index(ep)));

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
		/* Add td to the end */
		struct fsl_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
		lastreq->tail->next_td_ptr =
			cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
		/* Ensure dTD's next dtd pointer to be updated */
		wmb();
		/* Read prime bit, if 1 goto done */
		if (fsl_readl(&dr_regs->endpointprime) & bitmask)
			return;

		do {
			/* Set ATDTW bit in USBCMD */
			temp = fsl_readl(&dr_regs->usbcmd);
			fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);

			/* Read correct status bit */
			tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;

		} while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));

		/* Write ATDTW bit to 0 */
		temp = fsl_readl(&dr_regs->usbcmd);
		fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);

		/* endpoint still active: hardware will follow the new link */
		if (tmp_stat)
			return;
	}

	fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
 * @req: request that the transfer belongs to
 * @length: return actually data length of the dTD
 * @dma: return dma address of the dTD
 * @is_last: return flag if it is the last dTD of the request
 * return: pointer to the built dTD, or NULL if the pool allocation failed */
static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
{
	u32 swap_temp;
	struct ep_td_struct *dtd;

	/* how big will this transfer be? (one dTD covers at most
	 * EP_MAX_LENGTH_TRANSFER bytes) */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* Clear reserved field */
	swap_temp = hc32_to_cpu(dtd->size_ioc_sts);
	swap_temp &= ~DTD_RESERVED_FIELDS;
	dtd->size_ioc_sts = cpu_to_hc32(swap_temp);

	/* Init all of buffer page pointers: five consecutive 4K pages
	 * starting at the request's current DMA offset */
	swap_temp = (u32) (req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_hc32(swap_temp);
	dtd->buff_ptr1 = cpu_to_hc32(swap_temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_hc32(swap_temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_hc32(swap_temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_hc32(swap_temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set: a short (or zero) final
	 * packet terminates the request; a full-size one needs one more dTD */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	if ((*is_last) == 0)
		VDBG("multi-dtd request!");
	/* Fill in the transfer size; set active bit */
	swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		swap_temp |= DTD_IOC;

	dtd->size_ioc_sts = cpu_to_hc32(swap_temp);

	/* make the descriptor globally visible before it is linked/primed */
	mb();

	VDBG("length = %d address= 0x%x", *length, (int)*dma);

	return dtd;
}
/* Generate dtd chain for a request.
 * Builds one dTD per EP_MAX_LENGTH_TRANSFER-sized chunk, links them via
 * their hardware next pointers, and terminates the chain.  Returns 0 on
 * success or -ENOMEM if a dTD allocation fails. */
static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
{
	struct ep_td_struct *dtd = NULL, *prev = NULL;
	dma_addr_t dtd_dma;
	unsigned chunk_len;
	int last = 0;
	while (!last) {
		dtd = fsl_build_dtd(req, &chunk_len, &dtd_dma, &last,
				gfp_flags);
		if (!dtd)
			return -ENOMEM;
		if (!prev) {
			/* first dTD becomes the head of the chain */
			req->head = dtd;
		} else {
			prev->next_td_ptr = cpu_to_hc32(dtd_dma);
			prev->next_td_virt = dtd;
		}
		prev = dtd;
		req->dtd_count++;
	}
	/* mark the end of the chain for the controller */
	dtd->next_td_ptr = cpu_to_hc32(DTD_NEXT_TERMINATE);
	req->tail = dtd;
	return 0;
}
/* queues (submits) an I/O request to an endpoint.
 *
 * Validates the request and endpoint, DMA-maps the buffer, builds the dTD
 * chain and hands it to the controller under the udc lock.  Returns 0 on
 * success or a negative errno.  Fix: undo the DMA mapping if the dTD chain
 * cannot be allocated, so the buffer mapping is not leaked on -ENOMEM. */
static int
fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
	struct fsl_req *req = container_of(_req, struct fsl_req, req);
	struct fsl_udc *udc;
	unsigned long flags;
	int ret;
	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		VDBG("%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->ep.desc)) {
		VDBG("%s, bad ep", __func__);
		return -EINVAL;
	}
	/* isochronous transfers are limited to one packet per request */
	if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}
	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	req->ep = ep;
	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
	if (ret)
		return ret;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;
	/* build dtds and push them to device queue */
	if (fsl_req_to_dtd(req, gfp_flags)) {
		/* release the DMA mapping taken above; without this the
		 * mapping (and a bounce buffer, if any) would leak */
		usb_gadget_unmap_request(&ep->udc->gadget, &req->req,
				ep_is_in(ep));
		return -ENOMEM;
	}
	spin_lock_irqsave(&udc->lock, flags);
	fsl_queue_td(ep, req);
	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint.
 * The endpoint is temporarily disabled in hardware while the request is
 * unlinked from the dTD chain, then re-enabled.  Returns 0 on success or
 * -EINVAL if the request is not queued on this endpoint. */
static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
	struct fsl_req *req = NULL;
	struct fsl_req *iter;
	unsigned long flags;
	int ep_num, stopped, ret = 0;
	u32 epctrl;
	if (!_ep || !_req)
		return -EINVAL;
	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;
	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	ep_num = ep_index(ep);
	/* disable the endpoint in hardware so it stops fetching dTDs
	 * while the chain is being patched */
	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
	if (ep_is_in(ep))
		epctrl &= ~EPCTRL_TX_ENABLE;
	else
		epctrl &= ~EPCTRL_RX_ENABLE;
	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}
	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		fsl_ep_fifo_flush(_ep);	/* flush current transfer */
		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct fsl_req *next_req;
			next_req = list_entry(req->queue.next, struct fsl_req,
					queue);
			/* prime with dTD of next request */
			fsl_prime_ep(ep, next_req->head);
		}
	/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct fsl_req *prev_req;
		prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
		/* bypass the cancelled request's dTDs in the hardware chain */
		prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
	}
	done(ep, req, -ECONNRESET);
	/* Enable EP */
out:	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
	if (ep_is_in(ep))
		epctrl |= EPCTRL_TX_ENABLE;
	else
		epctrl |= EPCTRL_RX_ENABLE;
	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
	ep->stopped = stopped;
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/*-------------------------------------------------------------------------*/
/*-----------------------------------------------------------------
 * modify the endpoint halt feature
 * @ep: the non-isochronous endpoint being stalled
 * @value: 1--set halt  0--clear halt
 * Returns zero, or a negative error code.
 * Fix: validate _ep before dereferencing the containing fsl_ep; the
 * original read ep->udc before the NULL check, crashing on a NULL ep
 * instead of returning -EINVAL.
 *----------------------------------------------------------------*/
static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct fsl_ep *ep;
	unsigned long flags;
	int status = -EOPNOTSUPP;	/* operation not supported */
	unsigned char ep_dir = 0, ep_num = 0;
	struct fsl_udc *udc;
	/* check the argument before touching anything it contains */
	if (!_ep)
		return -EINVAL;
	ep = container_of(_ep, struct fsl_ep, ep);
	udc = ep->udc;
	if (!ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}
	/* halt is undefined for isochronous endpoints */
	if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
		status = -EOPNOTSUPP;
		goto out;
	}
	/* Attempt to halt IN ep will fail if any transfer requests
	 * are still queue */
	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}
	status = 0;
	ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
	ep_num = (unsigned char)(ep_index(ep));
	spin_lock_irqsave(&ep->udc->lock, flags);
	dr_ep_change_stall(ep_num, ep_dir, value);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	/* stalling ep0 restarts the control state machine */
	if (ep_index(ep) == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = 0;
	}
out:
	VDBG(" %s %s halt stat %d", ep->ep.name,
			value ?  "set" : "clear", status);
	return status;
}
/* Report the number of bytes still pending in the endpoint's current
 * (primed) transfer, read from its queue head; 0 when idle. */
static int fsl_ep_fifo_status(struct usb_ep *_ep)
{
	struct fsl_ep *ep;
	struct fsl_udc *udc;
	int size = 0;
	u32 bitmask;
	struct ep_queue_head *qh;
	/* reject NULL/unconfigured endpoints and ep0 (address bits == 0) */
	if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
		return -ENODEV;
	ep = container_of(_ep, struct fsl_ep, ep);
	udc = (struct fsl_udc *)ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	qh = get_qh_by_ep(ep);
	/* IN endpoints occupy the upper 16 bits of ENDPTSTATUS */
	bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
	    (1 << (ep_index(ep)));
	/* only meaningful while the endpoint has an active transfer */
	if (fsl_readl(&dr_regs->endptstatus) & bitmask)
		size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
		    >> DTD_LENGTH_BIT_POS;
	pr_debug("%s %u\n", __func__, size);
	return size;
}
/* Flush any pending data in the endpoint's FIFO by writing ENDPTFLUSH and
 * waiting for the controller to acknowledge; retries while ENDPTSTATUS
 * still shows the endpoint as primed (flush can race a new prime). */
static void fsl_ep_fifo_flush(struct usb_ep *_ep)
{
	struct fsl_ep *ep;
	int ep_num, ep_dir;
	u32 bits;
	unsigned long timeout;
#define FSL_UDC_FLUSH_TIMEOUT 1000
	if (!_ep) {
		return;
	} else {
		ep = container_of(_ep, struct fsl_ep, ep);
		if (!ep->ep.desc)
			return;
	}
	ep_num = ep_index(ep);
	ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
	/* ep0 is bidirectional: flush both TX (bit 16) and RX (bit 0) */
	if (ep_num == 0)
		bits = (1 << 16) | 1;
	else if (ep_dir == USB_SEND)
		bits = 1 << (16 + ep_num);
	else
		bits = 1 << ep_num;
	/* timeout is in jiffies */
	timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
	do {
		fsl_writel(bits, &dr_regs->endptflush);
		/* Wait until flush complete */
		while (fsl_readl(&dr_regs->endptflush)) {
			if (time_after(jiffies, timeout)) {
				ERR("ep flush timeout\n");
				return;
			}
			cpu_relax();
		}
		/* See if we need to flush again */
	} while (fsl_readl(&dr_regs->endptstatus) & bits);
}
/* Endpoint operations exported to the gadget core */
static const struct usb_ep_ops fsl_ep_ops = {
	.enable = fsl_ep_enable,
	.disable = fsl_ep_disable,
	.alloc_request = fsl_alloc_request,
	.free_request = fsl_free_request,
	.queue = fsl_ep_queue,
	.dequeue = fsl_ep_dequeue,
	.set_halt = fsl_ep_set_halt,
	.fifo_status = fsl_ep_fifo_status,
	.fifo_flush = fsl_ep_fifo_flush,	/* flush fifo */
};
/*-------------------------------------------------------------------------
Gadget Driver Layer Operations
-------------------------------------------------------------------------*/
/*----------------------------------------------------------------------
* Get the current frame number (from DR frame_index Reg )
*----------------------------------------------------------------------*/
static int fsl_get_frame(struct usb_gadget *gadget)
{
return (int)(fsl_readl(&dr_regs->frindex) & USB_FRINDEX_MASKS);
}
/*-----------------------------------------------------------------------
 * Tries to wake up the host connected to this gadget
 -----------------------------------------------------------------------*/
static int fsl_wakeup(struct usb_gadget *gadget)
{
	struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
	u32 portsc1;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;
	portsc1 = fsl_readl(&dr_regs->portsc1);
	/* nothing to do unless the port is actually suspended */
	if (!(portsc1 & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	fsl_writel(portsc1 | PORTSCX_PORT_FORCE_RESUME, &dr_regs->portsc1);
	return 0;
}
/* True when the D+ pullup may be driven: a gadget driver is bound,
 * software wants the connection, and VBUS is present. */
static int can_pullup(struct fsl_udc *udc)
{
	if (!udc->driver)
		return 0;
	return udc->softconnect && udc->vbus_active;
}
/* Notify controller that VBUS is powered, Called by whatever
detects VBUS sessions */
static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
{
struct fsl_udc *udc;
unsigned long flags;
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
VDBG("VBUS %s", is_active ? "on" : "off");
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
&dr_regs->usbcmd);
else
fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
&dr_regs->usbcmd);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
/* constrain controller's VBUS power usage
 * This call is used by gadget drivers during SET_CONFIGURATION calls,
 * reporting how much power the device may consume.  For example, this
 * could affect how quickly batteries are recharged.
 *
 * Returns zero on success, else negative errno.
 */
static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);

	/* power negotiation is delegated to the transceiver, if present */
	if (IS_ERR_OR_NULL(udc->transceiver))
		return -ENOTSUPP;
	return usb_phy_set_power(udc->transceiver, mA);
}
/* Change Data+ pullup status
* this func is used by usb_gadget_connect/disconnect
*/
static int fsl_pullup(struct usb_gadget *gadget, int is_on)
{
struct fsl_udc *udc;
udc = container_of(gadget, struct fsl_udc, gadget);
if (!udc->vbus_active)
return -EOPNOTSUPP;
udc->softconnect = (is_on != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
&dr_regs->usbcmd);
else
fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
&dr_regs->usbcmd);
return 0;
}
static int fsl_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int fsl_udc_stop(struct usb_gadget *g);
/* Gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops fsl_gadget_ops = {
	.get_frame = fsl_get_frame,
	.wakeup = fsl_wakeup,
/*	.set_selfpowered = fsl_set_selfpowered,	*/ /* Always selfpowered */
	.vbus_session = fsl_vbus_session,
	.vbus_draw = fsl_vbus_draw,
	.pullup = fsl_pullup,
	.udc_start = fsl_udc_start,
	.udc_stop = fsl_udc_stop,
};
/*
 * Empty complete function used by this driver to fill in the req->complete
 * field when creating a request since the complete field is mandatory.
 */
static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req)
{
}
/* Set protocol stall on ep0, protocol stall will automatically be cleared
   on new transaction */
static void ep0stall(struct fsl_udc *udc)
{
	u32 epctrl0;

	/* must set tx and rx to stall at the same time */
	epctrl0 = fsl_readl(&dr_regs->endptctrl[0]);
	epctrl0 |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
	fsl_writel(epctrl0, &dr_regs->endptctrl[0]);
	/* the next control transfer starts over with a fresh SETUP */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = 0;
}
/* Prime a status phase for ep0.
 * Queues the per-device zero-length status_req in the given direction.
 * Returns 0 on success or a negative errno (caller typically stalls ep0). */
static int ep0_prime_status(struct fsl_udc *udc, int direction)
{
	struct fsl_req *req = udc->status_req;
	struct fsl_ep *ep;
	int ret;
	if (direction == EP_DIR_IN)
		udc->ep0_dir = USB_DIR_IN;
	else
		udc->ep0_dir = USB_DIR_OUT;
	ep = &udc->eps[0];
	/* don't clobber the state while a data stage is still transmitting */
	if (udc->ep0_state != DATA_STATE_XMIT)
		udc->ep0_state = WAIT_FOR_OUT_STATUS;
	/* status stage is a zero-length transfer */
	req->ep = ep;
	req->req.length = 0;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = fsl_noop_complete;
	req->dtd_count = 0;
	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
	if (ret)
		return ret;
	if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
		fsl_queue_td(ep, req);
	else
		return -ENOMEM;
	list_add_tail(&req->queue, &ep->queue);
	return 0;
}
/* Abort every request queued on the given pipe with -ESHUTDOWN. */
static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
{
	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);

	/* only configured endpoints carry a name; skip the rest */
	if (!ep->ep.name)
		return;
	nuke(ep, -ESHUTDOWN);
}
/*
 * ch9 Set address
 */
static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
{
	/* Save the new address to device struct; the hardware DEVICEADDR
	 * register is only written later, in ep0_req_complete(), once the
	 * status stage of this request has completed */
	udc->device_address = (u8) value;
	/* Update usb state */
	udc->usb_state = USB_STATE_ADDRESS;
	/* Status phase */
	if (ep0_prime_status(udc, EP_DIR_IN))
		ep0stall(udc);
}
/*
 * ch9 Get status
 * Builds the two-byte status reply for device/interface/endpoint recipients
 * and queues it on ep0, followed by an OUT status stage.  Stalls ep0 on any
 * failure.
 */
static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
		u16 index, u16 length)
{
	u16 tmp = 0;		/* Status, cpu endian */
	struct fsl_req *req;
	struct fsl_ep *ep;
	int ret;
	ep = &udc->eps[0];
	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* Get device status: self-powered and remote-wakeup bits */
		tmp = udc->gadget.is_selfpowered;
		tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
		/* Get interface status */
		/* We don't have interface information in udc driver */
		tmp = 0;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
		/* Get endpoint status */
		struct fsl_ep *target_ep;
		target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
		/* stall if endpoint doesn't exist */
		if (!target_ep->ep.desc)
			goto stall;
		tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
				<< USB_ENDPOINT_HALT;
	}
	udc->ep0_dir = USB_DIR_IN;
	/* Borrow the per device status_req */
	req = udc->status_req;
	/* Fill in the reqest structure */
	*((u16 *) req->req.buf) = cpu_to_le16(tmp);
	req->ep = ep;
	req->req.length = 2;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = fsl_noop_complete;
	req->dtd_count = 0;
	ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
	if (ret)
		goto stall;
	/* prime the data phase */
	if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
		fsl_queue_td(ep, req);
	else			/* no mem */
		goto stall;
	list_add_tail(&req->queue, &ep->queue);
	udc->ep0_state = DATA_STATE_XMIT;
	/* data stage is IN, so prime the OUT status stage right away */
	if (ep0_prime_status(udc, EP_DIR_OUT))
		ep0stall(udc);
	return;
stall:
	ep0stall(udc);
}
/* Handle a received SETUP packet: standard requests the controller must
 * answer itself (GET_STATUS, SET_ADDRESS, SET/CLEAR_FEATURE) are processed
 * here; everything else is forwarded to the gadget driver's setup() with
 * udc->lock temporarily dropped.  Called with udc->lock held. */
static void setup_received_irq(struct fsl_udc *udc,
		struct usb_ctrlrequest *setup)
__releases(udc->lock)
__acquires(udc->lock)
{
	u16 wValue = le16_to_cpu(setup->wValue);
	u16 wIndex = le16_to_cpu(setup->wIndex);
	u16 wLength = le16_to_cpu(setup->wLength);
	/* a new SETUP supersedes anything still queued on ep0 */
	udc_reset_ep_queue(udc, 0);
	/* We process some stardard setup requests here */
	switch (setup->bRequest) {
	case USB_REQ_GET_STATUS:
		/* Data+Status phase from udc */
		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
					!= (USB_DIR_IN | USB_TYPE_STANDARD))
			break;
		ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
		return;
	case USB_REQ_SET_ADDRESS:
		/* Status phase from udc */
		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
						| USB_RECIP_DEVICE))
			break;
		ch9setaddress(udc, wValue, wIndex, wLength);
		return;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		/* Status phase from udc */
	{
		int rc = -EOPNOTSUPP;
		u16 ptc = 0;
		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
			/* endpoint halt: route through fsl_ep_set_halt with
			 * the lock dropped, since it takes the lock itself */
			int pipe = get_pipe_by_windex(wIndex);
			struct fsl_ep *ep;
			if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep)
				break;
			ep = get_ep_by_pipe(udc, pipe);
			spin_unlock(&udc->lock);
			rc = fsl_ep_set_halt(&ep->ep,
					(setup->bRequest == USB_REQ_SET_FEATURE)
						? 1 : 0);
			spin_lock(&udc->lock);
		} else if ((setup->bRequestType & (USB_RECIP_MASK
				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
				| USB_TYPE_STANDARD)) {
			/* Note: The driver has not include OTG support yet.
			 * This will be set when OTG support is added */
			if (wValue == USB_DEVICE_TEST_MODE)
				ptc = wIndex >> 8;
			else if (gadget_is_otg(&udc->gadget)) {
				if (setup->bRequest ==
				    USB_DEVICE_B_HNP_ENABLE)
					udc->gadget.b_hnp_enable = 1;
				else if (setup->bRequest ==
					 USB_DEVICE_A_HNP_SUPPORT)
					udc->gadget.a_hnp_support = 1;
				else if (setup->bRequest ==
					 USB_DEVICE_A_ALT_HNP_SUPPORT)
					udc->gadget.a_alt_hnp_support = 1;
			}
			rc = 0;
		} else
			break;
		if (rc == 0) {
			/* ack the request with an IN status stage */
			if (ep0_prime_status(udc, EP_DIR_IN))
				ep0stall(udc);
		}
		if (ptc) {
			/* enter the requested USB test mode via PORTSC1 */
			u32 tmp;
			mdelay(10);
			tmp = fsl_readl(&dr_regs->portsc1) | (ptc << 16);
			fsl_writel(tmp, &dr_regs->portsc1);
			printk(KERN_INFO "udc: switch to test mode %d.\n", ptc);
		}
		return;
	}
	default:
		break;
	}
	/* Requests handled by gadget */
	if (wLength) {
		/* Data phase from gadget, status phase from udc */
		udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
				?  USB_DIR_IN : USB_DIR_OUT;
		spin_unlock(&udc->lock);
		if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
			ep0stall(udc);
		spin_lock(&udc->lock);
		udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
				?  DATA_STATE_XMIT : DATA_STATE_RECV;
		/*
		 * If the data stage is IN, send status prime immediately.
		 * See 2.0 Spec chapter 8.5.3.3 for detail.
		 */
		if (udc->ep0_state == DATA_STATE_XMIT)
			if (ep0_prime_status(udc, EP_DIR_OUT))
				ep0stall(udc);
	} else {
		/* No data phase, IN status from gadget */
		udc->ep0_dir = USB_DIR_IN;
		spin_unlock(&udc->lock);
		if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
			ep0stall(udc);
		spin_lock(&udc->lock);
		udc->ep0_state = WAIT_FOR_OUT_STATUS;
	}
}
/* Process request for Data or Status phase of ep0
 * prime status phase if needed.
 * Also commits a pending SET_ADDRESS to hardware once its status stage
 * finishes, then advances the ep0 state machine. */
static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
		struct fsl_req *req)
{
	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* Set the new address, deferred from ch9setaddress() until
		 * the status stage completed */
		u32 new_address = (u32) udc->device_address;
		fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
				&dr_regs->deviceaddr);
	}
	done(ep0, req, 0);
	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* already primed at setup_received_irq */
		udc->ep0_state = WAIT_FOR_OUT_STATUS;
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (ep0_prime_status(udc, EP_DIR_IN))
			ep0stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		ERR("Unexpected ep0 packets\n");
		break;
	default:
		ep0stall(udc);
		break;
	}
}
/* Tripwire mechanism to ensure a setup packet payload is extracted without
 * being corrupted by another incoming setup packet: set SUTW, copy the
 * 8-byte setup buffer, and repeat if the controller cleared SUTW (meaning
 * a new SETUP arrived mid-copy). */
static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct ep_queue_head *qh;
	struct fsl_usb2_platform_data *pdata = udc->pdata;
	qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
	/* Clear bit in ENDPTSETUPSTAT */
	temp = fsl_readl(&dr_regs->endptsetupstat);
	fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = fsl_readl(&dr_regs->usbcmd);
		fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
		/* Copy the setup packet to local buffer */
		if (pdata->le_setup_buf) {
			u32 *p = (u32 *)buffer_ptr;
			u32 *s = (u32 *)qh->setup_buffer;
			/* Convert little endian setup buffer to CPU endian */
			*p++ = le32_to_cpu(*s++);
			*p = le32_to_cpu(*s);
		} else {
			memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
		}
		/* retry if the tripwire was cleared by new SETUP traffic */
	} while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
	/* Clear Setup Tripwire */
	temp = fsl_readl(&dr_regs->usbcmd);
	fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
}
/* process-ep_req(): free the completed Tds for this req.
 * Walks the request's dTD chain, decoding per-dTD status into an errno,
 * and accumulates the number of bytes actually transferred.  Returns 0 on
 * success, REQ_UNCOMPLETE if a dTD is still active, or a negative errno. */
static int process_ep_req(struct fsl_udc *udc, int pipe,
		struct fsl_req *curr_req)
{
	struct ep_td_struct *curr_td;
	int actual, remaining_length, j, tmp;
	int status = 0;
	int errors = 0;
	struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
	/* odd pipes are the IN (transmit) direction */
	int direction = pipe % 2;
	curr_td = curr_req->head;
	/* start from the full request length and subtract what each dTD
	 * reports as untransferred */
	actual = curr_req->req.length;
	for (j = 0; j < curr_req->dtd_count; j++) {
		remaining_length = (hc32_to_cpu(curr_td->size_ioc_sts)
					& DTD_PACKET_SIZE)
				>> DTD_LENGTH_BIT_POS;
		actual -= remaining_length;
		errors = hc32_to_cpu(curr_td->size_ioc_sts);
		if (errors & DTD_ERROR_MASK) {
			if (errors & DTD_STATUS_HALTED) {
				ERR("dTD error %08x QH=%d\n", errors, pipe);
				/* Clear the errors and Halt condition */
				tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
				tmp &= ~errors;
				curr_qh->size_ioc_int_sts = cpu_to_hc32(tmp);
				status = -EPIPE;
				/* FIXME: continue with next queued TD? */
				break;
			}
			if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				VDBG("Transfer overflow");
				status = -EPROTO;
				break;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				VDBG("ISO error");
				status = -EILSEQ;
				break;
			} else
				ERR("Unknown error has occurred (0x%x)!\n",
					errors);
		} else if (hc32_to_cpu(curr_td->size_ioc_sts)
				& DTD_STATUS_ACTIVE) {
			/* hardware has not finished this dTD yet */
			VDBG("Request not complete");
			status = REQ_UNCOMPLETE;
			return status;
		} else if (remaining_length) {
			if (direction) {
				/* a TX dTD must drain completely */
				VDBG("Transmit dTD remaining length not zero");
				status = -EPROTO;
				break;
			} else {
				/* short OUT packet: transfer ends early */
				break;
			}
		} else {
			VDBG("dTD transmitted successful");
		}
		if (j != curr_req->dtd_count - 1)
			curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
	}
	if (status)
		return status;
	curr_req->req.actual = actual;
	return 0;
}
/* Process a DTD completion interrupt.
 * For every endpoint flagged in ENDPTCOMPLETE, walk its request queue and
 * complete finished requests, stopping at the first still-active one. */
static void dtd_complete_irq(struct fsl_udc *udc)
{
	u32 bit_pos;
	int i, ep_num, direction, bit_mask, status;
	struct fsl_ep *curr_ep;
	struct fsl_req *curr_req, *temp_req;
	/* Clear the bits in the register (write-1-to-clear) */
	bit_pos = fsl_readl(&dr_regs->endptcomplete);
	fsl_writel(bit_pos, &dr_regs->endptcomplete);
	if (!bit_pos)
		return;
	for (i = 0; i < udc->max_ep; i++) {
		/* pipe index i encodes (ep_num, direction); IN completions
		 * are reported in the upper 16 bits */
		ep_num = i >> 1;
		direction = i % 2;
		bit_mask = 1 << (ep_num + 16 * direction);
		if (!(bit_pos & bit_mask))
			continue;
		curr_ep = get_ep_by_pipe(udc, i);
		/* If the ep is configured */
		if (!curr_ep->ep.name) {
			WARNING("Invalid EP?");
			continue;
		}
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
				queue) {
			status = process_ep_req(udc, i, curr_req);
			VDBG("status of  process_ep_req= %d, ep = %d",
					status, ep_num);
			if (status == REQ_UNCOMPLETE)
				break;
			/* write back status to req */
			curr_req->req.status = status;
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else
				done(curr_ep, curr_req, status);
		}
	}
}
/* Decode the port speed field of a PORTSC value into the generic
 * usb_device_speed enumeration. */
static inline enum usb_device_speed portscx_device_speed(u32 reg)
{
	u32 speed_bits = reg & PORTSCX_PORT_SPEED_MASK;

	if (speed_bits == PORTSCX_PORT_SPEED_HIGH)
		return USB_SPEED_HIGH;
	if (speed_bits == PORTSCX_PORT_SPEED_FULL)
		return USB_SPEED_FULL;
	if (speed_bits == PORTSCX_PORT_SPEED_LOW)
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}
/* Process a port change interrupt.
 * Once the bus reset has finished, latch the negotiated speed from
 * PORTSC1 and move the device to the DEFAULT state. */
static void port_change_irq(struct fsl_udc *udc)
{
	if (udc->bus_reset)
		udc->bus_reset = 0;
	/* Bus resetting is finished */
	if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET))
		/* Get the speed */
		udc->gadget.speed =
			portscx_device_speed(fsl_readl(&dr_regs->portsc1));
	/* Update USB state; a pending resume keeps the previous state */
	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
/* Process suspend interrupt.
 * Remembers the pre-suspend state so bus_resume() can restore it. */
static void suspend_irq(struct fsl_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;
	/* report suspend to the driver, serial.c does not support this */
	if (udc->driver->suspend)
		udc->driver->suspend(&udc->gadget);
}
/* Restore the USB state saved by suspend_irq() and notify the gadget
 * driver that the bus has resumed. */
static void bus_resume(struct fsl_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;
	/* report resume to the driver, serial.c does not support this */
	if (udc->driver->resume)
		udc->driver->resume(&udc->gadget);
}
/* Clear up all ep queues.
 * Aborts every queued request, then notifies the gadget driver (reset or
 * disconnect) with udc->lock dropped, since the callback may re-enter the
 * driver.  Called with udc->lock held; always returns 0. */
static int reset_queues(struct fsl_udc *udc, bool bus_reset)
{
	u8 pipe;
	for (pipe = 0; pipe < udc->max_pipes; pipe++)
		udc_reset_ep_queue(udc, pipe);
	/* report disconnect; the driver is already quiesced */
	spin_unlock(&udc->lock);
	if (bus_reset)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
	else
		udc->driver->disconnect(&udc->gadget);
	spin_lock(&udc->lock);
	return 0;
}
/* Process reset interrupt.
 * Clears the device address, ep0 state and all pending setup/complete
 * status, flushes every endpoint, and then distinguishes a genuine bus
 * reset (PORTSC still asserts reset) from a controller reset that needs
 * full re-initialisation. */
static void reset_irq(struct fsl_udc *udc)
{
	u32 temp;
	unsigned long timeout;
	/* Clear the device address */
	temp = fsl_readl(&dr_regs->deviceaddr);
	fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
	udc->device_address = 0;
	/* Clear usb state */
	udc->resume_state = 0;
	udc->ep0_dir = 0;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;	/* default to 0 on reset */
	udc->gadget.b_hnp_enable = 0;
	udc->gadget.a_hnp_support = 0;
	udc->gadget.a_alt_hnp_support = 0;
	/* Clear all the setup token semaphores (write-1-to-clear) */
	temp = fsl_readl(&dr_regs->endptsetupstat);
	fsl_writel(temp, &dr_regs->endptsetupstat);
	/* Clear all the endpoint complete status bits */
	temp = fsl_readl(&dr_regs->endptcomplete);
	fsl_writel(temp, &dr_regs->endptcomplete);
	/* give the controller up to 100 jiffies to cancel pending primes */
	timeout = jiffies + 100;
	while (fsl_readl(&dr_regs->endpointprime)) {
		/* Wait until all endptprime bits cleared */
		if (time_after(jiffies, timeout)) {
			ERR("Timeout for reset\n");
			break;
		}
		cpu_relax();
	}
	/* Write 1s to the flush register */
	fsl_writel(0xffffffff, &dr_regs->endptflush);
	if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
		VDBG("Bus reset");
		/* Bus is reseting */
		udc->bus_reset = 1;
		/* Reset all the queues, include XD, dTD, EP queue
		 * head and TR Queue */
		reset_queues(udc, true);
		udc->usb_state = USB_STATE_DEFAULT;
	} else {
		VDBG("Controller reset");
		/* initialize usb hw reg except for regs for EP, not
		 * touch usbintr reg */
		dr_controller_setup(udc);
		/* Reset all internal used Queues */
		reset_queues(udc, false);
		ep0_setup(udc);
		/* Enable DR IRQ reg, Set Run bit, change udc state */
		dr_controller_run(udc);
		udc->usb_state = USB_STATE_ATTACHED;
	}
}
/*
 * USB device controller interrupt handler.
 * Reads the pending, enabled interrupt sources, acknowledges them, and
 * dispatches to the setup/completion/port-change/reset/suspend handlers
 * under udc->lock.  Returns IRQ_HANDLED if any source was serviced.
 */
static irqreturn_t fsl_udc_irq(int irq, void *_udc)
{
	struct fsl_udc *udc = _udc;
	u32 irq_src;
	irqreturn_t status = IRQ_NONE;
	unsigned long flags;
	/* Disable ISR for OTG host mode */
	if (udc->stopped)
		return IRQ_NONE;
	spin_lock_irqsave(&udc->lock, flags);
	/* only look at sources that are both asserted and enabled */
	irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
	/* Clear notification bits */
	fsl_writel(irq_src, &dr_regs->usbsts);
	/* VDBG("irq_src [0x%8x]", irq_src); */
	/* Need to resume? */
	if (udc->usb_state == USB_STATE_SUSPENDED)
		if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
			bus_resume(udc);
	/* USB Interrupt */
	if (irq_src & USB_STS_INT) {
		VDBG("Packet int");
		/* Setup package, we only support ep0 as control ep */
		if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
			tripwire_handler(udc, 0,
					(u8 *) (&udc->local_setup_buff));
			setup_received_irq(udc, &udc->local_setup_buff);
			status = IRQ_HANDLED;
		}
		/* completion of dtd */
		if (fsl_readl(&dr_regs->endptcomplete)) {
			dtd_complete_irq(udc);
			status = IRQ_HANDLED;
		}
	}
	/* SOF (for ISO transfer) */
	if (irq_src & USB_STS_SOF) {
		status = IRQ_HANDLED;
	}
	/* Port Change */
	if (irq_src & USB_STS_PORT_CHANGE) {
		port_change_irq(udc);
		status = IRQ_HANDLED;
	}
	/* Reset Received */
	if (irq_src & USB_STS_RESET) {
		VDBG("reset int");
		reset_irq(udc);
		status = IRQ_HANDLED;
	}
	/* Sleep Enable (Suspend) */
	if (irq_src & USB_STS_SUSPEND) {
		suspend_irq(udc);
		status = IRQ_HANDLED;
	}
	if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
		VDBG("Error IRQ %x", irq_src);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return status;
}
/*----------------------------------------------------------------*
 * Hook to gadget drivers
 * Called by initialization code of gadget drivers
 * With a transceiver present the controller stays suspended and is bound
 * to the OTG layer; otherwise it is started immediately.
 * Fix: drop the redundant nested IS_ERR_OR_NULL(transceiver) re-check —
 * the branch is already guarded by the identical condition.
 *----------------------------------------------------------------*/
static int fsl_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	int retval = 0;
	unsigned long flags;
	/* lock is needed but whether should use this lock or another */
	spin_lock_irqsave(&udc_controller->lock, flags);
	/* hook up the driver */
	udc_controller->driver = driver;
	spin_unlock_irqrestore(&udc_controller->lock, flags);
	g->is_selfpowered = 1;
	if (!IS_ERR_OR_NULL(udc_controller->transceiver)) {
		/* Suspend the controller until OTG enable it */
		udc_controller->stopped = 1;
		printk(KERN_INFO "Suspend udc for OTG auto detect\n");
		/* connect to bus through transceiver */
		retval = otg_set_peripheral(
				udc_controller->transceiver->otg,
				&udc_controller->gadget);
		if (retval < 0) {
			ERR("can't bind to transceiver\n");
			udc_controller->driver = NULL;
			return retval;
		}
	} else {
		/* Enable DR IRQ reg and set USBCMD reg Run bit */
		dr_controller_run(udc_controller);
		udc_controller->usb_state = USB_STATE_ATTACHED;
		udc_controller->ep0_state = WAIT_FOR_SETUP;
		udc_controller->ep0_dir = 0;
	}
	return retval;
}
/* Disconnect from gadget driver.
 * Unbinds from the OTG transceiver (if any), stops the controller, aborts
 * every queued request on all endpoints, and detaches the driver. */
static int fsl_udc_stop(struct usb_gadget *g)
{
	struct fsl_ep *loop_ep;
	unsigned long flags;
	if (!IS_ERR_OR_NULL(udc_controller->transceiver))
		otg_set_peripheral(udc_controller->transceiver->otg, NULL);
	/* stop DR, disable intr */
	dr_controller_stop(udc_controller);
	/* in fact, no needed */
	udc_controller->usb_state = USB_STATE_ATTACHED;
	udc_controller->ep0_state = WAIT_FOR_SETUP;
	udc_controller->ep0_dir = 0;
	/* stand operation */
	spin_lock_irqsave(&udc_controller->lock, flags);
	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
	/* abort ep0 first, then every endpoint on the gadget's list */
	nuke(&udc_controller->eps[0], -ESHUTDOWN);
	list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
			ep.ep_list)
		nuke(loop_ep, -ESHUTDOWN);
	spin_unlock_irqrestore(&udc_controller->lock, flags);
	udc_controller->driver = NULL;
	return 0;
}
/*-------------------------------------------------------------------------
PROC File System Support
-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
static const char proc_filename[] = "driver/fsl_usb2_udc";
static int fsl_proc_read(struct seq_file *m, void *v)
{
unsigned long flags;
int i;
u32 tmp_reg;
struct fsl_ep *ep = NULL;
struct fsl_req *req;
struct fsl_udc *udc = udc_controller;
spin_lock_irqsave(&udc->lock, flags);
/* ------basic driver information ---- */
seq_printf(m,
DRIVER_DESC "\n"
"%s version: %s\n"
"Gadget driver: %s\n\n",
driver_name, DRIVER_VERSION,
udc->driver ? udc->driver->driver.name : "(none)");
/* ------ DR Registers ----- */
tmp_reg = fsl_readl(&dr_regs->usbcmd);
seq_printf(m,
"USBCMD reg:\n"
"SetupTW: %d\n"
"Run/Stop: %s\n\n",
(tmp_reg & USB_CMD_SUTW) ? 1 : 0,
(tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
tmp_reg = fsl_readl(&dr_regs->usbsts);
seq_printf(m,
"USB Status Reg:\n"
"Dr Suspend: %d Reset Received: %d System Error: %s "
"USB Error Interrupt: %s\n\n",
(tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
(tmp_reg & USB_STS_RESET) ? 1 : 0,
(tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
(tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
tmp_reg = fsl_readl(&dr_regs->usbintr);
seq_printf(m,
"USB Interrupt Enable Reg:\n"
"Sleep Enable: %d SOF Received Enable: %d "
"Reset Enable: %d\n"
"System Error Enable: %d "
"Port Change Detected Enable: %d\n"
"USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
(tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
(tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
(tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
(tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
(tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
(tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
(tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
tmp_reg = fsl_readl(&dr_regs->frindex);
seq_printf(m,
"USB Frame Index Reg: Frame Number is 0x%x\n\n",
(tmp_reg & USB_FRINDEX_MASKS));
tmp_reg = fsl_readl(&dr_regs->deviceaddr);
seq_printf(m,
"USB Device Address Reg: Device Addr is 0x%x\n\n",
(tmp_reg & USB_DEVICE_ADDRESS_MASK));
tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
seq_printf(m,
"USB Endpoint List Address Reg: "
"Device Addr is 0x%x\n\n",
(tmp_reg & USB_EP_LIST_ADDRESS_MASK));
tmp_reg = fsl_readl(&dr_regs->portsc1);
seq_printf(m,
"USB Port Status&Control Reg:\n"
"Port Transceiver Type : %s Port Speed: %s\n"
"PHY Low Power Suspend: %s Port Reset: %s "
"Port Suspend Mode: %s\n"
"Over-current Change: %s "
"Port Enable/Disable Change: %s\n"
"Port Enabled/Disabled: %s "
"Current Connect Status: %s\n\n", ( {
const char *s;
switch (tmp_reg & PORTSCX_PTS_FSLS) {
case PORTSCX_PTS_UTMI:
s = "UTMI"; break;
case PORTSCX_PTS_ULPI:
s = "ULPI "; break;
case PORTSCX_PTS_FSLS:
s = "FS/LS Serial"; break;
default:
s = "None"; break;
}
s;} ),
usb_speed_string(portscx_device_speed(tmp_reg)),
(tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
"Normal PHY mode" : "Low power mode",
(tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
"Not in Reset",
(tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
(tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Dected" :
"No",
(tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Disable" :
"Not change",
(tmp_reg & PORTSCX_PORT_ENABLE) ? "Enable" :
"Not correct",
(tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
"Attached" : "Not-Att");
tmp_reg = fsl_readl(&dr_regs->usbmode);
seq_printf(m,
"USB Mode Reg: Controller Mode is: %s\n\n", ( {
const char *s;
switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
case USB_MODE_CTRL_MODE_IDLE:
s = "Idle"; break;
case USB_MODE_CTRL_MODE_DEVICE:
s = "Device Controller"; break;
case USB_MODE_CTRL_MODE_HOST:
s = "Host Controller"; break;
default:
s = "None"; break;
}
s;
} ));
tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
seq_printf(m,
"Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
(tmp_reg & EP_SETUP_STATUS_MASK));
for (i = 0; i < udc->max_ep / 2; i++) {
tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
seq_printf(m, "EP Ctrl Reg [0x%x]: = [0x%x]\n", i, tmp_reg);
}
tmp_reg = fsl_readl(&dr_regs->endpointprime);
seq_printf(m, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
if (udc->pdata->have_sysif_regs) {
tmp_reg = usb_sys_regs->snoop1;
seq_printf(m, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
tmp_reg = usb_sys_regs->control;
seq_printf(m, "General Control Reg : = [0x%x]\n\n", tmp_reg);
}
/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
ep = &udc->eps[0];
seq_printf(m, "For %s Maxpkt is 0x%x index is 0x%x\n",
ep->ep.name, ep_maxpacket(ep), ep_index(ep));
if (list_empty(&ep->queue)) {
seq_puts(m, "its req queue is empty\n\n");
} else {
list_for_each_entry(req, &ep->queue, queue) {
seq_printf(m,
"req %p actual 0x%x length 0x%x buf %p\n",
&req->req, req->req.actual,
req->req.length, req->req.buf);
}
}
/* other gadget->eplist ep */
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
if (ep->ep.desc) {
seq_printf(m,
"\nFor %s Maxpkt is 0x%x "
"index is 0x%x\n",
ep->ep.name, ep_maxpacket(ep),
ep_index(ep));
if (list_empty(&ep->queue)) {
seq_puts(m, "its req queue is empty\n\n");
} else {
list_for_each_entry(req, &ep->queue, queue) {
seq_printf(m,
"req %p actual 0x%x length "
"0x%x buf %p\n",
&req->req, req->req.actual,
req->req.length, req->req.buf);
} /* end for each_entry of ep req */
} /* end for else */
} /* end for if(ep->queue) */
} /* end (ep->desc) */
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
#define create_proc_file() \
proc_create_single(proc_filename, 0, NULL, fsl_proc_read)
#define remove_proc_file() remove_proc_entry(proc_filename, NULL)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
#define create_proc_file() do {} while (0)
#define remove_proc_file() do {} while (0)
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/* Release udc structures */
/*
 * fsl_udc_release - gadget device release callback.
 * @dev: the gadget device being released (its parent did the DMA alloc)
 *
 * Runs when the gadget device's last reference is dropped, after
 * fsl_udc_remove() has deleted the gadget. Signals the remover, then
 * frees the DMA-coherent queue-head block and the controller struct.
 */
static void fsl_udc_release(struct device *dev)
{
	/* let fsl_udc_remove()'s wait_for_completion() proceed */
	complete(udc_controller->done);
	/* ep_qh was dma_alloc_coherent()ed against the parent device */
	dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
			udc_controller->ep_qh, udc_controller->ep_qh_dma);
	kfree(udc_controller);
}
/******************************************************************
Internal structure setup functions
*******************************************************************/
/*------------------------------------------------------------------
* init resource for global controller called by fsl_udc_probe()
* On success the udc handle is initialized, on failure it is
* unchanged (reset).
* Return 0 on success and -1 on allocation failure
------------------------------------------------------------------*/
/*
 * struct_udc_setup - allocate and initialise per-controller state.
 * @udc:  controller instance being set up
 * @pdev: platform device supplying platdata and the DMA-capable device
 *
 * Allocates the endpoint array, the DMA-coherent queue-head (QH) block
 * (aligned to QH_ALIGNMENT), and the internal ep0 status request plus a
 * small bounce buffer for it. Also latches initial USB state fields.
 *
 * Return: 0 on success, -1 on any allocation failure. On failure all
 * partially-acquired resources are released and touched fields reset.
 */
static int struct_udc_setup(struct fsl_udc *udc,
		struct platform_device *pdev)
{
	struct fsl_usb2_platform_data *pdata;
	size_t size;

	pdata = dev_get_platdata(&pdev->dev);
	udc->phy_mode = pdata->phy_mode;

	udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
	if (!udc->eps) {
		ERR("kmalloc udc endpoint status failed\n");
		goto eps_alloc_failed;
	}

	/* initialized QHs, take care of alignment */
	size = udc->max_ep * sizeof(struct ep_queue_head);
	if (size < QH_ALIGNMENT)
		size = QH_ALIGNMENT;
	else if ((size % QH_ALIGNMENT) != 0) {
		/* canonical round-up to the next QH_ALIGNMENT boundary
		 * (was "+ QH_ALIGNMENT + 1", which could overshoot by a
		 * whole alignment unit) */
		size += QH_ALIGNMENT - 1;
		size &= ~(QH_ALIGNMENT - 1);
	}
	udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_qh_dma, GFP_KERNEL);
	if (!udc->ep_qh) {
		ERR("malloc QHs for udc failed\n");
		goto ep_queue_alloc_failed;
	}
	udc->ep_qh_size = size;

	/* Initialize ep0 status request structure */
	/* FIXME: fsl_alloc_request() ignores ep argument */
	udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
			struct fsl_req, req);
	if (!udc->status_req) {
		ERR("kzalloc for udc status request failed\n");
		goto udc_status_alloc_failed;
	}

	/* allocate a small amount of memory to get valid address */
	udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		ERR("kzalloc for udc request buffer failed\n");
		goto udc_req_buf_alloc_failed;
	}

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = 0;
	udc->remote_wakeup = 0;	/* default to 0 on reset */

	return 0;

udc_req_buf_alloc_failed:
	kfree(udc->status_req);
udc_status_alloc_failed:
	/*
	 * ep_qh was obtained from dma_alloc_coherent(), so it must be
	 * returned with dma_free_coherent(), not kfree() (which was the
	 * previous, incorrect cleanup here).
	 */
	dma_free_coherent(&pdev->dev, udc->ep_qh_size,
			udc->ep_qh, udc->ep_qh_dma);
	udc->ep_qh_size = 0;
ep_queue_alloc_failed:
	kfree(udc->eps);
eps_alloc_failed:
	udc->phy_mode = 0;
	return -1;
}
/*----------------------------------------------------------------
* Setup the fsl_ep struct for eps
* Link fsl_ep->ep to gadget->ep_list
* ep0out is not used so do nothing here
* ep0in should be taken care
*--------------------------------------------------------------*/
/*
 * struct_ep_setup - initialise one fsl_ep slot.
 * @udc:   owning controller
 * @index: ep slot (even = OUT, odd = IN; 0 = control)
 * @name:  endpoint name copied into ep->name
 * @link:  non-zero to add the ep to gadget.ep_list (ep0 is not linked)
 *
 * Always returns 0.
 */
static int struct_ep_setup(struct fsl_udc *udc, unsigned char index,
		char *name, int link)
{
	struct fsl_ep *ep = &udc->eps[index];

	ep->udc = udc;
	/* NOTE(review): assumes 'name' fits in ep->name — callers pass
	 * "ep%dout"/"ep%din" built in char[14]; confirm ep->name size. */
	strcpy(ep->name, name);
	ep->ep.name = ep->name;

	ep->ep.ops = &fsl_ep_ops;
	ep->stopped = 0;

	/* ep0 is the control endpoint; the rest support iso/bulk/int */
	if (index == 0) {
		ep->ep.caps.type_control = true;
	} else {
		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}

	/* odd slots are IN, even slots are OUT */
	if (index & 1)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	/* for ep0: maxP defined in desc
	 * for other eps, maxP is set by epautoconfig() called by gadget layer
	 */
	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);

	/* the queue lists any req for this ep */
	INIT_LIST_HEAD(&ep->queue);

	/* gadget.ep_list used for ep_autoconfig so no ep0 */
	if (link)
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

	ep->gadget = &udc->gadget;
	ep->qh = &udc->ep_qh[index];

	return 0;
}
/* Driver probe function
* all initialization operations implemented here except enabling usb_intr reg
* board setup should have been done in the platform code
*/
/*
 * fsl_udc_probe - bind the driver to a platform device.
 * @pdev: platform device with platdata, MMIO resource and IRQ
 *
 * Allocates the global udc_controller, maps registers, requests the
 * IRQ, builds the endpoint/QH structures and registers the gadget.
 * On failure it unwinds through the labelled error chain in reverse
 * acquisition order and returns a negative errno.
 */
static int fsl_udc_probe(struct platform_device *pdev)
{
	struct fsl_usb2_platform_data *pdata;
	struct resource *res;
	int ret = -ENODEV;
	unsigned int i;
	u32 dccparams;

	udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
	if (udc_controller == NULL)
		return -ENOMEM;

	pdata = dev_get_platdata(&pdev->dev);
	udc_controller->pdata = pdata;
	spin_lock_init(&udc_controller->lock);
	/* stays stopped until dr_controller_run() is invoked */
	udc_controller->stopped = 1;

#ifdef CONFIG_USB_OTG
	if (pdata->operating_mode == FSL_USB2_DR_OTG) {
		udc_controller->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
		if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
			ERR("Can't find OTG driver!\n");
			ret = -ENODEV;
			goto err_kfree;
		}
	}
#endif

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENXIO;
		goto err_kfree;
	}

	/* region is claimed only in dedicated device mode — presumably
	 * shared with the host/OTG side otherwise; mirrored in the
	 * err_release_mem_region unwinding below */
	if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
		if (!request_mem_region(res->start, resource_size(res),
					driver_name)) {
			ERR("request mem region for %s failed\n", pdev->name);
			ret = -EBUSY;
			goto err_kfree;
		}
	}

	dr_regs = ioremap(res->start, resource_size(res));
	if (!dr_regs) {
		ret = -ENOMEM;
		goto err_release_mem_region;
	}

	pdata->regs = (void __iomem *)dr_regs;

	/*
	 * do platform specific init: check the clock, grab/config pins, etc.
	 */
	if (pdata->init && pdata->init(pdev)) {
		ret = -ENODEV;
		goto err_iounmap;
	}

	/* Set accessors only after pdata->init() ! */
	fsl_set_accessors(pdata);

	if (pdata->have_sysif_regs)
		usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;

	/* Read Device Controller Capability Parameters register */
	dccparams = fsl_readl(&dr_regs->dccparams);
	if (!(dccparams & DCCPARAMS_DC)) {
		ERR("This SOC doesn't support device role\n");
		ret = -ENODEV;
		goto err_exit;
	}
	/* Get max device endpoints */
	/* DEN is bidirectional ep number, max_ep doubles the number */
	udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		/* normalise 0 to -ENODEV; negative values pass through */
		ret = ret ? : -ENODEV;
		goto err_exit;
	}
	udc_controller->irq = ret;

	ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
			driver_name, udc_controller);
	if (ret != 0) {
		ERR("cannot request irq %d err %d\n",
				udc_controller->irq, ret);
		goto err_exit;
	}

	/* Initialize the udc structure including QH member and other member */
	if (struct_udc_setup(udc_controller, pdev)) {
		ERR("Can't initialize udc data structure\n");
		ret = -ENOMEM;
		goto err_free_irq;
	}

	if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
		/* initialize usb hw reg except for regs for EP,
		 * leave usbintr reg untouched */
		dr_controller_setup(udc_controller);
	}

	/* Setup gadget structure */
	udc_controller->gadget.ops = &fsl_gadget_ops;
	udc_controller->gadget.max_speed = USB_SPEED_HIGH;
	udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
	INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
	udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
	udc_controller->gadget.name = driver_name;

	/* Setup gadget.dev and register with kernel */
	dev_set_name(&udc_controller->gadget.dev, "gadget");
	udc_controller->gadget.dev.of_node = pdev->dev.of_node;

	if (!IS_ERR_OR_NULL(udc_controller->transceiver))
		udc_controller->gadget.is_otg = 1;

	/* setup QH and epctrl for ep0 */
	ep0_setup(udc_controller);

	/* setup udc->eps[] for ep0 */
	struct_ep_setup(udc_controller, 0, "ep0", 0);
	/* for ep0: the desc defined here;
	 * for other eps, gadget layer called ep_enable with defined desc
	 */
	udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
	usb_ep_set_maxpacket_limit(&udc_controller->eps[0].ep,
				   USB_MAX_CTRL_PAYLOAD);

	/* setup the udc->eps[] for non-control endpoints and link
	 * to gadget.ep_list */
	for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
		char name[14];

		sprintf(name, "ep%dout", i);
		struct_ep_setup(udc_controller, i * 2, name, 1);
		sprintf(name, "ep%din", i);
		struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
	}

	/* use dma_pool for TD management */
	udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
			sizeof(struct ep_td_struct),
			DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
	if (udc_controller->td_pool == NULL) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	ret = usb_add_gadget_udc_release(&pdev->dev, &udc_controller->gadget,
			fsl_udc_release);
	if (ret)
		goto err_del_udc;

	create_proc_file();
	return 0;

	/* NOTE(review): paths taken after struct_udc_setup() succeeds do not
	 * free eps/ep_qh/status_req; fsl_udc_release()/fsl_udc_remove()
	 * normally do that — verify whether these leak on probe failure. */
err_del_udc:
	dma_pool_destroy(udc_controller->td_pool);
err_free_irq:
	free_irq(udc_controller->irq, udc_controller);
err_exit:
	if (pdata->exit)
		pdata->exit(pdev);
err_iounmap:
	iounmap(dr_regs);
err_release_mem_region:
	if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
		release_mem_region(res->start, resource_size(res));
err_kfree:
	kfree(udc_controller);
	udc_controller = NULL;
	return ret;
}
/* Driver removal function
* Free resources and finish pending transactions
*/
/*
 * fsl_udc_remove - unbind the driver.
 * @pdev: platform device being removed
 *
 * Deletes the gadget, releases per-endpoint memory, IRQ and MMIO,
 * then blocks until fsl_udc_release() (triggered by the last gadget
 * device reference drop) has completed and freed udc_controller.
 */
static int fsl_udc_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);

	DECLARE_COMPLETION_ONSTACK(done);

	if (!udc_controller)
		return -ENODEV;

	/* fsl_udc_release() signals this completion when it runs */
	udc_controller->done = &done;
	usb_del_gadget_udc(&udc_controller->gadget);

	/* DR has been stopped in usb_gadget_unregister_driver() */
	remove_proc_file();

	/* Free allocated memory (ep_qh is freed in fsl_udc_release()) */
	kfree(udc_controller->status_req->req.buf);
	kfree(udc_controller->status_req);
	kfree(udc_controller->eps);

	dma_pool_destroy(udc_controller->td_pool);
	free_irq(udc_controller->irq, udc_controller);
	iounmap(dr_regs);
	if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
		release_mem_region(res->start, resource_size(res));

	/* free udc --wait for the release() finished */
	wait_for_completion(&done);

	/*
	 * do platform specific un-initialization:
	 * release iomux pins, etc.
	 */
	if (pdata->exit)
		pdata->exit(pdev);

	return 0;
}
/*-----------------------------------------------------------------
* Modify Power management attributes
* Used by OTG statemachine to disable gadget temporarily
-----------------------------------------------------------------*/
/*
 * fsl_udc_suspend - platform PM suspend hook.
 * @pdev:  unused
 * @state: unused PM message
 *
 * Stops the DR controller; fsl_udc_resume() restarts it.
 * Always returns 0.
 */
static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
	dr_controller_stop(udc_controller);
	return 0;
}
/*-----------------------------------------------------------------
* Invoked on USB resume. May be called in_interrupt.
* Here we start the DR controller and enable the irq
*-----------------------------------------------------------------*/
/*
 * fsl_udc_resume - platform PM resume hook (also reused by the OTG
 * resume path with a NULL pdev, which is never dereferenced here).
 *
 * Re-initialises and restarts the DR controller when it was stopped,
 * then resets the software USB state to "attached, awaiting SETUP".
 * Always returns 0.
 */
static int fsl_udc_resume(struct platform_device *pdev)
{
	/* Enable DR irq reg and set controller Run */
	if (udc_controller->stopped) {
		dr_controller_setup(udc_controller);
		dr_controller_run(udc_controller);
	}
	udc_controller->usb_state = USB_STATE_ATTACHED;
	udc_controller->ep0_state = WAIT_FOR_SETUP;
	udc_controller->ep0_dir = 0;
	return 0;
}
/*
 * fsl_udc_otg_suspend - halt the gadget on behalf of the OTG machine.
 * @dev:   unused
 * @state: unused PM message
 *
 * Leaves early when the gadget is already stopped (remembering that
 * for the matching resume) or when the controller is not in device
 * mode; otherwise clears Run/Stop to halt the DR controller.
 * Always returns 0.
 */
static int fsl_udc_otg_suspend(struct device *dev, pm_message_t state)
{
	struct fsl_udc *udc = udc_controller;
	u32 ctrl_mode, cmd;

	ctrl_mode = fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_MASK;

	pr_debug("%s(): mode 0x%x stopped %d\n", __func__, ctrl_mode, udc->stopped);

	/*
	 * If the controller is already stopped, then this must be a
	 * PM suspend.  Remember this fact, so that we will leave the
	 * controller stopped at PM resume time.
	 */
	if (udc->stopped) {
		pr_debug("gadget already stopped, leaving early\n");
		udc->already_stopped = 1;
		return 0;
	}

	if (ctrl_mode != USB_MODE_CTRL_MODE_DEVICE) {
		pr_debug("gadget not in device mode, leaving early\n");
		return 0;
	}

	/* stop the controller */
	cmd = fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP;
	fsl_writel(cmd, &dr_regs->usbcmd);

	udc->stopped = 1;

	pr_info("USB Gadget suspended\n");

	return 0;
}
/*
 * fsl_udc_otg_resume - restart the gadget on behalf of the OTG machine.
 * @dev: unused
 *
 * Skips the restart when the gadget was already stopped at suspend
 * time; otherwise defers to fsl_udc_resume().
 */
static int fsl_udc_otg_resume(struct device *dev)
{
	pr_debug("%s(): stopped %d already_stopped %d\n", __func__,
			udc_controller->stopped, udc_controller->already_stopped);

	/*
	 * If the controller was stopped at suspend time, then
	 * don't resume it now.
	 */
	if (udc_controller->already_stopped) {
		udc_controller->already_stopped = 0;
		pr_debug("gadget was already stopped, leaving early\n");
		return 0;
	}

	pr_info("USB Gadget resume\n");

	/* the pdev argument is not used by fsl_udc_resume() */
	return fsl_udc_resume(NULL);
}
/*-------------------------------------------------------------------------
Register entry point for the peripheral controller driver
--------------------------------------------------------------------------*/
/* platform device IDs this driver binds to */
static const struct platform_device_id fsl_udc_devtype[] = {
	{
		.name = "fsl-usb2-udc",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);

static struct platform_driver udc_driver = {
	.remove		= fsl_udc_remove,
	.id_table	= fsl_udc_devtype,
	/* these suspend and resume are not usb suspend and resume */
	.suspend	= fsl_udc_suspend,
	.resume		= fsl_udc_resume,
	.driver		= {
		.name	= driver_name,
		/* udc suspend/resume called from OTG driver */
		.suspend = fsl_udc_otg_suspend,
		.resume  = fsl_udc_otg_resume,
	},
};

/* probe registered via _probe variant: the device cannot be re-bound */
module_platform_driver_probe(udc_driver, fsl_udc_probe);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:fsl-usb2-udc");
| linux-master | drivers/usb/gadget/udc/fsl_udc_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Handles the Intel 27x USB Device Controller (UDC)
*
* Inspired by original driver by Frank Becker, David Brownell, and others.
* Copyright (C) 2008 Robert Jarzmik
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include "pxa27x_udc.h"
/*
* This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
* series processors.
*
* Such controller drivers work with a gadget driver. The gadget driver
* returns descriptors, implements configuration and data protocols used
* by the host to interact with this device, and allocates endpoints to
* the different protocol interfaces. The controller driver virtualizes
* usb hardware so that the gadget drivers will be more portable.
*
* This UDC hardware wants to implement a bit too much USB protocol. The
* biggest issues are: that the endpoints have to be set up before the
* controller can be enabled (minor, and not uncommon); and each endpoint
* can only have one configuration, interface and alternative interface
* number (major, and very unusual). Once set up, these cannot be changed
* without a controller reset.
*
* The workaround is to setup all combinations necessary for the gadgets which
* will work with this driver. This is done in pxa_udc structure, statically.
* See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
* (You could modify this if needed. Some drivers have a "fifo_mode" module
* parameter to facilitate such changes.)
*
* The combinations have been tested with these gadgets :
* - zero gadget
* - file storage gadget
* - ether gadget
*
* The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
* made of UDC's double buffering either. USB "On-The-Go" is not implemented.
*
* All the requests are handled the same way :
 * - the driver tries to handle the request directly via the IO
 * - if the IO fifo is not big enough, the remainder is sent/received in
 * interrupt handling.
*/
#define DRIVER_VERSION "2008-04-18"
#define DRIVER_DESC "PXA 27x USB Device Controller driver"
static const char driver_name[] = "pxa27x_udc";
static struct pxa_udc *the_controller;
static void handle_ep(struct pxa_ep *ep);
/*
* Debug filesystem
*/
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
/*
 * state_dbg_show - debugfs "udcstate" file: dump driver/controller state.
 * @s: seq_file backing the debugfs entry
 * @p: unused iterator position
 *
 * Returns -ENODEV when no gadget driver is bound, 0 otherwise.
 */
static int state_dbg_show(struct seq_file *s, void *p)
{
	struct pxa_udc *udc = s->private;
	u32 tmp;

	if (!udc->driver)
		return -ENODEV;

	/* basic device status */
	seq_printf(s, DRIVER_DESC "\n"
		   "%s version: %s\n"
		   "Gadget driver: %s\n",
		   driver_name, DRIVER_VERSION,
		   udc->driver ? udc->driver->driver.name : "(none)");

	/* decode UDCCR flag bits plus active config/interface/altsetting */
	tmp = udc_readl(udc, UDCCR);
	seq_printf(s,
		   "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n",
		   tmp,
		   (tmp & UDCCR_OEN) ? " oen":"",
		   (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
		   (tmp & UDCCR_AHNP) ? " rem" : "",
		   (tmp & UDCCR_BHNP) ? " rstir" : "",
		   (tmp & UDCCR_DWRE) ? " dwre" : "",
		   (tmp & UDCCR_SMAC) ? " smac" : "",
		   (tmp & UDCCR_EMCE) ? " emce" : "",
		   (tmp & UDCCR_UDR) ? " udr" : "",
		   (tmp & UDCCR_UDA) ? " uda" : "",
		   (tmp & UDCCR_UDE) ? " ude" : "",
		   (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
		   (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
		   (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);

	/* registers for device and ep0 */
	seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
		   udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
	seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
		   udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
	seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
	/* cumulative interrupt statistics */
	seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n",
		   udc->stats.irqs_reset, udc->stats.irqs_suspend,
		   udc->stats.irqs_resume, udc->stats.irqs_reconfig);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(state_dbg);
/*
 * queues_dbg_show - debugfs "queues" file: dump per-endpoint request queues.
 * @s: seq_file backing the debugfs entry
 * @p: unused iterator position
 *
 * Returns -ENODEV when no gadget driver is bound, 0 otherwise.
 */
static int queues_dbg_show(struct seq_file *s, void *p)
{
	struct pxa_udc *udc = s->private;
	struct pxa_ep *ep;
	struct pxa27x_request *req;
	int i, maxpkt;

	if (!udc->driver)
		return -ENODEV;

	/* dump endpoint queues */
	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
		ep = &udc->pxa_ep[i];
		maxpkt = ep->fifo_size;
		seq_printf(s, "%-12s max_pkt=%d %s\n",
			   EPNAME(ep), maxpkt, "pio");

		if (list_empty(&ep->queue)) {
			seq_puts(s, "\t(nothing queued)\n");
			continue;
		}

		/* one line per pending request: progress vs. total length */
		list_for_each_entry(req, &ep->queue, queue) {
			seq_printf(s, "\treq %p len %d/%d buf %p\n",
				   &req->req, req->req.actual,
				   req->req.length, req->req.buf);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(queues_dbg);
/*
 * eps_dbg_show - debugfs "epstate" file: dump per-endpoint registers/stats.
 * @s: seq_file backing the debugfs entry
 * @p: unused iterator position
 *
 * Returns -ENODEV when no gadget driver is bound, 0 otherwise.
 */
static int eps_dbg_show(struct seq_file *s, void *p)
{
	struct pxa_udc *udc = s->private;
	struct pxa_ep *ep;
	int i;
	u32 tmp;

	if (!udc->driver)
		return -ENODEV;

	/* ep0 control/status register, decoded bit by bit */
	ep = &udc->pxa_ep[0];
	tmp = udc_ep_readl(ep, UDCCSR);
	seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n",
		   tmp,
		   (tmp & UDCCSR0_SA) ? " sa" : "",
		   (tmp & UDCCSR0_RNE) ? " rne" : "",
		   (tmp & UDCCSR0_FST) ? " fst" : "",
		   (tmp & UDCCSR0_SST) ? " sst" : "",
		   (tmp & UDCCSR0_DME) ? " dme" : "",
		   (tmp & UDCCSR0_IPR) ? " ipr" : "",
		   (tmp & UDCCSR0_OPC) ? " opc" : "");
	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
		ep = &udc->pxa_ep[i];
		/* ep0 has no per-ep UDCCR; use the global one for slot 0 */
		tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
		seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",
			   EPNAME(ep),
			   ep->stats.in_bytes, ep->stats.in_ops,
			   ep->stats.out_bytes, ep->stats.out_ops,
			   ep->stats.irqs,
			   tmp, udc_ep_readl(ep, UDCCSR),
			   udc_ep_readl(ep, UDCBCR));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(eps_dbg);
/*
 * pxa_init_debugfs - create the debugfs directory and entries.
 * @udc: pxa udc
 *
 * debugfs_create_* failures are intentionally ignored (debug-only).
 */
static void pxa_init_debugfs(struct pxa_udc *udc)
{
	struct dentry *root;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
	debugfs_create_file("queues", 0400, root, udc, &queues_dbg_fops);
	debugfs_create_file("epstate", 0400, root, udc, &eps_dbg_fops);
}
/*
 * pxa_cleanup_debugfs - remove the debugfs directory created at init.
 * @udc: pxa udc
 */
static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
#else
/* no-op stubs when CONFIG_USB_GADGET_DEBUG_FS is disabled */
static inline void pxa_init_debugfs(struct pxa_udc *udc)
{
}
static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
}
#endif
/**
* is_match_usb_pxa - check if usb_ep and pxa_ep match
* @udc_usb_ep: usb endpoint
* @ep: pxa endpoint
* @config: configuration required in pxa_ep
* @interface: interface required in pxa_ep
* @altsetting: altsetting required in pxa_ep
*
* Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
*/
/*
 * is_match_usb_pxa - check if usb_ep and pxa_ep match.
 * @udc_usb_ep: usb endpoint
 * @ep:         pxa endpoint
 * @config:     configuration required in pxa_ep
 * @interface:  interface required in pxa_ep
 * @altsetting: altsetting required in pxa_ep
 *
 * Returns 1 when address, direction, transfer type and the
 * config/interface/altsetting triple all agree, 0 otherwise.
 */
static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
		int config, int interface, int altsetting)
{
	return usb_endpoint_num(&udc_usb_ep->desc) == ep->addr
		&& usb_endpoint_dir_in(&udc_usb_ep->desc) == ep->dir_in
		&& usb_endpoint_type(&udc_usb_ep->desc) == ep->type
		&& ep->config == config
		&& ep->interface == interface
		&& ep->alternate == altsetting;
}
/**
* find_pxa_ep - find pxa_ep structure matching udc_usb_ep
* @udc: pxa udc
* @udc_usb_ep: udc_usb_ep structure
*
* Match udc_usb_ep and all pxa_ep available, to see if one matches.
* This is necessary because of the strong pxa hardware restriction requiring
 * that once pxa endpoints are initialized, their configuration is frozen, and
* no change can be made to their address, direction, or in which configuration,
* interface or altsetting they are active ... which differs from more usual
* models which have endpoints be roughly just addressable fifos, and leave
* configuration events up to gadget drivers (like all control messages).
*
* Note that there is still a blurred point here :
* - we rely on UDCCR register "active interface" and "active altsetting".
* This is a nonsense in regard of USB spec, where multiple interfaces are
* active at the same time.
* - if we knew for sure that the pxa can handle multiple interface at the
* same time, assuming Intel's Developer Guide is wrong, this function
* should be reviewed, and a cache of couples (iface, altsetting) should
* be kept in the pxa_udc structure. In this case this function would match
* against the cache of couples instead of the "last altsetting" set up.
*
* Returns the matched pxa_ep structure or NULL if none found
*/
/*
 * find_pxa_ep - find the pxa_ep structure matching a udc_usb_ep.
 * @udc:        pxa udc
 * @udc_usb_ep: udc_usb_ep structure
 *
 * Resolves the usb endpoint against the physical endpoints using the
 * currently active configuration, interface and altsetting.
 *
 * Returns the matched pxa_ep or NULL if none matches.
 */
static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
		struct udc_usb_ep *udc_usb_ep)
{
	struct pxa_ep *ep;
	int i;

	/* ep0 always maps onto the first physical endpoint */
	if (udc_usb_ep == &udc->udc_usb_ep[0])
		return &udc->pxa_ep[0];

	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
		ep = &udc->pxa_ep[i];
		if (is_match_usb_pxa(udc_usb_ep, ep, udc->config,
				     udc->last_interface,
				     udc->last_alternate))
			return ep;
	}

	return NULL;
}
/**
* update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
* @udc: pxa udc
*
* Context: interrupt handler
*
* Updates all pxa_ep fields in udc_usb_ep structures, if this field was
 * previously set up (and is not NULL). The update is necessary if a
 * configuration change or altsetting change was issued by the USB host.
*/
static void update_pxa_ep_matches(struct pxa_udc *udc)
{
	int i;
	struct udc_usb_ep *udc_usb_ep;

	/* re-resolve every previously-bound mapping against the current
	 * config/interface/altsetting; slot 0 (ep0) never changes */
	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
		udc_usb_ep = &udc->udc_usb_ep[i];
		if (udc_usb_ep->pxa_ep)
			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
	}
}
/**
* pio_irq_enable - Enables irq generation for one endpoint
* @ep: udc endpoint
*/
static void pio_irq_enable(struct pxa_ep *ep)
{
	struct pxa_udc *udc = ep->dev;
	int index = EPIDX(ep);
	u32 udcicr0 = udc_readl(udc, UDCICR0);
	u32 udcicr1 = udc_readl(udc, UDCICR1);

	/* 2 interrupt-enable bits per endpoint: UDCICR0 covers endpoints
	 * 0-15, UDCICR1 the rest */
	if (index < 16)
		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
	else
		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
}
/**
* pio_irq_disable - Disables irq generation for one endpoint
* @ep: udc endpoint
*/
static void pio_irq_disable(struct pxa_ep *ep)
{
	struct pxa_udc *udc = ep->dev;
	int index = EPIDX(ep);
	u32 udcicr0 = udc_readl(udc, UDCICR0);
	u32 udcicr1 = udc_readl(udc, UDCICR1);

	/* clear this endpoint's 2 enable bits in the proper ICR register */
	if (index < 16)
		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
	else
		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
}
/**
* udc_set_mask_UDCCR - set bits in UDCCR
* @udc: udc device
* @mask: bits to set in UDCCR
*
* Sets bits in UDCCR, leaving DME and FST bits as they were.
*/
static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
{
	u32 udccr = udc_readl(udc, UDCCR);

	/* filter both sides through UDCCR_MASK_BITS so bits outside the
	 * mask (per the header comment: DME/FST) are never modified */
	udc_writel(udc, UDCCR,
			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
}
/**
* udc_clear_mask_UDCCR - clears bits in UDCCR
* @udc: udc device
* @mask: bit to clear in UDCCR
*
* Clears bits in UDCCR, leaving DME and FST bits as they were.
*/
static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
{
	u32 udccr = udc_readl(udc, UDCCR);

	/* same UDCCR_MASK_BITS filtering as udc_set_mask_UDCCR(), but
	 * clearing the requested bits instead of setting them */
	udc_writel(udc, UDCCR,
			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
}
/**
* ep_write_UDCCSR - set bits in UDCCSR
* @ep: udc endpoint
* @mask: bits to set in UDCCR
*
* Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
*
* A specific case is applied to ep0 : the ACM bit is always set to 1, for
* SET_INTERFACE and SET_CONFIGURATION.
*/
static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
{
	/* ep0 writes always carry the ACM bit (see function comment:
	 * needed for SET_INTERFACE / SET_CONFIGURATION) */
	if (is_ep0(ep))
		mask |= UDCCSR0_ACM;
	udc_ep_writel(ep, UDCCSR, mask);
}
/**
* ep_count_bytes_remain - get how many bytes in udc endpoint
* @ep: udc endpoint
*
* Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
*/
static int ep_count_bytes_remain(struct pxa_ep *ep)
{
	if (ep->dir_in)
		return -EOPNOTSUPP;
	/* byte count field is 10 bits wide */
	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
}
/**
* ep_is_empty - checks if ep has byte ready for reading
* @ep: udc endpoint
*
* If endpoint is the control endpoint, checks if there are bytes in the
* control endpoint fifo. If endpoint is a data endpoint, checks if bytes
* are ready for reading on OUT endpoint.
*
* Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
*/
static int ep_is_empty(struct pxa_ep *ep)
{
int ret;
if (!is_ep0(ep) && ep->dir_in)
return -EOPNOTSUPP;
if (is_ep0(ep))
ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
else
ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
return ret;
}
/**
* ep_is_full - checks if ep has place to write bytes
* @ep: udc endpoint
*
* If endpoint is not the control endpoint and is an IN endpoint, checks if
* there is place to write bytes into the endpoint.
*
* Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
*/
static int ep_is_full(struct pxa_ep *ep)
{
if (is_ep0(ep))
return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
if (!ep->dir_in)
return -EOPNOTSUPP;
return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
}
/**
* epout_has_pkt - checks if OUT endpoint fifo has a packet available
* @ep: pxa endpoint
*
* Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
*/
static int epout_has_pkt(struct pxa_ep *ep)
{
if (!is_ep0(ep) && ep->dir_in)
return -EOPNOTSUPP;
if (is_ep0(ep))
return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
}
/**
* set_ep0state - Set ep0 automata state
* @udc: udc device
* @state: state
*/
static void set_ep0state(struct pxa_udc *udc, int state)
{
	struct pxa_ep *ep = &udc->pxa_ep[0];
	/* snapshot the old state name before changing it, for the trace */
	char *old_stname = EP0_STNAME(udc);

	udc->ep0state = state;
	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
		udc_ep_readl(ep, UDCBCR));
}
/**
* ep0_idle - Put control endpoint into idle state
* @dev: udc device
*/
static void ep0_idle(struct pxa_udc *dev)
{
	/* back to idle: wait for the next SETUP packet */
	set_ep0state(dev, WAIT_FOR_SETUP);
}
/**
* inc_ep_stats_reqs - Update ep stats counts
* @ep: physical endpoint
* @is_in: ep direction (USB_DIR_IN or 0)
*
*/
static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
{
	/* count one completed operation in the matching direction */
	if (is_in)
		ep->stats.in_ops++;
	else
		ep->stats.out_ops++;
}
/**
* inc_ep_stats_bytes - Update ep stats counts
* @ep: physical endpoint
* @count: bytes transferred on endpoint
* @is_in: ep direction (USB_DIR_IN or 0)
*/
static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
{
	/* accumulate transferred bytes in the matching direction */
	if (is_in)
		ep->stats.in_bytes += count;
	else
		ep->stats.out_bytes += count;
}
/**
* pxa_ep_setup - Sets up an usb physical endpoint
* @ep: pxa27x physical endpoint
*
* Find the physical pxa27x ep, and setup its UDCCR
*/
static void pxa_ep_setup(struct pxa_ep *ep)
{
	u32 new_udccr;

	/* pack configuration/interface/altsetting, endpoint address,
	 * transfer type, direction and max packet size into the per-ep
	 * configuration register, and enable the endpoint (UDCCONR_EE) */
	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
		| ((ep->dir_in) ? UDCCONR_ED : 0)
		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
		| UDCCONR_EE;

	udc_ep_writel(ep, UDCCR, new_udccr);
}
/**
* pxa_eps_setup - Sets up all usb physical endpoints
* @dev: udc device
*
* Setup all pxa physical endpoints, except ep0
*/
static void pxa_eps_setup(struct pxa_udc *dev)
{
	unsigned int i;

	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);

	/* ep0 is skipped — it is configured by hardware/elsewhere */
	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
		pxa_ep_setup(&dev->pxa_ep[i]);
}
/**
* pxa_ep_alloc_request - Allocate usb request
* @_ep: usb endpoint
* @gfp_flags:
*
* For the pxa27x, these can just wrap kmalloc/kfree. gadget drivers
* must still pass correctly initialized endpoints, since other controller
* drivers may care about how it's currently set up (dma issues etc).
*/
/*
 * pxa_ep_alloc_request - allocate a usb request for this driver.
 * @_ep:       usb endpoint the request will belong to
 * @gfp_flags: allocation flags
 *
 * Allocates a zeroed pxa27x_request wrapping the generic usb_request,
 * with its queue head initialised and its owning udc_usb_ep recorded.
 *
 * Returns the embedded usb_request, or NULL on allocation failure.
 */
static struct usb_request *
pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct pxa27x_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (req == NULL)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	req->in_use = 0;
	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);

	return &req->req;
}
/**
* pxa_ep_free_request - Free usb request
* @_ep: usb endpoint
* @_req: usb request
*
* Wrapper around kfree to free _req
*/
static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa27x_request *req;

	req = container_of(_req, struct pxa27x_request, req);
	/* freeing a request that is still queued is a caller bug */
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/**
 * ep_add_request - add a request to the endpoint's queue
 * @ep: usb endpoint
 * @req: usb request
 *
 * Context: ep->lock held
 *
 * Queues the request in the endpoint's queue, and enables the interrupts
 * on the endpoint.
 */
static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
{
	if (unlikely(!req))
		return;
	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
		req->req.length, udc_ep_readl(ep, UDCCSR));
	/* mark in-flight before linking, so pxa_ep_queue() can refuse dupes */
	req->in_use = 1;
	list_add_tail(&req->queue, &ep->queue);
	pio_irq_enable(ep);
}
/**
 * ep_del_request - removes a request from the endpoint's queue
 * @ep: usb endpoint
 * @req: usb request
 *
 * Context: ep->lock held
 *
 * Unqueue the request from the endpoint's queue. If there are no more requests
 * on the endpoint, and if it's not the control endpoint, interrupts are
 * disabled on the endpoint.
 */
static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
{
	if (unlikely(!req))
		return;
	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
		req->req.length, udc_ep_readl(ep, UDCCSR));
	list_del_init(&req->queue);
	req->in_use = 0;
	/* ep0 irqs stay on: the control endpoint must always be serviced */
	if (!is_ep0(ep) && list_empty(&ep->queue))
		pio_irq_disable(ep);
}
/**
 * req_done - Complete an usb request
 * @ep: pxa physical endpoint
 * @req: pxa request
 * @status: usb request status sent to gadget API
 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
 *
 * Context: ep->lock held if flags not NULL, else ep->lock released
 *
 * Retire a pxa27x usb request. Endpoint must be locked.
 */
static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
	unsigned long *pflags)
{
	unsigned long flags;
	ep_del_request(ep, req);
	/* preserve an earlier error/cancel status over the new one */
	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;
	if (status && status != -ESHUTDOWN)
		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
			&req->req, status,
			req->req.actual, req->req.length);
	/*
	 * The gadget completion callback may re-enter this driver (e.g.
	 * requeue), so drop ep->lock around it when the caller held it,
	 * and re-take it afterwards so the caller's view is unchanged.
	 */
	if (pflags)
		spin_unlock_irqrestore(&ep->lock, *pflags);
	local_irq_save(flags);
	usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
	local_irq_restore(flags);
	if (pflags)
		spin_lock_irqsave(&ep->lock, *pflags);
}
/**
 * ep_end_out_req - Ends endpoint OUT request
 * @ep: physical endpoint
 * @req: pxa request
 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
 *
 * Context: ep->lock held or released (see req_done())
 *
 * Ends endpoint OUT request (completes usb request) with status 0 and
 * bumps the per-endpoint OUT request statistics.
 */
static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
	unsigned long *pflags)
{
	inc_ep_stats_reqs(ep, !USB_DIR_IN);
	req_done(ep, req, 0, pflags);
}
/**
 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
 * @ep: physical endpoint
 * @req: pxa request
 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
 *
 * Context: ep->lock held or released (see req_done())
 *
 * Ends control endpoint OUT request (completes usb request), and puts
 * control endpoint into idle state
 */
static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
	unsigned long *pflags)
{
	/* transition through OUT_STATUS_STAGE before going idle */
	set_ep0state(ep->dev, OUT_STATUS_STAGE);
	ep_end_out_req(ep, req, pflags);
	ep0_idle(ep->dev);
}
/**
 * ep_end_in_req - Ends endpoint IN request
 * @ep: physical endpoint
 * @req: pxa request
 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
 *
 * Context: ep->lock held or released (see req_done())
 *
 * Ends endpoint IN request (completes usb request) with status 0 and
 * bumps the per-endpoint IN request statistics.
 */
static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
	unsigned long *pflags)
{
	inc_ep_stats_reqs(ep, USB_DIR_IN);
	req_done(ep, req, 0, pflags);
}
/**
 * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
 * @ep: physical endpoint
 * @req: pxa request
 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
 *
 * Context: ep->lock held or released (see req_done())
 *
 * Ends control endpoint IN request (completes usb request), and puts
 * control endpoint into status state
 */
static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
	unsigned long *pflags)
{
	set_ep0state(ep->dev, IN_STATUS_STAGE);
	ep_end_in_req(ep, req, pflags);
}
/**
 * nuke - Dequeue all requests
 * @ep: pxa endpoint
 * @status: usb request status reported for each retired request
 *
 * Context: ep->lock released
 *
 * Dequeues all requests on an endpoint. As a side effect, interrupts will be
 * disabled on that endpoint (because no more requests).
 */
static void nuke(struct pxa_ep *ep, int status)
{
	unsigned long flags;
	spin_lock_irqsave(&ep->lock, flags);
	while (!list_empty(&ep->queue)) {
		struct pxa27x_request *req;
		req = list_first_entry(&ep->queue, struct pxa27x_request,
				       queue);
		/* req_done() drops and re-takes ep->lock around the callback */
		req_done(ep, req, status, &flags);
	}
	spin_unlock_irqrestore(&ep->lock, flags);
}
/**
 * read_packet - transfer 1 packet from an OUT endpoint into request
 * @ep: pxa physical endpoint
 * @req: usb request
 *
 * Takes bytes from OUT endpoint and transfers them info the usb request.
 * If there is less space in request than bytes received in OUT endpoint,
 * bytes are left in the OUT endpoint.
 *
 * Returns how many bytes were actually transferred
 */
static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
{
	u32 *buf;
	int bytes_ep, bufferspace, count, i;
	bytes_ep = ep_count_bytes_remain(ep);
	bufferspace = req->req.length - req->req.actual;
	buf = (u32 *)(req->req.buf + req->req.actual);
	prefetchw(buf);
	if (likely(!ep_is_empty(ep)))
		count = min(bytes_ep, bufferspace);
	else /* zlp */
		count = 0;
	/*
	 * NOTE(review): the loop below rounds up to whole 32-bit words
	 * (i = count, count-4, ...), so when count is not a multiple of 4
	 * it stores up to 3 bytes past buf + count. This is only safe if
	 * callers size req->buf to word multiples -- confirm.
	 */
	for (i = count; i > 0; i -= 4)
		*buf++ = udc_ep_readl(ep, UDCDR);
	req->req.actual += count;
	/* acknowledge the packet so hardware can receive the next one */
	ep_write_UDCCSR(ep, UDCCSR_PC);
	return count;
}
/**
 * write_packet - transfer 1 packet from request into an IN endpoint
 * @ep: pxa physical endpoint
 * @req: usb request
 * @max: max bytes that fit into endpoint
 *
 * Takes bytes from usb request, and transfers them into the physical
 * endpoint. If there are no bytes to transfer, doesn't write anything
 * to physical endpoint.
 *
 * Returns how many bytes were actually transferred.
 */
static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
	unsigned int max)
{
	int length, words, tail;
	u32 *src32;
	u8 *src8;
	src32 = (u32 *)(req->req.buf + req->req.actual);
	prefetch(src32);
	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;
	/* push whole 32-bit words first, then the trailing 0-3 bytes */
	words = length & ~(0x3);
	tail = length & 0x3;
	while (words > 0) {
		udc_ep_writel(ep, UDCDR, *src32++);
		words -= 4;
	}
	src8 = (u8 *)src32;
	while (tail > 0) {
		udc_ep_writeb(ep, UDCDR, *src8++);
		tail--;
	}
	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n",
		length & ~(0x3), length & 0x3,
		udc_ep_readl(ep, UDCCSR));
	return length;
}
/**
 * read_fifo - Transfer packets from OUT endpoint into usb request
 * @ep: pxa physical endpoint
 * @req: usb request
 *
 * Context: interrupt handler
 *
 * Unload as many packets as possible from the fifo we use for usb OUT
 * transfers and put them into the request. Caller should have made sure
 * there's at least one packet ready.
 * Doesn't complete the request, that's the caller's job
 *
 * Returns 1 if the request completed, 0 otherwise
 */
static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
{
	int completed = 0;
	while (epout_has_pkt(ep)) {
		int count = read_packet(ep, req);
		int is_short = (count < ep->fifo_size);
		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
			&req->req, req->req.actual, req->req.length);
		/* a short packet or a full buffer terminates the transfer */
		if (is_short || req->req.actual == req->req.length) {
			completed = 1;
			break;
		}
		/* finished that packet. the next one may be waiting... */
	}
	return completed;
}
/**
 * write_fifo - transfer packets from usb request into an IN endpoint
 * @ep: pxa physical endpoint
 * @req: pxa usb request
 *
 * Write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 * Doesn't complete the request, that's the caller's job
 *
 * Returns 1 if request fully transferred, 0 if partial transfer
 */
static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
{
	unsigned max;
	int count, is_short, is_last = 0, completed = 0, totcount = 0;
	u32 udccsr;
	max = ep->fifo_size;
	do {
		udccsr = udc_ep_readl(ep, UDCCSR);
		/* ack a pending "packet complete" before refilling the fifo */
		if (udccsr & UDCCSR_PC) {
			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
				udccsr);
			ep_write_UDCCSR(ep, UDCCSR_PC);
		}
		/* clear a transmit underrun left over from a previous packet */
		if (udccsr & UDCCSR_TRN) {
			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
				udccsr);
			ep_write_UDCCSR(ep, UDCCSR_TRN);
		}
		count = write_packet(ep, req, max);
		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
		totcount += count;
		/* last packet is usually short (or a zlp) */
		if (unlikely(count < max)) {
			is_last = 1;
			is_short = 1;
		} else {
			if (likely(req->req.length > req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely(max < ep->fifo_size);
		}
		/* tell hardware to send the packet even though not full */
		if (is_short)
			ep_write_UDCCSR(ep, UDCCSR_SP);
		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			completed = 1;
			break;
		}
	} while (!ep_is_full(ep));
	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
		totcount, is_last ? "/L" : "", is_short ? "/S" : "",
		req->req.length - req->req.actual, &req->req);
	return completed;
}
/**
 * read_ep0_fifo - Transfer packets from control endpoint into usb request
 * @ep: control endpoint
 * @req: pxa usb request
 *
 * Special ep0 version of the above read_fifo. Reads as many bytes from control
 * endpoint as can be read, and stores them into usb request (limited by request
 * maximum length).
 *
 * Returns 0 if usb request only partially filled, 1 if fully filled
 */
static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
{
	int count, is_short, completed = 0;
	while (epout_has_pkt(ep)) {
		count = read_packet(ep, req);
		/* ep0 additionally needs OPC cleared after each OUT packet */
		ep_write_UDCCSR(ep, UDCCSR0_OPC);
		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
		is_short = (count < ep->fifo_size);
		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
			&req->req, req->req.actual, req->req.length);
		if (is_short || req->req.actual >= req->req.length) {
			completed = 1;
			break;
		}
	}
	return completed;
}
/**
 * write_ep0_fifo - Send a request to control endpoint (ep0 in)
 * @ep: control endpoint
 * @req: request
 *
 * Context: interrupt handler
 *
 * Sends a request (or a part of the request) to the control endpoint (ep0 in).
 * If the request doesn't fit, the remaining part will be sent from irq.
 * The request is considered fully written only if either :
 *   - last write transferred all remaining bytes, but fifo was not fully filled
 *   - last write was a 0 length write
 *
 * Returns 1 if request fully written, 0 if request only partially sent
 */
static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
{
	unsigned count;
	int is_last, is_short;
	count = write_packet(ep, req, EP0_FIFO_SIZE);
	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
	is_short = (count < EP0_FIFO_SIZE);
	/*
	 * NOTE(review): count == 0 already implies count < EP0_FIFO_SIZE,
	 * so is_last always equals is_short here; kept spelled out for
	 * symmetry with write_fifo().
	 */
	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
	/* Sends either a short packet or a 0 length packet */
	if (unlikely(is_short))
		ep_write_UDCCSR(ep, UDCCSR0_IPR);
	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
		count, is_short ? "/S" : "", is_last ? "/L" : "",
		req->req.length - req->req.actual,
		&req->req, udc_ep_readl(ep, UDCCSR));
	return is_last;
}
/**
 * pxa_ep_queue - Queue a request into an IN endpoint
 * @_ep: usb endpoint
 * @_req: usb request
 * @gfp_flags: flags
 *
 * Context: thread context or from the interrupt handler in the
 * special case of ep0 setup :
 *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
 *
 * Returns 0 if succedeed, error otherwise
 */
static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
	gfp_t gfp_flags)
{
	struct udc_usb_ep *udc_usb_ep;
	struct pxa_ep *ep;
	struct pxa27x_request *req;
	struct pxa_udc *dev;
	unsigned long flags;
	int rc = 0;
	int is_first_req;
	unsigned length;
	int recursion_detected;
	/* validate the raw pointers before deriving container pointers */
	if (unlikely(!_req || !_req->complete || !_req->buf))
		return -EINVAL;
	if (unlikely(!_ep))
		return -EINVAL;
	req = container_of(_req, struct pxa27x_request, req);
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (unlikely(!ep))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		ep_dbg(ep, "bogus device state\n");
		return -ESHUTDOWN;
	}
	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely(EPXFERTYPE_is_ISO(ep)
			&& req->req.length > ep->fifo_size))
		return -EMSGSIZE;
	spin_lock_irqsave(&ep->lock, flags);
	recursion_detected = ep->in_handle_ep;
	is_first_req = list_empty(&ep->queue);
	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
			_req, is_first_req ? "yes" : "no",
			_req->length, _req->buf);
	if (!ep->enabled) {
		_req->status = -ESHUTDOWN;
		rc = -ESHUTDOWN;
		goto out_locked;
	}
	if (req->in_use) {
		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
		/* NOTE(review): rc stays 0 here, so a duplicate queue is
		 * reported as success to the caller -- confirm intent. */
		goto out_locked;
	}
	length = _req->length;
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	ep_add_request(ep, req);
	spin_unlock_irqrestore(&ep->lock, flags);
	if (is_ep0(ep)) {
		switch (dev->ep0state) {
		case WAIT_ACK_SET_CONF_INTERF:
			if (length == 0) {
				ep_end_in_req(ep, req, NULL);
			} else {
				ep_err(ep, "got a request of %d bytes while"
					"in state WAIT_ACK_SET_CONF_INTERF\n",
					length);
				ep_del_request(ep, req);
				rc = -EL2HLT;
			}
			ep0_idle(ep->dev);
			break;
		case IN_DATA_STAGE:
			if (!ep_is_full(ep))
				if (write_ep0_fifo(ep, req))
					ep0_end_in_req(ep, req, NULL);
			break;
		case OUT_DATA_STAGE:
			if ((length == 0) || !epout_has_pkt(ep))
				if (read_ep0_fifo(ep, req))
					ep0_end_out_req(ep, req, NULL);
			break;
		default:
			ep_err(ep, "odd state %s to send me a request\n",
				EP0_STNAME(ep->dev));
			ep_del_request(ep, req);
			rc = -EL2HLT;
			break;
		}
	} else {
		/* don't re-enter handle_ep() from its own completion path */
		if (!recursion_detected)
			handle_ep(ep);
	}
out:
	return rc;
out_locked:
	spin_unlock_irqrestore(&ep->lock, flags);
	goto out;
}
/**
 * pxa_ep_dequeue - Dequeue one request
 * @_ep: usb endpoint
 * @_req: usb request
 *
 * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
 */
static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa_ep *ep;
	struct udc_usb_ep *udc_usb_ep;
	struct pxa27x_request *req = NULL, *iter;
	unsigned long flags;
	int rc = -EINVAL;
	if (!_ep)
		return rc;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (!ep || is_ep0(ep))
		return rc;
	spin_lock_irqsave(&ep->lock, flags);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		rc = 0;
		break;
	}
	spin_unlock_irqrestore(&ep->lock, flags);
	/* completed outside the lock: req_done() invokes the gadget callback */
	if (!rc)
		req_done(ep, req, -ECONNRESET, NULL);
	return rc;
}
/**
 * pxa_ep_set_halt - Halts operations on one endpoint
 * @_ep: usb endpoint
 * @value: non-zero to stall the endpoint; 0 (clear halt) is rejected
 *
 * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
 */
static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa_ep *ep;
	struct udc_usb_ep *udc_usb_ep;
	unsigned long flags;
	int rc;
	if (!_ep)
		return -EINVAL;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (!ep || is_ep0(ep))
		return -EINVAL;
	if (value == 0) {
		/*
		 * This path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		ep_dbg(ep, "only host can clear halt\n");
		return -EROFS;
	}
	spin_lock_irqsave(&ep->lock, flags);
	rc = -EAGAIN;
	/* can't stall an IN endpoint with data still pending */
	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
		goto out;
	/* FST, FEF bits are the same for control and non control endpoints */
	rc = 0;
	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
	/* NOTE(review): unreachable -- is_ep0(ep) already returned -EINVAL
	 * above, so this branch is dead code */
	if (is_ep0(ep))
		set_ep0state(ep->dev, STALL);
out:
	spin_unlock_irqrestore(&ep->lock, flags);
	return rc;
}
/**
 * pxa_ep_fifo_status - Get how many bytes in physical endpoint
 * @_ep: usb endpoint
 *
 * Returns number of bytes in OUT fifos. Broken for IN fifos.
 */
static int pxa_ep_fifo_status(struct usb_ep *_ep)
{
	struct udc_usb_ep *udc_usb_ep;
	struct pxa_ep *ep;
	if (!_ep)
		return -ENODEV;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (!ep || is_ep0(ep))
		return -ENODEV;
	/* the byte counter is only meaningful for OUT endpoints */
	if (ep->dir_in)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
		return 0;
	return ep_count_bytes_remain(ep) + 1;
}
/**
 * pxa_ep_fifo_flush - Flushes one endpoint
 * @_ep: usb endpoint
 *
 * Discards all data in one endpoint(IN or OUT), except control endpoint.
 */
static void pxa_ep_fifo_flush(struct usb_ep *_ep)
{
	struct pxa_ep *ep;
	struct udc_usb_ep *udc_usb_ep;
	unsigned long flags;
	if (!_ep)
		return;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (!ep || is_ep0(ep))
		return;
	spin_lock_irqsave(&ep->lock, flags);
	if (unlikely(!list_empty(&ep->queue)))
		ep_dbg(ep, "called while queue list not empty\n");
	ep_dbg(ep, "called\n");
	/* for OUT, just read and discard the FIFO contents. */
	if (!ep->dir_in) {
		while (!ep_is_empty(ep))
			udc_ep_readl(ep, UDCDR);
	} else {
		/* most IN status is the same, but ISO can't stall */
		ep_write_UDCCSR(ep,
				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
	}
	spin_unlock_irqrestore(&ep->lock, flags);
}
/**
 * pxa_ep_enable - Enables usb endpoint
 * @_ep: usb endpoint
 * @desc: usb endpoint descriptor
 *
 * Nothing much to do here, as ep configuration is done once and for all
 * before udc is enabled. After udc enable, no physical endpoint configuration
 * can be changed.
 * Function makes sanity checks and flushes the endpoint.
 */
static int pxa_ep_enable(struct usb_ep *_ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct pxa_ep		*ep;
	struct udc_usb_ep	*udc_usb_ep;
	struct pxa_udc		*udc;
	if (!_ep || !desc)
		return -EINVAL;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	if (udc_usb_ep->pxa_ep) {
		ep = udc_usb_ep->pxa_ep;
		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
			_ep->name);
	} else {
		/* first enable: bind this usb_ep to a physical endpoint */
		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
	}
	if (!ep || is_ep0(ep)) {
		dev_err(udc_usb_ep->dev->dev,
			"unable to match pxa_ep for ep %s\n",
			_ep->name);
		return -EINVAL;
	}
	/* the descriptor must agree with the fixed hardware configuration */
	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
			|| (ep->type != usb_endpoint_type(desc))) {
		ep_err(ep, "type mismatch\n");
		return -EINVAL;
	}
	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
		ep_err(ep, "bad maxpacket\n");
		return -ERANGE;
	}
	udc_usb_ep->pxa_ep = ep;
	udc = ep->dev;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		ep_err(ep, "bogus device state\n");
		return -ESHUTDOWN;
	}
	ep->enabled = 1;
	/* flush fifo (mostly for OUT buffers) */
	pxa_ep_fifo_flush(_ep);
	ep_dbg(ep, "enabled\n");
	return 0;
}
/**
 * pxa_ep_disable - Disable usb endpoint
 * @_ep: usb endpoint
 *
 * Same as for pxa_ep_enable, no physical endpoint configuration can be
 * changed.
 * Function flushes the endpoint and related requests.
 */
static int pxa_ep_disable(struct usb_ep *_ep)
{
	struct pxa_ep		*ep;
	struct udc_usb_ep	*udc_usb_ep;
	if (!_ep)
		return -EINVAL;
	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
	ep = udc_usb_ep->pxa_ep;
	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
		return -EINVAL;
	/* mark disabled first so no new requests get queued during teardown */
	ep->enabled = 0;
	nuke(ep, -ESHUTDOWN);
	pxa_ep_fifo_flush(_ep);
	udc_usb_ep->pxa_ep = NULL;
	ep_dbg(ep, "disabled\n");
	return 0;
}
/* Endpoint operations table exposed to the gadget core (usb_ep_*() calls) */
static const struct usb_ep_ops pxa_ep_ops = {
	.enable		= pxa_ep_enable,
	.disable	= pxa_ep_disable,
	.alloc_request	= pxa_ep_alloc_request,
	.free_request	= pxa_ep_free_request,
	.queue		= pxa_ep_queue,
	.dequeue	= pxa_ep_dequeue,
	.set_halt	= pxa_ep_set_halt,
	.fifo_status	= pxa_ep_fifo_status,
	.fifo_flush	= pxa_ep_fifo_flush,
};
/**
 * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
 * @udc: udc device
 * @on: 0 if disconnect pullup resistor, 1 otherwise
 * Context: any
 *
 * Handle D+ pullup resistor, make the device visible to the usb bus, and
 * declare it as a full speed usb device
 */
static void dplus_pullup(struct pxa_udc *udc, int on)
{
	/* prefer the gpio; fall back to the board-supplied command hook */
	if (udc->gpiod)
		gpiod_set_value(udc->gpiod, on);
	else if (udc->udc_command)
		udc->udc_command(on ? PXA2XX_UDC_CMD_CONNECT
				    : PXA2XX_UDC_CMD_DISCONNECT);
	udc->pullup_on = on;
}
/**
 * pxa_udc_get_frame - Returns usb frame number
 * @_gadget: usb gadget
 */
static int pxa_udc_get_frame(struct usb_gadget *_gadget)
{
	struct pxa_udc *udc = to_gadget_udc(_gadget);
	/* frame number lives in the low 11 bits of UDCFNR */
	return udc_readl(udc, UDCFNR) & 0x7ff;
}
/**
 * pxa_udc_wakeup - Force udc device out of suspend
 * @_gadget: usb gadget
 *
 * Returns 0 if successful, error code otherwise
 */
static int pxa_udc_wakeup(struct usb_gadget *_gadget)
{
	struct pxa_udc *udc = to_gadget_udc(_gadget);
	/* host may not have enabled remote wakeup */
	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
		return -EHOSTUNREACH;
	/* trigger a remote-wakeup (resume) signal on the bus */
	udc_set_mask_UDCCR(udc, UDCCR_UDR);
	return 0;
}
static void udc_enable(struct pxa_udc *udc);
static void udc_disable(struct pxa_udc *udc);
/**
 * should_enable_udc - Tells if UDC should be enabled
 * @udc: udc device
 * Context: any
 *
 * The UDC should be enabled if :
 *  - the pullup resistor is connected
 *  - and a gadget driver is bound
 *  - and vbus is sensed (or no vbus sense is available)
 *
 * Returns 1 if UDC should be enabled, 0 otherwise
 */
static int should_enable_udc(struct pxa_udc *udc)
{
	/* with no transceiver there is no vbus sensing: assume powered */
	int vbus_ok = udc->vbus_sensed || IS_ERR_OR_NULL(udc->transceiver);
	return udc->pullup_on && udc->driver && vbus_ok;
}
/**
 * should_disable_udc - Tells if UDC should be disabled
 * @udc: udc device
 * Context: any
 *
 * The UDC should be disabled if :
 *  - the pullup resistor is not connected
 *  - or no gadget driver is bound
 *  - or no vbus is sensed (when vbus sesing is available)
 *
 * Returns 1 if UDC should be disabled
 */
static int should_disable_udc(struct pxa_udc *udc)
{
	/* vbus loss only counts when a transceiver can actually sense it */
	int vbus_lost = !udc->vbus_sensed && !IS_ERR_OR_NULL(udc->transceiver);
	return !udc->pullup_on || !udc->driver || vbus_lost;
}
/**
 * pxa_udc_pullup - Offer manual D+ pullup control
 * @_gadget: usb gadget using the control
 * @is_active: 0 if disconnect, else connect D+ pullup resistor
 *
 * Context: task context, might sleep
 *
 * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
 */
static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	struct pxa_udc *udc = to_gadget_udc(_gadget);
	/* no way to drive D+ without a gpio or a board command hook */
	if (!udc->gpiod && !udc->udc_command)
		return -EOPNOTSUPP;
	/* update the pullup state first, then re-evaluate enable/disable */
	dplus_pullup(udc, is_active);
	if (should_enable_udc(udc))
		udc_enable(udc);
	if (should_disable_udc(udc))
		udc_disable(udc);
	return 0;
}
/**
 * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
 * @_gadget: usb gadget
 * @is_active: 0 if should disable the udc, 1 if should enable
 *
 * Enables the udc, and optionnaly activates D+ pullup resistor. Or disables the
 * udc, and deactivates D+ pullup resistor.
 *
 * Returns 0
 */
static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct pxa_udc *udc = to_gadget_udc(_gadget);
	/* record vbus state, then re-evaluate whether the udc should run */
	udc->vbus_sensed = is_active;
	if (should_enable_udc(udc))
		udc_enable(udc);
	if (should_disable_udc(udc))
		udc_disable(udc);
	return 0;
}
/**
 * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
 * @_gadget: usb gadget
 * @mA: current drawn
 *
 * Context: task context, might sleep
 *
 * Called after a configuration was chosen by a USB host, to inform how much
 * current can be drawn by the device from VBus line.
 *
 * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
 */
static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
	struct pxa_udc *udc = to_gadget_udc(_gadget);
	/* power negotiation is delegated entirely to the transceiver */
	if (IS_ERR_OR_NULL(udc->transceiver))
		return -EOPNOTSUPP;
	return usb_phy_set_power(udc->transceiver, mA);
}
/**
 * pxa_udc_phy_event - Called by phy upon VBus event
 * @nb: notifier block
 * @action: phy action, is vbus connect or disconnect
 * @data: the usb_gadget structure in pxa_udc
 *
 * Called by the USB Phy when a cable connect or disconnect is sensed.
 *
 * Returns NOTIFY_OK for handled vbus events, NOTIFY_DONE otherwise
 */
static int pxa_udc_phy_event(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct usb_gadget *gadget = data;
	if (action == USB_EVENT_VBUS) {
		usb_gadget_vbus_connect(gadget);
		return NOTIFY_OK;
	}
	if (action == USB_EVENT_NONE) {
		usb_gadget_vbus_disconnect(gadget);
		return NOTIFY_OK;
	}
	/* all other phy events are not our concern */
	return NOTIFY_DONE;
}
/* Notifier registered with the USB phy to receive VBus connect/disconnect */
static struct notifier_block pxa27x_udc_phy = {
	.notifier_call = pxa_udc_phy_event,
};
static int pxa27x_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int pxa27x_udc_stop(struct usb_gadget *g);
/* Gadget-level operations table handed to the UDC core */
static const struct usb_gadget_ops pxa_udc_ops = {
	.get_frame	= pxa_udc_get_frame,
	.wakeup		= pxa_udc_wakeup,
	.pullup		= pxa_udc_pullup,
	.vbus_session	= pxa_udc_vbus_session,
	.vbus_draw	= pxa_udc_vbus_draw,
	.udc_start	= pxa27x_udc_start,
	.udc_stop	= pxa27x_udc_stop,
};
/**
 * udc_disable - disable udc device controller
 * @udc: udc device
 * Context: any
 *
 * Disables the udc device : disables clocks, udc interrupts, control endpoint
 * interrupts.
 */
static void udc_disable(struct pxa_udc *udc)
{
	if (!udc->enabled)
		return;
	/* mask all udc interrupts before turning the controller off */
	udc_writel(udc, UDCICR0, 0);
	udc_writel(udc, UDCICR1, 0);
	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
	ep0_idle(udc);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	clk_disable(udc->clk);
	udc->enabled = 0;
}
/**
 * udc_init_data - Initialize udc device data structures
 * @dev: udc device
 *
 * Initializes gadget endpoint list, endpoints locks. No action is taken
 * on the hardware.
 */
static void udc_init_data(struct pxa_udc *dev)
{
	int i;
	struct pxa_ep *ep;
	/* device/ep0 records init */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
	dev->gadget.quirk_altset_not_supp = 1;
	ep0_idle(dev);
	/* PXA endpoints init */
	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
		ep = &dev->pxa_ep[i];
		/* ep0 starts enabled; all other endpoints start disabled */
		ep->enabled = is_ep0(ep);
		INIT_LIST_HEAD(&ep->queue);
		spin_lock_init(&ep->lock);
	}
	/* USB endpoints init: link all but ep0 into the gadget's ep_list */
	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
				&dev->gadget.ep_list);
		usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
					   dev->udc_usb_ep[i].usb_ep.maxpacket);
	}
}
/**
 * udc_enable - Enables the udc device
 * @udc: udc device
 *
 * Enables the udc device : enables clocks, udc interrupts, control endpoint
 * interrupts, sets usb as UDC client and setups endpoints.
 */
static void udc_enable(struct pxa_udc *udc)
{
	if (udc->enabled)
		return;
	clk_enable(udc->clk);
	/* start with all udc interrupts masked and the controller disabled */
	udc_writel(udc, UDCICR0, 0);
	udc_writel(udc, UDCICR1, 0);
	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
	ep0_idle(udc);
	udc->gadget.speed = USB_SPEED_FULL;
	memset(&udc->stats, 0, sizeof(udc->stats));
	pxa_eps_setup(udc);
	udc_set_mask_UDCCR(udc, UDCCR_UDE);
	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
	udelay(2);
	/* EMCE means the endpoint memory configuration was rejected */
	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
		dev_err(udc->dev, "Configuration errors, udc disabled\n");
	/*
	 * Caller must be able to sleep in order to cope with startup transients
	 */
	msleep(100);
	/* enable suspend/resume and reset irqs */
	udc_writel(udc, UDCICR1,
			UDCICR1_IECC | UDCICR1_IERU
			| UDCICR1_IESU | UDCICR1_IERS);
	/* enable ep0 irqs */
	pio_irq_enable(&udc->pxa_ep[0]);
	udc->enabled = 1;
}
/**
 * pxa27x_udc_start - Register gadget driver
 * @g: gadget
 * @driver: gadget driver
 *
 * When a driver is successfully registered, it will receive control requests
 * including set_configuration(), which enables non-control requests.  Then
 * usb traffic follows until a disconnect is reported.  Then a host may connect
 * again, or the driver might get unbound.
 *
 * Note that the udc is not automatically enabled. Check function
 * should_enable_udc().
 *
 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
 */
static int pxa27x_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct pxa_udc *udc = to_pxa(g);
	int retval;
	/* first hook up the driver ... */
	udc->driver = driver;
	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		retval = otg_set_peripheral(udc->transceiver->otg,
						&udc->gadget);
		if (retval) {
			dev_err(udc->dev, "can't bind to transceiver\n");
			goto fail;
		}
	}
	if (should_enable_udc(udc))
		udc_enable(udc);
	return 0;
fail:
	/* unbind on error so should_enable_udc() stays false */
	udc->driver = NULL;
	return retval;
}
/**
* stop_activity - Stops udc endpoints
* @udc: udc device
*
* Disables all udc endpoints (even control endpoint), report disconnect to
* the gadget user.
*/
static void stop_activity(struct pxa_udc *udc)
{
int i;
udc->gadget.speed = USB_SPEED_UNKNOWN;
for (i = 0; i < NR_USB_ENDPOINTS; i++)
pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
}
/**
 * pxa27x_udc_stop - Unregister the gadget driver
 * @g: gadget
 *
 * Returns 0 if no error, -ENODEV, -EINVAL otherwise
 */
static int pxa27x_udc_stop(struct usb_gadget *g)
{
	struct pxa_udc *udc = to_pxa(g);
	/* tear down endpoints first, then the controller, then unbind */
	stop_activity(udc);
	udc_disable(udc);
	udc->driver = NULL;
	if (!IS_ERR_OR_NULL(udc->transceiver))
		return otg_set_peripheral(udc->transceiver->otg, NULL);
	return 0;
}
/**
 * handle_ep0_ctrl_req - handle control endpoint control request
 * @udc: udc device
 * @req: control request
 *
 * Reads the 8-byte SETUP packet out of ep0's FIFO and dispatches it to the
 * gadget driver's ->setup() callback; stalls ep0 on any malformed packet
 * or callback error.
 */
static void handle_ep0_ctrl_req(struct pxa_udc *udc,
				struct pxa27x_request *req)
{
	struct pxa_ep *ep = &udc->pxa_ep[0];
	union {
		struct usb_ctrlrequest	r;
		u32			word[2];
	} u;
	int i;
	int have_extrabytes = 0;
	unsigned long flags;
	/* a new SETUP aborts whatever transfer was in progress */
	nuke(ep, -EPROTO);
	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * In the PXA320 manual, in the section about Back-to-Back setup
	 * packets, it describes this situation.  The solution is to set OPC to
	 * get rid of the status packet, and then continue with the setup
	 * packet. Generalize to pxa27x CPUs.
	 */
	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
		ep_write_UDCCSR(ep, UDCCSR0_OPC);
	/* read SETUP packet */
	for (i = 0; i < 2; i++) {
		if (unlikely(ep_is_empty(ep)))
			goto stall;
		u.word[i] = udc_ep_readl(ep, UDCDR);
	}
	/* drain anything beyond the 8 setup bytes -- it should not exist */
	have_extrabytes = !ep_is_empty(ep);
	while (!ep_is_empty(ep)) {
		i = udc_ep_readl(ep, UDCDR);
		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
	}
	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		u.r.bRequestType, u.r.bRequest,
		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
		le16_to_cpu(u.r.wLength));
	if (unlikely(have_extrabytes))
		goto stall;
	if (u.r.bRequestType & USB_DIR_IN)
		set_ep0state(udc, IN_DATA_STAGE);
	else
		set_ep0state(udc, OUT_DATA_STAGE);
	/* Tell UDC to enter Data Stage */
	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
	/* drop the lock: the gadget ->setup() may call back into pxa_ep_queue */
	spin_unlock_irqrestore(&ep->lock, flags);
	i = udc->driver->setup(&udc->gadget, &u.r);
	spin_lock_irqsave(&ep->lock, flags);
	if (i < 0)
		goto stall;
out:
	spin_unlock_irqrestore(&ep->lock, flags);
	return;
stall:
	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
		udc_ep_readl(ep, UDCCSR), i);
	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
	set_ep0state(udc, STALL);
	goto out;
}
/**
* handle_ep0 - Handle control endpoint data transfers
* @udc: udc device
* @fifo_irq: 1 if triggered by fifo service type irq
* @opc_irq: 1 if triggered by output packet complete type irq
*
* Context : interrupt handler
*
* Tries to transfer all pending request data into the endpoint and/or
* transfer all pending data in the endpoint into usb requests.
* Handles states of ep0 automata.
*
* PXA27x hardware handles several standard usb control requests without
* driver notification. The requests fully handled by hardware are :
* SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
* GET_STATUS
* The requests handled by hardware, but with irq notification are :
* SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
* The remaining standard requests really handled by handle_ep0 are :
* GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
* Requests standardized outside of USB 2.0 chapter 9 are handled more
* uniformly, by gadget drivers.
*
* The control endpoint state machine is _not_ USB spec compliant, it's even
* hardly compliant with Intel PXA270 developers guide.
* The key points which inferred this state machine are :
* - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
* software.
* - on every OUT packet received, UDCCSR0_OPC is raised and held until
* cleared by software.
* - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
* before reading ep0.
* This is true only for PXA27x. This is not true anymore for PXA3xx family
* (check Back-to-Back setup packet in developers guide).
* - irq can be called on a "packet complete" event (opc_irq=1), while
* UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
* from experimentation).
* - as UDCCSR0_SA can be activated while in irq handling, and clearing
* UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
* => we never actually read the "status stage" packet of an IN data stage
* => this is not documented in Intel documentation
* - hardware as no idea of STATUS STAGE, it only handle SETUP STAGE and DATA
* STAGE. The driver add STATUS STAGE to send last zero length packet in
* OUT_STATUS_STAGE.
* - special attention was needed for IN_STATUS_STAGE. If a packet complete
* event is detected, we terminate the status stage without ackowledging the
* packet (not to risk to loose a potential SETUP packet)
*/
static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
{
	u32 udccsr0;
	struct pxa_ep *ep = &udc->pxa_ep[0];
	struct pxa27x_request *req = NULL;
	int completed = 0;

	/* Peek at the request currently being serviced (head of queue). */
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next, struct pxa27x_request, queue);

	udccsr0 = udc_ep_readl(ep, UDCCSR);
	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
		(fifo_irq << 1 | opc_irq));

	/* Stall status reported: flush pending requests, ack, restart idle. */
	if (udccsr0 & UDCCSR0_SST) {
		ep_dbg(ep, "clearing stall status\n");
		nuke(ep, -EPIPE);
		ep_write_UDCCSR(ep, UDCCSR0_SST);
		ep0_idle(udc);
	}

	/* New SETUP token (UDCCSR0_SA): abandon in-flight work. */
	if (udccsr0 & UDCCSR0_SA) {
		nuke(ep, 0);
		set_ep0state(udc, SETUP_STAGE);
	}

	switch (udc->ep0state) {
	case WAIT_FOR_SETUP:
		/*
		 * Hardware bug : beware, we cannot clear OPC, since we would
		 * miss a potential OPC irq for a setup packet.
		 * So, we only do ... nothing, and hope for a next irq with
		 * UDCCSR0_SA set.
		 */
		break;
	case SETUP_STAGE:
		/* Dispatch only when all UDCCSR0_CTRL_REQ_MASK bits are set. */
		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
			handle_ep0_ctrl_req(udc, req);
		break;
	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
		if (epout_has_pkt(ep))
			ep_write_UDCCSR(ep, UDCCSR0_OPC);
		if (req && !ep_is_full(ep))
			completed = write_ep0_fifo(ep, req);
		if (completed)
			ep0_end_in_req(ep, req, NULL);
		break;
	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
		if (epout_has_pkt(ep) && req)
			completed = read_ep0_fifo(ep, req);
		if (completed)
			ep0_end_out_req(ep, req, NULL);
		break;
	case STALL:
		ep_write_UDCCSR(ep, UDCCSR0_FST);
		break;
	case IN_STATUS_STAGE:
		/*
		 * Hardware bug : beware, we cannot clear OPC, since we would
		 * miss a potential PC irq for a setup packet.
		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
		 */
		if (opc_irq)
			ep0_idle(udc);
		break;
	case OUT_STATUS_STAGE:
	case WAIT_ACK_SET_CONF_INTERF:
		ep_warn(ep, "should never get in %s state here!!!\n",
				EP0_STNAME(ep->dev));
		ep0_idle(udc);
		break;
	}
}
/**
 * handle_ep - Handle endpoint data transfers
 * @ep: pxa physical endpoint
 *
 * Tries to transfer all pending request data into the endpoint and/or
 * transfer all pending data in the endpoint into usb requests.
 *
 * Is always called from the interrupt handler. ep->lock must not be held.
 */
static void handle_ep(struct pxa_ep *ep)
{
	struct pxa27x_request *req;
	int completed;
	u32 udccsr;
	int is_in = ep->dir_in;
	int loop = 0;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * Re-entrancy guard: completing a request below may indirectly
	 * re-enter handle_ep(); bail out instead of recursing.
	 * NOTE(review): this presumes ep_end_*_req() can drop/retake
	 * ep->lock via &flags - confirm against their definitions.
	 */
	if (ep->in_handle_ep)
		goto recursion_detected;
	ep->in_handle_ep = 1;

	/* Keep filling/draining the fifo while requests complete. */
	do {
		completed = 0;
		udccsr = udc_ep_readl(ep, UDCCSR);

		if (likely(!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa27x_request, queue);
		else
			req = NULL;

		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
				req, udccsr, loop++);

		/* Ack error bits by writing them back. */
		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
			udc_ep_writel(ep, UDCCSR,
					udccsr & (UDCCSR_SST | UDCCSR_TRN));
		if (!req)
			break;

		if (unlikely(is_in)) {
			if (likely(!ep_is_full(ep)))
				completed = write_fifo(ep, req);
		} else {
			if (likely(epout_has_pkt(ep)))
				completed = read_fifo(ep, req);
		}

		if (completed) {
			if (is_in)
				ep_end_in_req(ep, req, &flags);
			else
				ep_end_out_req(ep, req, &flags);
		}
	} while (completed);

	ep->in_handle_ep = 0;
recursion_detected:
	spin_unlock_irqrestore(&ep->lock, flags);
}
/**
 * pxa27x_change_configuration - Handle SET_CONF usb request notification
 * @udc: udc device
 * @config: usb configuration
 *
 * Post the request to upper level.
 * Don't use any pxa specific hardware configuration capabilities.
 *
 * The hardware has already accepted the SET_CONFIGURATION packet (see the
 * ep0 notes above), so a matching control request is forged here and handed
 * to the gadget driver; writing UDCCSR0_AREN then lets the hardware ack the
 * status stage.
 */
static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
{
	struct usb_ctrlrequest req;

	dev_dbg(udc->dev, "config=%d\n", config);

	udc->config = config;
	udc->last_interface = 0;
	udc->last_alternate = 0;

	req.bRequestType = 0;
	req.bRequest = USB_REQ_SET_CONFIGURATION;
	/* usb_ctrlrequest 16-bit fields are little-endian on the wire */
	req.wValue = cpu_to_le16(config);
	req.wIndex = 0;
	req.wLength = 0;

	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
	udc->driver->setup(&udc->gadget, &req);
	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}
/**
 * pxa27x_change_interface - Handle SET_INTERF usb request notification
 * @udc: udc device
 * @iface: interface number
 * @alt: alternate setting number
 *
 * Post the request to upper level.
 * Don't use any pxa specific hardware configuration capabilities.
 *
 * As with SET_CONFIGURATION, the hardware already accepted the packet; a
 * matching request is forged for the gadget driver and the status stage is
 * acked through UDCCSR0_AREN.
 */
static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
{
	struct usb_ctrlrequest req;

	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);

	udc->last_interface = iface;
	udc->last_alternate = alt;

	req.bRequestType = USB_RECIP_INTERFACE;
	req.bRequest = USB_REQ_SET_INTERFACE;
	/* usb_ctrlrequest 16-bit fields are little-endian on the wire */
	req.wValue = cpu_to_le16(alt);
	req.wIndex = cpu_to_le16(iface);
	req.wLength = 0;

	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
	udc->driver->setup(&udc->gadget, &req);
	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}
/*
 * irq_handle_data - Handle data transfer
 * @irq: irq IRQ number
 * @udc: dev pxa_udc device structure
 *
 * Called from irq handler, transfers data to or from endpoint to queue.
 * Dispatches ep0 events to handle_ep0(), then walks the per-endpoint
 * interrupt bits (two per endpoint: UDCISR0 covers eps 0-15, UDCISR1
 * covers eps 16-23) and calls handle_ep() for each pending one.
 */
static void irq_handle_data(int irq, struct pxa_udc *udc)
{
	int i;
	struct pxa_ep *ep;
	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;

	/* ep0 owns the two lowest interrupt bits of UDCISR0. */
	if (udcisr0 & UDCISR_INT_MASK) {
		udc->pxa_ep[0].stats.irqs++;
		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
				!!(udcisr0 & UDCICR_PKTCOMPL));
	}

	udcisr0 >>= 2;
	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
		if (!(udcisr0 & UDCISR_INT_MASK))
			continue;

		/* Ack this endpoint's interrupt bits before servicing. */
		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));

		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
		if (i < ARRAY_SIZE(udc->pxa_ep)) {
			ep = &udc->pxa_ep[i];
			ep->stats.irqs++;
			handle_ep(ep);
		}
	}

	/*
	 * NOTE(review): unlike the loop above, the ack write here happens
	 * before the pending-bit check, so every slot is acked even when it
	 * has nothing pending - presumably harmless, but confirm against
	 * the PXA27x developers guide.
	 */
	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));

		if (!(udcisr1 & UDCISR_INT_MASK))
			continue;

		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
		if (i < ARRAY_SIZE(udc->pxa_ep)) {
			ep = &udc->pxa_ep[i];
			ep->stats.irqs++;
			handle_ep(ep);
		}
	}
}
/**
 * irq_udc_suspend - Handle IRQ "UDC Suspend"
 * @udc: udc device
 *
 * Acks the suspend interrupt, bumps the stats, forwards the event to the
 * gadget driver when one is bound and enumeration happened, then parks ep0
 * back in its idle state.
 */
static void irq_udc_suspend(struct pxa_udc *udc)
{
	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
	udc->stats.irqs_suspend++;

	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
	    udc->driver && udc->driver->suspend)
		udc->driver->suspend(&udc->gadget);
	ep0_idle(udc);
}
/**
 * irq_udc_resume - Handle IRQ "UDC Resume"
 * @udc: udc device
 *
 * Acks the resume interrupt, bumps the stats, and forwards the event to
 * the gadget driver when one is bound and enumeration happened.
 */
static void irq_udc_resume(struct pxa_udc *udc)
{
	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
	udc->stats.irqs_resume++;

	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
	    udc->driver && udc->driver->resume)
		udc->driver->resume(&udc->gadget);
}
/**
 * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
 * @udc: udc device
 *
 * Reads the active configuration, interface and alternate setting out of
 * UDCCR and posts the matching SET_CONFIGURATION / SET_INTERFACE
 * notifications to the gadget driver.
 */
static void irq_udc_reconfig(struct pxa_udc *udc)
{
	unsigned config, interface, alternate, config_change;
	u32 udccr = udc_readl(udc, UDCCR);

	/* Ack the reconfiguration interrupt. */
	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
	udc->stats.irqs_reconfig++;

	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
	config_change = (config != udc->config);
	pxa27x_change_configuration(udc, config);

	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
	pxa27x_change_interface(udc, interface, alternate);

	/* presumably re-matches usb eps to pxa eps - confirm in its def */
	if (config_change)
		update_pxa_ep_matches(udc);
	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
}
/**
 * irq_udc_reset - Handle IRQ "UDC Reset"
 * @udc: udc device
 *
 * Acks the reset interrupt, tears down ongoing activity when the bus
 * reset begins, flushes ep0 and returns the driver to the idle,
 * full-speed state.
 */
static void irq_udc_reset(struct pxa_udc *udc)
{
	u32 udccr = udc_readl(udc, UDCCR);
	struct pxa_ep *ep = &udc->pxa_ep[0];

	dev_info(udc->dev, "USB reset\n");
	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
	udc->stats.irqs_reset++;

	/* UDA cleared: the reset has just started - stop everything. */
	if ((udccr & UDCCR_UDA) == 0) {
		dev_dbg(udc->dev, "USB reset start\n");
		stop_activity(udc);
	}
	udc->gadget.speed = USB_SPEED_FULL;
	memset(&udc->stats, 0, sizeof udc->stats);

	/* Drop ep0 requests, flush its fifo and go back to idle. */
	nuke(ep, -EPROTO);
	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
	ep0_idle(udc);
}
/**
 * pxa_udc_irq - Main irq handler
 * @irq: irq number
 * @_dev: udc device
 *
 * Handles all udc interrupts: the special events in the top bits of
 * UDCISR1 (suspend, resume, reconfiguration, reset) first, then any
 * pending endpoint data events.
 */
static irqreturn_t pxa_udc_irq(int irq, void *_dev)
{
	struct pxa_udc *udc = _dev;
	u32 udcisr0 = udc_readl(udc, UDCISR0);
	u32 udcisr1 = udc_readl(udc, UDCISR1);
	u32 udccr = udc_readl(udc, UDCCR);
	u32 udcisr1_spec;

	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);

	/* Top five bits of UDCISR1 carry the non-endpoint events. */
	udcisr1_spec = udcisr1 & 0xf8000000;
	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
		irq_udc_suspend(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
		irq_udc_resume(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
		irq_udc_reconfig(udc);
	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
		irq_udc_reset(udc);

	/* Any endpoint interrupt pending? */
	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
		irq_handle_data(irq, udc);

	return IRQ_HANDLED;
}
/*
 * Static controller instance: the PXA has a single UDC, so the gadget
 * device, its gadget-visible endpoints (udc_usb_ep) and the underlying
 * physical endpoints (pxa_ep) are laid out once at compile time.
 */
static struct pxa_udc memory = {
	.gadget = {
		.ops = &pxa_udc_ops,
		.ep0 = &memory.udc_usb_ep[0].usb_ep,
		.name = driver_name,
		.dev = {
			.init_name = "gadget",
		},
	},

	.udc_usb_ep = {
		USB_EP_CTRL,
		USB_EP_OUT_BULK(1),
		USB_EP_IN_BULK(2),
		USB_EP_IN_ISO(3),
		USB_EP_OUT_ISO(4),
		USB_EP_IN_INT(5),
	},

	.pxa_ep = {
		PXA_EP_CTRL,
		/* Endpoints for gadget zero */
		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
		PXA_EP_IN_BULK(2, 2, 3, 0, 0),
		/* Endpoints for ether gadget, file storage gadget */
		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
		PXA_EP_IN_BULK(4, 2, 1, 0, 0),
		PXA_EP_IN_ISO(5, 3, 1, 0, 0),
		PXA_EP_OUT_ISO(6, 4, 1, 0, 0),
		PXA_EP_IN_INT(7, 5, 1, 0, 0),
		/* Endpoints for RNDIS, serial */
		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
		PXA_EP_IN_BULK(9, 2, 2, 0, 0),
		PXA_EP_IN_INT(10, 5, 2, 0, 0),
		/*
		 * All the following endpoints are only for completion. They
		 * will never work, as multiple interfaces are really broken on
		 * the pxa.
		 */
		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
		PXA_EP_IN_BULK(12, 2, 2, 1, 0),
		/* Endpoint for CDC Ether */
		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
		PXA_EP_IN_BULK(14, 2, 1, 1, 1),
	}
};
#if defined(CONFIG_OF)
/* Device-tree match table for this controller. */
static const struct of_device_id udc_pxa_dt_ids[] = {
	{ .compatible = "marvell,pxa270-udc" },
	{}
};
MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
#endif
/**
 * pxa_udc_probe - probes the udc device
 * @pdev: platform device
 *
 * Perform basic init : allocates udc clock, creates sysfs files, requests
 * irq.  Resources come either from legacy platform data (D+ pullup gpio,
 * udc_command callback) or from the device tree (gpiod + phy phandle).
 */
static int pxa_udc_probe(struct platform_device *pdev)
{
	struct pxa_udc *udc = &memory;
	int retval = 0, gpio;
	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
	unsigned long gpio_flags;

	/* D+ pullup control: legacy gpio from platform data, or gpiod. */
	if (mach) {
		gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
		gpio = mach->gpio_pullup;
		if (gpio_is_valid(gpio)) {
			retval = devm_gpio_request_one(&pdev->dev, gpio,
						       gpio_flags,
						       "USB D+ pullup");
			if (retval)
				return retval;
			udc->gpiod = gpio_to_desc(mach->gpio_pullup);
		}
		udc->udc_command = mach->udc_command;
	} else {
		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
	}

	udc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->regs))
		return PTR_ERR(udc->regs);
	udc->irq = platform_get_irq(pdev, 0);
	if (udc->irq < 0)
		return udc->irq;

	udc->dev = &pdev->dev;
	/* PHY/transceiver lookup: by DT phandle, or the global USB2 phy. */
	if (of_have_populated_dt()) {
		udc->transceiver =
			devm_usb_get_phy_by_phandle(udc->dev, "phys", 0);
		if (IS_ERR(udc->transceiver))
			return PTR_ERR(udc->transceiver);
	} else {
		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
	}

	if (IS_ERR(udc->gpiod)) {
		dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
			PTR_ERR(udc->gpiod));
		return PTR_ERR(udc->gpiod);
	}
	if (udc->gpiod)
		gpiod_direction_output(udc->gpiod, 0);

	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);
	retval = clk_prepare(udc->clk);
	if (retval)
		return retval;

	udc->vbus_sensed = 0;

	the_controller = udc;
	platform_set_drvdata(pdev, udc);
	udc_init_data(udc);

	/* irq setup after old hardware state is cleaned up */
	retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
				  IRQF_SHARED, driver_name, udc);
	if (retval != 0) {
		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
			driver_name, udc->irq, retval);
		goto err;
	}

	if (!IS_ERR_OR_NULL(udc->transceiver))
		usb_register_notifier(udc->transceiver, &pxa27x_udc_phy);
	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (retval)
		goto err_add_gadget;

	pxa_init_debugfs(udc);
	if (should_enable_udc(udc))
		udc_enable(udc);
	return 0;

err_add_gadget:
	if (!IS_ERR_OR_NULL(udc->transceiver))
		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
err:
	clk_unprepare(udc->clk);
	return retval;
}
/**
 * pxa_udc_remove - removes the udc device driver
 * @_dev: platform device
 *
 * Unwinds probe: unregisters the gadget, removes debugfs entries, drops
 * the phy notifier/reference and unprepares the clock.
 */
static void pxa_udc_remove(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);

	usb_del_gadget_udc(&udc->gadget);
	pxa_cleanup_debugfs(udc);

	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
		usb_put_phy(udc->transceiver);
	}

	udc->transceiver = NULL;
	the_controller = NULL;
	clk_unprepare(udc->clk);
}
/* Quiesce the controller on system shutdown. */
static void pxa_udc_shutdown(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);

	/* Only bother when the hardware is currently enabled. */
	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
		udc_disable(udc);
}
#ifdef CONFIG_PM
/**
 * pxa_udc_suspend - Suspend udc device
 * @_dev: platform device
 * @state: suspend state
 *
 * Suspends udc : saves configuration registers (UDCCR*), then disables the udc
 * device.
 */
static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);
	struct pxa_ep *ep;

	/* Save ep0 control/status so resume can restore FST/DME bits. */
	ep = &udc->pxa_ep[0];
	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);

	udc_disable(udc);
	/* Remember whether the D+ pullup was on, then drop off the bus. */
	udc->pullup_resume = udc->pullup_on;
	dplus_pullup(udc, 0);

	if (udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return 0;
}
/**
 * pxa_udc_resume - Resume udc device
 * @_dev: platform device
 *
 * Resumes udc : restores configuration registers (UDCCR*), then enables the udc
 * device.
 */
static int pxa_udc_resume(struct platform_device *_dev)
{
	struct pxa_udc *udc = platform_get_drvdata(_dev);
	struct pxa_ep *ep;

	/* Restore only the sticky ep0 bits saved at suspend time. */
	ep = &udc->pxa_ep[0];
	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));

	dplus_pullup(udc, udc->pullup_resume);
	if (should_enable_udc(udc))
		udc_enable(udc);
	/*
	 * We do not handle OTG yet.
	 *
	 * OTGPH bit is set when sleep mode is entered.
	 * it indicates that OTG pad is retaining its state.
	 * Upon exit from sleep mode and before clearing OTGPH,
	 * Software must configure the USB OTG pad, UDC, and UHC
	 * to the state they were in before entering sleep mode.
	 */
	pxa27x_clear_otgph();

	return 0;
}
#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:pxa27x-udc");

/* Platform driver glue; PM callbacks use the legacy platform_driver hooks. */
static struct platform_driver udc_driver = {
	.driver = {
		.name	= "pxa27x-udc",
		.of_match_table = of_match_ptr(udc_pxa_dt_ids),
	},
	.probe		= pxa_udc_probe,
	.remove_new	= pxa_udc_remove,
	.shutdown	= pxa_udc_shutdown,
#ifdef CONFIG_PM
	.suspend	= pxa_udc_suspend,
	.resume		= pxa_udc_resume
#endif
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Robert Jarzmik");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/pxa27x_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Fusb300 UDC (USB gadget)
*
* Copyright (C) 2010 Faraday Technology Corp.
*
* Author : Yuan-hsin Chen <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "fusb300_udc.h"
MODULE_DESCRIPTION("FUSB300 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <[email protected]>");
MODULE_ALIAS("platform:fusb300_udc");
#define DRIVER_VERSION	"20 October 2010"

/* Name used when requesting driver resources. */
static const char udc_name[] = "fusb300_udc";
/* gadget-visible endpoint names; index matches the hardware ep number */
static const char * const fusb300_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9",
	"ep10", "ep11", "ep12", "ep13", "ep14", "ep15"
};

/* forward declaration: request completion helper, defined later */
static void done(struct fusb300_ep *ep, struct fusb300_request *req,
		 int status);
/* Read-modify-write helper: OR @value into the register at @offset. */
static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset,
			       u32 value)
{
	u32 tmp = ioread32(fusb300->reg + offset);

	iowrite32(tmp | value, fusb300->reg + offset);
}
/* Read-modify-write helper: clear the @value bits in the register at @offset. */
static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset,
				u32 value)
{
	u32 tmp = ioread32(fusb300->reg + offset);

	iowrite32(tmp & ~value, fusb300->reg + offset);
}
/* Cache the hardware endpoint number and transfer type on the ep struct. */
static void fusb300_ep_setting(struct fusb300_ep *ep,
			       struct fusb300_ep_info info)
{
	ep->type = info.type;
	ep->epnum = info.epnum;
}
/* Reset driver-side endpoint state; ep0 is left untouched.  Always 0. */
static int fusb300_ep_release(struct fusb300_ep *ep)
{
	if (ep->epnum) {
		ep->epnum = 0;
		ep->stall = 0;
		ep->wedged = 0;
	}
	return 0;
}
/* Program the number of FIFO entries assigned to endpoint @ep. */
static void fusb300_set_fifo_entry(struct fusb300 *fusb300,
				   u32 ep)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));

	val = (val & ~FUSB300_EPSET1_FIFOENTRY_MSK) |
	      FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
}
/*
 * Program the FIFO start entry for @ep and advance the linear allocator
 * (FUSB300_FIFO_ENTRY_NUM entries per endpoint).  On exhaustion the
 * allocator is reset to zero and an error is logged.
 */
static void fusb300_set_start_entry(struct fusb300 *fusb300,
			u8 ep)
{
	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
	u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM;

	reg &= ~FUSB300_EPSET1_START_ENTRY_MSK	;
	reg |= FUSB300_EPSET1_START_ENTRY(start_entry);
	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
	if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) {
		fusb300->fifo_entry_num = 0;
		fusb300->addrofs = 0;
		pr_err("fifo entry is over the maximum number!\n");
	} else
		fusb300->fifo_entry_num++;
}
/* set fusb300_set_start_entry first before fusb300_set_epaddrofs */
static void fusb300_set_epaddrofs(struct fusb300 *fusb300,
				  struct fusb300_ep_info info)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));

	val = (val & ~FUSB300_EPSET2_ADDROFS_MSK) |
	      FUSB300_EPSET2_ADDROFS(fusb300->addrofs);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
	/* advance the allocator: maxpacket in 8-byte units, per fifo entry */
	fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM;
}
/*
 * Program the FIFO geometry for one endpoint.  Order matters: the start
 * entry must be set before the address offset (see comment above).
 */
static void ep_fifo_setting(struct fusb300 *fusb300,
			    struct fusb300_ep_info info)
{
	fusb300_set_fifo_entry(fusb300, info.epnum);
	fusb300_set_start_entry(fusb300, info.epnum);
	fusb300_set_epaddrofs(fusb300, info);
}
/* Program the transfer type (bulk/int/isoc) field for the endpoint. */
static void fusb300_set_eptype(struct fusb300 *fusb300,
			       struct fusb300_ep_info info)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));

	val = (val & ~FUSB300_EPSET1_TYPE_MSK) | FUSB300_EPSET1_TYPE(info.type);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
}
/* Program the direction bit; only IN endpoints get it set. */
static void fusb300_set_epdir(struct fusb300 *fusb300,
			      struct fusb300_ep_info info)
{
	u32 val;

	if (!info.dir_in)
		return;
	val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
	val = (val & ~FUSB300_EPSET1_DIR_MSK) | FUSB300_EPSET1_DIRIN;
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
}
/* Activate endpoint @ep by setting its ACTEN bit. */
static void fusb300_set_ep_active(struct fusb300 *fusb300,
			u8 ep)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));

	iowrite32(val | FUSB300_EPSET1_ACTEN,
		  fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
}
/* Program the endpoint's max packet size field. */
static void fusb300_set_epmps(struct fusb300 *fusb300,
			      struct fusb300_ep_info info)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));

	val = (val & ~FUSB300_EPSET2_MPS_MSK) |
	      FUSB300_EPSET2_MPS(info.maxpacket);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
}
/* Program the polling interval field (periodic endpoints). */
static void fusb300_set_interval(struct fusb300 *fusb300,
				 struct fusb300_ep_info info)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));

	val = (val & ~FUSB300_EPSET1_INTERVAL(0x7)) |
	      FUSB300_EPSET1_INTERVAL(info.interval);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
}
/* Program the bandwidth number field (high-bandwidth isoc). */
static void fusb300_set_bwnum(struct fusb300 *fusb300,
			      struct fusb300_ep_info info)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));

	val = (val & ~FUSB300_EPSET1_BWNUM(0x3)) |
	      FUSB300_EPSET1_BWNUM(info.bw_num);
	iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
}
/*
 * Program all per-endpoint registers from @info; interval and bandwidth
 * are only written when non-zero, and the endpoint is activated last.
 */
static void set_ep_reg(struct fusb300 *fusb300,
		      struct fusb300_ep_info info)
{
	fusb300_set_eptype(fusb300, info);
	fusb300_set_epdir(fusb300, info);
	fusb300_set_epmps(fusb300, info);

	if (info.interval)
		fusb300_set_interval(fusb300, info);

	if (info.bw_num)
		fusb300_set_bwnum(fusb300, info);

	fusb300_set_ep_active(fusb300, info.epnum);
}
/*
 * config_ep - derive endpoint settings from a usb descriptor, program the
 * hardware (fifo + endpoint registers) and record the mapping.
 * Always returns 0 (no failure paths at present).
 */
static int config_ep(struct fusb300_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct fusb300 *fusb300 = ep->fusb300;
	struct fusb300_ep_info info;

	ep->ep.desc = desc;

	info.interval = 0;
	info.addrofs = 0;
	info.bw_num = 0;

	info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	info.maxpacket = usb_endpoint_maxp(desc);
	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	/* interval/bandwidth only make sense for periodic endpoints */
	if ((info.type == USB_ENDPOINT_XFER_INT) ||
	    (info.type == USB_ENDPOINT_XFER_ISOC)) {
		info.interval = desc->bInterval;
		if (info.type == USB_ENDPOINT_XFER_ISOC)
			info.bw_num = usb_endpoint_maxp_mult(desc);
	}

	ep_fifo_setting(fusb300, info);
	set_ep_reg(fusb300, info);
	fusb300_ep_setting(ep, info);
	fusb300->ep[info.epnum] = ep;

	return 0;
}
static int fusb300_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct fusb300_ep *ep;
ep = container_of(_ep, struct fusb300_ep, ep);
if (ep->fusb300->reenum) {
ep->fusb300->fifo_entry_num = 0;
ep->fusb300->addrofs = 0;
ep->fusb300->reenum = 0;
}
return config_ep(ep, desc);
}
/*
 * usb_ep_ops.disable: complete every queued request with -ECONNRESET and
 * release the driver-side endpoint state.
 */
static int fusb300_disable(struct usb_ep *_ep)
{
	struct fusb300_ep *ep;
	struct fusb300_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct fusb300_ep, ep);

	BUG_ON(!ep);

	/*
	 * NOTE(review): list_empty() is evaluated outside the lock; this
	 * relies on done() unlinking the request before the next iteration.
	 * Confirm this cannot race with fusb300_queue().
	 */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct fusb300_request, queue);
		spin_lock_irqsave(&ep->fusb300->lock, flags);
		done(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->fusb300->lock, flags);
	}

	return fusb300_ep_release(ep);
}
/* usb_ep_ops.alloc_request: allocate and initialize a driver request. */
static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep,
						 gfp_t gfp_flags)
{
	struct fusb300_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops.free_request: release a request allocated above. */
static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	kfree(container_of(_req, struct fusb300_request, req));
}
/* Unmask the FIFO interrupt for a non-control endpoint. */
static int enable_fifo_int(struct fusb300_ep *ep)
{
	if (!ep->epnum) {
		pr_err("can't enable_fifo_int ep0\n");
		return -EINVAL;
	}

	fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
			   FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
	return 0;
}
/* Mask the FIFO interrupt for a non-control endpoint. */
static int disable_fifo_int(struct fusb300_ep *ep)
{
	if (!ep->epnum) {
		pr_err("can't disable_fifo_int ep0\n");
		return -EINVAL;
	}

	fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
			    FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
	return 0;
}
/* Program the control-transfer (CX) data length into CSR. */
static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
{
	u32 csr = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);

	csr = (csr & ~FUSB300_CSR_LEN_MSK) | FUSB300_CSR_LEN(length);
	iowrite32(csr, fusb300->reg + FUSB300_OFFSET_CSR);
}
/*
 * write data to cx fifo
 *
 * Pushes up to one max-size control packet from @req into the CX port,
 * 32 bits at a time, and advances req->req.actual accordingly.  A trailing
 * 1-3 byte remainder is packed little-endian into one final word.
 */
static void fusb300_wrcxf(struct fusb300_ep *ep,
		   struct fusb300_request *req)
{
	int i = 0;
	u8 *tmp;
	u32 data;
	struct fusb300 *fusb300 = ep->fusb300;
	u32 length = req->req.length - req->req.actual;

	tmp = req->req.buf + req->req.actual;

	if (length > SS_CTL_MAX_PACKET_SIZE) {
		/* full packet: write exactly SS_CTL_MAX_PACKET_SIZE bytes */
		fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
		for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
				*(tmp + 3) << 24;
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			tmp += 4;
		}
		req->req.actual += SS_CTL_MAX_PACKET_SIZE;
	} else { /* length is less than max packet size */
		fusb300_set_cxlen(fusb300, length);
		for (i = length >> 2; i > 0; i--) {
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
				*(tmp + 3) << 24;
			printk(KERN_DEBUG "    0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			tmp = tmp + 4;
		}
		/* pack the 1-3 remaining bytes into one last word */
		switch (length % 4) {
		case 1:
			data = *tmp;
			printk(KERN_DEBUG "    0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		case 2:
			data = *tmp | *(tmp + 1) << 8;
			printk(KERN_DEBUG "    0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		case 3:
			data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
			printk(KERN_DEBUG "    0x%x\n", data);
			iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
			break;
		default:
			break;
		}
		req->req.actual += length;
	}
}
/* Stall endpoint @ep by setting its STL bit. */
static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
{
	fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
		FUSB300_EPSET0_STL);
}
/* Clear the stall condition on endpoint @ep, if it is currently stalled. */
static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep)
{
	u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));

	if (!(val & FUSB300_EPSET0_STL))
		return;

	printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep);
	iowrite32(val | FUSB300_EPSET0_STL_CLR,
		  fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
}
/*
 * ep0_queue - start (or immediately finish) a control transfer on ep0.
 * IN: push data to the CX fifo; the request completes once everything is
 * written or a short packet was sent.  OUT: zero-length requests complete
 * at once, otherwise the CX OUT interrupt is enabled to pull data later.
 */
static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
{
	if (ep->fusb300->ep0_dir) { /* if IN */
		if (req->req.length) {
			fusb300_wrcxf(ep, req);
		} else
			printk(KERN_DEBUG "%s : req->req.length = 0x%x\n",
				__func__, req->req.length);
		if ((req->req.length == req->req.actual) ||
		    (req->req.actual < ep->ep.maxpacket))
			done(ep, req, 0);
	} else { /* OUT */
		if (!req->req.length)
			done(ep, req, 0);
		else
			fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1,
				FUSB300_IGER1_CX_OUT_INT);
	}
}
/*
 * usb_ep_ops.queue: link a request onto the endpoint queue.  ep0 requests
 * are started immediately via ep0_queue(); for other endpoints the fifo
 * interrupt is enabled when this is the first request and the endpoint is
 * not stalled.  Returns -ESHUTDOWN before enumeration.
 */
static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req,
			 gfp_t gfp_flags)
{
	struct fusb300_ep *ep;
	struct fusb300_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct fusb300_ep, ep);
	req = container_of(_req, struct fusb300_request, req);

	if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->fusb300->lock, flags);

	/* remember whether the queue was empty before adding this request */
	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);

	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->ep.desc == NULL) /* ep0 */
		ep0_queue(ep, req);
	else if (request && !ep->stall)
		enable_fifo_int(ep);

	spin_unlock_irqrestore(&ep->fusb300->lock, flags);

	return 0;
}
/*
 * usb_ep_ops.dequeue: cancel @_req with -ECONNRESET.
 * NOTE(review): this completes @_req whenever the queue is non-empty,
 * without verifying that @_req is actually linked on this endpoint's
 * queue - looks fragile; confirm against usb_ep_dequeue() expectations.
 */
static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct fusb300_ep *ep;
	struct fusb300_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct fusb300_ep, ep);
	req = container_of(_req, struct fusb300_request, req);

	spin_lock_irqsave(&ep->fusb300->lock, flags);
	if (!list_empty(&ep->queue))
		done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->fusb300->lock, flags);

	return 0;
}
/*
 * Common implementation for set_halt/set_wedge: refuses (-EAGAIN) while
 * transfers are queued; otherwise stalls (optionally wedges) or un-stalls
 * the endpoint and updates the driver-side flags.
 */
static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
{
	struct fusb300_ep *ep;
	struct fusb300 *fusb300;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct fusb300_ep, ep);

	fusb300 = ep->fusb300;

	spin_lock_irqsave(&ep->fusb300->lock, flags);

	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	if (value) {
		fusb300_set_epnstall(fusb300, ep->epnum);
		ep->stall = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		fusb300_clear_epnstall(fusb300, ep->epnum);
		ep->stall = 0;
		ep->wedged = 0;
	}

out:
	spin_unlock_irqrestore(&ep->fusb300->lock, flags);
	return ret;
}
/* usb_ep_ops.set_halt: stall or un-stall without wedging. */
static int fusb300_set_halt(struct usb_ep *_ep, int value)
{
	return fusb300_set_halt_and_wedge(_ep, value, 0);
}
/* usb_ep_ops.set_wedge: stall until the host issues CLEAR_FEATURE. */
static int fusb300_set_wedge(struct usb_ep *_ep)
{
	return fusb300_set_halt_and_wedge(_ep, 1, 1);
}
/* usb_ep_ops.fifo_flush: no-op - fifo flushing is not implemented here. */
static void fusb300_fifo_flush(struct usb_ep *_ep)
{
}
/* usb_ep_ops shared by all fusb300 endpoints (ep0 included). */
static const struct usb_ep_ops fusb300_ep_ops = {
	.enable		= fusb300_enable,
	.disable	= fusb300_disable,

	.alloc_request	= fusb300_alloc_request,
	.free_request	= fusb300_free_request,

	.queue		= fusb300_queue,
	.dequeue	= fusb300_dequeue,

	.set_halt	= fusb300_set_halt,
	.fifo_flush	= fusb300_fifo_flush,
	.set_wedge	= fusb300_set_wedge,
};
/*****************************************************************************/
/* Ack interrupt bit(s) by writing @value to the register at @offset. */
static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset,
		u32 value)
{
	iowrite32(value, fusb300->reg + offset);
}
/* No-op: bus-reset handling is not implemented for this controller. */
static void fusb300_reset(void)
{
}
/* Stall the control (CX) endpoint. */
static void fusb300_set_cxstall(struct fusb300 *fusb300)
{
	fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
			   FUSB300_CSR_STL);
}
/* Signal completion of the current control transfer to the hardware. */
static void fusb300_set_cxdone(struct fusb300 *fusb300)
{
	fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
			   FUSB300_CSR_DONE);
}
/*
 * read data from cx fifo
 *
 * Pulls @length bytes from the CX port into @buffer, 32 bits at a time,
 * unpacking each word little-endian; a trailing 1-3 byte remainder is
 * taken from one final word read.
 */
static void fusb300_rdcxf(struct fusb300 *fusb300,
		   u8 *buffer, u32 length)
{
	int i = 0;
	u8 *tmp;
	u32 data;

	tmp = buffer;

	for (i = (length >> 2); i > 0; i--) {
		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
		printk(KERN_DEBUG "    0x%x\n", data);
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		*(tmp + 2) = (data >> 16) & 0xFF;
		*(tmp + 3) = (data >> 24) & 0xFF;
		tmp = tmp + 4;
	}

	switch (length % 4) {
	case 1:
		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
		printk(KERN_DEBUG "    0x%x\n", data);
		*tmp = data & 0xFF;
		break;
	case 2:
		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
		printk(KERN_DEBUG "    0x%x\n", data);
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		break;
	case 3:
		data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
		printk(KERN_DEBUG "    0x%x\n", data);
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		*(tmp + 2) = (data >> 16) & 0xFF;
		break;
	default:
		break;
	}
}
/*
 * fusb300_rdfifo - read @length bytes from an endpoint fifo into @req.
 * Words are unpacked little-endian; the actual count is advanced before
 * copying.  The trailing loop busy-waits for the sync fifo to drain.
 */
static void fusb300_rdfifo(struct fusb300_ep *ep,
			  struct fusb300_request *req,
			  u32 length)
{
	int i = 0;
	u8 *tmp;
	u32 data, reg;
	struct fusb300 *fusb300 = ep->fusb300;

	tmp = req->req.buf + req->req.actual;
	req->req.actual += length;

	if (req->req.actual > req->req.length)
		printk(KERN_DEBUG "req->req.actual > req->req.length\n");

	for (i = (length >> 2); i > 0; i--) {
		data = ioread32(fusb300->reg +
			FUSB300_OFFSET_EPPORT(ep->epnum));
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		*(tmp + 2) = (data >> 16) & 0xFF;
		*(tmp + 3) = (data >> 24) & 0xFF;
		tmp = tmp + 4;
	}

	switch (length % 4) {
	case 1:
		data = ioread32(fusb300->reg +
			FUSB300_OFFSET_EPPORT(ep->epnum));
		*tmp = data & 0xFF;
		break;
	case 2:
		data = ioread32(fusb300->reg +
			FUSB300_OFFSET_EPPORT(ep->epnum));
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		break;
	case 3:
		data = ioread32(fusb300->reg +
			FUSB300_OFFSET_EPPORT(ep->epnum));
		*tmp = data & 0xFF;
		*(tmp + 1) = (data >> 8) & 0xFF;
		*(tmp + 2) = (data >> 16) & 0xFF;
		break;
	default:
		break;
	}

	/*
	 * Busy-wait until the sync fifo reports empty; the message is
	 * printed from the second iteration onward (i starts reused at 0).
	 */
	do {
		reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
		reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
		if (i)
			printk(KERN_INFO "sync fifo is not empty!\n");
		i++;
	} while (!reg);
}
/* Return non-zero (truncated to u8) when endpoint @ep is stalled. */
static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
{
	u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));

	return reg & FUSB300_EPSET0_STL;
}
/* Return 1 when the control (CX) endpoint is stalled, else 0. */
static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
{
	u32 csr = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);

	return (csr & FUSB300_CSR_STL) >> 1;
}
/* Reject a malformed or unsupported control request by stalling CX. */
static void request_error(struct fusb300 *fusb300)
{
	printk(KERN_DEBUG "request error!!\n");
	fusb300_set_cxstall(fusb300);
}
/*
 * get_status - handle a GET_STATUS control request.
 * Builds the 2-byte status word for the addressed recipient and queues it
 * on ep0 for the IN data stage.  fusb300->lock is dropped around the
 * queueing call, since fusb300_queue() takes it itself.
 */
static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
__releases(fusb300->lock)
__acquires(fusb300->lock)
{
	u8 ep;
	u16 status = 0;
	/* usb_ctrlrequest fields are __le16; convert before masking */
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = w_index & USB_ENDPOINT_NUMBER_MASK;
		if (ep) {
			if (fusb300_get_epnstall(fusb300, ep))
				status = 1 << USB_ENDPOINT_HALT;
		} else {
			if (fusb300_get_cxstall(fusb300))
				status = 0;
		}
		break;

	default:
		request_error(fusb300);
		return;		/* exit */
	}

	fusb300->ep0_data = cpu_to_le16(status);
	fusb300->ep0_req->buf = &fusb300->ep0_data;
	fusb300->ep0_req->length = 2;

	spin_unlock(&fusb300->lock);
	fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
	spin_lock(&fusb300->lock);
}
/*
 * set_feature - handle a SET_FEATURE control request.
 * Device- and interface-level requests are simply acked; endpoint-level
 * requests stall the addressed endpoint (or CX for ep0) and then ack.
 */
static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
	u8 ep;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		fusb300_set_cxdone(fusb300);
		break;
	case USB_RECIP_INTERFACE:
		fusb300_set_cxdone(fusb300);
		break;
	case USB_RECIP_ENDPOINT: {
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = w_index & USB_ENDPOINT_NUMBER_MASK;
		if (ep)
			fusb300_set_epnstall(fusb300, ep);
		else
			fusb300_set_cxstall(fusb300);
		fusb300_set_cxdone(fusb300);
		}
		break;
	default:
		request_error(fusb300);
		break;
	}
}
/* Reset the endpoint's sequence number (data toggle) via CLRSEQNUM. */
static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
{
	fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
			   FUSB300_EPSET0_CLRSEQNUM);
}
static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
struct fusb300_ep *ep =
fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
fusb300_set_cxdone(fusb300);
break;
case USB_RECIP_INTERFACE:
fusb300_set_cxdone(fusb300);
break;
case USB_RECIP_ENDPOINT:
if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
if (ep->wedged) {
fusb300_set_cxdone(fusb300);
break;
}
if (ep->stall) {
ep->stall = 0;
fusb300_clear_seqnum(fusb300, ep->epnum);
fusb300_clear_epnstall(fusb300, ep->epnum);
if (!list_empty(&ep->queue))
enable_fifo_int(ep);
}
}
fusb300_set_cxdone(fusb300);
break;
default:
request_error(fusb300);
break;
}
}
/* Program the USB device address into the Device Address Register. */
static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
{
	u32 dar;

	dar = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);
	dar = (dar & ~FUSB300_DAR_DRVADDR_MSK) | FUSB300_DAR_DRVADDR(addr);
	iowrite32(dar, fusb300->reg + FUSB300_OFFSET_DAR);
}
static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
if (ctrl->wValue >= 0x0100)
request_error(fusb300);
else {
fusb300_set_dev_addr(fusb300, ctrl->wValue);
fusb300_set_cxdone(fusb300);
}
}
/*
 * Copy a NULL-terminated array of USB descriptors into a linear buffer,
 * advancing 'mem' past each descriptor copied.
 * NOTE(review): this macro belongs to the UVC gadget function driver and
 * looks out of place in this UDC file -- confirm it is actually used here.
 */
#define UVC_COPY_DESCRIPTORS(mem, src) \
	do { \
		const struct usb_descriptor_header * const *__src; \
		for (__src = src; *__src; ++__src) { \
			memcpy(mem, *__src, (*__src)->bLength); \
			mem += (*__src)->bLength; \
		} \
	} while (0)
/*
 * Read the 8-byte SETUP packet from the control FIFO and dispatch it.
 *
 * Standard requests the hardware/driver can answer locally (GET_STATUS,
 * CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS) are handled here and 0 is
 * returned. Returns 1 when the request must be forwarded to the gadget
 * driver's ->setup() callback (SET_CONFIGURATION, class/vendor requests,
 * anything unrecognised).
 */
static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
	u8 *p = (u8 *)ctrl;
	u8 ret = 0;
	u8 i = 0;

	/* fetch the raw setup packet from the control transfer FIFO */
	fusb300_rdcxf(fusb300, p, 8);
	fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
	fusb300->ep0_length = ctrl->wLength;

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(fusb300, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(fusb300, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(fusb300, ctrl);
			break;
		case USB_REQ_SET_ADDRESS:
			set_address(fusb300, ctrl);
			break;
		case USB_REQ_SET_CONFIGURATION:
			/* latch the config bit, then hand off to the gadget */
			fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
					   FUSB300_DAR_SETCONFG);
			/* clear sequence number */
			for (i = 1; i <= FUSB300_MAX_NUM_EP; i++)
				fusb300_clear_seqnum(fusb300, i);
			fusb300->reenum = 1;
			ret = 1;
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;

	return ret;
}
/*
 * Complete @req on @ep with @status and give it back to the gadget driver.
 *
 * Must be called with the controller lock held; the lock is dropped around
 * the completion callback (which may queue new requests) and re-acquired
 * afterwards. For non-control endpoints the FIFO interrupt is re-armed if
 * more requests remain queued; for EP0 the status stage is completed.
 */
static void done(struct fusb300_ep *ep, struct fusb300_request *req,
		 int status)
{
	list_del_init(&req->queue);

	/* don't modify queue heads during completion callback */
	if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	spin_unlock(&ep->fusb300->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->fusb300->lock);

	if (ep->epnum) {
		disable_fifo_int(ep);
		if (!list_empty(&ep->queue))
			enable_fifo_int(ep);
	} else
		fusb300_set_cxdone(ep->fusb300);
}
/*
 * Program a single-entry PRD (physical region descriptor) for an IDMA
 * transfer of @len bytes starting at DMA address @d, then mark the PRD
 * table ready so the controller starts the transfer.
 *
 * NOTE(review): the initial loop busy-waits with no timeout until the
 * hardware releases ownership (H bit clear) of the PRD entry.
 */
static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
		u32 len)
{
	u32 value;
	u32 reg;

	/* wait SW owner */
	do {
		reg = ioread32(ep->fusb300->reg +
			FUSB300_OFFSET_EPPRD_W0(ep->epnum));
		reg &= FUSB300_EPPRD0_H;
	} while (reg);

	/* word 1: buffer address */
	iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));

	/* word 0: byte count + HW-owned, first/last entry, interrupt on done */
	value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
		FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
	iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));

	iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));

	/* kick the transfer */
	fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
		FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
}
/*
 * Busy-wait for the IDMA transfer on @ep to complete (PRD interrupt set),
 * then acknowledge it.
 *
 * The wait is aborted if a bus-level event (VBUS change, warm/hot/USB
 * reset) is raised while polling; in that case the per-endpoint PRD
 * interrupt enable is cleared instead of acknowledging completion.
 */
static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
{
	u32 reg;

	do {
		reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
		if ((reg & FUSB300_IGR1_VBUS_CHG_INT) ||
		    (reg & FUSB300_IGR1_WARM_RST_INT) ||
		    (reg & FUSB300_IGR1_HOT_RST_INT) ||
		    (reg & FUSB300_IGR1_USBRST_INT)
		)
			goto IDMA_RESET;
		reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
		reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
	} while (!reg);

	/* acknowledge the PRD-done interrupt */
	fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
		FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
	return;

IDMA_RESET:
	/* bus event: disable the PRD interrupt for this endpoint and bail */
	reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0);
	reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum);
	iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0);
}
/*
 * Perform a synchronous IN transfer of @req via IDMA: map the request
 * for DMA, enable the PRD interrupt, program the PRD table, wait for
 * completion, then unmap. Returns silently if the DMA mapping fails
 * (the request is then not transferred).
 */
static void fusb300_set_idma(struct fusb300_ep *ep,
			struct fusb300_request *req)
{
	int ret;

	ret = usb_gadget_map_request(&ep->fusb300->gadget,
			&req->req, DMA_TO_DEVICE);
	if (ret)
		return;

	fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
		FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));

	fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length);
	/* check idma is done */
	fusb300_wait_idma_finished(ep);

	usb_gadget_unmap_request(&ep->fusb300->gadget,
			&req->req, DMA_TO_DEVICE);
}
/* Service an IN-endpoint FIFO interrupt: send the queued request via
 * IDMA (if it carries data) and complete it.
 */
static void in_ep_fifo_handler(struct fusb300_ep *ep)
{
	struct fusb300_request *req;

	req = list_first_entry(&ep->queue, struct fusb300_request, queue);
	if (req->req.length)
		fusb300_set_idma(ep, req);
	done(ep, req, 0);
}
/* Service an OUT-endpoint FIFO interrupt: drain the FIFO into the queued
 * request and complete it once it is full or a short packet arrived.
 */
static void out_ep_fifo_handler(struct fusb300_ep *ep)
{
	struct fusb300 *fusb300 = ep->fusb300;
	struct fusb300_request *req;
	u32 byte_count;

	req = list_first_entry(&ep->queue, struct fusb300_request, queue);
	byte_count = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)) &
		     FUSB300_FFR_BYCNT;
	fusb300_rdfifo(ep, req, byte_count);

	/* finish out transfer */
	if ((req->req.length == req->req.actual) ||
	    (byte_count < ep->ep.maxpacket))
		done(ep, req, 0);
}
/* Read the negotiated bus speed from the Global Control Register and
 * record it in the gadget structure.
 */
static void check_device_mode(struct fusb300 *fusb300)
{
	u32 mode = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);

	switch (mode & FUSB300_GCR_DEVEN_MSK) {
	case FUSB300_GCR_DEVEN_SS:
		fusb300->gadget.speed = USB_SPEED_SUPER;
		break;
	case FUSB300_GCR_DEVEN_HS:
		fusb300->gadget.speed = USB_SPEED_HIGH;
		break;
	case FUSB300_GCR_DEVEN_FS:
		fusb300->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		fusb300->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}
	printk(KERN_INFO "dev_mode = %d\n", (mode & FUSB300_GCR_DEVEN_MSK));
}
/* EP0 OUT data-stage handler: read the control FIFO into the queued
 * request, complete it, and mask the CX OUT interrupt.
 */
static void fusb300_ep0out(struct fusb300 *fusb300)
{
	struct fusb300_ep *ep = fusb300->ep[0];
	struct fusb300_request *req;
	u32 iger1;

	if (list_empty(&ep->queue)) {
		pr_err("%s : empty queue\n", __func__);
		return;
	}

	req = list_first_entry(&ep->queue, struct fusb300_request, queue);
	if (req->req.length)
		fusb300_rdcxf(ep->fusb300, req->req.buf, req->req.length);
	done(ep, req, 0);

	iger1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
	iger1 &= ~FUSB300_IGER1_CX_OUT_INT;
	iowrite32(iger1, fusb300->reg + FUSB300_OFFSET_IGER1);
}
/* EP0 IN data-stage handler: write the next chunk of the queued request
 * into the control FIFO, completing the request once the remainder fits
 * in a single packet. With nothing to send, finish the status stage.
 */
static void fusb300_ep0in(struct fusb300 *fusb300)
{
	struct fusb300_ep *ep = fusb300->ep[0];
	struct fusb300_request *req;

	if (list_empty(&ep->queue) || !fusb300->ep0_dir) {
		fusb300_set_cxdone(fusb300);
		return;
	}

	req = list_first_entry(&ep->queue, struct fusb300_request, queue);
	if (req->req.length)
		fusb300_wrcxf(ep, req);
	if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
		done(ep, req, 0);
}
/* Interrupt group 2-5 handlers: intentionally empty -- no group 2-5
 * events are acted upon by this driver.
 */
static void fusb300_grp2_handler(void)
{
}
static void fusb300_grp3_handler(void)
{
}
static void fusb300_grp4_handler(void)
{
}
static void fusb300_grp5_handler(void)
{
}
/*
 * Top-level interrupt handler.
 *
 * Snapshots the group-0 (per-endpoint) and group-1 (bus/EP0) interrupt
 * status registers, masks them with their enable registers, and services
 * every pending source in a fixed order: resets first, then link-state
 * and EP0 events, then the per-endpoint FIFO interrupts. The controller
 * lock is held throughout except where setup_packet dispatch drops it
 * around the gadget ->setup() callback.
 */
static irqreturn_t fusb300_irq(int irq, void *_fusb300)
{
	struct fusb300 *fusb300 = _fusb300;
	u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
	u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
	u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
	u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
	struct usb_ctrlrequest ctrl;
	u8 in;
	u32 reg;
	int i;

	spin_lock(&fusb300->lock);

	/* only act on sources that are actually enabled */
	int_grp1 &= int_grp1_en;
	int_grp0 &= int_grp0_en;

	/* --- bus resets --- */
	if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_WARM_RST_INT);
		printk(KERN_INFO"fusb300_warmreset\n");
		fusb300_reset();
	}

	if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_HOT_RST_INT);
		printk(KERN_INFO"fusb300_hotreset\n");
		fusb300_reset();
	}

	if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_USBRST_INT);
		fusb300_reset();
	}
	/* COMABT_INT has a highest priority */

	if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_CX_COMABT_INT);
		printk(KERN_INFO"fusb300_ep0abt\n");
	}

	if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_VBUS_CHG_INT);
		printk(KERN_INFO"fusb300_vbus_change\n");
	}

	/* --- U1/U2/U3 link power management events (ack only) --- */
	if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U3_EXIT_FAIL_INT);
	}

	if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U2_EXIT_FAIL_INT);
	}

	if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U1_EXIT_FAIL_INT);
	}

	if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U2_ENTRY_FAIL_INT);
	}

	if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U1_ENTRY_FAIL_INT);
	}

	if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U3_EXIT_INT);
		printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U2_EXIT_INT);
		printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U1_EXIT_INT);
		printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U3_ENTRY_INT);
		printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
		/* acknowledge U3 entry to the link layer */
		fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
				   FUSB300_SSCR1_GO_U3_DONE);
	}

	if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U2_ENTRY_INT);
		printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_U1_ENTRY_INT);
		printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_RESM_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_RESM_INT);
		printk(KERN_INFO "fusb300_resume\n");
	}

	if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_SUSP_INT);
		printk(KERN_INFO "fusb300_suspend\n");
	}

	if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_HS_LPM_INT);
		printk(KERN_INFO "fusb300_HS_LPM_INT\n");
	}

	if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
		fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
				  FUSB300_IGR1_DEV_MODE_CHG_INT);
		check_device_mode(fusb300);
	}

	/* --- EP0 (control transfer) events --- */
	if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
		fusb300_set_cxstall(fusb300);
		printk(KERN_INFO "fusb300_ep0fail\n");
	}

	if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
		printk(KERN_INFO "fusb300_ep0setup\n");
		/* setup_packet returns nonzero when the gadget driver must
		 * handle the request; the lock is dropped around ->setup()
		 */
		if (setup_packet(fusb300, &ctrl)) {
			spin_unlock(&fusb300->lock);
			if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
				fusb300_set_cxstall(fusb300);
			spin_lock(&fusb300->lock);
		}
	}

	if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
		printk(KERN_INFO "fusb300_cmdend\n");

	if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
		printk(KERN_INFO "fusb300_cxout\n");
		fusb300_ep0out(fusb300);
	}

	if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
		printk(KERN_INFO "fusb300_cxin\n");
		fusb300_ep0in(fusb300);
	}

	/* --- secondary interrupt groups (currently no-ops) --- */
	if (int_grp1 & FUSB300_IGR1_INTGRP5)
		fusb300_grp5_handler();

	if (int_grp1 & FUSB300_IGR1_INTGRP4)
		fusb300_grp4_handler();

	if (int_grp1 & FUSB300_IGR1_INTGRP3)
		fusb300_grp3_handler();

	if (int_grp1 & FUSB300_IGR1_INTGRP2)
		fusb300_grp2_handler();

	/* --- per-endpoint FIFO interrupts, dispatched by direction --- */
	if (int_grp0) {
		for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
			if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
				reg = ioread32(fusb300->reg +
					FUSB300_OFFSET_EPSET1(i));
				in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
				if (in)
					in_ep_fifo_handler(fusb300->ep[i]);
				else
					out_ep_fifo_handler(fusb300->ep[i]);
			}
		}
	}

	spin_unlock(&fusb300->lock);

	return IRQ_HANDLED;
}
/* Program the U2 inactivity timeout into the low byte of the TT register. */
static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
				   u32 time)
{
	u32 tt;

	tt = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
	tt = (tt & ~0xff) | FUSB300_SSCR2_U2TIMEOUT(time);
	iowrite32(tt, fusb300->reg + FUSB300_OFFSET_TT);
}
/* Program the U1 inactivity timeout into bits 15:8 of the TT register. */
static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
				   u32 time)
{
	u32 tt;

	tt = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
	tt = (tt & ~(0xff << 8)) | FUSB300_SSCR2_U1TIMEOUT(time);
	iowrite32(tt, fusb300->reg + FUSB300_OFFSET_TT);
}
/* One-time controller setup: AHB split transfers, high-speed LPM,
 * U1/U2 timeouts, and the group-1 interrupt enables.
 */
static void init_controller(struct fusb300 *fusb300)
{
	u32 reg;

	/* split on */
	reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
	reg |= FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);

	/* enable high-speed LPM */
	reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
	reg |= FUSB300_HSCR_HS_LPM_PERMIT;
	iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);

	/* set U1/U2 timers to their maximum */
	fusb300_set_u2_timeout(fusb300, 0xff);
	fusb300_set_u1_timeout(fusb300, 0xff);

	/* enable all grp1 interrupt */
	iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
}
/*------------------------------------------------------------------------*/
/* UDC ->udc_start callback: remember the bound gadget driver. */
static int fusb300_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct fusb300 *fusb300 = to_fusb300(g);

	/* hook up the driver */
	fusb300->driver = driver;

	return 0;
}
/* UDC ->udc_stop callback: re-initialise the controller and drop the
 * gadget driver reference.
 */
static int fusb300_udc_stop(struct usb_gadget *g)
{
	struct fusb300 *fusb300 = to_fusb300(g);

	init_controller(fusb300);
	fusb300->driver = NULL;

	return 0;
}
/*--------------------------------------------------------------------------*/
/* UDC ->pullup callback: software D+ pullup control is not implemented
 * for this controller; always report success.
 */
static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	return 0;
}
/* Gadget operations exposed to the UDC core. */
static const struct usb_gadget_ops fusb300_gadget_ops = {
	.pullup		= fusb300_udc_pullup,
	.udc_start	= fusb300_udc_start,
	.udc_stop	= fusb300_udc_stop,
};
/*
 * Platform driver ->remove: unregister the gadget, release both IRQs,
 * unmap the registers and free all driver memory.
 */
static void fusb300_remove(struct platform_device *pdev)
{
	struct fusb300 *fusb300 = platform_get_drvdata(pdev);
	int i;

	usb_del_gadget_udc(&fusb300->gadget);
	/*
	 * Free the (shared) IRQs before unmapping the registers: if the
	 * line is shared, fusb300_irq() may still be invoked and must not
	 * dereference an unmapped fusb300->reg.
	 */
	free_irq(platform_get_irq(pdev, 0), fusb300);
	free_irq(platform_get_irq(pdev, 1), fusb300);
	iounmap(fusb300->reg);

	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
	for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
		kfree(fusb300->ep[i]);
	kfree(fusb300);
}
/*
 * Platform driver ->probe: map the register window, allocate the driver
 * and endpoint structures, request both IRQ lines, initialise the
 * endpoint list and EP0, then register the gadget with the UDC core.
 *
 * Error handling unwinds in reverse order; note that the err_add_udc
 * path must NULL fusb300->ep0_req after freeing it, because control
 * falls through into clean_up which would otherwise free it a second
 * time (double free).
 */
static int fusb300_probe(struct platform_device *pdev)
{
	struct resource *res, *ires, *ires1;
	void __iomem *reg = NULL;
	struct fusb300 *fusb300 = NULL;
	struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP];
	int ret = 0;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		pr_err("platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires) {
		ret = -ENODEV;
		dev_err(&pdev->dev,
			"platform_get_resource IORESOURCE_IRQ error.\n");
		goto clean_up;
	}

	ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!ires1) {
		ret = -ENODEV;
		dev_err(&pdev->dev,
			"platform_get_resource IORESOURCE_IRQ 1 error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		pr_err("ioremap error.\n");
		goto clean_up;
	}

	/* initialize udc */
	fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL);
	if (fusb300 == NULL) {
		ret = -ENOMEM;
		goto clean_up;
	}

	for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
		_ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL);
		if (_ep[i] == NULL) {
			ret = -ENOMEM;
			goto clean_up;
		}
		fusb300->ep[i] = _ep[i];
	}

	spin_lock_init(&fusb300->lock);

	platform_set_drvdata(pdev, fusb300);

	fusb300->gadget.ops = &fusb300_gadget_ops;

	fusb300->gadget.max_speed = USB_SPEED_HIGH;
	fusb300->gadget.name = udc_name;
	fusb300->reg = reg;

	ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED,
			  udc_name, fusb300);
	if (ret < 0) {
		pr_err("request_irq error (%d)\n", ret);
		goto clean_up;
	}

	ret = request_irq(ires1->start, fusb300_irq,
			IRQF_SHARED, udc_name, fusb300);
	if (ret < 0) {
		pr_err("request_irq1 error (%d)\n", ret);
		goto err_request_irq1;
	}

	INIT_LIST_HEAD(&fusb300->gadget.ep_list);

	for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) {
		struct fusb300_ep *ep = fusb300->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list);
			list_add_tail(&fusb300->ep[i]->ep.ep_list,
				     &fusb300->gadget.ep_list);
		}
		ep->fusb300 = fusb300;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = fusb300_ep_name[i];
		ep->ep.ops = &fusb300_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);

		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
	fusb300->ep[0]->epnum = 0;
	fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
	INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);

	fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
				GFP_KERNEL);
	if (fusb300->ep0_req == NULL) {
		ret = -ENOMEM;
		goto err_alloc_request;
	}

	init_controller(fusb300);
	ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);

	return 0;

err_add_udc:
	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
	/* prevent clean_up below from freeing ep0_req a second time */
	fusb300->ep0_req = NULL;

err_alloc_request:
	free_irq(ires1->start, fusb300);

err_request_irq1:
	free_irq(ires->start, fusb300);

clean_up:
	if (fusb300) {
		if (fusb300->ep0_req)
			fusb300_free_request(&fusb300->ep[0]->ep,
				fusb300->ep0_req);
		for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
			kfree(fusb300->ep[i]);
		kfree(fusb300);
	}
	if (reg)
		iounmap(reg);

	return ret;
}
/* Platform driver definition; probe is supplied separately via
 * module_platform_driver_probe() below.
 */
static struct platform_driver fusb300_driver = {
	.remove_new =	fusb300_remove,
	.driver		= {
		.name =	udc_name,
	},
};

module_platform_driver_probe(fusb300_driver, fusb300_probe);
| linux-master | drivers/usb/gadget/udc/fusb300_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Xilinx USB peripheral controller driver
*
* Copyright (C) 2004 by Thomas Rathbone
* Copyright (C) 2005 by HP Labs
* Copyright (C) 2005 by David Brownell
* Copyright (C) 2010 - 2014 Xilinx, Inc.
*
* Some parts of this driver code is based on the driver for at91-series
* USB peripheral controller (at91_udc.c).
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
/* Register offsets for the USB device.*/
#define XUSB_EP0_CONFIG_OFFSET 0x0000 /* EP0 Config Reg Offset */
#define XUSB_SETUP_PKT_ADDR_OFFSET 0x0080 /* Setup Packet Address */
#define XUSB_ADDRESS_OFFSET 0x0100 /* Address Register */
#define XUSB_CONTROL_OFFSET 0x0104 /* Control Register */
#define XUSB_STATUS_OFFSET 0x0108 /* Status Register */
#define XUSB_FRAMENUM_OFFSET 0x010C /* Frame Number Register */
#define XUSB_IER_OFFSET 0x0110 /* Interrupt Enable Register */
#define XUSB_BUFFREADY_OFFSET 0x0114 /* Buffer Ready Register */
#define XUSB_TESTMODE_OFFSET 0x0118 /* Test Mode Register */
#define XUSB_DMA_RESET_OFFSET 0x0200 /* DMA Soft Reset Register */
#define XUSB_DMA_CONTROL_OFFSET 0x0204 /* DMA Control Register */
#define XUSB_DMA_DSAR_ADDR_OFFSET 0x0208 /* DMA source Address Reg */
#define XUSB_DMA_DDAR_ADDR_OFFSET 0x020C /* DMA destination Addr Reg */
#define XUSB_DMA_LENGTH_OFFSET 0x0210 /* DMA Length Register */
#define XUSB_DMA_STATUS_OFFSET 0x0214 /* DMA Status Register */
/* Endpoint Configuration Space offsets */
#define XUSB_EP_CFGSTATUS_OFFSET 0x00 /* Endpoint Config Status */
#define XUSB_EP_BUF0COUNT_OFFSET 0x08 /* Buffer 0 Count */
#define XUSB_EP_BUF1COUNT_OFFSET 0x0C /* Buffer 1 Count */
#define XUSB_CONTROL_USB_READY_MASK 0x80000000 /* USB ready Mask */
#define XUSB_CONTROL_USB_RMTWAKE_MASK 0x40000000 /* Remote wake up mask */
/* Interrupt register related masks.*/
#define XUSB_STATUS_GLOBAL_INTR_MASK 0x80000000 /* Global Intr Enable */
#define XUSB_STATUS_DMADONE_MASK 0x04000000 /* DMA done Mask */
#define XUSB_STATUS_DMAERR_MASK 0x02000000 /* DMA Error Mask */
#define XUSB_STATUS_DMABUSY_MASK 0x80000000 /* DMA Error Mask */
#define XUSB_STATUS_RESUME_MASK 0x01000000 /* USB Resume Mask */
#define XUSB_STATUS_RESET_MASK 0x00800000 /* USB Reset Mask */
#define XUSB_STATUS_SUSPEND_MASK 0x00400000 /* USB Suspend Mask */
#define XUSB_STATUS_DISCONNECT_MASK 0x00200000 /* USB Disconnect Mask */
#define XUSB_STATUS_FIFO_BUFF_RDY_MASK 0x00100000 /* FIFO Buff Ready Mask */
#define XUSB_STATUS_FIFO_BUFF_FREE_MASK 0x00080000 /* FIFO Buff Free Mask */
#define XUSB_STATUS_SETUP_PACKET_MASK 0x00040000 /* Setup packet received */
#define XUSB_STATUS_EP1_BUFF2_COMP_MASK 0x00000200 /* EP 1 Buff 2 Processed */
#define XUSB_STATUS_EP1_BUFF1_COMP_MASK 0x00000002 /* EP 1 Buff 1 Processed */
#define XUSB_STATUS_EP0_BUFF2_COMP_MASK 0x00000100 /* EP 0 Buff 2 Processed */
#define XUSB_STATUS_EP0_BUFF1_COMP_MASK 0x00000001 /* EP 0 Buff 1 Processed */
#define XUSB_STATUS_HIGH_SPEED_MASK 0x00010000 /* USB Speed Mask */
/* Suspend,Reset,Suspend and Disconnect Mask */
#define XUSB_STATUS_INTR_EVENT_MASK 0x01E00000
/* Buffers completion Mask */
#define XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK 0x0000FEFF
/* Mask for buffer 0 and buffer 1 completion for all Endpoints */
#define XUSB_STATUS_INTR_BUFF_COMP_SHIFT_MASK 0x00000101
#define XUSB_STATUS_EP_BUFF2_SHIFT 8 /* EP buffer offset */
/* Endpoint Configuration Status Register */
#define XUSB_EP_CFG_VALID_MASK 0x80000000 /* Endpoint Valid bit */
#define XUSB_EP_CFG_STALL_MASK 0x40000000 /* Endpoint Stall bit */
#define XUSB_EP_CFG_DATA_TOGGLE_MASK 0x08000000 /* Endpoint Data toggle */
/* USB device specific global configuration constants.*/
#define XUSB_MAX_ENDPOINTS 8 /* Maximum End Points */
#define XUSB_EP_NUMBER_ZERO 0 /* End point Zero */
/* DPRAM is the source address for DMA transfer */
#define XUSB_DMA_READ_FROM_DPRAM 0x80000000
#define XUSB_DMA_DMASR_BUSY 0x80000000 /* DMA busy */
#define XUSB_DMA_DMASR_ERROR 0x40000000 /* DMA Error */
/*
* When this bit is set, the DMA buffer ready bit is set by hardware upon
* DMA transfer completion.
*/
#define XUSB_DMA_BRR_CTRL 0x40000000 /* DMA bufready ctrl bit */
/* Phase States */
#define SETUP_PHASE 0x0000 /* Setup Phase */
#define DATA_PHASE 0x0001 /* Data Phase */
#define STATUS_PHASE 0x0002 /* Status Phase */
#define EP0_MAX_PACKET 64 /* Endpoint 0 maximum packet length */
#define STATUSBUFF_SIZE 2 /* Buffer size for GET_STATUS command */
#define EPNAME_SIZE 4 /* Buffer size for endpoint name */
/* container_of helper macros */
#define to_udc(g) container_of((g), struct xusb_udc, gadget)
#define to_xusb_ep(ep) container_of((ep), struct xusb_ep, ep_usb)
#define to_xusb_req(req) container_of((req), struct xusb_req, usb_req)
/**
 * struct xusb_req - Xilinx USB device request structure
 * @usb_req: Linux usb request structure
 * @queue: usb device request queue
 * @ep: pointer to the xusb_ep this request is queued on
 */
struct xusb_req {
	struct usb_request usb_req;
	struct list_head queue;
	struct xusb_ep *ep;
};
/**
 * struct xusb_ep - USB end point structure.
 * @ep_usb: usb endpoint instance
 * @queue: endpoint message queue
 * @udc: xilinx usb peripheral driver instance pointer
 * @desc: pointer to the usb endpoint descriptor
 * @rambase: the endpoint buffer address
 * @offset: the endpoint register offset value
 * @name: name of the endpoint
 * @epnumber: endpoint number
 * @maxpacket: maximum packet size the endpoint can store
 * @buffer0count: the size of the packet received in the first buffer
 * @buffer1count: the size of the packet received in the second buffer
 * @curbufnum: current buffer of endpoint that will be processed next
 * @buffer0ready: the busy state of first buffer
 * @buffer1ready: the busy state of second buffer
 * @is_in: endpoint direction (IN or OUT)
 * @is_iso: endpoint type(isochronous or non isochronous)
 */
struct xusb_ep {
	struct usb_ep ep_usb;
	struct list_head queue;
	struct xusb_udc *udc;
	const struct usb_endpoint_descriptor *desc;
	u32  rambase;
	u32  offset;
	char name[4];
	u16  epnumber;
	u16  maxpacket;
	u16  buffer0count;
	u16  buffer1count;
	u8   curbufnum;
	bool buffer0ready;
	bool buffer1ready;
	bool is_in;
	bool is_iso;
};
/**
 * struct xusb_udc - USB peripheral driver structure
 * @gadget: USB gadget driver instance
 * @ep: an array of endpoint structures (one per hardware endpoint)
 * @driver: pointer to the usb gadget driver instance
 * @setup: usb_ctrlrequest structure for control requests
 * @req: pointer to dummy request for get status command
 * @dev: pointer to device structure in gadget
 * @usb_state: device in suspended state or not
 * @remote_wkp: remote wakeup enabled by host
 * @setupseqtx: tx status
 * @setupseqrx: rx status
 * @addr: the usb device base address
 * @lock: instance of spinlock
 * @dma_enabled: flag indicating whether the dma is included in the system
 * @clk: pointer to struct clk
 * @read_fn: function pointer to read device registers
 * @write_fn: function pointer to write to device registers
 */
struct xusb_udc {
	struct usb_gadget gadget;
	struct xusb_ep ep[8];
	struct usb_gadget_driver *driver;
	struct usb_ctrlrequest setup;
	struct xusb_req *req;
	struct device *dev;
	u32 usb_state;
	u32 remote_wkp;
	u32 setupseqtx;
	u32 setupseqrx;
	void __iomem *addr;
	spinlock_t lock;
	bool dma_enabled;
	struct clk *clk;

	/* accessors chosen at probe time for little/big endian registers */
	unsigned int (*read_fn)(void __iomem *reg);
	void (*write_fn)(void __iomem *, u32, u32);
};
/* Endpoint buffer start addresses in the core (one entry per endpoint,
 * indexed by endpoint number).
 */
static u32 rambase[8] = { 0x22, 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500,
			  0x1600 };

static const char driver_name[] = "xilinx-udc";
static const char ep0name[] = "ep0";

/* Control endpoint configuration.*/
static const struct usb_endpoint_descriptor config_bulk_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(EP0_MAX_PACKET),
};
/*
 * Register accessors. Note the asymmetry: the write helpers take the
 * register base plus an offset, while the read helpers take a fully
 * resolved address.
 */

/**
 * xudc_write32 - little endian write to device registers
 * @addr: base addr of device registers
 * @offset: register offset
 * @val: data to be written
 */
static void xudc_write32(void __iomem *addr, u32 offset, u32 val)
{
	iowrite32(val, addr + offset);
}

/**
 * xudc_read32 - little endian read from device registers
 * @addr: addr of device register
 * Return: value at addr
 */
static unsigned int xudc_read32(void __iomem *addr)
{
	return ioread32(addr);
}

/**
 * xudc_write32_be - big endian write to device registers
 * @addr: base addr of device registers
 * @offset: register offset
 * @val: data to be written
 */
static void xudc_write32_be(void __iomem *addr, u32 offset, u32 val)
{
	iowrite32be(val, addr + offset);
}

/**
 * xudc_read32_be - big endian read from device registers
 * @addr: addr of device register
 * Return: value at addr
 */
static unsigned int xudc_read32_be(void __iomem *addr)
{
	return ioread32be(addr);
}
/**
 * xudc_wrstatus - Sets up the usb device status stages.
 * @udc: pointer to the usb device controller structure.
 *
 * Arms EP0 for a zero-length status packet: forces the data toggle,
 * zeroes the buffer-0 count and sets the EP0 buffer-ready bit.
 */
static void xudc_wrstatus(struct xusb_udc *udc)
{
	struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
	u32 epcfgreg;

	/* force the data toggle for the status stage */
	epcfgreg = udc->read_fn(udc->addr + ep0->offset)|
				XUSB_EP_CFG_DATA_TOGGLE_MASK;
	udc->write_fn(udc->addr, ep0->offset, epcfgreg);
	/* zero-length packet */
	udc->write_fn(udc->addr, ep0->offset + XUSB_EP_BUF0COUNT_OFFSET, 0);
	/* mark EP0 buffer 0 ready (bit 0 of the buffer-ready register) */
	udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
}
/**
 * xudc_epconfig - Configures the given endpoint.
 * @ep: pointer to the usb device endpoint structure.
 * @udc: pointer to the usb peripheral controller structure.
 *
 * This function configures a specific endpoint with the given configuration
 * data.
 */
static void xudc_epconfig(struct xusb_ep *ep, struct xusb_udc *udc)
{
	u32 epcfgreg;

	/*
	 * Configure the end point direction, type, Max Packet Size and the
	 * EP buffer location.
	 * Config register layout: bit 29 = direction, bit 28 = iso,
	 * bits 25:15 = max packet size, low bits = buffer base in DPRAM.
	 */
	epcfgreg = ((ep->is_in << 29) | (ep->is_iso << 28) |
		   (ep->ep_usb.maxpacket << 15) | (ep->rambase));
	udc->write_fn(udc->addr, ep->offset, epcfgreg);

	/* Set the Buffer count and the Buffer ready bits.*/
	udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF0COUNT_OFFSET,
		      ep->buffer0count);
	udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF1COUNT_OFFSET,
		      ep->buffer1count);
	if (ep->buffer0ready)
		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
			      1 << ep->epnumber);
	if (ep->buffer1ready)
		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
			      1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT));
}
/**
 * xudc_start_dma - Starts DMA transfer.
 * @ep: pointer to the usb device endpoint structure.
 * @src: DMA source address.
 * @dst: DMA destination address.
 * @length: number of bytes to transfer.
 *
 * Return: 0 on success, error code on failure
 *
 * This function starts DMA transfer by writing to DMA source,
 * destination and length registers, then busy-waits (up to ~500us)
 * for completion. Cannot sleep: also runs in interrupt context.
 */
static int xudc_start_dma(struct xusb_ep *ep, dma_addr_t src,
			  dma_addr_t dst, u32 length)
{
	struct xusb_udc *udc = ep->udc;
	int rc = 0;
	u32 timeout = 500;
	u32 reg;

	/*
	 * Set the addresses in the DMA source and
	 * destination registers and then set the length
	 * into the DMA length register.
	 */
	udc->write_fn(udc->addr, XUSB_DMA_DSAR_ADDR_OFFSET, src);
	udc->write_fn(udc->addr, XUSB_DMA_DDAR_ADDR_OFFSET, dst);
	udc->write_fn(udc->addr, XUSB_DMA_LENGTH_OFFSET, length);

	/*
	 * Wait till DMA transaction is complete and
	 * check whether the DMA transaction was
	 * successful.
	 */
	do {
		reg = udc->read_fn(udc->addr + XUSB_DMA_STATUS_OFFSET);
		if (!(reg &  XUSB_DMA_DMASR_BUSY))
			break;

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout) {
			dev_err(udc->dev, "DMA timeout\n");
			return -ETIMEDOUT;
		}
		udelay(1);
	} while (1);

	/* busy bit cleared -- check the error flag */
	if ((udc->read_fn(udc->addr + XUSB_DMA_STATUS_OFFSET) &
			  XUSB_DMA_DMASR_ERROR) == XUSB_DMA_DMASR_ERROR){
		dev_err(udc->dev, "DMA Error\n");
		rc = -EINVAL;
	}

	return rc;
}
/**
 * xudc_dma_send - Sends IN data using DMA.
 * @ep: pointer to the usb device endpoint structure.
 * @req: pointer to the usb request structure.
 * @buffer: pointer to data to be sent (unused here; the DMA address in
 *	    @req is used instead).
 * @length: number of bytes to send.
 *
 * Return: 0 on success, -EAGAIN if no buffer is free and error
 *	   code on failure.
 *
 * This function sends data using DMA. The destination ping-pong buffer
 * is chosen by @ep->curbufnum, and the chosen buffer is marked busy
 * before the transfer is started.
 */
static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req,
			 u8 *buffer, u32 length)
{
	u32 *eprambase;
	dma_addr_t src;
	dma_addr_t dst;
	struct xusb_udc *udc = ep->udc;

	src = req->usb_req.dma + req->usb_req.actual;
	if (req->usb_req.length)
		dma_sync_single_for_device(udc->dev, src,
					   length, DMA_TO_DEVICE);
	if (!ep->curbufnum && !ep->buffer0ready) {
		/* Get the Buffer address and copy the transmit data.*/
		eprambase = (u32 __force *)(udc->addr + ep->rambase);
		dst = virt_to_phys(eprambase);
		udc->write_fn(udc->addr, ep->offset +
			      XUSB_EP_BUF0COUNT_OFFSET, length);
		udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
			      XUSB_DMA_BRR_CTRL | (1 << ep->epnumber));
		ep->buffer0ready = 1;
		ep->curbufnum = 1;
	} else if (ep->curbufnum && !ep->buffer1ready) {
		/* Get the Buffer address and copy the transmit data.*/
		eprambase = (u32 __force *)(udc->addr + ep->rambase +
			     ep->ep_usb.maxpacket);
		dst = virt_to_phys(eprambase);
		udc->write_fn(udc->addr, ep->offset +
			      XUSB_EP_BUF1COUNT_OFFSET, length);
		udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
			      XUSB_DMA_BRR_CTRL | (1 << (ep->epnumber +
			      XUSB_STATUS_EP_BUFF2_SHIFT)));
		ep->buffer1ready = 1;
		ep->curbufnum = 0;
	} else {
		/* None of ping pong buffers are ready currently .*/
		return -EAGAIN;
	}

	return xudc_start_dma(ep, src, dst, length);
}
/**
 * xudc_dma_receive - Receives OUT data using DMA.
 * @ep: pointer to the usb device endpoint structure.
 * @req: pointer to the usb request structure.
 * @buffer: pointer to storage buffer of received data (unused here; the
 *	    DMA address in @req is used instead).
 * @length: number of bytes to receive.
 *
 * Return: 0 on success, -EAGAIN if no buffer is free and error
 *	   code on failure.
 *
 * This function receives data using DMA. The source ping-pong buffer is
 * chosen by @ep->curbufnum, and the chosen buffer is marked busy before
 * the transfer is started.
 */
static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req,
			    u8 *buffer, u32 length)
{
	u32 *eprambase;
	dma_addr_t src;
	dma_addr_t dst;
	struct xusb_udc *udc = ep->udc;

	dst = req->usb_req.dma + req->usb_req.actual;
	if (!ep->curbufnum && !ep->buffer0ready) {
		/* Get the Buffer address and copy the transmit data */
		eprambase = (u32 __force *)(udc->addr + ep->rambase);
		src = virt_to_phys(eprambase);
		udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
			      XUSB_DMA_BRR_CTRL | XUSB_DMA_READ_FROM_DPRAM |
			      (1 << ep->epnumber));
		ep->buffer0ready = 1;
		ep->curbufnum = 1;
	} else if (ep->curbufnum && !ep->buffer1ready) {
		/* Get the Buffer address and copy the transmit data */
		eprambase = (u32 __force *)(udc->addr +
			     ep->rambase + ep->ep_usb.maxpacket);
		src = virt_to_phys(eprambase);
		udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
			      XUSB_DMA_BRR_CTRL | XUSB_DMA_READ_FROM_DPRAM |
			      (1 << (ep->epnumber +
			      XUSB_STATUS_EP_BUFF2_SHIFT)));
		ep->buffer1ready = 1;
		ep->curbufnum = 0;
	} else {
		/* None of the ping-pong buffers are ready currently */
		return -EAGAIN;
	}

	return xudc_start_dma(ep, src, dst, length);
}
/**
 * xudc_eptxrx - Transmits or receives data to or from an endpoint.
 * @ep: pointer to the usb endpoint configuration structure.
 * @req: pointer to the usb request structure.
 * @bufferptr: pointer to the data buffer (source for IN, destination
 *             for OUT transfers).
 * @bufferlen: the number of data bytes to transfer.
 *
 * Return: 0 on success, -EAGAIN if no buffer is free, or an error code
 * from the DMA path on failure.
 *
 * This function copies the transmit/receive data to/from the end point buffer
 * and enables the buffer for transmission/reception. When DMA is enabled
 * the whole transfer is delegated to xudc_dma_send()/xudc_dma_receive().
 */
static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
         u8 *bufferptr, u32 bufferlen)
{
 u32 *eprambase;
 u32 bytestosend;
 int rc = 0;
 struct xusb_udc *udc = ep->udc;
 bytestosend = bufferlen;
 /* DMA path: hand off to the engine and return its status. */
 if (udc->dma_enabled) {
  if (ep->is_in)
   rc = xudc_dma_send(ep, req, bufferptr, bufferlen);
  else
   rc = xudc_dma_receive(ep, req, bufferptr, bufferlen);
  return rc;
 }
 /* Put the transmit buffer into the correct ping-pong buffer.*/
 if (!ep->curbufnum && !ep->buffer0ready) {
  /* Get the Buffer address and copy the transmit data.*/
  eprambase = (u32 __force *)(udc->addr + ep->rambase);
  if (ep->is_in) {
   /*
    * NOTE(review): plain memcpy to device RAM; alignment
    * assumptions may not hold on all architectures —
    * confirm whether memcpy_toio/fromio is required here.
    */
   memcpy(eprambase, bufferptr, bytestosend);
   udc->write_fn(udc->addr, ep->offset +
          XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
  } else {
   memcpy(bufferptr, eprambase, bytestosend);
  }
  /*
   * Enable the buffer for transmission.
   */
  udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
         1 << ep->epnumber);
  ep->buffer0ready = 1;
  ep->curbufnum = 1;
 } else if (ep->curbufnum && !ep->buffer1ready) {
  /* Get the Buffer address and copy the transmit data.*/
  eprambase = (u32 __force *)(udc->addr + ep->rambase +
    ep->ep_usb.maxpacket);
  if (ep->is_in) {
   memcpy(eprambase, bufferptr, bytestosend);
   udc->write_fn(udc->addr, ep->offset +
          XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
  } else {
   memcpy(bufferptr, eprambase, bytestosend);
  }
  /*
   * Enable the buffer for transmission.
   */
  udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
   1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT));
  ep->buffer1ready = 1;
  ep->curbufnum = 0;
 } else {
  /* None of the ping-pong buffers are ready currently */
  return -EAGAIN;
 }
 return rc;
}
/**
 * xudc_done - Executes the endpoint data transfer completion tasks.
 * @ep: pointer to the usb device endpoint structure.
 * @req: pointer to the usb request structure.
 * @status: status of the data transfer.
 *
 * Deletes the request from the endpoint queue, records the completion
 * status, unmaps the DMA buffer if one was mapped, and invokes the
 * gadget's completion callback. Must be called with udc->lock held;
 * the lock is dropped around the completion callback.
 */
static void xudc_done(struct xusb_ep *ep, struct xusb_req *req, int status)
{
 struct xusb_udc *udc = ep->udc;
 list_del_init(&req->queue);
 /* Preserve a status set earlier (e.g. by a dequeue); only
  * overwrite the in-progress marker.
  */
 if (req->usb_req.status == -EINPROGRESS)
  req->usb_req.status = status;
 else
  status = req->usb_req.status;
 if (status && status != -ESHUTDOWN)
  dev_dbg(udc->dev, "%s done %p, status %d\n",
   ep->ep_usb.name, req, status);
 /* unmap request if DMA is present*/
 if (udc->dma_enabled && ep->epnumber && req->usb_req.length)
  usb_gadget_unmap_request(&udc->gadget, &req->usb_req,
      ep->is_in);
 if (req->usb_req.complete) {
  /* Drop the lock: the callback may re-queue on this endpoint. */
  spin_unlock(&udc->lock);
  req->usb_req.complete(&ep->ep_usb, &req->usb_req);
  spin_lock(&udc->lock);
 }
}
/**
 * xudc_read_fifo - Reads the data from the given endpoint buffer.
 * @ep: pointer to the usb device endpoint structure.
 * @req: pointer to the usb request structure.
 *
 * Return: 0 if the request is completed (or dequeued on error) and
 * -EAGAIN if not completed.
 *
 * Pulls OUT packet data from the endpoint buffer. If both ping-pong
 * buffers hold data, both packets are drained in one call.
 */
static int xudc_read_fifo(struct xusb_ep *ep, struct xusb_req *req)
{
 u8 *buf;
 u32 is_short, count, bufferspace;
 u8 bufoffset;
 u8 two_pkts = 0;
 int ret;
 int retval = -EAGAIN;
 struct xusb_udc *udc = ep->udc;
 /* Both buffers still owned by hardware: nothing to read yet. */
 if (ep->buffer0ready && ep->buffer1ready) {
  dev_dbg(udc->dev, "Packet NOT ready!\n");
  return retval;
 }
top:
 /* Select the byte-count register of the buffer being drained. */
 if (ep->curbufnum)
  bufoffset = XUSB_EP_BUF1COUNT_OFFSET;
 else
  bufoffset = XUSB_EP_BUF0COUNT_OFFSET;
 count = udc->read_fn(udc->addr + ep->offset + bufoffset);
 /* Neither buffer busy means two packets are pending: loop once. */
 if (!ep->buffer0ready && !ep->buffer1ready)
  two_pkts = 1;
 buf = req->usb_req.buf + req->usb_req.actual;
 prefetchw(buf);
 bufferspace = req->usb_req.length - req->usb_req.actual;
 is_short = count < ep->ep_usb.maxpacket;
 if (unlikely(!bufferspace)) {
  /*
   * This happens when the driver's buffer
   * is smaller than what the host sent.
   * discard the extra data.
   */
  if (req->usb_req.status != -EOVERFLOW)
   dev_dbg(udc->dev, "%s overflow %d\n",
    ep->ep_usb.name, count);
  req->usb_req.status = -EOVERFLOW;
  xudc_done(ep, req, -EOVERFLOW);
  return 0;
 }
 ret = xudc_eptxrx(ep, req, buf, count);
 switch (ret) {
 case 0:
  req->usb_req.actual += min(count, bufferspace);
  dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n",
   ep->ep_usb.name, count, is_short ? "/S" : "", req,
   req->usb_req.actual, req->usb_req.length);
  /* Completion */
  if ((req->usb_req.actual == req->usb_req.length) || is_short) {
   if (udc->dma_enabled && req->usb_req.length)
    dma_sync_single_for_cpu(udc->dev,
       req->usb_req.dma,
       req->usb_req.actual,
       DMA_FROM_DEVICE);
   xudc_done(ep, req, 0);
   return 0;
  }
  if (two_pkts) {
   two_pkts = 0;
   goto top;
  }
  break;
 case -EAGAIN:
  dev_dbg(udc->dev, "receive busy\n");
  break;
 case -EINVAL:
 case -ETIMEDOUT:
  /* DMA error, dequeue the request */
  xudc_done(ep, req, -ECONNRESET);
  retval = 0;
  break;
 }
 return retval;
}
/**
 * xudc_write_fifo - Writes data into the given endpoint buffer.
 * @ep: pointer to the usb device endpoint structure.
 * @req: pointer to the usb request structure.
 *
 * Return: 0 if the request is completed (or dequeued on error) and
 * -EAGAIN if not completed.
 *
 * Loads the endpoint buffer for one IN packet (at most wMaxPacketSize
 * bytes per call).
 */
static int xudc_write_fifo(struct xusb_ep *ep, struct xusb_req *req)
{
 u32 max;
 u32 length;
 int ret;
 int retval = -EAGAIN;
 struct xusb_udc *udc = ep->udc;
 int is_last, is_short = 0;
 u8 *buf;
 max = le16_to_cpu(ep->desc->wMaxPacketSize);
 buf = req->usb_req.buf + req->usb_req.actual;
 prefetch(buf);
 /* Clamp to one max-packet's worth of remaining data. */
 length = req->usb_req.length - req->usb_req.actual;
 length = min(length, max);
 ret = xudc_eptxrx(ep, req, buf, length);
 switch (ret) {
 case 0:
  req->usb_req.actual += length;
  if (unlikely(length != max)) {
   /* A short packet always terminates the transfer. */
   is_last = is_short = 1;
  } else {
   /*
    * A full packet ends the transfer only when all data
    * is sent and no zero-length packet was requested.
    */
   if (likely(req->usb_req.length !=
    req->usb_req.actual) || req->usb_req.zero)
    is_last = 0;
   else
    is_last = 1;
  }
  dev_dbg(udc->dev, "%s: wrote %s %d bytes%s%s %d left %p\n",
   __func__, ep->ep_usb.name, length, is_last ? "/L" : "",
   is_short ? "/S" : "",
   req->usb_req.length - req->usb_req.actual, req);
  /* completion */
  if (is_last) {
   xudc_done(ep, req, 0);
   retval = 0;
  }
  break;
 case -EAGAIN:
  dev_dbg(udc->dev, "Send busy\n");
  break;
 case -EINVAL:
 case -ETIMEDOUT:
  /* DMA error, dequeue the request */
  xudc_done(ep, req, -ECONNRESET);
  retval = 0;
  break;
 }
 return retval;
}
/**
 * xudc_nuke - Completes every request still queued on an endpoint.
 * @ep: pointer to the usb device endpoint structure.
 * @status: completion status handed to each request.
 *
 * Caller must hold udc->lock (required by xudc_done()).
 */
static void xudc_nuke(struct xusb_ep *ep, int status)
{
 struct xusb_req *req;

 while ((req = list_first_entry_or_null(&ep->queue,
     struct xusb_req, queue)))
  xudc_done(ep, req, status);
}
/**
 * xudc_ep_set_halt - Stalls/unstalls the given endpoint.
 * @_ep: pointer to the usb device endpoint structure.
 * @value: non-zero to stall, zero to unstall.
 *
 * Return: 0 for success and error value on failure
 */
static int xudc_ep_set_halt(struct usb_ep *_ep, int value)
{
 struct xusb_ep *ep = to_xusb_ep(_ep);
 struct xusb_udc *udc;
 unsigned long flags;
 u32 epcfgreg;
 if (!_ep || (!ep->desc && ep->epnumber)) {
  pr_debug("%s: bad ep or descriptor\n", __func__);
  return -EINVAL;
 }
 udc = ep->udc;
 /* An IN endpoint with pending requests must drain before halting. */
 if (ep->is_in && (!list_empty(&ep->queue)) && value) {
  dev_dbg(udc->dev, "requests pending can't halt\n");
  return -EAGAIN;
 }
 if (ep->buffer0ready || ep->buffer1ready) {
  dev_dbg(udc->dev, "HW buffers busy can't halt\n");
  return -EAGAIN;
 }
 spin_lock_irqsave(&udc->lock, flags);
 if (value) {
  /* Stall the device.*/
  epcfgreg = udc->read_fn(udc->addr + ep->offset);
  epcfgreg |= XUSB_EP_CFG_STALL_MASK;
  udc->write_fn(udc->addr, ep->offset, epcfgreg);
 } else {
  /* Unstall the device.*/
  epcfgreg = udc->read_fn(udc->addr + ep->offset);
  epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
  udc->write_fn(udc->addr, ep->offset, epcfgreg);
  if (ep->epnumber) {
   /* Reset the toggle bit.*/
   /* ep->udc->addr is the same base as udc->addr above. */
   epcfgreg = udc->read_fn(ep->udc->addr + ep->offset);
   epcfgreg &= ~XUSB_EP_CFG_DATA_TOGGLE_MASK;
   udc->write_fn(udc->addr, ep->offset, epcfgreg);
  }
 }
 spin_unlock_irqrestore(&udc->lock, flags);
 return 0;
}
/**
 * __xudc_ep_enable - Enables the given endpoint.
 * @ep: pointer to the xusb endpoint structure.
 * @desc: pointer to usb endpoint descriptor.
 *
 * Return: 0 for success and error value on failure
 *
 * Caller must hold udc->lock. Validates the descriptor's transfer type
 * and max packet size, programs the endpoint configuration registers,
 * enables buffer-completion interrupts, and primes both ping-pong
 * buffers for OUT endpoints.
 */
static int __xudc_ep_enable(struct xusb_ep *ep,
       const struct usb_endpoint_descriptor *desc)
{
 struct xusb_udc *udc = ep->udc;
 u32 tmp;
 u32 epcfg;
 u32 ier;
 u16 maxpacket;
 ep->is_in = ((desc->bEndpointAddress & USB_DIR_IN) != 0);
 /* Bit 3...0:endpoint number */
 ep->epnumber = (desc->bEndpointAddress & 0x0f);
 ep->desc = desc;
 ep->ep_usb.desc = desc;
 tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 ep->ep_usb.maxpacket = maxpacket = le16_to_cpu(desc->wMaxPacketSize);
 switch (tmp) {
 case USB_ENDPOINT_XFER_CONTROL:
  dev_dbg(udc->dev, "only one control endpoint\n");
  /* NON- ISO */
  ep->is_iso = 0;
  return -EINVAL;
 case USB_ENDPOINT_XFER_INT:
  /* NON- ISO */
  ep->is_iso = 0;
  if (maxpacket > 64) {
   dev_dbg(udc->dev, "bogus maxpacket %d\n", maxpacket);
   return -EINVAL;
  }
  break;
 case USB_ENDPOINT_XFER_BULK:
  /* NON- ISO */
  ep->is_iso = 0;
  /* Bulk requires a power-of-two packet size between 8 and 512. */
  if (!(is_power_of_2(maxpacket) && maxpacket >= 8 &&
    maxpacket <= 512)) {
   dev_dbg(udc->dev, "bogus maxpacket %d\n", maxpacket);
   return -EINVAL;
  }
  break;
 case USB_ENDPOINT_XFER_ISOC:
  /* ISO */
  ep->is_iso = 1;
  break;
 }
 /* Start from a clean ping-pong state. */
 ep->buffer0ready = false;
 ep->buffer1ready = false;
 ep->curbufnum = 0;
 ep->rambase = rambase[ep->epnumber];
 xudc_epconfig(ep, udc);
 dev_dbg(udc->dev, "Enable Endpoint %d max pkt is %d\n",
  ep->epnumber, maxpacket);
 /* Enable the End point.*/
 epcfg = udc->read_fn(udc->addr + ep->offset);
 epcfg |= XUSB_EP_CFG_VALID_MASK;
 udc->write_fn(udc->addr, ep->offset, epcfg);
 if (ep->epnumber)
  ep->rambase <<= 2;
 /* Enable buffer completion interrupts for endpoint */
 ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
 ier |= (XUSB_STATUS_INTR_BUFF_COMP_SHIFT_MASK << ep->epnumber);
 udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);
 /* for OUT endpoint set buffers ready to receive */
 if (ep->epnumber && !ep->is_in) {
  udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
         1 << ep->epnumber);
  ep->buffer0ready = true;
  udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
         (1 << (ep->epnumber +
         XUSB_STATUS_EP_BUFF2_SHIFT)));
  ep->buffer1ready = true;
 }
 return 0;
}
/**
 * xudc_ep_enable - Enables the given endpoint.
 * @_ep: pointer to the usb endpoint structure.
 * @desc: pointer to usb endpoint descriptor.
 *
 * Return: 0 for success and error value on failure
 *
 * Locked wrapper around __xudc_ep_enable() with argument and
 * device-state validation.
 */
static int xudc_ep_enable(struct usb_ep *_ep,
     const struct usb_endpoint_descriptor *desc)
{
 struct xusb_ep *ep;
 struct xusb_udc *udc;
 unsigned long flags;
 int ret;
 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
  pr_debug("%s: bad ep or descriptor\n", __func__);
  return -EINVAL;
 }
 ep = to_xusb_ep(_ep);
 udc = ep->udc;
 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
  dev_dbg(udc->dev, "bogus device state\n");
  return -ESHUTDOWN;
 }
 spin_lock_irqsave(&udc->lock, flags);
 ret = __xudc_ep_enable(ep, desc);
 spin_unlock_irqrestore(&udc->lock, flags);
 return ret;
}
/**
 * xudc_ep_disable - Disables the given endpoint.
 * @_ep: pointer to the usb endpoint structure.
 *
 * Return: 0 for success and error value on failure
 *
 * Completes all queued requests with -ESHUTDOWN, clears the cached
 * descriptor, and clears the endpoint's VALID bit in hardware.
 */
static int xudc_ep_disable(struct usb_ep *_ep)
{
 struct xusb_ep *ep;
 unsigned long flags;
 u32 epcfg;
 struct xusb_udc *udc;
 if (!_ep) {
  pr_debug("%s: invalid ep\n", __func__);
  return -EINVAL;
 }
 ep = to_xusb_ep(_ep);
 udc = ep->udc;
 spin_lock_irqsave(&udc->lock, flags);
 xudc_nuke(ep, -ESHUTDOWN);
 /* Restore the endpoint's pristine config */
 ep->desc = NULL;
 ep->ep_usb.desc = NULL;
 dev_dbg(udc->dev, "USB Ep %d disable\n ", ep->epnumber);
 /* Disable the endpoint.*/
 epcfg = udc->read_fn(udc->addr + ep->offset);
 epcfg &= ~XUSB_EP_CFG_VALID_MASK;
 udc->write_fn(udc->addr, ep->offset, epcfg);
 spin_unlock_irqrestore(&udc->lock, flags);
 return 0;
}
/**
 * xudc_ep_alloc_request - Allocates a driver request wrapper.
 * @_ep: pointer to the usb endpoint structure.
 * @gfp_flags: allocation flags for the request.
 *
 * Return: pointer to the embedded usb_request on success, NULL on
 * allocation failure.
 */
static struct usb_request *xudc_ep_alloc_request(struct usb_ep *_ep,
       gfp_t gfp_flags)
{
 struct xusb_req *request;

 request = kzalloc(sizeof(*request), gfp_flags);
 if (!request)
  return NULL;

 request->ep = to_xusb_ep(_ep);
 INIT_LIST_HEAD(&request->queue);
 return &request->usb_req;
}
/**
 * xudc_free_request - Frees a request previously allocated by
 * xudc_ep_alloc_request().
 * @_ep: pointer to the usb device endpoint structure (unused).
 * @_req: pointer to the usb request structure.
 */
static void xudc_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
 kfree(to_xusb_req(_req));
}
/**
 * __xudc_ep0_queue - Adds the request to endpoint 0 queue.
 * @ep0: pointer to the xusb endpoint 0 structure.
 * @req: pointer to the xusb request structure.
 *
 * Return: 0 for success and error value on failure
 *
 * Caller must hold udc->lock. EP0 handles at most one request at a
 * time. For IN transfers the first packet (up to EP0_MAX_PACKET bytes)
 * is copied into the control endpoint buffer immediately; for OUT
 * transfers the buffer is armed to receive, or the status phase is
 * started directly when no data phase is expected.
 */
static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
{
 struct xusb_udc *udc = ep0->udc;
 u32 length;
 u8 *corebuf;
 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
  dev_dbg(udc->dev, "%s, bogus device state\n", __func__);
  return -EINVAL;
 }
 if (!list_empty(&ep0->queue)) {
  dev_dbg(udc->dev, "%s:ep0 busy\n", __func__);
  return -EBUSY;
 }
 req->usb_req.status = -EINPROGRESS;
 req->usb_req.actual = 0;
 list_add_tail(&req->queue, &ep0->queue);
 if (udc->setup.bRequestType & USB_DIR_IN) {
  prefetch(req->usb_req.buf);
  length = req->usb_req.length;
  corebuf = (void __force *) ((ep0->rambase << 2) +
       udc->addr);
  /* Copy at most one control packet now; .actual tracks it. */
  length = req->usb_req.actual = min_t(u32, length,
           EP0_MAX_PACKET);
  memcpy(corebuf, req->usb_req.buf, length);
  udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
  udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
 } else {
  if (udc->setup.wLength) {
   /* Enable EP0 buffer to receive data */
   udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, 0);
   udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
  } else {
   /* No data phase: go straight to the status stage. */
   xudc_wrstatus(udc);
  }
 }
 return 0;
}
/**
 * xudc_ep0_queue - Adds the request to endpoint 0 queue.
 * @_ep: pointer to the usb endpoint 0 structure.
 * @_req: pointer to the usb request structure.
 * @gfp_flags: allocation flags (unused; queuing does not allocate).
 *
 * Return: 0 for success and error value on failure
 *
 * Locked wrapper around __xudc_ep0_queue().
 */
static int xudc_ep0_queue(struct usb_ep *_ep, struct usb_request *_req,
     gfp_t gfp_flags)
{
 struct xusb_ep *ep0 = to_xusb_ep(_ep);
 struct xusb_udc *udc = ep0->udc;
 unsigned long flags;
 int ret;

 spin_lock_irqsave(&udc->lock, flags);
 ret = __xudc_ep0_queue(ep0, to_xusb_req(_req));
 spin_unlock_irqrestore(&udc->lock, flags);

 return ret;
}
/**
 * xudc_ep_queue - Adds the request to endpoint queue.
 * @_ep: pointer to the usb endpoint structure.
 * @_req: pointer to the usb request structure.
 * @gfp_flags: allocation flags (unused; queuing does not allocate).
 *
 * Return: 0 for success and error value on failure
 *
 * Maps the request for DMA when enabled, attempts an immediate
 * transfer if the endpoint queue is empty, and otherwise queues the
 * request for the interrupt handler to service.
 */
static int xudc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
    gfp_t gfp_flags)
{
 struct xusb_req *req = to_xusb_req(_req);
 struct xusb_ep *ep = to_xusb_ep(_ep);
 struct xusb_udc *udc = ep->udc;
 int ret;
 unsigned long flags;
 if (!ep->desc) {
  dev_dbg(udc->dev, "%s: queuing request to disabled %s\n",
   __func__, ep->name);
  return -ESHUTDOWN;
 }
 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
  dev_dbg(udc->dev, "%s, bogus device state\n", __func__);
  return -EINVAL;
 }
 spin_lock_irqsave(&udc->lock, flags);
 _req->status = -EINPROGRESS;
 _req->actual = 0;
 if (udc->dma_enabled) {
  ret = usb_gadget_map_request(&udc->gadget, &req->usb_req,
      ep->is_in);
  if (ret) {
   dev_dbg(udc->dev, "gadget_map failed ep%d\n",
    ep->epnumber);
   spin_unlock_irqrestore(&udc->lock, flags);
   return -EAGAIN;
  }
 }
 /* Try to start the transfer right away if nothing is pending. */
 if (list_empty(&ep->queue)) {
  if (ep->is_in) {
   dev_dbg(udc->dev, "xudc_write_fifo from ep_queue\n");
   /* A zero return means the request completed inline. */
   if (!xudc_write_fifo(ep, req))
    req = NULL;
  } else {
   dev_dbg(udc->dev, "xudc_read_fifo from ep_queue\n");
   if (!xudc_read_fifo(ep, req))
    req = NULL;
  }
 }
 if (req != NULL)
  list_add_tail(&req->queue, &ep->queue);
 spin_unlock_irqrestore(&udc->lock, flags);
 return 0;
}
/**
* xudc_ep_dequeue - Removes the request from the queue.
* @_ep: pointer to the usb device endpoint structure.
* @_req: pointer to the usb request structure.
*
* Return: 0 for success and error value on failure
*/
static int xudc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct xusb_ep *ep = to_xusb_ep(_ep);
struct xusb_req *req = NULL;
struct xusb_req *iter;
struct xusb_udc *udc = ep->udc;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
/* Make sure it's actually queued on this endpoint */
list_for_each_entry(iter, &ep->queue, queue) {
if (&iter->usb_req != _req)
continue;
req = iter;
break;
}
if (!req) {
spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
xudc_done(ep, req, -ECONNRESET);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
/**
 * xudc_ep0_enable - Enables the given endpoint.
 * @ep: pointer to the usb endpoint structure.
 * @desc: pointer to usb endpoint descriptor.
 *
 * Return: error always.
 *
 * endpoint 0 enable should not be called by gadget layer.
 */
static int xudc_ep0_enable(struct usb_ep *ep,
      const struct usb_endpoint_descriptor *desc)
{
 return -EINVAL;
}
/**
 * xudc_ep0_disable - Disables the given endpoint.
 * @ep: pointer to the usb endpoint structure.
 *
 * Return: error always.
 *
 * endpoint 0 disable should not be called by gadget layer.
 */
static int xudc_ep0_disable(struct usb_ep *ep)
{
 return -EINVAL;
}
/* Endpoint-zero operations: enable/disable are rejected for ep0. */
static const struct usb_ep_ops xusb_ep0_ops = {
 .enable  = xudc_ep0_enable,
 .disable = xudc_ep0_disable,
 .alloc_request = xudc_ep_alloc_request,
 .free_request = xudc_free_request,
 .queue  = xudc_ep0_queue,
 .dequeue = xudc_ep_dequeue,
 .set_halt = xudc_ep_set_halt,
};
/* Operations for all non-zero endpoints. */
static const struct usb_ep_ops xusb_ep_ops = {
 .enable  = xudc_ep_enable,
 .disable = xudc_ep_disable,
 .alloc_request = xudc_ep_alloc_request,
 .free_request = xudc_free_request,
 .queue  = xudc_ep_queue,
 .dequeue = xudc_ep_dequeue,
 .set_halt = xudc_ep_set_halt,
};
/**
 * xudc_get_frame - Reads the current usb frame number.
 * @gadget: pointer to the usb gadget structure.
 *
 * Return: current frame number for success and error value on failure.
 */
static int xudc_get_frame(struct usb_gadget *gadget)
{
 struct xusb_udc *udc;

 if (!gadget)
  return -ENODEV;

 udc = to_udc(gadget);
 return udc->read_fn(udc->addr + XUSB_FRAMENUM_OFFSET);
}
/**
 * xudc_wakeup - Send remote wakeup signal to host
 * @gadget: pointer to the usb gadget structure.
 *
 * Return: 0 on success and error on failure
 */
static int xudc_wakeup(struct usb_gadget *gadget)
{
 struct xusb_udc *udc = to_udc(gadget);
 u32 crtlreg;
 int status = -EINVAL;
 unsigned long flags;
 spin_lock_irqsave(&udc->lock, flags);
 /* Remote wake up not enabled by host */
 if (!udc->remote_wkp)
  goto done;
 crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
 crtlreg |= XUSB_CONTROL_USB_RMTWAKE_MASK;
 /* set remote wake up bit */
 udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
 /*
  * wait for a while and reset remote wake up bit since this bit
  * is not cleared by HW after sending remote wakeup to host.
  * NOTE(review): mdelay() busy-waits with the spinlock held and
  * interrupts off for 2 ms — confirm this latency is acceptable.
  */
 mdelay(2);
 crtlreg &= ~XUSB_CONTROL_USB_RMTWAKE_MASK;
 udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
 status = 0;
done:
 spin_unlock_irqrestore(&udc->lock, flags);
 return status;
}
/**
 * xudc_pullup - start/stop USB traffic
 * @gadget: pointer to the usb gadget structure.
 * @is_on: non-zero to start, zero to stop.
 *
 * Return: 0 always
 *
 * Sets or clears the USB READY bit in the control register, which
 * starts or stops the SIE engine of the IP.
 */
static int xudc_pullup(struct usb_gadget *gadget, int is_on)
{
 struct xusb_udc *udc = to_udc(gadget);
 unsigned long flags;
 u32 ctrl;

 spin_lock_irqsave(&udc->lock, flags);
 ctrl = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
 if (is_on)
  ctrl |= XUSB_CONTROL_USB_READY_MASK;
 else
  ctrl &= ~XUSB_CONTROL_USB_READY_MASK;
 udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrl);
 spin_unlock_irqrestore(&udc->lock, flags);

 return 0;
}
/**
 * xudc_eps_init - initialize endpoints.
 * @udc: pointer to the usb device controller structure.
 *
 * Sets up ep0 with the control ops and every other endpoint with the
 * generic ops, links non-zero endpoints into the gadget's ep_list, and
 * programs each endpoint's initial hardware configuration.
 */
static void xudc_eps_init(struct xusb_udc *udc)
{
 u32 ep_number;
 INIT_LIST_HEAD(&udc->gadget.ep_list);
 for (ep_number = 0; ep_number < XUSB_MAX_ENDPOINTS; ep_number++) {
  struct xusb_ep *ep = &udc->ep[ep_number];
  if (ep_number) {
   list_add_tail(&ep->ep_usb.ep_list,
          &udc->gadget.ep_list);
   /* No hardware limit advertised for non-zero eps. */
   usb_ep_set_maxpacket_limit(&ep->ep_usb,
        (unsigned short) ~0);
   snprintf(ep->name, EPNAME_SIZE, "ep%d", ep_number);
   ep->ep_usb.name = ep->name;
   ep->ep_usb.ops = &xusb_ep_ops;
   ep->ep_usb.caps.type_iso = true;
   ep->ep_usb.caps.type_bulk = true;
   ep->ep_usb.caps.type_int = true;
  } else {
   ep->ep_usb.name = ep0name;
   usb_ep_set_maxpacket_limit(&ep->ep_usb, EP0_MAX_PACKET);
   ep->ep_usb.ops = &xusb_ep0_ops;
   ep->ep_usb.caps.type_control = true;
  }
  ep->ep_usb.caps.dir_in = true;
  ep->ep_usb.caps.dir_out = true;
  ep->udc = udc;
  ep->epnumber = ep_number;
  ep->desc = NULL;
  /*
   * The configuration register address offset between
   * each endpoint is 0x10.
   */
  ep->offset = XUSB_EP0_CONFIG_OFFSET + (ep_number * 0x10);
  ep->is_in = 0;
  ep->is_iso = 0;
  ep->maxpacket = 0;
  xudc_epconfig(ep, udc);
  /* Initialize one queue per endpoint */
  INIT_LIST_HEAD(&ep->queue);
 }
}
/**
* xudc_stop_activity - Stops any further activity on the device.
* @udc: pointer to the usb device controller structure.
*/
static void xudc_stop_activity(struct xusb_udc *udc)
{
int i;
struct xusb_ep *ep;
for (i = 0; i < XUSB_MAX_ENDPOINTS; i++) {
ep = &udc->ep[i];
xudc_nuke(ep, -ESHUTDOWN);
}
}
/**
 * xudc_start - Starts the device.
 * @gadget: pointer to the usb gadget structure
 * @driver: pointer to gadget driver structure
 *
 * Return: zero on success and error on failure
 *
 * Binds the gadget driver, enables the control endpoint, and resets
 * the device address and remote-wakeup state.
 */
static int xudc_start(struct usb_gadget *gadget,
        struct usb_gadget_driver *driver)
{
 struct xusb_udc *udc = to_udc(gadget);
 struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
 const struct usb_endpoint_descriptor *desc = &config_bulk_out_desc;
 unsigned long flags;
 int ret = 0;
 spin_lock_irqsave(&udc->lock, flags);
 if (udc->driver) {
  dev_err(udc->dev, "%s is already bound to %s\n",
   udc->gadget.name, udc->driver->driver.name);
  ret = -EBUSY;
  goto err;
 }
 /* hook up the driver */
 udc->driver = driver;
 udc->gadget.speed = driver->max_speed;
 /* Enable the control endpoint. */
 ret = __xudc_ep_enable(ep0, desc);
 /* Set device address and remote wakeup to 0 */
 udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
 udc->remote_wkp = 0;
err:
 spin_unlock_irqrestore(&udc->lock, flags);
 return ret;
}
/**
 * xudc_stop - stops the device.
 * @gadget: pointer to the usb gadget structure
 *
 * Return: zero always
 *
 * Unbinds the gadget driver, resets the device address and
 * remote-wakeup state, and completes all pending requests.
 */
static int xudc_stop(struct usb_gadget *gadget)
{
 struct xusb_udc *udc = to_udc(gadget);
 unsigned long flags;
 spin_lock_irqsave(&udc->lock, flags);
 udc->gadget.speed = USB_SPEED_UNKNOWN;
 udc->driver = NULL;
 /* Set device address and remote wakeup to 0 */
 udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
 udc->remote_wkp = 0;
 xudc_stop_activity(udc);
 spin_unlock_irqrestore(&udc->lock, flags);
 return 0;
}
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops xusb_udc_ops = {
 .get_frame = xudc_get_frame,
 .wakeup  = xudc_wakeup,
 .pullup  = xudc_pullup,
 .udc_start = xudc_start,
 .udc_stop = xudc_stop,
};
/**
 * xudc_clear_stall_all_ep - clears stall of every endpoint.
 * @udc: pointer to the udc structure.
 *
 * Clears the STALL bit on every endpoint and, for non-zero endpoints,
 * also resets the data toggle. Used on bus reset.
 */
static void xudc_clear_stall_all_ep(struct xusb_udc *udc)
{
 struct xusb_ep *ep;
 u32 epcfgreg;
 int i;
 for (i = 0; i < XUSB_MAX_ENDPOINTS; i++) {
  ep = &udc->ep[i];
  epcfgreg = udc->read_fn(udc->addr + ep->offset);
  epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
  udc->write_fn(udc->addr, ep->offset, epcfgreg);
  if (ep->epnumber) {
   /* Reset the toggle bit.*/
   epcfgreg = udc->read_fn(udc->addr + ep->offset);
   epcfgreg &= ~XUSB_EP_CFG_DATA_TOGGLE_MASK;
   udc->write_fn(udc->addr, ep->offset, epcfgreg);
  }
 }
}
/**
 * xudc_startup_handler - The usb device controller interrupt handler.
 * @udc: pointer to the udc structure.
 * @intrstatus: The mask value containing the interrupt sources.
 *
 * This function handles the RESET, SUSPEND, RESUME and DISCONNECT
 * interrupts. Called with udc->lock held; the lock is dropped around
 * gadget-driver callbacks. Each branch re-arms the IER for the other
 * bus events.
 */
static void xudc_startup_handler(struct xusb_udc *udc, u32 intrstatus)
{
 u32 intrreg;
 if (intrstatus & XUSB_STATUS_RESET_MASK) {
  dev_dbg(udc->dev, "Reset\n");
  if (intrstatus & XUSB_STATUS_HIGH_SPEED_MASK)
   udc->gadget.speed = USB_SPEED_HIGH;
  else
   udc->gadget.speed = USB_SPEED_FULL;
  xudc_stop_activity(udc);
  xudc_clear_stall_all_ep(udc);
  udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, 0);
  /* Set device address and remote wakeup to 0 */
  udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
  udc->remote_wkp = 0;
  /* Enable the suspend, resume and disconnect */
  intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
  intrreg |= XUSB_STATUS_SUSPEND_MASK | XUSB_STATUS_RESUME_MASK |
      XUSB_STATUS_DISCONNECT_MASK;
  udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
 }
 if (intrstatus & XUSB_STATUS_SUSPEND_MASK) {
  dev_dbg(udc->dev, "Suspend\n");
  /* Enable the reset, resume and disconnect */
  intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
  intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_RESUME_MASK |
      XUSB_STATUS_DISCONNECT_MASK;
  udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
  udc->usb_state = USB_STATE_SUSPENDED;
  /* Notify the gadget driver outside the lock. */
  if (udc->driver->suspend) {
   spin_unlock(&udc->lock);
   udc->driver->suspend(&udc->gadget);
   spin_lock(&udc->lock);
  }
 }
 if (intrstatus & XUSB_STATUS_RESUME_MASK) {
  bool condition = (udc->usb_state != USB_STATE_SUSPENDED);
  dev_WARN_ONCE(udc->dev, condition,
    "Resume IRQ while not suspended\n");
  dev_dbg(udc->dev, "Resume\n");
  /* Enable the reset, suspend and disconnect */
  intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
  intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_SUSPEND_MASK |
      XUSB_STATUS_DISCONNECT_MASK;
  udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
  udc->usb_state = 0;
  if (udc->driver->resume) {
   spin_unlock(&udc->lock);
   udc->driver->resume(&udc->gadget);
   spin_lock(&udc->lock);
  }
 }
 if (intrstatus & XUSB_STATUS_DISCONNECT_MASK) {
  dev_dbg(udc->dev, "Disconnect\n");
  /* Enable the reset, resume and suspend */
  intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
  intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_RESUME_MASK |
      XUSB_STATUS_SUSPEND_MASK;
  udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
  if (udc->driver && udc->driver->disconnect) {
   spin_unlock(&udc->lock);
   udc->driver->disconnect(&udc->gadget);
   spin_lock(&udc->lock);
  }
 }
}
/**
 * xudc_ep0_stall - Stall endpoint zero.
 * @udc: pointer to the udc structure.
 *
 * Sets the STALL bit in endpoint zero's configuration register.
 */
static void xudc_ep0_stall(struct xusb_udc *udc)
{
 struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
 u32 cfg;

 cfg = udc->read_fn(udc->addr + ep0->offset);
 udc->write_fn(udc->addr, ep0->offset, cfg | XUSB_EP_CFG_STALL_MASK);
}
/**
 * xudc_setaddress - executes SET_ADDRESS command
 * @udc: pointer to the udc structure.
 *
 * Queues a zero-length status response for the USB SET_ADDRESS
 * request; stalls ep0 if that fails.
 */
static void xudc_setaddress(struct xusb_udc *udc)
{
 struct xusb_req *req = udc->req;
 int ret;

 req->usb_req.length = 0;
 ret = __xudc_ep0_queue(&udc->ep[0], req);
 if (ret) {
  dev_err(udc->dev, "Can't respond to SET ADDRESS request\n");
  xudc_ep0_stall(udc);
 }
}
/**
 * xudc_getstatus - executes GET_STATUS command
 * @udc: pointer to the udc structure.
 *
 * Builds the two-byte status response for device, interface, or
 * endpoint recipients and queues it on ep0; stalls ep0 on any invalid
 * recipient, endpoint, or direction.
 */
static void xudc_getstatus(struct xusb_udc *udc)
{
 struct xusb_ep *ep0 = &udc->ep[0];
 struct xusb_req *req = udc->req;
 struct xusb_ep *target_ep;
 u16 status = 0;
 u32 epcfgreg;
 int epnum;
 u32 halt;
 int ret;
 switch (udc->setup.bRequestType & USB_RECIP_MASK) {
 case USB_RECIP_DEVICE:
  /* Get device status */
  status = 1 << USB_DEVICE_SELF_POWERED;
  if (udc->remote_wkp)
   status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
  break;
 case USB_RECIP_INTERFACE:
  /* Interface status is always zero. */
  break;
 case USB_RECIP_ENDPOINT:
  epnum = le16_to_cpu(udc->setup.wIndex) & USB_ENDPOINT_NUMBER_MASK;
  if (epnum >= XUSB_MAX_ENDPOINTS)
   goto stall;
  target_ep = &udc->ep[epnum];
  epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
  halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
  /* The requested direction must match the endpoint's. */
  if (le16_to_cpu(udc->setup.wIndex) & USB_DIR_IN) {
   if (!target_ep->is_in)
    goto stall;
  } else {
   if (target_ep->is_in)
    goto stall;
  }
  if (halt)
   status = 1 << USB_ENDPOINT_HALT;
  break;
 default:
  goto stall;
 }
 req->usb_req.length = 2;
 *(__le16 *)req->usb_req.buf = cpu_to_le16(status);
 ret = __xudc_ep0_queue(ep0, req);
 if (ret == 0)
  return;
stall:
 dev_err(udc->dev, "Can't respond to getstatus request\n");
 xudc_ep0_stall(udc);
}
/**
 * xudc_set_clear_feature - Executes the set feature and clear feature commands.
 * @udc: pointer to the usb device controller structure.
 *
 * Processes the SET_FEATURE and CLEAR_FEATURE commands for device and
 * endpoint recipients, then queues the zero-length status response.
 * Stalls ep0 on any unsupported feature or invalid target.
 */
static void xudc_set_clear_feature(struct xusb_udc *udc)
{
 struct xusb_ep *ep0 = &udc->ep[0];
 struct xusb_req *req = udc->req;
 struct xusb_ep *target_ep;
 u8 endpoint;
 u8 outinbit;
 u32 epcfgreg;
 /* flag: 1 = SET_FEATURE, 0 = CLEAR_FEATURE */
 int flag = (udc->setup.bRequest == USB_REQ_SET_FEATURE ? 1 : 0);
 int ret;
 switch (udc->setup.bRequestType) {
 case USB_RECIP_DEVICE:
  switch (le16_to_cpu(udc->setup.wValue)) {
  case USB_DEVICE_TEST_MODE:
   /*
    * The Test Mode will be executed
    * after the status phase.
    */
   break;
  case USB_DEVICE_REMOTE_WAKEUP:
   if (flag)
    udc->remote_wkp = 1;
   else
    udc->remote_wkp = 0;
   break;
  default:
   xudc_ep0_stall(udc);
   break;
  }
  break;
 case USB_RECIP_ENDPOINT:
  /* Only ENDPOINT_HALT (wValue == 0) is supported. */
  if (!udc->setup.wValue) {
   endpoint = le16_to_cpu(udc->setup.wIndex) &
       USB_ENDPOINT_NUMBER_MASK;
   if (endpoint >= XUSB_MAX_ENDPOINTS) {
    xudc_ep0_stall(udc);
    return;
   }
   target_ep = &udc->ep[endpoint];
   outinbit = le16_to_cpu(udc->setup.wIndex) &
       USB_ENDPOINT_DIR_MASK;
   outinbit = outinbit >> 7;
   /* Make sure direction matches.*/
   if (outinbit != target_ep->is_in) {
    xudc_ep0_stall(udc);
    return;
   }
   epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
   if (!endpoint) {
    /* Clear the stall.*/
    epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
    udc->write_fn(udc->addr,
           target_ep->offset, epcfgreg);
   } else {
    if (flag) {
     epcfgreg |= XUSB_EP_CFG_STALL_MASK;
     udc->write_fn(udc->addr,
            target_ep->offset,
            epcfgreg);
    } else {
     /* Unstall the endpoint.*/
     epcfgreg &= ~(XUSB_EP_CFG_STALL_MASK |
      XUSB_EP_CFG_DATA_TOGGLE_MASK);
     udc->write_fn(udc->addr,
            target_ep->offset,
            epcfgreg);
    }
   }
  }
  break;
 default:
  xudc_ep0_stall(udc);
  return;
 }
 req->usb_req.length = 0;
 ret = __xudc_ep0_queue(ep0, req);
 if (ret == 0)
  return;
 dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n");
 xudc_ep0_stall(udc);
}
/**
 * xudc_handle_setup - Processes the setup packet.
 * @udc: pointer to the usb device controller structure.
 *
 * Reads the setup packet from the control endpoint RAM, handles the
 * standard requests the controller implements itself (GET_STATUS,
 * SET_ADDRESS, SET/CLEAR_FEATURE), and delegates everything else to
 * the gadget driver. Called with udc->lock held; the lock is dropped
 * around the gadget's setup() callback.
 */
static void xudc_handle_setup(struct xusb_udc *udc)
 __must_hold(&udc->lock)
{
 struct xusb_ep *ep0 = &udc->ep[0];
 struct usb_ctrlrequest setup;
 u32 *ep0rambase;
 /* Load up the chapter 9 command buffer.*/
 ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
 memcpy(&setup, ep0rambase, 8);
 udc->setup = setup;
 /* Fix up byte order of the multi-byte fields read from DPRAM. */
 udc->setup.wValue = cpu_to_le16((u16 __force)setup.wValue);
 udc->setup.wIndex = cpu_to_le16((u16 __force)setup.wIndex);
 udc->setup.wLength = cpu_to_le16((u16 __force)setup.wLength);
 /* Clear previous requests */
 xudc_nuke(ep0, -ECONNRESET);
 if (udc->setup.bRequestType & USB_DIR_IN) {
  /* Execute the get command.*/
  udc->setupseqrx = STATUS_PHASE;
  udc->setupseqtx = DATA_PHASE;
 } else {
  /* Execute the put command.*/
  udc->setupseqrx = DATA_PHASE;
  udc->setupseqtx = STATUS_PHASE;
 }
 switch (udc->setup.bRequest) {
 case USB_REQ_GET_STATUS:
  /* Data+Status phase form udc */
  if ((udc->setup.bRequestType &
    (USB_DIR_IN | USB_TYPE_MASK)) !=
    (USB_DIR_IN | USB_TYPE_STANDARD))
   break;
  xudc_getstatus(udc);
  return;
 case USB_REQ_SET_ADDRESS:
  /* Status phase from udc */
  if (udc->setup.bRequestType != (USB_DIR_OUT |
    USB_TYPE_STANDARD | USB_RECIP_DEVICE))
   break;
  xudc_setaddress(udc);
  return;
 case USB_REQ_CLEAR_FEATURE:
 case USB_REQ_SET_FEATURE:
  /* Requests with no data phase, status phase from udc */
  if ((udc->setup.bRequestType & USB_TYPE_MASK)
    != USB_TYPE_STANDARD)
   break;
  xudc_set_clear_feature(udc);
  return;
 default:
  break;
 }
 /* Non-standard or class request: hand off to the gadget driver. */
 spin_unlock(&udc->lock);
 if (udc->driver->setup(&udc->gadget, &setup) < 0)
  xudc_ep0_stall(udc);
 spin_lock(&udc->lock);
}
/**
* xudc_ep0_out - Processes the endpoint 0 OUT token.
* @udc: pointer to the usb device controller structure.
*/
static void xudc_ep0_out(struct xusb_udc *udc)
{
	struct xusb_ep *ep0 = &udc->ep[0];
	struct xusb_req *req;
	u8 *ep0rambase;
	unsigned int bytes_to_rx;
	void *buffer;

	/*
	 * NOTE(review): assumes a request is already queued on ep0;
	 * list_first_entry() is not checked against an empty list.
	 */
	req = list_first_entry(&ep0->queue, struct xusb_req, queue);

	switch (udc->setupseqrx) {
	case STATUS_PHASE:
		/*
		 * This resets both state machines for the next
		 * Setup packet.
		 */
		udc->setupseqrx = SETUP_PHASE;
		udc->setupseqtx = SETUP_PHASE;
		req->usb_req.actual = req->usb_req.length;
		xudc_done(ep0, req, 0);
		break;
	case DATA_PHASE:
		/* BUF0COUNT holds how many bytes the hardware received. */
		bytes_to_rx = udc->read_fn(udc->addr +
					   XUSB_EP_BUF0COUNT_OFFSET);
		/* Copy the data to be received from the DPRAM. */
		ep0rambase = (u8 __force *) (udc->addr +
			     (ep0->rambase << 2));
		buffer = req->usb_req.buf + req->usb_req.actual;
		req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
		memcpy(buffer, ep0rambase, bytes_to_rx);

		if (req->usb_req.length == req->usb_req.actual) {
			/* Data transfer completed get ready for Status stage */
			xudc_wrstatus(udc);
		} else {
			/* Enable EP0 buffer to receive data */
			udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, 0);
			udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
		}
		break;
	default:
		break;
	}
}
/**
* xudc_ep0_in - Processes the endpoint 0 IN token.
* @udc: pointer to the usb device controller structure.
*/
static void xudc_ep0_in(struct xusb_udc *udc)
{
	struct xusb_ep *ep0 = &udc->ep[0];
	struct xusb_req *req;
	unsigned int bytes_to_tx;
	void *buffer;
	u32 epcfgreg;
	u16 count = 0;
	u16 length;
	u8 *ep0rambase;
	/* Test selector travels in the high byte of wIndex. */
	u8 test_mode = le16_to_cpu(udc->setup.wIndex) >> 8;

	/*
	 * NOTE(review): assumes a request is already queued on ep0;
	 * list_first_entry() is not checked against an empty list.
	 */
	req = list_first_entry(&ep0->queue, struct xusb_req, queue);
	bytes_to_tx = req->usb_req.length - req->usb_req.actual;

	switch (udc->setupseqtx) {
	case STATUS_PHASE:
		/*
		 * Side effects deferred until the status stage are applied
		 * here: the new device address, or the test mode selector.
		 */
		switch (udc->setup.bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* Set the address of the device.*/
			udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET,
				      le16_to_cpu(udc->setup.wValue));
			break;
		case USB_REQ_SET_FEATURE:
			if (udc->setup.bRequestType ==
					USB_RECIP_DEVICE) {
				if (le16_to_cpu(udc->setup.wValue) ==
						USB_DEVICE_TEST_MODE)
					udc->write_fn(udc->addr,
						      XUSB_TESTMODE_OFFSET,
						      test_mode);
			}
			break;
		}
		req->usb_req.actual = req->usb_req.length;
		xudc_done(ep0, req, 0);
		break;
	case DATA_PHASE:
		if (!bytes_to_tx) {
			/*
			 * We're done with data transfer, next
			 * will be zero length OUT with data toggle of
			 * 1. Setup data_toggle.
			 */
			epcfgreg = udc->read_fn(udc->addr + ep0->offset);
			epcfgreg |= XUSB_EP_CFG_DATA_TOGGLE_MASK;
			udc->write_fn(udc->addr, ep0->offset, epcfgreg);
			udc->setupseqtx = STATUS_PHASE;
		} else {
			length = count = min_t(u32, bytes_to_tx,
					       EP0_MAX_PACKET);
			/* Copy the data to be transmitted into the DPRAM. */
			ep0rambase = (u8 __force *) (udc->addr +
				     (ep0->rambase << 2));
			buffer = req->usb_req.buf + req->usb_req.actual;
			req->usb_req.actual = req->usb_req.actual + length;
			memcpy(ep0rambase, buffer, length);
		}
		/*
		 * count is still 0 when nothing was copied, presumably
		 * arming a zero-length packet — hardware semantics not
		 * visible here, confirm against the IP datasheet.
		 */
		udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
		break;
	default:
		break;
	}
}
/**
* xudc_ctrl_ep_handler - Endpoint 0 interrupt handler.
* @udc: pointer to the udc structure.
* @intrstatus: It's the mask value for the interrupt sources on endpoint 0.
*
* Processes the commands received during enumeration phase.
*/
static void xudc_ctrl_ep_handler(struct xusb_udc *udc, u32 intrstatus)
{
	/* A fresh SETUP packet takes priority over any buffer event. */
	if (intrstatus & XUSB_STATUS_SETUP_PACKET_MASK)
		xudc_handle_setup(udc);
	else if (intrstatus & XUSB_STATUS_FIFO_BUFF_RDY_MASK)
		xudc_ep0_out(udc);
	else if (intrstatus & XUSB_STATUS_FIFO_BUFF_FREE_MASK)
		xudc_ep0_in(udc);
}
/**
* xudc_nonctrl_ep_handler - Non control endpoint interrupt handler.
* @udc: pointer to the udc structure.
* @epnum: End point number for which the interrupt is to be processed
* @intrstatus: mask value for interrupt sources of endpoints other
* than endpoint 0.
*
* Processes the buffer completion interrupts.
*/
static void xudc_nonctrl_ep_handler(struct xusb_udc *udc, u8 epnum,
				    u32 intrstatus)
{
	struct xusb_req *req;
	struct xusb_ep *ep;

	ep = &udc->ep[epnum];

	/*
	 * Process the End point interrupts: a completion bit means the
	 * corresponding ping-pong buffer is free again.  Both flags use
	 * 'false' for consistency (the original mixed 0 and false).
	 */
	if (intrstatus & (XUSB_STATUS_EP0_BUFF1_COMP_MASK << epnum))
		ep->buffer0ready = false;
	if (intrstatus & (XUSB_STATUS_EP0_BUFF2_COMP_MASK << epnum))
		ep->buffer1ready = false;

	/* Nothing queued — nothing further to kick off. */
	if (list_empty(&ep->queue))
		return;

	/* Continue the transfer at the head of the queue. */
	req = list_first_entry(&ep->queue, struct xusb_req, queue);

	if (ep->is_in)
		xudc_write_fifo(ep, req);
	else
		xudc_read_fifo(ep, req);
}
/**
* xudc_irq - The main interrupt handler.
* @irq: The interrupt number.
* @_udc: pointer to the usb device controller structure.
*
* Return: IRQ_HANDLED after the interrupt is handled.
*/
static irqreturn_t xudc_irq(int irq, void *_udc)
{
	struct xusb_udc *udc = _udc;
	u32 intrstatus;
	u32 ier;
	u8 index;
	u32 bufintr;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * Event interrupts are level sensitive hence first disable
	 * IER, read ISR and figure out active interrupts.
	 */
	ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
	ier &= ~XUSB_STATUS_INTR_EVENT_MASK;
	udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);

	/* Read the Interrupt Status Register.*/
	intrstatus = udc->read_fn(udc->addr + XUSB_STATUS_OFFSET);

	/* Call the handler for the event interrupt.*/
	if (intrstatus & XUSB_STATUS_INTR_EVENT_MASK) {
		/*
		 * Check if there is any action to be done for :
		 * - USB Reset received {XUSB_STATUS_RESET_MASK}
		 * - USB Suspend received {XUSB_STATUS_SUSPEND_MASK}
		 * - USB Resume received {XUSB_STATUS_RESUME_MASK}
		 * - USB Disconnect received {XUSB_STATUS_DISCONNECT_MASK}
		 */
		xudc_startup_handler(udc, intrstatus);
	}

	/* Check the buffer completion interrupts */
	if (intrstatus & XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK) {
		/* Enable Reset, Suspend, Resume and Disconnect */
		ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
		ier |= XUSB_STATUS_INTR_EVENT_MASK;
		udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);

		/* Endpoint 0 (control) has a dedicated handler. */
		if (intrstatus & XUSB_STATUS_EP0_BUFF1_COMP_MASK)
			xudc_ctrl_ep_handler(udc, intrstatus);

		/*
		 * Dispatch to every non-control endpoint (1..7) whose
		 * buffer-1 or buffer-2 completion bit is set.
		 */
		for (index = 1; index < 8; index++) {
			bufintr = ((intrstatus &
					(XUSB_STATUS_EP1_BUFF1_COMP_MASK <<
					(index - 1))) || (intrstatus &
					(XUSB_STATUS_EP1_BUFF2_COMP_MASK <<
					(index - 1))));
			if (bufintr) {
				xudc_nonctrl_ep_handler(udc, index,
							intrstatus);
			}
		}
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
/**
* xudc_probe - The device probe function for driver initialization.
* @pdev: pointer to the platform device structure.
*
* Return: 0 for success and error value on failure
*/
static int xudc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct xusb_udc *udc;
	int irq;
	int ret;
	u32 ier;
	u8 *buff;

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	/* Create a dummy request for GET_STATUS, SET_ADDRESS */
	udc->req = devm_kzalloc(&pdev->dev, sizeof(struct xusb_req),
				GFP_KERNEL);
	if (!udc->req)
		return -ENOMEM;

	buff = devm_kzalloc(&pdev->dev, STATUSBUFF_SIZE, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	udc->req->usb_req.buf = buff;

	/* Map the registers */
	udc->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(udc->addr))
		return PTR_ERR(udc->addr);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, xudc_irq, 0,
			       dev_name(&pdev->dev), udc);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "unable to request irq %d", irq);
		goto fail;
	}

	udc->dma_enabled = of_property_read_bool(np, "xlnx,has-builtin-dma");

	/* Setup gadget structure */
	udc->gadget.ops = &xusb_udc_ops;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
	udc->gadget.name = driver_name;

	/* The AXI clock is optional: warn and continue when absent. */
	udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(udc->clk)) {
		if (PTR_ERR(udc->clk) != -ENOENT) {
			ret = PTR_ERR(udc->clk);
			goto fail;
		}

		/*
		 * Clock framework support is optional, continue on,
		 * anyways if we don't find a matching clock
		 */
		dev_warn(&pdev->dev, "s_axi_aclk clock property is not found\n");
		udc->clk = NULL;
	}

	ret = clk_prepare_enable(udc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}

	spin_lock_init(&udc->lock);

	/*
	 * Check for IP endianness: write a test value through the
	 * big-endian accessors and fall back to little-endian when it
	 * does not read back.
	 */
	udc->write_fn = xudc_write32_be;
	udc->read_fn = xudc_read32_be;
	udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, USB_TEST_J);
	if ((udc->read_fn(udc->addr + XUSB_TESTMODE_OFFSET))
			!= USB_TEST_J) {
		udc->write_fn = xudc_write32;
		udc->read_fn = xudc_read32;
	}
	udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, 0);

	xudc_eps_init(udc);

	/* Set device address to 0.*/
	udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);

	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret)
		goto err_disable_unprepare_clk;

	udc->dev = &udc->gadget.dev;

	/* Enable the interrupts.*/
	ier = XUSB_STATUS_GLOBAL_INTR_MASK | XUSB_STATUS_INTR_EVENT_MASK |
	      XUSB_STATUS_FIFO_BUFF_RDY_MASK | XUSB_STATUS_FIFO_BUFF_FREE_MASK |
	      XUSB_STATUS_SETUP_PACKET_MASK |
	      XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK;

	udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);

	platform_set_drvdata(pdev, udc);

	dev_vdbg(&pdev->dev, "%s at 0x%08X mapped to %p %s\n",
		 driver_name, (u32)res->start, udc->addr,
		 udc->dma_enabled ? "with DMA" : "without DMA");

	return 0;

err_disable_unprepare_clk:
	clk_disable_unprepare(udc->clk);
fail:
	dev_err(&pdev->dev, "probe failed, %d\n", ret);
	return ret;
}
/**
 * xudc_remove - Releases the resources allocated during the initialization.
 * @pdev: pointer to the platform device structure.
 */
static void xudc_remove(struct platform_device *pdev)
{
	struct xusb_udc *udc = platform_get_drvdata(pdev);

	/* Unregister the gadget before its clock goes away. */
	usb_del_gadget_udc(&udc->gadget);
	clk_disable_unprepare(udc->clk);
}
#ifdef CONFIG_PM_SLEEP
/* System-sleep hook: drop USB_READY, notify the gadget, gate the clock. */
static int xudc_suspend(struct device *dev)
{
	struct xusb_udc *udc = dev_get_drvdata(dev);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);
	ctrl = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET,
		      ctrl & ~XUSB_CONTROL_USB_READY_MASK);
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->driver && udc->driver->suspend)
		udc->driver->suspend(&udc->gadget);

	clk_disable(udc->clk);

	return 0;
}
/* System-resume hook: ungate the clock and raise USB_READY again. */
static int xudc_resume(struct device *dev)
{
	struct xusb_udc *udc = dev_get_drvdata(dev);
	unsigned long flags;
	u32 ctrl;
	int ret;

	ret = clk_enable(udc->clk);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&udc->lock, flags);
	ctrl = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET,
		      ctrl | XUSB_CONTROL_USB_READY_MASK);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
/* Sleep-state PM hooks; runtime PM is not implemented by this driver. */
static const struct dev_pm_ops xudc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
};

/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
	{ .compatible = "xlnx,usb2-device-4.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, usb_of_match);

static struct platform_driver xudc_driver = {
	.driver = {
		.name = driver_name,
		.of_match_table = usb_of_match,
		.pm	= &xudc_pm_ops,
	},
	.probe = xudc_probe,
	.remove_new = xudc_remove,
};

module_platform_driver(xudc_driver);

MODULE_DESCRIPTION("Xilinx udc driver");
MODULE_AUTHOR("Xilinx, Inc");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/udc-xilinx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Intel PXA25x and IXP4xx on-chip full speed USB device controllers
*
* Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
* Copyright (C) 2003 Robert Schwebel, Pengutronix
* Copyright (C) 2003 Benedikt Spranger, Pengutronix
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003 Joshua Wise
*/
/* #define VERBOSE_DEBUG */
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/mach-types.h>
#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#define UDCCR 0x0000 /* UDC Control Register */
#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
#define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */
#define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */
#define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */
#define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */
#define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */
#define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */
#define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */
#define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */
#define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */
#define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */
#define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */
#define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */
#define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */
#define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */
#define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */
#define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */
#define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */
#define UFNRH 0x0060 /* UDC Frame Number Register High */
#define UFNRL 0x0064 /* UDC Frame Number Register Low */
#define UBCR2 0x0068 /* UDC Byte Count Reg 2 */
#define UBCR4 0x006c /* UDC Byte Count Reg 4 */
#define UBCR7 0x0070 /* UDC Byte Count Reg 7 */
#define UBCR9 0x0074 /* UDC Byte Count Reg 9 */
#define UBCR12 0x0078 /* UDC Byte Count Reg 12 */
#define UBCR14 0x007c /* UDC Byte Count Reg 14 */
#define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */
#define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */
#define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */
#define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */
#define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */
#define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */
#define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */
#define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */
#define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */
#define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */
#define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */
#define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */
#define UDDR12 0x0B80 /* UDC Endpoint 12 Data Register */
#define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */
#define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */
#define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */
#define UICR0 0x0050 /* UDC Interrupt Control Register 0 */
#define UICR1 0x0054 /* UDC Interrupt Control Register 1 */
#define USIR0 0x0058 /* UDC Status Interrupt Register 0 */
#define USIR1 0x005C /* UDC Status Interrupt Register 1 */
#define UDCCR_UDE (1 << 0) /* UDC enable */
#define UDCCR_UDA (1 << 1) /* UDC active */
#define UDCCR_RSM (1 << 2) /* Device resume */
#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
#define UDCCS0_IPR (1 << 1) /* IN packet ready */
#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
#define UDCCS0_SST (1 << 4) /* Sent stall */
#define UDCCS0_FST (1 << 5) /* Force stall */
#define UDCCS0_RNE (1 << 6) /* Receive FIFO not empty */
#define UDCCS0_SA (1 << 7) /* Setup active */
#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
#define UDCCS_BI_SST (1 << 4) /* Sent stall */
#define UDCCS_BI_FST (1 << 5) /* Force stall */
#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
#define UDCCS_BO_DME (1 << 3) /* DMA enable */
#define UDCCS_BO_SST (1 << 4) /* Sent stall */
#define UDCCS_BO_FST (1 << 5) /* Force stall */
#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? datasheet says '2' */
#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
#endif
#ifdef CONFIG_ARCH_PXA
#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
#endif
#define UDCCS_IO_DME (1 << 3) /* DMA enable */
#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
#define UDCCS_INT_SST (1 << 4) /* Sent stall */
#define UDCCS_INT_FST (1 << 5) /* Force stall */
#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
/*
* This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
* series processors. The UDC for the IXP 4xx series is very similar.
* There are fifteen endpoints, in addition to ep0.
*
* Such controller drivers work with a gadget driver. The gadget driver
* returns descriptors, implements configuration and data protocols used
* by the host to interact with this device, and allocates endpoints to
* the different protocol interfaces. The controller driver virtualizes
* usb hardware so that the gadget drivers will be more portable.
*
* This UDC hardware wants to implement a bit too much USB protocol, so
* it constrains the sorts of USB configuration change events that work.
* The errata for these chips are misleading; some "fixed" bugs from
* pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
*
* Note that the UDC hardware supports DMA (except on IXP) but that's
* not used here. IN-DMA (to host) is simple enough, when the data is
* suitably aligned (16 bytes) ... the network stack doesn't do that,
* other software can. OUT-DMA is buggy in most chip versions, as well
* as poorly designed (data toggle not automatic). So this driver won't
* bother using DMA. (Mostly-working IN-DMA support was available in
* kernels before 2.6.23, but was never enabled or well tested.)
*/
#define DRIVER_VERSION "30-June-2007"
#define DRIVER_DESC "PXA 25x USB Device Controller driver"
static const char driver_name [] = "pxa25x_udc";
static const char ep0name [] = "ep0";
#ifdef CONFIG_ARCH_IXP4XX
/* cpu-specific register addresses are compiled in to this code */
#ifdef CONFIG_ARCH_PXA
#error "Can't configure both IXP and PXA"
#endif
/* IXP doesn't yet support <linux/clk.h> */
#define clk_get(dev,name) NULL
#define clk_enable(clk) do { } while (0)
#define clk_disable(clk) do { } while (0)
#define clk_put(clk) do { } while (0)
#endif
#include "pxa25x_udc.h"
#ifdef CONFIG_USB_PXA25X_SMALL
#define SIZE_STR " (small)"
#else
#define SIZE_STR ""
#endif
/* ---------------------------------------------------------------------------
* endpoint related parts of the api to the usb controller hardware,
* used by gadget driver; and the inner talker-to-hardware core.
* ---------------------------------------------------------------------------
*/
static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
static void nuke (struct pxa25x_ep *, int status);
/* one GPIO should control a D+ pullup, so host sees this device (or not) */
static void pullup_off(void)
{
	struct pxa2xx_udc_mach_info *mach = the_controller->mach;

	/* Board either wires a GPIO to D+ or supplies a command hook. */
	if (gpio_is_valid(mach->gpio_pullup))
		gpio_set_value(mach->gpio_pullup, mach->gpio_pullup_inverted);
	else if (mach->udc_command)
		mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}
static void pullup_on(void)
{
	struct pxa2xx_udc_mach_info *mach = the_controller->mach;
	int level = !mach->gpio_pullup_inverted;

	/* Board either wires a GPIO to D+ or supplies a command hook. */
	if (gpio_is_valid(mach->gpio_pullup))
		gpio_set_value(mach->gpio_pullup, level);
	else if (mach->udc_command)
		mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}
#if defined(CONFIG_CPU_BIG_ENDIAN)
/*
 * IXP4xx has its buses wired up in a way that relies on never doing any
 * byte swaps, independent of whether it runs in big-endian or little-endian
 * mode, as explained by Krzysztof Hałasa.
 *
 * We only support pxa25x in little-endian mode, but it is very likely
 * that it works the same way.
 */

/* Write @val to the UDC register at byte offset @reg. */
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	iowrite32be(val, dev->regs + reg);
}

/* Read the UDC register at byte offset @reg. */
static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return ioread32be(dev->regs + reg);
}
#else
/* Write @val to the UDC register at byte offset @reg. */
static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	writel(val, dev->regs + reg);
}

/* Read the UDC register at byte offset @reg. */
static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return readl(dev->regs + reg);
}
#endif
static void pio_irq_enable(struct pxa25x_ep *ep)
{
	u32 epnum = ep->bEndpointAddress & 0xf;

	/* UICR bits mask interrupts: clearing the bit enables the IRQ. */
	if (epnum < 8) {
		udc_set_reg(ep->dev, UICR0,
			    udc_get_reg(ep->dev, UICR0) & ~(1 << epnum));
	} else {
		epnum -= 8;
		udc_set_reg(ep->dev, UICR1,
			    udc_get_reg(ep->dev, UICR1) & ~(1 << epnum));
	}
}
static void pio_irq_disable(struct pxa25x_ep *ep)
{
	u32 epnum = ep->bEndpointAddress & 0xf;

	/* UICR bits mask interrupts: setting the bit disables the IRQ. */
	if (epnum < 8) {
		udc_set_reg(ep->dev, UICR0,
			    udc_get_reg(ep->dev, UICR0) | (1 << epnum));
	} else {
		epnum -= 8;
		udc_set_reg(ep->dev, UICR1,
			    udc_get_reg(ep->dev, UICR1) | (1 << epnum));
	}
}
/* The UDCCR reg contains mask and interrupt status bits,
* so using '|=' isn't safe as it may ack an interrupt.
*/
#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
/*
 * Set mask bits in UDCCR without acking interrupt-status bits.
 * Fix: udc_set_reg() takes (dev, reg, val); the original call had the
 * offset and value transposed, so the masked value was used as the
 * register offset and UDCCR (0x0000) was written as the data.
 */
static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		    (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
}
/*
 * Clear mask bits in UDCCR without acking interrupt-status bits.
 * Fix: udc_set_reg() takes (dev, reg, val); the original call had the
 * offset and value transposed.
 */
static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
	u32 udccr = udc_get_reg(dev, UDCCR);

	udc_set_reg(dev, UDCCR,
		    (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
}
/*
 * Ack interrupt-request bits in UDCCR while preserving the mask bits.
 * Fix: udc_set_reg() takes (dev, reg, val); the original call had the
 * offset and value transposed.
 */
static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
{
	/* udccr contains the bits we don't want to change */
	u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;

	udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
}
/* Read this endpoint's control/status register (UDCCSn). */
static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_udccs);
}
/*
 * Write this endpoint's control/status register (UDCCSn).
 * Fix: udc_set_reg() takes (dev, reg, val); the original call passed
 * (dev, data, reg), writing the register offset as the data.
 */
static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_udccs, data);
}
/* Read endpoint 0's control/status register (UDCCS0). */
static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
{
	return udc_get_reg(dev, UDCCS0);
}
/*
 * Write endpoint 0's control/status register (UDCCS0).
 * Fix: udc_set_reg() takes (dev, reg, val); the original call passed
 * (dev, data, UDCCS0), using the data as a register offset.
 */
static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
{
	udc_set_reg(dev, UDCCS0, data);
}
/* Read this endpoint's data register (UDDRn). */
static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_uddr);
}
/*
 * Write this endpoint's data register (UDDRn).
 * Fix: udc_set_reg() takes (dev, reg, val); the original call passed
 * (dev, data, reg), writing the FIFO offset instead of the data byte.
 */
static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
{
	udc_set_reg(ep->dev, ep->regoff_uddr, data);
}
/* Read this endpoint's byte count register (UBCRn). */
static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_ubcr);
}
/*
* endpoint enable/disable
*
* we need to verify the descriptors used to enable endpoints. since pxa25x
* endpoint configurations are fixed, and are pretty much always enabled,
* there's not a lot to manage here.
*
* because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
* (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
* for a single interface (with only the default altsetting) and for gadget
* drivers that don't halt endpoints (not reset by set_interface). that also
* means that if you use ISO, you must violate the USB spec rule that all
* iso endpoints must be in non-default altsettings.
*/
static int pxa25x_ep_enable (struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct pxa25x_ep *ep;
	struct pxa25x_udc *dev;

	ep = container_of (_ep, struct pxa25x_ep, ep);

	/* The descriptor must match this fixed-function endpoint exactly. */
	if (!_ep || !desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->fifo_size < usb_endpoint_maxp (desc)) {
		DMSG("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DMSG("%s, %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& usb_endpoint_maxp (desc)
						!= BULK_FIFO_SIZE)
			|| !desc->wMaxPacketSize) {
		DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	/* A gadget driver must be bound and the bus must be up. */
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DMSG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	ep->ep.desc = desc;
	ep->stopped = 0;
	ep->pio_irqs = 0;
	ep->ep.maxpacket = usb_endpoint_maxp (desc);

	/* flush fifo (mostly for OUT buffers) */
	pxa25x_ep_fifo_flush (_ep);

	/* ... reset halt state too, if we could ... */

	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
	return 0;
}
static int pxa25x_ep_disable (struct usb_ep *_ep)
{
	struct pxa25x_ep *ep = container_of(_ep, struct pxa25x_ep, ep);
	unsigned long flags;

	if (!_ep || !ep->ep.desc) {
		DMSG("%s, %s not enabled\n", __func__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}

	local_irq_save(flags);

	/* Retire everything still queued with -ESHUTDOWN. */
	nuke(ep, -ESHUTDOWN);

	/* flush fifo (mostly for IN buffers) */
	pxa25x_ep_fifo_flush(_ep);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers
* must still pass correctly initialized endpoints, since other controller
* drivers may care about how it's currently set up (dma issues etc).
*/
/*
* pxa25x_ep_alloc_request - allocate a request data structure
*/
static struct usb_request *
pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct pxa25x_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (req == NULL)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/*
* pxa25x_ep_free_request - deallocate a request data structure
*/
static void
pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa25x_request *req =
		container_of(_req, struct pxa25x_request, req);

	/* Freeing a request that is still queued is a driver bug. */
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/*-------------------------------------------------------------------------*/
/*
* done - retire a request; caller blocked irqs
*/
static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
{
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	/* Preserve a status that was already set (e.g. by a dequeue). */
	if (likely (req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	usb_gadget_giveback_request(&ep->ep, &req->req);
	ep->stopped = stopped;
}
/* Return the ep0 control state machine to its idle state. */
static inline void ep0_idle (struct pxa25x_udc *dev)
{
	dev->ep0state = EP0_IDLE;
}
/*
 * Push one packet (at most @max bytes) from @req into the endpoint's
 * data register, advancing req->req.actual.  Returns the byte count.
 */
static int
write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
{
	u8 *buf = req->req.buf + req->req.actual;
	unsigned length, i;

	prefetch(buf);

	/* Packet size: whatever remains, capped at max. */
	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	for (i = 0; i < length; i++)
		udc_ep_set_UDDR(ep, buf[i]);

	return length;
}
/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 *
 * Returns nonzero once the request completed (it has been retired via
 * done()); returns zero while more packets remain for later irqs.
 */
static int
write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	unsigned		max;

	max = usb_endpoint_maxp(ep->ep.desc);
	do {
		unsigned	count;
		int		is_last, is_short;

		count = write_packet(ep, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			/* full-size packet: last only when the request is
			 * exhausted and no explicit zlp was requested
			 */
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
			ep->ep.name, count,
			is_last ? "/L" : "", is_short ? "/S" : "",
			req->req.length - req->req.actual, req);

		/* let loose that packet. maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
		if (is_short)
			udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable(ep);
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

	} while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
	return 0;
}
/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 *
 * Acks the setup (SA|OPR plus caller-supplied @flags) in UDCCS0,
 * clears the ep0 irq status bit, and marks the pending setup handled.
 * @tag is only used for debug tracing.
 */
static inline
void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
{
	udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
	udc_set_reg(dev, USIR0, USIR0_IR0);
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__func__, tag, udc_ep0_get_UDCCS(dev), flags);
}
/*
 * write one IN packet to the ep0 fifo.  Returns nonzero (is_short)
 * when this packet ended the data stage; the request is then retired
 * and ep0 goes back to idle.
 */
static int
write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	struct pxa25x_udc	*dev = ep->dev;
	unsigned		count;
	int			is_short;

	count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
	ep->dev->stats.write.bytes += count;

	/* last packet "must be" short (or a zlp) */
	is_short = (count != EP0_FIFO_SIZE);

	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
		req->req.length - req->req.actual, req);

	if (unlikely (is_short)) {
		if (ep->dev->req_pending)
			ep0start(ep->dev, UDCCS0_IPR, "short IN");
		else
			udc_ep0_set_UDCCS(dev, UDCCS0_IPR);

		count = req->req.length;
		done (ep, req, 0);
		ep0_idle(ep->dev);
#ifndef CONFIG_ARCH_IXP4XX
#if 1
		/* This seems to get rid of lost status irqs in some cases:
		 * host responds quickly, or next request involves config
		 * change automagic, or should have been hidden, or ...
		 *
		 * FIXME get rid of all udelays possible...
		 */
		if (count >= EP0_FIFO_SIZE) {
			/* poll up to ~100us for the host's status-stage OUT */
			count = 100;
			do {
				if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) {
					/* clear OPR, generate ack */
					udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
					break;
				}
				count--;
				udelay(1);
			} while (count);
		}
#endif
#endif
	} else if (ep->dev->req_pending)
		ep0start(ep->dev, 0, "IN");
	return is_short;
}
/*
 * read_fifo -  unload packet(s) from the fifo we use for usb OUT
 * transfers and put them into the request.  caller should have made
 * sure there's at least one packet ready.
 *
 * returns true if the request completed because of short packet or the
 * request buffer having filled (and maybe overran till end-of-packet).
 */
static int
read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	for (;;) {
		u32		udccs;
		u8		*buf;
		unsigned	bufferspace, count, is_short;

		/* make sure there's a packet in the FIFO.
		 * UDCCS_{BO,IO}_RPC are all the same bit value.
		 * UDCCS_{BO,IO}_RNE are all the same bit value.
		 */
		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			/* byte-count register holds (len - 1) */
			count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8	byte = (u8) udc_ep_get_UDDR(ep);

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable(ep);
			return 1;
		}

		/* finished that packet.  the next one may be waiting... */
	}
	return 0;
}
/*
 * special ep0 version of the above.  no UBCR0 or double buffering; status
 * handshaking is magic.  most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 *
 * Returns nonzero when the request buffer has been filled.
 */
static int
read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	u8		*buf, byte;
	unsigned	bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	/* drain the fifo while "receive not empty" is set */
	while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
		byte = (u8) UDDR0;

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	/* ack the OUT packet (and prime IN for the status stage) */
	udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * pxa25x_ep_queue - submit an I/O request to an endpoint.
 *
 * Validates the request/endpoint/device state, then tries to start the
 * transfer immediately when the queue is empty (including the full ep0
 * control state machine); otherwise the request waits on the queue for
 * the pio irq handler to advance it.  Returns 0 or a negative errno.
 */
static int
pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa25x_request	*req;
	struct pxa25x_ep	*ep;
	struct pxa25x_udc	*dev;
	unsigned long		flags;

	/* note: short-circuit on !_req keeps container_of's result unused */
	req = container_of(_req, struct pxa25x_request, req);
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __func__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > usb_endpoint_maxp(ep->ep.desc)))
		return -EMSGSIZE;

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
		_ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->ep.desc == NULL/* ep0 */) {
			unsigned	length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ?  "" : " raced");
					if (dev->has_cfr)
						udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
							UDCCFR_ACM | UDCCFR_MB1);
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
		/* can the FIFO can satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		/* req == NULL above means the transfer already finished */
		if (likely(req && ep->ep.desc))
			pio_irq_enable(ep);
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != NULL))
		list_add_tail(&req->queue, &ep->queue);
	local_irq_restore(flags);

	return 0;
}
/*
 * nuke - dequeue ALL requests
 */
static void nuke(struct pxa25x_ep *ep, int status)
{
	/* called with irqs blocked */
	while (!list_empty(&ep->queue)) {
		struct pxa25x_request *victim;

		victim = list_entry(ep->queue.next,
				struct pxa25x_request, queue);
		done(ep, victim, status);
	}
	if (ep->ep.desc)
		pio_irq_disable(ep);
}
/* dequeue JUST ONE request
 *
 * Fails with -EINVAL for ep0 or when @_req is not actually queued on
 * this endpoint; otherwise retires it with -ECONNRESET.
 */
static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa25x_ep	*ep;
	struct pxa25x_request	*req = NULL;
	struct pxa25x_request	*iter;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	local_irq_save(flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	done(ep, req, -ECONNRESET);

	local_irq_restore(flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * pxa25x_ep_set_halt - set (only) the endpoint halt feature.
 *
 * Clearing a halt (@value == 0) cannot be done from software on this
 * hardware and returns -EROFS; only the host can clear it.
 */
static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa25x_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (unlikely (!_ep
			|| (!ep->ep.desc && ep->ep.name != ep0name))
			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DMSG("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (value == 0) {
		/* this path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		DMSG("only host can clear %s halt\n", _ep->name);
		return -EROFS;
	}

	local_irq_save(flags);

	/* an IN endpoint may only be halted when idle (fifo empty,
	 * no requests pending), else data could be lost
	 */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
			&& ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0
			   || !list_empty(&ep->queue))) {
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
	udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);

	/* ep0 needs special care */
	if (!ep->ep.desc) {
		start_watchdog(ep->dev);
		ep->dev->req_pending = 0;
		ep->dev->ep0state = EP0_STALL;

	/* and bulk/intr endpoints like dropping stalls too */
	} else {
		unsigned i;

		/* wait up to ~1ms for the stall to take effect */
		for (i = 0; i < 1000; i += 20) {
			if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST)
				break;
			udelay(20);
		}
	}
	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
	return 0;
}
/*
 * pxa25x_ep_fifo_status - report bytes currently held in an OUT fifo.
 * IN fifos are write-only on this hardware, so they report -EOPNOTSUPP.
 */
static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
{
	struct pxa25x_ep        *ep;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep) {
		DMSG("%s, bad ep\n", __func__);
		return -ENODEV;
	}
	/* pxa can't report unclaimed bytes from IN fifos */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
			|| (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0)
		return 0;
	else
		/* byte-count register holds (len - 1) */
		return (udc_ep_get_UBCR(ep) & 0xfff) + 1;
}
/*
 * pxa25x_ep_fifo_flush - discard any data held in an endpoint fifo.
 * Only legal on an idle, non-ep0 endpoint with an empty queue.
 */
static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
{
	struct pxa25x_ep        *ep;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
		DMSG("%s, bad ep\n", __func__);
		return;
	}

	/* toggle and halt bits stay unchanged */

	/* for OUT, just read and discard the FIFO contents. */
	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
		while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
			(void)udc_ep_get_UDDR(ep);
		return;
	}

	/* most IN status is the same, but ISO can't stall */
	udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
		| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			? 0 : UDCCS_BI_SST));
}
/* per-endpoint operations exposed through the gadget framework */
static const struct usb_ep_ops pxa25x_ep_ops = {
	.enable		= pxa25x_ep_enable,
	.disable	= pxa25x_ep_disable,

	.alloc_request	= pxa25x_ep_alloc_request,
	.free_request	= pxa25x_ep_free_request,

	.queue		= pxa25x_ep_queue,
	.dequeue	= pxa25x_ep_dequeue,

	.set_halt	= pxa25x_ep_set_halt,
	.fifo_status	= pxa25x_ep_fifo_status,
	.fifo_flush	= pxa25x_ep_fifo_flush,
};
/* ---------------------------------------------------------------------------
* device-scoped parts of the api to the usb controller hardware
* ---------------------------------------------------------------------------
*/
static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
{
struct pxa25x_udc *dev;
dev = container_of(_gadget, struct pxa25x_udc, gadget);
return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) |
(udc_get_reg(dev, UFNRL) & 0xff);
}
/*
 * pxa25x_udc_wakeup - signal remote wakeup by forcing bus resume,
 * but only if the host granted the remote-wakeup feature (DRWF set).
 */
static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
{
	struct pxa25x_udc	*udc;

	udc = container_of(_gadget, struct pxa25x_udc, gadget);

	/* host may not have enabled remote wakeup */
	if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0)
		return -EHOSTUNREACH;
	udc_set_mask_UDCCR(udc, UDCCR_RSM);
	return 0;
}
static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
static void udc_enable (struct pxa25x_udc *);
static void udc_disable(struct pxa25x_udc *);
/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
 * in active use.
 */
static int pullup(struct pxa25x_udc *udc)
{
	/* controller should run only with vbus present, pullup requested,
	 * and not suspended
	 */
	int is_active = udc->vbus && udc->pullup && !udc->suspended;
	DMSG("%s\n", is_active ? "active" : "inactive");
	if (is_active) {
		if (!udc->active) {
			udc->active = 1;
			/* Enable clock for USB device */
			clk_enable(udc->clk);
			udc_enable(udc);
		}
	} else {
		if (udc->active) {
			if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				DMSG("disconnect %s\n", udc->driver
					? udc->driver->driver.name
					: "(no driver)");
				/* quiesce the gadget driver first */
				stop_activity(udc, udc->driver);
			}
			udc_disable(udc);
			/* Disable clock for USB device */
			clk_disable(udc->clk);
			udc->active = 0;
		}

	}
	return 0;
}
/* VBUS reporting logically comes from a transceiver */
static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct pxa25x_udc *udc = container_of(_gadget, struct pxa25x_udc, gadget);

	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
	udc->vbus = is_active;
	pullup(udc);
	return 0;
}
/* drivers may have software control over D+ pullup */
static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	struct pxa25x_udc	*udc;

	udc = container_of(_gadget, struct pxa25x_udc, gadget);

	/* not all boards support pullup control */
	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		return -EOPNOTSUPP;

	udc->pullup = (is_active != 0);
	pullup(udc);
	return 0;
}
/* boards may consume current from VBUS, up to 100-500mA based on config.
 * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
 * violate USB specs.
 */
static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
	struct pxa25x_udc *udc = container_of(_gadget, struct pxa25x_udc, gadget);

	/* without a transceiver we have no way to set the power budget */
	if (IS_ERR_OR_NULL(udc->transceiver))
		return -EOPNOTSUPP;
	return usb_phy_set_power(udc->transceiver, mA);
}
static int pxa25x_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int pxa25x_udc_stop(struct usb_gadget *g);

/* device-level operations exposed through the gadget framework */
static const struct usb_gadget_ops pxa25x_udc_ops = {
	.get_frame	= pxa25x_udc_get_frame,
	.wakeup		= pxa25x_udc_wakeup,
	.vbus_session	= pxa25x_udc_vbus_session,
	.pullup		= pxa25x_udc_pullup,
	.vbus_draw	= pxa25x_udc_vbus_draw,
	.udc_start	= pxa25x_udc_start,
	.udc_stop	= pxa25x_udc_stop,
};
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FS
/*
 * udc_debug_show - debugfs dump of controller/endpoint state.
 * Runs with irqs blocked so register reads and queue walks are coherent.
 */
static int udc_debug_show(struct seq_file *m, void *_d)
{
	struct pxa25x_udc	*dev = m->private;
	unsigned long		flags;
	int			i;
	u32			tmp;

	local_irq_save(flags);

	/* basic device status */
	seq_printf(m, DRIVER_DESC "\n"
		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
		driver_name, DRIVER_VERSION SIZE_STR "(pio)",
		dev->driver ? dev->driver->driver.name : "(none)",
		dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected");

	/* registers for device and ep0 */
	seq_printf(m,
		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
		udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0),
		udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0),
		udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL));

	tmp = udc_get_reg(dev, UDCCR);
	seq_printf(m,
		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCR_REM) ? " rem" : "",
		(tmp & UDCCR_RSTIR) ? " rstir" : "",
		(tmp & UDCCR_SRM) ? " srm" : "",
		(tmp & UDCCR_SUSIR) ? " susir" : "",
		(tmp & UDCCR_RESIR) ? " resir" : "",
		(tmp & UDCCR_RSM) ? " rsm" : "",
		(tmp & UDCCR_UDA) ? " uda" : "",
		(tmp & UDCCR_UDE) ? " ude" : "");

	tmp = udc_ep0_get_UDCCS(dev);
	seq_printf(m,
		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCS0_SA) ? " sa" : "",
		(tmp & UDCCS0_RNE) ? " rne" : "",
		(tmp & UDCCS0_FST) ? " fst" : "",
		(tmp & UDCCS0_SST) ? " sst" : "",
		(tmp & UDCCS0_DRWF) ? " dwrf" : "",
		(tmp & UDCCS0_FTF) ? " ftf" : "",
		(tmp & UDCCS0_IPR) ? " ipr" : "",
		(tmp & UDCCS0_OPR) ? " opr" : "");

	if (dev->has_cfr) {
		tmp = udc_get_reg(dev, UDCCFR);
		seq_printf(m,
			"udccfr %02X =%s%s\n", tmp,
			(tmp & UDCCFR_AREN) ? " aren" : "",
			(tmp & UDCCFR_ACM) ? " acm" : "");
	}

	if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
		goto done;

	seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
		dev->stats.write.bytes, dev->stats.write.ops,
		dev->stats.read.bytes, dev->stats.read.ops,
		dev->stats.irqs);

	/* dump endpoint queues */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep	*ep = &dev->ep [i];
		struct pxa25x_request	*req;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*desc;

			desc = ep->ep.desc;
			if (!desc)
				continue;
			tmp = udc_ep_get_UDCCS(&dev->ep[i]);
			seq_printf(m,
				"%s max %d %s udccs %02x irqs %lu\n",
				ep->ep.name, usb_endpoint_maxp(desc),
				"pio", tmp, ep->pio_irqs);
			/* TODO translate all five groups of udccs bits! */

		} else /* ep0 should only have one transfer queued */
			seq_printf(m, "ep0 max 16 pio irqs %lu\n",
				ep->pio_irqs);

		if (list_empty(&ep->queue)) {
			seq_printf(m, "\t(nothing queued)\n");
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			seq_printf(m,
				"\treq %p len %d/%d buf %p\n",
				&req->req, req->req.actual,
				req->req.length, req->req.buf);
		}
	}

done:
	local_irq_restore(flags);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(udc_debug);
#define create_debug_files(dev) \
do { \
debugfs_create_file(dev->gadget.name, \
S_IRUGO, NULL, dev, &udc_debug_fops); \
} while (0)
#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
#define create_debug_files(dev) do {} while (0)
#define remove_debug_files(dev) do {} while (0)
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/*
 * 	udc_disable - disable USB device controller
 *
 * Masks every UDC interrupt source, drops the D+ pullup (where the
 * board supports it), and turns the controller off.
 */
static void udc_disable(struct pxa25x_udc *dev)
{
	/* block all irqs */
	udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM);
	udc_set_reg(dev, UICR0, 0xff);
	udc_set_reg(dev, UICR1, 0xff);
	udc_set_reg(dev, UFNRH, UFNRH_SIM);

	/* if hardware supports it, disconnect from usb */
	pullup_off();

	udc_clear_mask_UDCCR(dev, UDCCR_UDE);

	ep0_idle (dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
}
/*
 * 	udc_reinit - initialize software state
 *
 * Resets the driver-visible endpoint and ep0 bookkeeping; does not
 * touch hardware.
 */
static void udc_reinit(struct pxa25x_udc *dev)
{
	u32	i;

	/* device/ep0 records init */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
	dev->ep0state = EP0_IDLE;
	dev->gadget.quirk_altset_not_supp = 1;

	/* basic endpoint records init */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep *ep = &dev->ep[i];

		/* ep0 is the gadget's ep0, not on the general list */
		if (i != 0)
			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.desc = NULL;
		ep->stopped = 0;
		INIT_LIST_HEAD (&ep->queue);
		ep->pio_irqs = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
	}

	/* the rest was statically initialized, and is read-only */
}
/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 */
static void udc_enable (struct pxa25x_udc *dev)
{
	udc_clear_mask_UDCCR(dev, UDCCR_UDE);

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	ep0_idle(dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->stats.irqs = 0;

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 * - enable UDC
	 * - if RESET is already in progress, ack interrupt
	 * - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(dev, UDCCR_UDE);
	if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
		udc_ack_int_UDCCR(dev, UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		udc_set_reg(dev, UDC_RES1, 0x00);
		udc_set_reg(dev, UDC_RES2, 0x00);
	}

	/* enable suspend/resume and reset irqs */
	udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM);

	/* enable ep0 irqs */
	udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0);

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}
/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int pxa25x_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct pxa25x_udc	*dev = to_pxa25x(g);
	int			retval;

	/* first hook up the driver ... */
	dev->driver = driver;
	dev->pullup = 1;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	/* connect to bus through transceiver */
	if (!IS_ERR_OR_NULL(dev->transceiver)) {
		retval = otg_set_peripheral(dev->transceiver->otg,
						&dev->gadget);
		if (retval)
			goto bind_fail;
	}

	dump_state(dev);
	return 0;
bind_fail:
	/* NOTE(review): dev->driver/dev->pullup stay set on this failure
	 * path — presumably the UDC core unwinds via ->udc_stop; confirm.
	 */
	return retval;
}
/*
 * reset_gadget - quiesce all endpoints and report a USB reset to the
 * gadget driver (via usb_gadget_udc_reset), then reinitialize the
 * driver-visible state.  Differs from stop_activity() only in which
 * callback the gadget driver receives.
 */
static void
reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	del_timer_sync(&dev->timer);

	/* report reset; the driver is already quiesced */
	if (driver)
		usb_gadget_udc_reset(&dev->gadget, driver);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}
/*
 * stop_activity - quiesce all endpoints and report a disconnect to the
 * gadget driver, then reinitialize the driver-visible state.
 */
static void
stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	del_timer_sync(&dev->timer);

	/* report disconnect; the driver is already quiesced */
	if (driver)
		driver->disconnect(&dev->gadget);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}
/*
 * pxa25x_udc_stop - gadget-core callback to unbind the gadget driver.
 * Quiesces with irqs off, detaches from the transceiver, and clears
 * the driver pointer.
 */
static int pxa25x_udc_stop(struct usb_gadget*g)
{
	struct pxa25x_udc	*dev = to_pxa25x(g);

	local_irq_disable();
	dev->pullup = 0;
	stop_activity(dev, NULL);
	local_irq_enable();

	if (!IS_ERR_OR_NULL(dev->transceiver))
		(void) otg_set_peripheral(dev->transceiver->otg, NULL);

	dev->driver = NULL;

	dump_state(dev);

	return 0;
}
/*-------------------------------------------------------------------------*/

static inline void clear_ep_state (struct pxa25x_udc *dev)
{
	unsigned epnum;

	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
	 * fifos, and pending transactions mustn't be continued in any case.
	 */
	for (epnum = 1; epnum < PXA_UDC_NUM_ENDPOINTS; epnum++)
		nuke(&dev->ep[epnum], -ECONNABORTED);
}
/*
 * udc_watchdog - timer callback that re-asserts an ep0 stall if the
 * hardware dropped FST before the host acted on it; re-arms itself
 * while the stall condition persists.
 */
static void udc_watchdog(struct timer_list *t)
{
	struct pxa25x_udc	*dev = from_timer(dev, t, timer);

	local_irq_disable();
	if (dev->ep0state == EP0_STALL
			&& (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0
			&& (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) {
		udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF);
		DBG(DBG_VERBOSE, "ep0 re-stall\n");
		start_watchdog(dev);
	}
	local_irq_enable();
}
/*
 * handle_ep0 - service an ep0 interrupt: read SETUP packets, drive the
 * control-transfer state machine (IDLE / IN / OUT data phases, end of
 * transfer, stall), and dispatch setup requests to the gadget driver.
 * Called from the irq handler with irqs blocked.
 */
static void handle_ep0 (struct pxa25x_udc *dev)
{
	u32			udccs0 = udc_ep0_get_UDCCS(dev);
	struct pxa25x_ep	*ep = &dev->ep [0];
	struct pxa25x_request	*req;
	union {
		struct usb_ctrlrequest	r;
		u8			raw [8];
		u32			word [2];
	} u;

	if (list_empty(&ep->queue))
		req = NULL;
	else
		req = list_entry(ep->queue.next, struct pxa25x_request, queue);

	/* clear stall status */
	if (udccs0 & UDCCS0_SST) {
		nuke(ep, -EPIPE);
		udc_ep0_set_UDCCS(dev, UDCCS0_SST);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	/* previous request unfinished?  non-error iff back-to-back ... */
	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
		nuke(ep, 0);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	switch (dev->ep0state) {
	case EP0_IDLE:
		/* late-breaking status? */
		udccs0 = udc_ep0_get_UDCCS(dev);

		/* start control request? */
		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
			int i;

			nuke (ep, -EPROTO);

			/* read SETUP packet */
			for (i = 0; i < 8; i++) {
				if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) {
bad_setup:
					DMSG("SETUP %d!\n", i);
					goto stall;
				}
				u.raw [i] = (u8) UDDR0;
			}
			/* exactly 8 bytes expected; more means junk */
			if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0))
				goto bad_setup;

got_setup:
			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				le16_to_cpu(u.r.wValue),
				le16_to_cpu(u.r.wIndex),
				le16_to_cpu(u.r.wLength));

			/* cope with automagic for some standard requests. */
			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
				== USB_TYPE_STANDARD;
			dev->req_config = 0;
			dev->req_pending = 1;
			switch (u.r.bRequest) {
			/* hardware restricts gadget drivers here! */
			case USB_REQ_SET_CONFIGURATION:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					/* reflect hardware's automagic
					 * up to the gadget driver.
					 */
config_change:
					dev->req_config = 1;
					clear_ep_state(dev);
					/* if !has_cfr, there's no synch
					 * else use AREN (later) not SA|OPR
					 * USIR0_IR0 acts edge sensitive
					 */
				}
				break;
			/* ... and here, even more ... */
			case USB_REQ_SET_INTERFACE:
				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
					/* udc hardware is broken by design:
					 *  - altsetting may only be zero;
					 *  - hw resets all interfaces' eps;
					 *  - ep reset doesn't include halt(?).
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			/* hand the request to the gadget driver */
			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to tell
					 * tell us about config change events,
					 * so later ones may fail...
					 */
					WARNING("config change %02x fail %d?\n",
						u.r.bRequest, i);
					return;
					/* TODO experiment:  if has_cfr,
					 * hardware didn't ACK; maybe we
					 * could actually STALL!
					 */
				}
				DBG(DBG_VERBOSE, "protocol STALL, "
					"%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall:
				/* the watchdog timer helps deal with cases
				 * where udc seems to clear FST wrongly, and
				 * then NAKs instead of STALLing.
				 */
				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
				start_watchdog(dev);
				dev->ep0state = EP0_STALL;

			/* deferred i/o == no response yet */
			} else if (dev->req_pending) {
				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
						|| dev->req_std || u.r.wLength))
					ep0start(dev, 0, "defer");
				else
					ep0start(dev, UDCCS0_IPR, "defer/IPR");
			}

			/* expect at least one data or status stage irq */
			return;

		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
				== (UDCCS0_OPR|UDCCS0_SA))) {
			unsigned i;

			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
			 * still observed on a pxa255 a0.
			 */
			DBG(DBG_VERBOSE, "e131\n");
			nuke(ep, -EPROTO);

			/* read SETUP data, but don't trust it too much */
			for (i = 0; i < 8; i++)
				u.raw [i] = (u8) UDDR0;
			if ((u.r.bRequestType & USB_RECIP_MASK)
					> USB_RECIP_OTHER)
				goto stall;
			if (u.word [0] == 0 && u.word [1] == 0)
				goto stall;
			goto got_setup;
		} else {
			/* some random early IRQ:
			 * - we acked FST
			 * - IPR cleared
			 * - OPR got set, without SA (likely status stage)
			 */
			udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
		}
		break;
	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
			DBG(DBG_VERBOSE, "ep0in premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		} else /* irq was IPR clearing */ {
			if (req) {
				/* this IN packet might finish the request */
				(void) write_ep0_fifo(ep, req);
			} /* else IN token before response was written */
		}
		break;
	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			if (req) {
				/* this OUT packet might finish the request */
				if (read_ep0_fifo(ep, req))
					done(ep, req, 0);
				/* else more OUT packets expected */
			} /* else OUT token before read was issued */
		} else /* irq was IPR clearing */ {
			DBG(DBG_VERBOSE, "ep0out premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		}
		break;
	case EP0_END_XFER:
		if (req)
			done(ep, req, 0);
		/* ack control-IN status (maybe in-zlp was skipped)
		 * also appears after some config change events.
		 */
		if (udccs0 & UDCCS0_OPR)
			udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
		ep0_idle(dev);
		break;
	case EP0_STALL:
		udc_ep0_set_UDCCS(dev, UDCCS0_FST);
		break;
	}
	/* ack the ep0 interrupt */
	udc_set_reg(dev, USIR0, USIR0_IR0);
}
/*
 * handle_ep - service a data-endpoint interrupt: ack error/status bits
 * and move as many packets as the fifo allows, looping while requests
 * keep completing.  Called from the irq handler with irqs blocked.
 */
static void handle_ep(struct pxa25x_ep *ep)
{
	struct pxa25x_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa25x_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			/* write-to-clear the error/underrun bits we saw */
			if (likely (tmp))
				udc_ep_set_UDCCS(ep, tmp);
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);

		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				udc_ep_set_UDCCS(ep, tmp);

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
				completed = read_fifo(ep, req);
			} else
				pio_irq_disable(ep);
		}
		ep->pio_irqs++;
	} while (completed);
}
/*
 *	pxa25x_udc_irq - interrupt handler
 *
 * avoid delays in ep0 processing. the control handshaking isn't always
 * under software control (pxa250c0 and the pxa255 are better), and delays
 * could cause usb protocol errors.
 *
 * Loops until a pass makes no progress: handles suspend/resume/reset
 * device events first, then dispatches unmasked per-endpoint irqs.
 */
static irqreturn_t
pxa25x_udc_irq(int irq, void *_dev)
{
	struct pxa25x_udc	*dev = _dev;
	int			handled;

	dev->stats.irqs++;
	do {
		u32		udccr = udc_get_reg(dev, UDCCR);

		handled = 0;

		/* SUSpend Interrupt Request */
		if (unlikely(udccr & UDCCR_SUSIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB suspend\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			ep0_idle (dev);
		}

		/* RESume Interrupt Request */
		if (unlikely(udccr & UDCCR_RESIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RESIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB resume\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}

		/* ReSeT Interrupt Request - USB reset */
		if (unlikely(udccr & UDCCR_RSTIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
			handled = 1;

			/* UDA clear means the reset is still in progress */
			if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
				DBG(DBG_VERBOSE, "USB reset start\n");

				/* reset driver and endpoints,
				 * in case that's not yet done
				 */
				reset_gadget(dev, dev->driver);

			} else {
				DBG(DBG_VERBOSE, "USB reset end\n");
				dev->gadget.speed = USB_SPEED_FULL;
				memset(&dev->stats, 0, sizeof dev->stats);
				/* driver and endpoints are still reset */
			}

		} else {
			/* mask off sources that are interrupt-disabled */
			u32	usir0 = udc_get_reg(dev, USIR0) &
					~udc_get_reg(dev, UICR0);
			u32	usir1 = udc_get_reg(dev, USIR1) &
					~udc_get_reg(dev, UICR1);
			int	i;

			if (unlikely (!usir0 && !usir1))
				continue;

			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);

			/* control traffic */
			if (usir0 & USIR0_IR0) {
				dev->ep[0].pio_irqs++;
				handle_ep0(dev);
				handled = 1;
			}

			/* endpoint data transfers */
			for (i = 0; i < 8; i++) {
				u32	tmp = 1 << i;

				if (i && (usir0 & tmp)) {
					handle_ep(&dev->ep[i]);
					udc_set_reg(dev, USIR0,
						udc_get_reg(dev, USIR0) | tmp);
					handled = 1;
				}
#ifndef	CONFIG_USB_PXA25X_SMALL
				if (usir1 & tmp) {
					handle_ep(&dev->ep[i+8]);
					udc_set_reg(dev, USIR1,
						udc_get_reg(dev, USIR1) | tmp);
					handled = 1;
				}
#endif
			}
		}

		/* we could also ask for 1 msec SOF (SIR) interrupts */

	} while (handled);
	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
 * No-op release for the statically allocated gadget device: there is
 * nothing to free, but the driver core requires a release callback.
 */
static void nop_release (struct device *dev)
{
	DMSG("%s %s\n", __func__, dev_name(dev));
}
/* this uses load-time allocation and initialization (instead of
* doing it at run-time) to save code, eliminate fault paths, and
* be more obviously correct.
*/
/*
 * Statically allocated controller state: the gadget, ep0, and the 15 data
 * endpoints with their fixed PXA25x FIFO register offsets.
 *
 * Fix: the interrupt endpoints (ep5/ep10/ep15) previously declared
 * USB_EP_CAPS(0, 0), i.e. no supported transfer type and no direction.
 * With empty capabilities usb_ep_autoconfig() can never match them, so
 * gadget drivers needing an INT-IN endpoint would fail.  They are IN-only
 * interrupt endpoints, so advertise TYPE_INT / DIR_IN.
 */
static struct pxa25x_udc memory = {
	.gadget = {
		.ops = &pxa25x_udc_ops,
		.ep0 = &memory.ep[0].ep,
		.name = driver_name,
		.dev = {
			.init_name = "gadget",
			.release = nop_release,
		},
	},
	/* control endpoint */
	.ep[0] = {
		.ep = {
			.name = ep0name,
			.ops = &pxa25x_ep_ops,
			.maxpacket = EP0_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
					    USB_EP_CAPS_DIR_ALL),
		},
		.dev = &memory,
		.regoff_udccs = UDCCS0,
		.regoff_uddr = UDDR0,
	},
	/* first group of endpoints */
	.ep[1] = {
		.ep = {
			.name = "ep1in-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS1,
		.regoff_uddr = UDDR1,
	},
	.ep[2] = {
		.ep = {
			.name = "ep2out-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = 2,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS2,
		.regoff_ubcr = UBCR2,
		.regoff_uddr = UDDR2,
	},
#ifndef CONFIG_USB_PXA25X_SMALL
	.ep[3] = {
		.ep = {
			.name = "ep3in-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 3,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS3,
		.regoff_uddr = UDDR3,
	},
	.ep[4] = {
		.ep = {
			.name = "ep4out-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = 4,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS4,
		.regoff_ubcr = UBCR4,
		.regoff_uddr = UDDR4,
	},
	.ep[5] = {
		.ep = {
			.name = "ep5in-int",
			.ops = &pxa25x_ep_ops,
			.maxpacket = INT_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 5,
		.bmAttributes = USB_ENDPOINT_XFER_INT,
		.regoff_udccs = UDCCS5,
		.regoff_uddr = UDDR5,
	},
	/* second group of endpoints */
	.ep[6] = {
		.ep = {
			.name = "ep6in-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 6,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS6,
		.regoff_uddr = UDDR6,
	},
	.ep[7] = {
		.ep = {
			.name = "ep7out-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = 7,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS7,
		.regoff_ubcr = UBCR7,
		.regoff_uddr = UDDR7,
	},
	.ep[8] = {
		.ep = {
			.name = "ep8in-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 8,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS8,
		.regoff_uddr = UDDR8,
	},
	.ep[9] = {
		.ep = {
			.name = "ep9out-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = 9,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS9,
		.regoff_ubcr = UBCR9,
		.regoff_uddr = UDDR9,
	},
	.ep[10] = {
		.ep = {
			.name = "ep10in-int",
			.ops = &pxa25x_ep_ops,
			.maxpacket = INT_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 10,
		.bmAttributes = USB_ENDPOINT_XFER_INT,
		.regoff_udccs = UDCCS10,
		.regoff_uddr = UDDR10,
	},
	/* third group of endpoints */
	.ep[11] = {
		.ep = {
			.name = "ep11in-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 11,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS11,
		.regoff_uddr = UDDR11,
	},
	.ep[12] = {
		.ep = {
			.name = "ep12out-bulk",
			.ops = &pxa25x_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = BULK_FIFO_SIZE,
		.bEndpointAddress = 12,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
		.regoff_udccs = UDCCS12,
		.regoff_ubcr = UBCR12,
		.regoff_uddr = UDDR12,
	},
	.ep[13] = {
		.ep = {
			.name = "ep13in-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 13,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS13,
		.regoff_uddr = UDDR13,
	},
	.ep[14] = {
		.ep = {
			.name = "ep14out-iso",
			.ops = &pxa25x_ep_ops,
			.maxpacket = ISO_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					    USB_EP_CAPS_DIR_OUT),
		},
		.dev = &memory,
		.fifo_size = ISO_FIFO_SIZE,
		.bEndpointAddress = 14,
		.bmAttributes = USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs = UDCCS14,
		.regoff_ubcr = UBCR14,
		.regoff_uddr = UDDR14,
	},
	.ep[15] = {
		.ep = {
			.name = "ep15in-int",
			.ops = &pxa25x_ep_ops,
			.maxpacket = INT_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					    USB_EP_CAPS_DIR_IN),
		},
		.dev = &memory,
		.fifo_size = INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 15,
		.bmAttributes = USB_ENDPOINT_XFER_INT,
		.regoff_udccs = UDCCS15,
		.regoff_uddr = UDDR15,
	},
#endif /* !CONFIG_USB_PXA25X_SMALL */
};
#define CP15R0_VENDOR_MASK 0xffffe000
#if defined(CONFIG_ARCH_PXA)
#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
#elif defined(CONFIG_ARCH_IXP4XX)
#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
#endif
#define CP15R0_PROD_MASK 0x000003f0
#define PXA25x 0x00000100 /* and PXA26x */
#define PXA210 0x00000120
#define CP15R0_REV_MASK 0x0000000f
#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
#define PXA255_A0 0x00000106 /* or PXA260_B1 */
#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
#define PXA250_B2 0x00000104
#define PXA250_B1 0x00000103 /* or PXA260_A0 */
#define PXA250_B0 0x00000102
#define PXA250_A1 0x00000101
#define PXA250_A0 0x00000100
#define PXA210_C0 0x00000125
#define PXA210_B2 0x00000124
#define PXA210_B1 0x00000123
#define PXA210_B0 0x00000122
#define IXP425_A0 0x000001c1
#define IXP425_B0 0x000001f1
#define IXP465_AD 0x00000200
/*
* probe - binds to the platform device
*/
/*
 * Bind to the platform device: verify we are on a supported XScale
 * (PXA25x/PXA210 or IXP4xx) revision, map resources, wire up the
 * optional pullup GPIO, reset the hardware, and register the gadget.
 */
static int pxa25x_udc_probe(struct platform_device *pdev)
{
	struct pxa25x_udc *dev = &memory;
	int retval, irq;
	u32 chiprev;
	pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
	/* insist on Intel/ARM/XScale */
	asm("mrc p15, 0, %0, c0, c0" : "=r" (chiprev));
	if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
		pr_err("%s: not XScale!\n", driver_name);
		return -ENODEV;
	}
	/* trigger chiprev-specific logic */
	switch (chiprev & CP15R0_PRODREV_MASK) {
#if defined(CONFIG_ARCH_PXA)
	case PXA255_A0:
		/* only this stepping has the configuration-change register */
		dev->has_cfr = 1;
		break;
	case PXA250_A0:
	case PXA250_A1:
		/* A0/A1 "not released"; ep 13, 15 unusable */
		fallthrough;
	case PXA250_B2: case PXA210_B2:
	case PXA250_B1: case PXA210_B1:
	case PXA250_B0: case PXA210_B0:
		/* OUT-DMA is broken ... */
		fallthrough;
	case PXA250_C0: case PXA210_C0:
		break;
#elif defined(CONFIG_ARCH_IXP4XX)
	case IXP425_A0:
	case IXP425_B0:
	case IXP465_AD:
		dev->has_cfr = 1;
		break;
#endif
	default:
		pr_err("%s: unrecognized processor: %08x\n",
			driver_name, chiprev);
		/* iop3xx, ixp4xx, ... */
		return -ENODEV;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	dev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs))
		return PTR_ERR(dev->regs);
	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk))
		return PTR_ERR(dev->clk);
	pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
		dev->has_cfr ? "" : " (!cfr)",
		SIZE_STR "(pio)"
		);
	/* other non-static parts of init */
	dev->dev = &pdev->dev;
	dev->mach = dev_get_platdata(&pdev->dev);
	/* transceiver is optional; error/NULL checked on the cleanup paths */
	dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (gpio_is_valid(dev->mach->gpio_pullup)) {
		retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup,
					   "pca25x_udc GPIO PULLUP");
		if (retval) {
			dev_dbg(&pdev->dev,
				"can't get pullup gpio %d, err: %d\n",
				dev->mach->gpio_pullup, retval);
			goto err;
		}
		/* start disconnected from the host */
		gpio_direction_output(dev->mach->gpio_pullup, 0);
	}
	timer_setup(&dev->timer, udc_watchdog, 0);
	the_controller = dev;
	platform_set_drvdata(pdev, dev);
	/* put hardware into a known-quiet state before enabling the irq */
	udc_disable(dev);
	udc_reinit(dev);
	dev->vbus = 0;
	/* irq setup after old hardware state is cleaned up */
	retval = devm_request_irq(&pdev->dev, irq, pxa25x_udc_irq, 0,
				  driver_name, dev);
	if (retval != 0) {
		pr_err("%s: can't get irq %d, err %d\n",
			driver_name, irq, retval);
		goto err;
	}
	dev->got_irq = 1;
	create_debug_files(dev);
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (!retval)
		return retval;
	remove_debug_files(dev);
 err:
	if (!IS_ERR_OR_NULL(dev->transceiver))
		dev->transceiver = NULL;
	return retval;
}
/* Drop the D+ pullup on shutdown so the host sees a clean disconnect. */
static void pxa25x_udc_shutdown(struct platform_device *_dev)
{
	pullup_off();
}
/*
 * Unbind from the platform device.  Refuses (-EBUSY) while a gadget
 * driver is still bound; otherwise disconnects from the host and tears
 * down the UDC registration and debug files.
 */
static int pxa25x_udc_remove(struct platform_device *pdev)
{
	struct pxa25x_udc *dev = platform_get_drvdata(pdev);
	if (dev->driver)
		return -EBUSY;
	usb_del_gadget_udc(&dev->gadget);
	/* force the D+ pullup off so the host sees a disconnect */
	dev->pullup = 0;
	pullup(dev);
	remove_debug_files(dev);
	if (!IS_ERR_OR_NULL(dev->transceiver))
		dev->transceiver = NULL;
	the_controller = NULL;
	return 0;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* USB suspend (controlled by the host) and system suspend (controlled
* by the PXA) don't necessarily work well together. If USB is active,
* the 48 MHz clock is required; so the system can't enter 33 MHz idle
* mode, or any deeper PM saving state.
*
* For now, we punt and forcibly disconnect from the USB host when PXA
* enters any suspend state. While we're disconnected, we always disable
* the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
* Boards without software pullup control shouldn't use those states.
* VBUS IRQs should probably be ignored so that the PXA device just acts
* "dead" to USB hosts until system resume.
*/
/*
 * System suspend: forcibly disconnect from the host (see the block
 * comment above).  Setting udc->suspended before calling pullup()
 * makes it drop the D+ pullup; boards without pullup control can't
 * signal the disconnect, hence the warning.
 */
static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct pxa25x_udc *udc = platform_get_drvdata(dev);
	unsigned long flags;
	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		WARNING("USB host won't detect disconnect!\n");
	udc->suspended = 1;
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);
	return 0;
}
/*
 * System resume: clear the suspended flag and re-run pullup() so the
 * device reconnects to the host (if it was connected before suspend).
 */
static int pxa25x_udc_resume(struct platform_device *dev)
{
	struct pxa25x_udc *udc = platform_get_drvdata(dev);
	unsigned long flags;
	udc->suspended = 0;
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);
	return 0;
}
#else
#define pxa25x_udc_suspend NULL
#define pxa25x_udc_resume NULL
#endif
/*-------------------------------------------------------------------------*/
/* Platform driver glue; matches the "pxa25x-udc" platform device. */
static struct platform_driver udc_driver = {
	.shutdown = pxa25x_udc_shutdown,
	.probe = pxa25x_udc_probe,
	.remove = pxa25x_udc_remove,
	.suspend = pxa25x_udc_suspend,
	.resume = pxa25x_udc_resume,
	.driver = {
		.name = "pxa25x-udc",
	},
};
module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa25x-udc");
| linux-master | drivers/usb/gadget/udc/pxa25x_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* M66592 UDC (USB gadget)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <[email protected]>
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "m66592-udc.h"
MODULE_DESCRIPTION("M66592 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:m66592_udc");
#define DRIVER_VERSION "21 July 2009"
static const char udc_name[] = "m66592_udc";
static const char *m66592_ep_name[] = {
"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7"
};
static void disable_controller(struct m66592 *m66592);
static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
gfp_t gfp_flags);
static void transfer_complete(struct m66592_ep *ep,
struct m66592_request *req, int status);
/*-------------------------------------------------------------------------*/
/* Return the RHST field of DVSTCTR, which encodes the negotiated bus speed. */
static inline u16 get_usb_speed(struct m66592 *m66592)
{
	u16 dvstctr = m66592_read(m66592, M66592_DVSTCTR);

	return dvstctr & M66592_RHST;
}
/*
 * Set the per-pipe enable bit in @reg.  The INTENB0 master enables
 * (BEMPE/NRDYE/BRDYE) are saved and masked off around the update, then
 * restored, so no interrupt fires while the enable bit is being changed.
 */
static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;
	tmp = m66592_read(m66592, M66592_INTENB0);
	m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
			M66592_INTENB0);
	m66592_bset(m66592, (1 << pipenum), reg);
	m66592_write(m66592, tmp, M66592_INTENB0);
}
/*
 * Clear the per-pipe enable bit in @reg, with the same INTENB0
 * save/mask/restore dance as enable_pipe_irq().
 */
static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;
	tmp = m66592_read(m66592, M66592_INTENB0);
	m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
			M66592_INTENB0);
	m66592_bclr(m66592, (1 << pipenum), reg);
	m66592_write(m66592, tmp, M66592_INTENB0);
}
/*
 * Enable control-transfer and FIFO interrupts, then raise DPRPU
 * (D+ pullup) so the host detects the device.
 */
static void m66592_usb_connect(struct m66592 *m66592)
{
	m66592_bset(m66592, M66592_CTRE, M66592_INTENB0);
	m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
			M66592_INTENB0);
	m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
	m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
}
/*
 * Mirror of m66592_usb_connect(): mask interrupts and drop the D+
 * pullup, then notify the gadget driver.  Called with m66592->lock
 * held; the lock is released around the driver's disconnect callback
 * (see the sparse annotations).
 */
static void m66592_usb_disconnect(struct m66592 *m66592)
__releases(m66592->lock)
__acquires(m66592->lock)
{
	m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0);
	m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
			M66592_INTENB0);
	m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
	m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
	m66592->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&m66592->lock);
	m66592->driver->disconnect(&m66592->gadget);
	spin_lock(&m66592->lock);
	disable_controller(m66592);
	INIT_LIST_HEAD(&m66592->ep[0].queue);
}
/*
 * Read the PID field of the pipe control register: DCPCTR for the
 * default control pipe (0), the per-pipe PIPEnCTR otherwise.  An
 * out-of-range pipe number is logged and yields 0.
 */
static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum)
{
	if (pipenum == 0)
		return m66592_read(m66592, M66592_DCPCTR) & M66592_PID;
	if (pipenum < M66592_MAX_NUM_PIPE)
		return m66592_read(m66592, get_pipectr_addr(pipenum))
			& M66592_PID;
	pr_err("unexpect pipe num (%d)\n", pipenum);
	return 0;
}
/*
 * Write @pid into the PID field of the pipe control register (DCPCTR
 * for pipe 0, PIPEnCTR otherwise), leaving all other bits intact.
 */
static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum,
		u16 pid)
{
	if (pipenum == 0) {
		m66592_mdfy(m66592, pid, M66592_PID, M66592_DCPCTR);
		return;
	}
	if (pipenum < M66592_MAX_NUM_PIPE) {
		m66592_mdfy(m66592, pid, M66592_PID,
				get_pipectr_addr(pipenum));
		return;
	}
	pr_err("unexpect pipe num (%d)\n", pipenum);
}
/* PID = BUF: the pipe transfers data / ACKs the host. */
static inline void pipe_start(struct m66592 *m66592, u16 pipenum)
{
	control_reg_set_pid(m66592, pipenum, M66592_PID_BUF);
}
/* PID = NAK: the pipe NAKs the host until restarted. */
static inline void pipe_stop(struct m66592 *m66592, u16 pipenum)
{
	control_reg_set_pid(m66592, pipenum, M66592_PID_NAK);
}
/* PID = STALL: the pipe returns STALL handshakes to the host. */
static inline void pipe_stall(struct m66592 *m66592, u16 pipenum)
{
	control_reg_set_pid(m66592, pipenum, M66592_PID_STALL);
}
/*
 * Read the whole pipe control register (DCPCTR for pipe 0, PIPEnCTR
 * otherwise).  An out-of-range pipe number is logged and yields 0.
 */
static inline u16 control_reg_get(struct m66592 *m66592, u16 pipenum)
{
	if (pipenum == 0)
		return m66592_read(m66592, M66592_DCPCTR);
	if (pipenum < M66592_MAX_NUM_PIPE)
		return m66592_read(m66592, get_pipectr_addr(pipenum));
	pr_err("unexpect pipe num (%d)\n", pipenum);
	return 0;
}
/*
 * Reset the pipe's data toggle to DATA0 via SQCLR.  The pipe is
 * stopped (PID=NAK) first — the toggle must not be cleared while the
 * pipe is active.
 */
static inline void control_reg_sqclr(struct m66592 *m66592, u16 pipenum)
{
	unsigned long offset;
	pipe_stop(m66592, pipenum);
	if (pipenum == 0)
		m66592_bset(m66592, M66592_SQCLR, M66592_DCPCTR);
	else if (pipenum < M66592_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		m66592_bset(m66592, M66592_SQCLR, offset);
	} else
		pr_err("unexpect pipe num(%d)\n", pipenum);
}
/*
 * Return the usable FIFO buffer size in bytes for @pipenum.  In
 * continuous mode (CNTMD) the size comes from the buffer allocation;
 * otherwise it is the pipe's max packet size.  Note this selects the
 * pipe via PIPESEL before reading PIPECFG/PIPEBUF/PIPEMAXP, so it must
 * not race with other PIPESEL users.
 */
static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum)
{
	u16 tmp;
	int size;
	if (pipenum == 0) {
		tmp = m66592_read(m66592, M66592_DCPCFG);
		if ((tmp & M66592_CNTMD) != 0)
			size = 256;
		else {
			tmp = m66592_read(m66592, M66592_DCPMAXP);
			size = tmp & M66592_MAXP;
		}
	} else {
		m66592_write(m66592, pipenum, M66592_PIPESEL);
		tmp = m66592_read(m66592, M66592_PIPECFG);
		if ((tmp & M66592_CNTMD) != 0) {
			tmp = m66592_read(m66592, M66592_PIPEBUF);
			/* BUFSIZE field is in units of 64 bytes, minus one */
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = m66592_read(m66592, M66592_PIPEMAXP);
			size = tmp & M66592_MXPS;
		}
	}
	return size;
}
/*
 * Point the CPU-accessed FIFO port at @pipenum (PIO only; DMA pipes
 * keep their dedicated FIFO).  The ndelay(450) is a settling delay
 * after changing CURPIPE; the FIFO access width (MBW) is 32-bit for
 * the on-chip variant, 16-bit for the external chip.
 */
static inline void pipe_change(struct m66592 *m66592, u16 pipenum)
{
	struct m66592_ep *ep = m66592->pipenum2ep[pipenum];
	unsigned short mbw;
	if (ep->use_dma)
		return;
	m66592_mdfy(m66592, pipenum, M66592_CURPIPE, ep->fifosel);
	ndelay(450);
	if (m66592->pdata->on_chip)
		mbw = M66592_MBW_32;
	else
		mbw = M66592_MBW_16;
	m66592_bset(m66592, mbw, ep->fifosel);
}
/*
 * Program the selected pipe's configuration, FIFO buffer allocation,
 * max packet size and interval from @info.  Returns 0 on success,
 * -EINVAL for pipe 0, -ENOMEM if the requested buffer area would
 * exceed the chip's FIFO memory.
 */
static int pipe_buffer_setting(struct m66592 *m66592,
		struct m66592_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;
	if (info->pipe == 0)
		return -EINVAL;
	m66592_write(m66592, info->pipe, M66592_PIPESEL);
	if (info->dir_in)
		pipecfg |= M66592_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case M66592_INT:
		/* interrupt pipes get fixed single-block buffers */
		bufnum = 4 + (info->pipe - M66592_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case M66592_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= M66592_BASE_PIPENUM_BULK)
			bufnum = info->pipe - M66592_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - M66592_BASE_PIPENUM_ISOC;
		bufnum = M66592_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		/* double buffering; OUT pipes NAK after a short packet */
		pipecfg |= M66592_DBLB;
		if (!info->dir_in)
			pipecfg |= M66592_SHTNAK;
		break;
	case M66592_ISO:
		bufnum = M66592_BASE_BUFNUM +
			 (info->pipe - M66592_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}
	if (buf_bsize && ((bufnum + 16) >= M66592_MAX_BUFNUM)) {
		pr_err("m66592 pipe memory is insufficient\n");
		return -ENOMEM;
	}
	m66592_write(m66592, pipecfg, M66592_PIPECFG);
	m66592_write(m66592, (buf_bsize << 10) | (bufnum), M66592_PIPEBUF);
	m66592_write(m66592, info->maxpacket, M66592_PIPEMAXP);
	/* hardware interval field is bInterval - 1 */
	if (info->interval)
		info->interval--;
	m66592_write(m66592, info->interval, M66592_PIPEPERI);
	return 0;
}
static void pipe_buffer_release(struct m66592 *m66592,
struct m66592_pipe_info *info)
{
if (info->pipe == 0)
return;
if (is_bulk_pipe(info->pipe)) {
m66592->bulk--;
} else if (is_interrupt_pipe(info->pipe))
m66592->interrupt--;
else if (is_isoc_pipe(info->pipe)) {
m66592->isochronous--;
if (info->type == M66592_BULK)
m66592->bulk--;
} else
pr_err("ep_release: unexpect pipenum (%d)\n",
info->pipe);
}
/*
 * Bring the endpoint's pipe to a clean state: deselect it from the
 * CPU FIFO port, pulse ACLRM to flush the FIFO, and clear the data
 * toggle.  For DMA pipes, also (re)select the pipe on its dedicated
 * FIFO port and set the access width, as pipe_change() would for PIO.
 */
static void pipe_initialize(struct m66592_ep *ep)
{
	struct m66592 *m66592 = ep->m66592;
	unsigned short mbw;
	m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel);
	m66592_write(m66592, M66592_ACLRM, ep->pipectr);
	m66592_write(m66592, 0, ep->pipectr);
	m66592_write(m66592, M66592_SQCLR, ep->pipectr);
	if (ep->use_dma) {
		m66592_mdfy(m66592, ep->pipenum, M66592_CURPIPE, ep->fifosel);
		ndelay(450);
		if (m66592->pdata->on_chip)
			mbw = M66592_MBW_32;
		else
			mbw = M66592_MBW_16;
		m66592_bset(m66592, mbw, ep->fifosel);
	}
}
/*
 * Attach endpoint @ep to pipe @pipenum: pick a FIFO port for it and
 * record the pipe/endpoint lookup tables.  At most one DMA FIFO (two
 * for the external chip) is handed out; everything else, and pipe 0,
 * shares the CPU-driven CFIFO.
 */
static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
		const struct usb_endpoint_descriptor *desc,
		u16 pipenum, int dma)
{
	/* default: PIO through the shared CFIFO */
	ep->use_dma = 0;
	ep->fifoaddr = M66592_CFIFO;
	ep->fifosel = M66592_CFIFOSEL;
	ep->fifoctr = M66592_CFIFOCTR;
	ep->fifotrn = 0;
	if (pipenum != 0 && dma) {
		if (m66592->num_dma == 0) {
			m66592->num_dma++;
			ep->use_dma = 1;
			ep->fifoaddr = M66592_D0FIFO;
			ep->fifosel = M66592_D0FIFOSEL;
			ep->fifoctr = M66592_D0FIFOCTR;
			ep->fifotrn = M66592_D0FIFOTRN;
		} else if (!m66592->pdata->on_chip && m66592->num_dma == 1) {
			/* D1FIFO exists only on the external chip */
			m66592->num_dma++;
			ep->use_dma = 1;
			ep->fifoaddr = M66592_D1FIFO;
			ep->fifosel = M66592_D1FIFOSEL;
			ep->fifoctr = M66592_D1FIFOCTR;
			ep->fifotrn = M66592_D1FIFOTRN;
		}
	}
	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	m66592->pipenum2ep[pipenum] = ep;
	m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
	INIT_LIST_HEAD(&ep->queue);
}
/*
 * Detach endpoint @ep from its pipe, returning any DMA FIFO it was
 * using.  Pipe 0 (control) is never released.
 */
static void m66592_ep_release(struct m66592_ep *ep)
{
	if (ep->pipenum == 0)
		return;

	if (ep->use_dma)
		ep->m66592->num_dma--;

	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}
/*
 * Allocate and configure a hardware pipe for @ep based on @desc.
 * Bulk endpoints overflow into spare isochronous pipes when the bulk
 * pipes are exhausted.  Returns 0 on success, -ENODEV when no pipe of
 * the right class is free, -EINVAL for an unknown transfer type, or
 * pipe_buffer_setting()'s error.
 */
static int alloc_pipe_config(struct m66592_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct m66592 *m66592 = ep->m66592;
	struct m66592_pipe_info info;
	int dma = 0;
	int *counter;
	int ret;
	ep->ep.desc = desc;
	BUG_ON(ep->pipenum);
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (m66592->bulk >= M66592_MAX_NUM_BULK) {
			if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
				pr_err("bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				/* borrow a free isochronous pipe */
				info.pipe = M66592_BASE_PIPENUM_ISOC
						+ m66592->isochronous;
				counter = &m66592->isochronous;
			}
		} else {
			info.pipe = M66592_BASE_PIPENUM_BULK + m66592->bulk;
			counter = &m66592->bulk;
		}
		info.type = M66592_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (m66592->interrupt >= M66592_MAX_NUM_INT) {
			pr_err("interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = M66592_BASE_PIPENUM_INT + m66592->interrupt;
		info.type = M66592_INT;
		counter = &m66592->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
			pr_err("isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = M66592_BASE_PIPENUM_ISOC + m66592->isochronous;
		info.type = M66592_ISO;
		counter = &m66592->isochronous;
		break;
	default:
		pr_err("unexpect xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;
	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;
	ret = pipe_buffer_setting(m66592, &info);
	if (ret < 0) {
		pr_err("pipe_buffer_setting fail\n");
		return ret;
	}
	(*counter)++;
	/* a bulk ep on an isoc pipe consumes a bulk slot too */
	if ((counter == &m66592->isochronous) && info.type == M66592_BULK)
		m66592->bulk++;
	m66592_ep_setting(m66592, ep, desc, info.pipe, dma);
	pipe_initialize(ep);
	return 0;
}
static int free_pipe_config(struct m66592_ep *ep)
{
struct m66592 *m66592 = ep->m66592;
struct m66592_pipe_info info;
info.pipe = ep->pipenum;
info.type = ep->type;
pipe_buffer_release(m66592, &info);
m66592_ep_release(ep);
return 0;
}
/*-------------------------------------------------------------------------*/
/* Enable both "buffer ready" and "buffer not-ready" irqs for the pipe. */
static void pipe_irq_enable(struct m66592 *m66592, u16 pipenum)
{
	enable_irq_ready(m66592, pipenum);
	enable_irq_nrdy(m66592, pipenum);
}
/* Disable both "buffer ready" and "buffer not-ready" irqs for the pipe. */
static void pipe_irq_disable(struct m66592 *m66592, u16 pipenum)
{
	disable_irq_ready(m66592, pipenum);
	disable_irq_nrdy(m66592, pipenum);
}
/* if ccpl is nonzero, the gadget driver's completion callback is not called */
/*
 * Finish the status stage of a control transfer: restart pipe 0 and
 * set CCPL.  @ccpl is latched in the ep0 state so transfer_complete()
 * can skip the gadget completion callback for internally generated
 * status stages.
 */
static void control_end(struct m66592 *m66592, unsigned ccpl)
{
	m66592->ep[0].internal_ccpl = ccpl;
	pipe_start(m66592, 0);
	m66592_bset(m66592, M66592_CCPL, M66592_DCPCTR);
}
/*
 * Begin the IN data stage of a control transfer: select pipe 0 on the
 * CFIFO with ISEL (write direction), clear the FIFO, and either
 * complete immediately for a zero-length request or push the first
 * chunk via irq_ep0_write().
 */
static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
{
	struct m66592 *m66592 = ep->m66592;
	pipe_change(m66592, ep->pipenum);
	m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0,
			(M66592_ISEL | M66592_CURPIPE),
			M66592_CFIFOSEL);
	m66592_write(m66592, M66592_BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
		pipe_start(m66592, 0);
		transfer_complete(ep, req, 0);
	} else {
		/* clear any stale "buffer empty" status before writing */
		m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
		irq_ep0_write(ep, req);
	}
}
/*
 * Begin an IN transfer on a data pipe.  If the FIFO is ready (FRDY)
 * write the first packet now; otherwise arm the pipe irqs and let the
 * interrupt path do it.
 */
static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req)
{
	struct m66592 *m66592 = ep->m66592;
	u16 tmp;
	pipe_change(m66592, ep->pipenum);
	disable_irq_empty(m66592, ep->pipenum);
	pipe_start(m66592, ep->pipenum);
	tmp = m66592_read(m66592, ep->fifoctr);
	if (unlikely((tmp & M66592_FRDY) == 0))
		pipe_irq_enable(m66592, ep->pipenum);
	else
		irq_packet_write(ep, req);
}
/*
 * Begin an OUT transfer.  Pipe 0 is selected on the CFIFO (read
 * direction, no ISEL) and its FIFO cleared.  For data pipes using DMA,
 * the transaction counter (FIFOTRN) is programmed with the number of
 * max-packet transactions covering the request before starting.
 */
static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
{
	struct m66592 *m66592 = ep->m66592;
	u16 pipenum = ep->pipenum;
	if (ep->pipenum == 0) {
		m66592_mdfy(m66592, M66592_PIPE0,
				(M66592_ISEL | M66592_CURPIPE),
				M66592_CFIFOSEL);
		m66592_write(m66592, M66592_BCLR, ep->fifoctr);
		pipe_start(m66592, pipenum);
		pipe_irq_enable(m66592, pipenum);
	} else {
		if (ep->use_dma) {
			m66592_bset(m66592, M66592_TRCLR, ep->fifosel);
			pipe_change(m66592, pipenum);
			m66592_bset(m66592, M66592_TRENB, ep->fifosel);
			/* round the request up to whole packets */
			m66592_write(m66592,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(m66592, pipenum);	/* trigger once */
		pipe_irq_enable(m66592, pipenum);
	}
}
/* Kick off @req on @ep, dispatching on the endpoint's direction. */
static void start_packet(struct m66592_ep *ep, struct m66592_request *req)
{
	int is_in = ep->ep.desc->bEndpointAddress & USB_DIR_IN;

	if (is_in)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}
/*
 * Start a queued ep0 request according to the control transfer stage
 * reported by the CTSQ field: IN data stage, OUT data stage, or a
 * no-data status stage (finished immediately via control_end()).
 */
static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
{
	u16 ctsq;
	ctsq = m66592_read(ep->m66592, M66592_INTSTS0) & M66592_CTSQ;
	switch (ctsq) {
	case M66592_CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case M66592_CS_WRDS:
		start_packet_read(ep, req);
		break;
	case M66592_CS_WRND:
		control_end(ep->m66592, 0);
		break;
	default:
		pr_err("start_ep0: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
/*
 * Power-on initialization of the controller.  The on-chip (SoC)
 * variant only needs endianness and interrupt setup; the external
 * chip additionally requires pin configuration, crystal/PLL clock
 * bring-up (with settle delays), and DMA bus configuration per the
 * platform data.
 */
static void init_controller(struct m66592 *m66592)
{
	unsigned int endian;
	if (m66592->pdata->on_chip) {
		if (m66592->pdata->endian)
			endian = 0; /* big endian */
		else
			endian = M66592_LITTLE; /* little endian */
		m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
		m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
		m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
		/* This is a workaround for SH7722 2nd cut */
		m66592_bset(m66592, 0x8000, M66592_DVSTCTR);
		m66592_bset(m66592, 0x1000, M66592_TESTMODE);
		m66592_bclr(m66592, 0x8000, M66592_DVSTCTR);
		m66592_bset(m66592, M66592_INTL, M66592_INTENB1);
		m66592_write(m66592, 0, M66592_CFBCFG);
		m66592_write(m66592, 0, M66592_D0FBCFG);
		m66592_bset(m66592, endian, M66592_CFBCFG);
		m66592_bset(m66592, endian, M66592_D0FBCFG);
	} else {
		unsigned int clock, vif, irq_sense;
		if (m66592->pdata->endian)
			endian = M66592_BIGEND; /* big endian */
		else
			endian = 0; /* little endian */
		if (m66592->pdata->vif)
			vif = M66592_LDRV; /* 3.3v */
		else
			vif = 0; /* 1.5v */
		switch (m66592->pdata->xtal) {
		case M66592_PLATDATA_XTAL_12MHZ:
			clock = M66592_XTAL12;
			break;
		case M66592_PLATDATA_XTAL_24MHZ:
			clock = M66592_XTAL24;
			break;
		case M66592_PLATDATA_XTAL_48MHZ:
			clock = M66592_XTAL48;
			break;
		default:
			pr_warn("m66592-udc: xtal configuration error\n");
			clock = 0;
		}
		switch (m66592->irq_trigger) {
		case IRQF_TRIGGER_LOW:
			irq_sense = M66592_INTL;
			break;
		case IRQF_TRIGGER_FALLING:
			irq_sense = 0;
			break;
		default:
			pr_warn("m66592-udc: irq trigger config error\n");
			irq_sense = 0;
		}
		m66592_bset(m66592,
			    (vif & M66592_LDRV) | (endian & M66592_BIGEND),
			    M66592_PINCFG);
		m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
		m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL,
			    M66592_SYSCFG);
		m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
		m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
		/* clock chain: crystal -> PLL -> internal clock, with
		 * settling time after each stage */
		m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
		msleep(3);
		m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
		msleep(1);
		m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
		m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1);
		m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR,
			     M66592_DMA0CFG);
	}
}
/*
 * Quiesce the controller: leave test mode, and for the external chip
 * shut the clock chain down in reverse bring-up order (SCKE, PLL,
 * RCKE, XCKE) with settling delays.
 */
static void disable_controller(struct m66592 *m66592)
{
	m66592_bclr(m66592, M66592_UTST, M66592_TESTMODE);
	if (!m66592->pdata->on_chip) {
		m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG);
		udelay(1);
		m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG);
		udelay(1);
		m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG);
		udelay(1);
		m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG);
	}
}
/*
 * Re-enable the external chip's crystal clock (XCKE) if it was turned
 * off.  No-op on the on-chip variant, whose clock is managed by the SoC.
 */
static void m66592_start_xclock(struct m66592 *m66592)
{
	if (m66592->pdata->on_chip)
		return;

	if (!(m66592_read(m66592, M66592_SYSCFG) & M66592_XCKE))
		m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
}
/*-------------------------------------------------------------------------*/
/*
 * Complete @req on @ep with @status and hand it back to the gadget
 * driver (the controller lock is dropped around the callback).  For
 * ep0, internally generated status stages (internal_ccpl) are consumed
 * silently.  If more requests are queued, the next one is started.
 */
static void transfer_complete(struct m66592_ep *ep,
		struct m66592_request *req, int status)
__releases(m66592->lock)
__acquires(m66592->lock)
{
	int restart = 0;
	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}
	list_del_init(&req->queue);
	if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;
	if (!list_empty(&ep->queue))
		restart = 1;
	spin_unlock(&ep->m66592->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->m66592->lock);
	if (restart) {
		req = list_entry(ep->queue.next, struct m66592_request, queue);
		/* only restart if the endpoint wasn't disabled meanwhile */
		if (ep->ep.desc)
			start_packet(ep, req);
	}
}
/*
 * Push the next chunk of an ep0 IN data stage into the FIFO.  Busy-
 * waits (bounded) for FRDY; a short or final write arms no further
 * ready irq, otherwise the "buffer empty" irq drives the next chunk.
 */
static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct m66592 *m66592 = ep->m66592;
	pipe_change(m66592, pipenum);
	m66592_bset(m66592, M66592_ISEL, ep->fifosel);
	i = 0;
	do {
		tmp = m66592_read(m66592, ep->fifoctr);
		if (i++ > 100000) {
			pr_err("pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & M66592_FRDY) == 0);
	/* prepare parameters */
	bufsize = get_buffer_size(m66592, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);
	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			m66592_write_fifo(m66592, ep, buf, size);
		/* short/zero packet: mark the buffer valid explicitly */
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
	}
	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(m66592, pipenum);
		disable_irq_empty(m66592, pipenum);
	} else {
		disable_irq_ready(m66592, pipenum);
		enable_irq_empty(m66592, pipenum);
	}
	pipe_start(m66592, pipenum);
}
/*
 * Push the next chunk of a data-pipe IN transfer into the FIFO.  If
 * the FIFO isn't ready (FRDY) the pipe is stopped and the condition
 * logged.  On the final chunk the "buffer empty" irq signals actual
 * transmission; otherwise the ready irq drives the next chunk.
 */
static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct m66592 *m66592 = ep->m66592;
	pipe_change(m66592, pipenum);
	tmp = m66592_read(m66592, ep->fifoctr);
	if (unlikely((tmp & M66592_FRDY) == 0)) {
		pipe_stop(m66592, pipenum);
		pipe_irq_disable(m66592, pipenum);
		pr_err("write fifo not ready. pipnum=%d\n", pipenum);
		return;
	}
	/* prepare parameters */
	bufsize = get_buffer_size(m66592, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);
	/* write fifo */
	if (req->req.buf) {
		m66592_write_fifo(m66592, ep, buf, size);
		/* short packet, or partial fill of an oversized buffer:
		 * mark the buffer valid explicitly */
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
	}
	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(m66592, pipenum);
		enable_irq_empty(m66592, pipenum);
	} else {
		disable_irq_empty(m66592, pipenum);
		pipe_irq_enable(m66592, pipenum);
	}
}
/*
 * Interrupt-context continuation of an OUT transfer: drain the pipe
 * FIFO into @req and complete the request when the transfer ends
 * (request filled, short packet, or zero-length packet).
 */
static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct m66592 *m66592 = ep->m66592;
	int finish = 0;

	pipe_change(m66592, pipenum);
	tmp = m66592_read(m66592, ep->fifoctr);
	if (unlikely((tmp & M66592_FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(m66592, pipenum);
		pipe_irq_disable(m66592, pipenum);
		pr_err("read fifo not ready");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & M66592_DTLN;	/* bytes the hardware received */
	bufsize = get_buffer_size(m66592, pipenum);
	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(m66592, pipenum);
		pipe_irq_disable(m66592, pipenum);
		finish = 1;
	}

	/* read fifo (a ZLP just clears the buffer) */
	if (req->req.buf) {
		if (size == 0)
			m66592_write(m66592, M66592_BCLR, ep->fifoctr);
		else
			m66592_read_fifo(m66592, ep->fifoaddr, buf, size);
	}

	/* ep0 completion is driven by the control-stage state machine */
	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}
/*
 * BRDY (buffer ready) interrupt dispatcher. Pipe 0 is always serviced
 * as a read through the common FIFO; other pipes are routed to the
 * read or write path according to the endpoint direction.
 */
static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct m66592_ep *ep;
	struct m66592_request *req;

	if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) {
		/* acknowledge pipe 0 and point the common FIFO at it */
		m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS);
		m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE,
				M66592_CFIFOSEL);

		ep = &m66592->ep[0];
		req = list_entry(ep->queue.next, struct m66592_request, queue);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				/* ack this pipe's BRDY, then service it */
				m66592_write(m66592, ~check, M66592_BRDYSTS);
				ep = m66592->pipenum2ep[pipenum];
				req = list_entry(ep->queue.next,
						struct m66592_request, queue);
				if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}
/*
 * BEMP (buffer empty) interrupt dispatcher. Pipe 0 continues the ep0
 * write; other pipes are completed once INBUFM clears, i.e. the last
 * packet has actually left the hardware buffer.
 */
static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct m66592_ep *ep;
	struct m66592_request *req;

	if ((status & M66592_BEMP0) && (enb & M66592_BEMP0)) {
		m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);

		ep = &m66592->ep[0];
		req = list_entry(ep->queue.next, struct m66592_request, queue);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				m66592_write(m66592, ~check, M66592_BEMPSTS);
				tmp = control_reg_get(m66592, pipenum);
				if ((tmp & M66592_INBUFM) == 0) {
					disable_irq_empty(m66592, pipenum);
					pipe_irq_disable(m66592, pipenum);
					pipe_stop(m66592, pipenum);
					ep = m66592->pipenum2ep[pipenum];
					/* list_entry() on a possibly-empty
					 * list is pure pointer math; req is
					 * only used after the check below. */
					req = list_entry(ep->queue.next,
							struct m66592_request,
							queue);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}
/*
 * Handle a standard GET_STATUS request locally and queue the two-byte
 * status reply through the driver's internal ep0 request. The lock is
 * dropped around m66592_queue(), which takes it itself.
 */
static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
__releases(m66592->lock)
__acquires(m66592->lock)
{
	struct m66592_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		/* this hardware is always reported as self-powered */
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(m66592, ep->pipenum);
		if (pid == M66592_PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(m66592, 0);
		return;		/* exit */
	}

	m66592->ep0_data = cpu_to_le16(status);
	m66592->ep0_req->buf = &m66592->ep0_data;
	m66592->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&m66592->lock);
	m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL);
	spin_lock(&m66592->lock);
}
/*
 * Handle a standard CLEAR_FEATURE request. Clearing an endpoint halt
 * also clears the data toggle and restarts any transfer that was left
 * pending on that endpoint.
 */
static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(m66592, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(m66592, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct m66592_ep *ep;
		struct m66592_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stop(m66592, ep->pipenum);
		control_reg_sqclr(m66592, ep->pipenum);	/* reset data toggle */
		control_end(m66592, 1);

		req = list_entry(ep->queue.next,
		struct m66592_request, queue);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			/* restart the transfer the halt interrupted */
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(m66592, ep->pipenum);
	}
		break;
	default:
		pipe_stall(m66592, 0);
		break;
	}
}
/*
 * Handle a standard SET_FEATURE request. For TEST_MODE the status
 * stage must complete (CTSQ back to idle) before the test selector is
 * latched into the TESTMODE register.
 *
 * Fix: the test selector lives in the high byte of wIndex, so wIndex
 * must be converted to CPU order *before* shifting. The previous
 * le16_to_cpu(ctrl->wIndex >> 8) shifted the raw little-endian value
 * and extracted the wrong byte on big-endian hosts.
 */
static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
{
	u16 tmp;
	int timeout = 3000;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_DEVICE_TEST_MODE:
			control_end(m66592, 1);
			/* Wait for the completion of status stage */
			do {
				tmp = m66592_read(m66592, M66592_INTSTS0) &
						M66592_CTSQ;
				udelay(1);
			} while (tmp != M66592_CS_IDST && timeout-- > 0);

			if (tmp == M66592_CS_IDST)
				m66592_bset(m66592,
					    le16_to_cpu(ctrl->wIndex) >> 8,
					    M66592_TESTMODE);
			break;
		default:
			pipe_stall(m66592, 0);
			break;
		}
		break;
	case USB_RECIP_INTERFACE:
		control_end(m66592, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct m66592_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(m66592, ep->pipenum);
		control_end(m66592, 1);
	}
		break;
	default:
		pipe_stall(m66592, 0);
		break;
	}
}
/*
 * Read the 8-byte SETUP packet out of the USBREQ register window and
 * handle the standard requests this driver answers locally.
 * Returns nonzero when the request must instead be forwarded to the
 * gadget driver's setup() callback.
 */
/* if return value is true, call class driver's setup() */
static int setup_packet(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = M66592_USBREQ;
	int i, ret = 0;

	/* read fifo */
	m66592_write(m66592, ~M66592_VALID, M66592_INTSTS0);

	/* four 16-bit reads cover the whole usb_ctrlrequest */
	for (i = 0; i < 4; i++)
		p[i] = m66592_read(m66592, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(m66592, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(m66592, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(m66592, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}
/* Mirror the hardware's negotiated bus speed into gadget.speed. */
static void m66592_update_usb_speed(struct m66592 *m66592)
{
	switch (get_usb_speed(m66592)) {
	case M66592_HSMODE:
		m66592->gadget.speed = USB_SPEED_HIGH;
		break;
	case M66592_FSMODE:
		m66592->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		m66592->gadget.speed = USB_SPEED_UNKNOWN;
		pr_err("USB speed unknown\n");
	}
}
static void irq_device_state(struct m66592 *m66592)
{
u16 dvsq;
dvsq = m66592_read(m66592, M66592_INTSTS0) & M66592_DVSQ;
m66592_write(m66592, ~M66592_DVST, M66592_INTSTS0);
if (dvsq == M66592_DS_DFLT) { /* bus reset */
usb_gadget_udc_reset(&m66592->gadget, m66592->driver);
m66592_update_usb_speed(m66592);
}
if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
m66592_update_usb_speed(m66592);
if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS)
&& m66592->gadget.speed == USB_SPEED_UNKNOWN)
m66592_update_usb_speed(m66592);
m66592->old_dvsq = dvsq;
}
/*
 * CTRT (control transfer stage) interrupt: advance the ep0 state
 * machine. For data/no-data stages the SETUP packet is read and, when
 * not handled locally, forwarded to the gadget driver's setup() with
 * the controller lock dropped.
 */
static void irq_control_stage(struct m66592 *m66592)
__releases(m66592->lock)
__acquires(m66592->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ;
	m66592_write(m66592, ~M66592_CTRT, M66592_INTSTS0);

	switch (ctsq) {
	case M66592_CS_IDST: {
		/* idle: the previous control transfer just completed */
		struct m66592_ep *ep;
		struct m66592_request *req;

		ep = &m66592->ep[0];
		req = list_entry(ep->queue.next, struct m66592_request, queue);
		transfer_complete(ep, req, 0);
	}
		break;

	case M66592_CS_RDDS:
	case M66592_CS_WRDS:
	case M66592_CS_WRND:
		if (setup_packet(m66592, &ctrl)) {
			spin_unlock(&m66592->lock);
			if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0)
				pipe_stall(m66592, 0);
			spin_lock(&m66592->lock);
		}
		break;
	case M66592_CS_RDSS:
	case M66592_CS_WRSS:
		control_end(m66592, 0);
		break;
	default:
		pr_err("ctrl_stage: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
/*
 * Top-level interrupt handler: samples INTSTS0/INTENB0 (restarting
 * the USB clock first when an on-chip controller reads back zeros),
 * then dispatches VBUS, device-state, buffer-ready, buffer-empty and
 * control-stage events in that order. The FIFO selection register is
 * saved and restored around the whole service pass.
 */
static irqreturn_t m66592_irq(int irq, void *_m66592)
{
	struct m66592 *m66592 = _m66592;
	u16 intsts0;
	u16 intenb0;
	u16 savepipe;
	u16 mask0;

	spin_lock(&m66592->lock);

	intsts0 = m66592_read(m66592, M66592_INTSTS0);
	intenb0 = m66592_read(m66592, M66592_INTENB0);

	if (m66592->pdata->on_chip && !intsts0 && !intenb0) {
		/*
		 * When USB clock stops, it cannot read register. Even if a
		 * clock stops, the interrupt occurs. So this driver turn on
		 * a clock by this timing and do re-reading of register.
		 */
		m66592_start_xclock(m66592);
		intsts0 = m66592_read(m66592, M66592_INTSTS0);
		intenb0 = m66592_read(m66592, M66592_INTENB0);
	}

	savepipe = m66592_read(m66592, M66592_CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		u16 brdysts = m66592_read(m66592, M66592_BRDYSTS);
		u16 bempsts = m66592_read(m66592, M66592_BEMPSTS);
		u16 brdyenb = m66592_read(m66592, M66592_BRDYENB);
		u16 bempenb = m66592_read(m66592, M66592_BEMPENB);

		if (mask0 & M66592_VBINT) {
			m66592_write(m66592, 0xffff & ~M66592_VBINT,
					M66592_INTSTS0);
			m66592_start_xclock(m66592);

			/* start vbus sampling */
			m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0)
					& M66592_VBSTS;
			m66592->scount = M66592_MAX_SAMPLING;

			mod_timer(&m66592->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & M66592_DVSQ)
			irq_device_state(m66592);

		if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE)
				&& (brdysts & brdyenb)) {
			irq_pipe_ready(m66592, brdysts, brdyenb);
		}
		if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE)
				&& (bempsts & bempenb)) {
			irq_pipe_empty(m66592, bempsts, bempenb);
		}

		if (intsts0 & M66592_CTRT)
			irq_control_stage(m66592);
	}

	/* restore the FIFO selection the interrupt clobbered */
	m66592_write(m66592, savepipe, M66592_CFIFOSEL);

	spin_unlock(&m66592->lock);
	return IRQ_HANDLED;
}
/*
 * VBUS sampling timer: debounces the VBSTS line. After
 * M66592_MAX_SAMPLING identical readings 50 ms apart, report connect
 * or disconnect; any change in the reading restarts the count.
 */
static void m66592_timer(struct timer_list *t)
{
	struct m66592 *m66592 = from_timer(m66592, t, timer);
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&m66592->lock, flags);
	tmp = m66592_read(m66592, M66592_SYSCFG);
	if (!(tmp & M66592_RCKE)) {
		/* bring the clocks back up before sampling anything */
		m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
		udelay(10);
		m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
	}
	if (m66592->scount > 0) {
		tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS;
		if (tmp == m66592->old_vbus) {
			m66592->scount--;
			if (m66592->scount == 0) {
				if (tmp == M66592_VBSTS)
					m66592_usb_connect(m66592);
				else
					m66592_usb_disconnect(m66592);
			} else {
				mod_timer(&m66592->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			/* level changed: restart the debounce window */
			m66592->scount = M66592_MAX_SAMPLING;
			m66592->old_vbus = tmp;
			mod_timer(&m66592->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&m66592->lock, flags);
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.enable: allocate and program a hardware pipe for @_ep. */
static int m66592_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct m66592_ep *m_ep = container_of(_ep, struct m66592_ep, ep);

	return alloc_pipe_config(m_ep, desc);
}
/*
 * usb_ep_ops.disable: complete every queued request with -ECONNRESET,
 * mask the pipe interrupts and release the pipe configuration.
 */
static int m66592_disable(struct usb_ep *_ep)
{
	struct m66592_ep *ep;
	struct m66592_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct m66592_ep, ep);
	/* NOTE(review): container_of() of a non-NULL _ep can't yield NULL;
	 * this only trips for a NULL-ish _ep from the caller. */
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct m66592_request, queue);
		spin_lock_irqsave(&ep->m66592->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->m66592->lock, flags);
	}

	pipe_irq_disable(ep->m66592, ep->pipenum);
	return free_pipe_config(ep);
}
/* usb_ep_ops.alloc_request: allocate a zeroed request wrapper. */
static struct usb_request *m66592_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct m66592_request *request;

	request = kzalloc(sizeof(*request), gfp_flags);
	if (request == NULL)
		return NULL;

	INIT_LIST_HEAD(&request->queue);

	return &request->req;
}
/* usb_ep_ops.free_request: release a request wrapper. */
static void m66592_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	kfree(container_of(_req, struct m66592_request, req));
}
/*
 * usb_ep_ops.queue: enqueue @_req and kick the hardware when the
 * queue was previously empty; ep0 requests go through the control
 * state machine instead.
 */
static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct m66592_ep *ep;
	struct m66592_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct m66592_ep, ep);
	req = container_of(_req, struct m66592_request, req);

	if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->m66592->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;	/* first request: hardware must be started */

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->ep.desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->m66592->lock, flags);
	return 0;
}
/*
 * usb_ep_ops.dequeue: complete @_req with -ECONNRESET.
 * NOTE(review): the request is completed without verifying it is
 * still linked on ep->queue — callers are trusted to only dequeue
 * requests they previously queued on this endpoint.
 */
static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct m66592_ep *ep;
	struct m66592_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct m66592_ep, ep);
	req = container_of(_req, struct m66592_request, req);

	spin_lock_irqsave(&ep->m66592->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->m66592->lock, flags);

	return 0;
}
static int m66592_set_halt(struct usb_ep *_ep, int value)
{
struct m66592_ep *ep = container_of(_ep, struct m66592_ep, ep);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ep->m66592->lock, flags);
if (!list_empty(&ep->queue)) {
ret = -EAGAIN;
} else if (value) {
ep->busy = 1;
pipe_stall(ep->m66592, ep->pipenum);
} else {
ep->busy = 0;
pipe_stop(ep->m66592, ep->pipenum);
}
spin_unlock_irqrestore(&ep->m66592->lock, flags);
return ret;
}
/*
 * usb_ep_ops.fifo_flush: clear the pipe FIFO, but only while the
 * endpoint is idle (no queued requests and not halted/busy).
 */
static void m66592_fifo_flush(struct usb_ep *_ep)
{
	struct m66592_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct m66592_ep, ep);
	spin_lock_irqsave(&ep->m66592->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->m66592, ep->pipenum);
		m66592_bclr(ep->m66592, M66592_BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->m66592->lock, flags);
}
/* Endpoint operations exported to the gadget core through struct usb_ep. */
static const struct usb_ep_ops m66592_ep_ops = {
	.enable = m66592_enable,
	.disable = m66592_disable,
	.alloc_request = m66592_alloc_request,
	.free_request = m66592_free_request,
	.queue = m66592_queue,
	.dequeue = m66592_dequeue,
	.set_halt = m66592_set_halt,
	.fifo_flush = m66592_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops.udc_start: latch the gadget driver, enable VBUS and
 * USB-reset interrupts, and — if VBUS is already present — start the
 * debounced sampling that eventually calls m66592_usb_connect().
 */
static int m66592_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct m66592 *m66592 = to_m66592(g);

	/* hook up the driver */
	m66592->driver = driver;
	m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);

	if (m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS) {
		m66592_start_xclock(m66592);
		/* start vbus sampling */
		m66592->old_vbus = m66592_read(m66592,
					 M66592_INTSTS0) & M66592_VBSTS;
		m66592->scount = M66592_MAX_SAMPLING;
		mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;
}
/*
 * usb_gadget_ops.udc_stop: mask VBUS/reset interrupts and park the
 * controller (re-init, then disable) before dropping the driver.
 */
static int m66592_udc_stop(struct usb_gadget *g)
{
	struct m66592 *m66592 = to_m66592(g);

	m66592_bclr(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);

	init_controller(m66592);
	disable_controller(m66592);

	m66592->driver = NULL;
	return 0;
}
/*-------------------------------------------------------------------------*/
static int m66592_get_frame(struct usb_gadget *_gadget)
{
struct m66592 *m66592 = gadget_to_m66592(_gadget);
return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
}
/* usb_gadget_ops.pullup: connect/disconnect the D+ pull-up (DPRPU). */
static int m66592_pullup(struct usb_gadget *gadget, int is_on)
{
	struct m66592 *m66592 = gadget_to_m66592(gadget);
	unsigned long flags;

	spin_lock_irqsave(&m66592->lock, flags);
	if (!is_on)
		m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
	else
		m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
	spin_unlock_irqrestore(&m66592->lock, flags);

	return 0;
}
/* Gadget-level operations registered with the UDC core. */
static const struct usb_gadget_ops m66592_gadget_ops = {
	.get_frame = m66592_get_frame,
	.udc_start = m66592_udc_start,
	.udc_stop = m66592_udc_stop,
	.pullup = m66592_pullup,
};
/*
 * Platform remove: unwind probe — unregister the UDC, stop the VBUS
 * timer, unmap registers, release the IRQ, free the internal ep0
 * request and (for on-chip controllers) the clock, then the device.
 */
static void m66592_remove(struct platform_device *pdev)
{
	struct m66592		*m66592 = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&m66592->gadget);

	timer_shutdown_sync(&m66592->timer);
	iounmap(m66592->reg);
	free_irq(platform_get_irq(pdev, 0), m66592);
	m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
	if (m66592->pdata->on_chip) {
		clk_disable(m66592->clk);
		clk_put(m66592->clk);
	}
	kfree(m66592);
}
/* Completion callback for the driver-internal ep0 request: nothing to do. */
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
/*
 * Platform probe: map the controller registers, hook the (shared)
 * interrupt, grab the on-chip clock when needed, initialize the
 * endpoint bookkeeping and register the UDC with the gadget core.
 * Error paths unwind in strict reverse order of acquisition.
 */
static int m66592_probe(struct platform_device *pdev)
{
	struct resource *res, *ires;
	void __iomem *reg = NULL;
	struct m66592 *m66592 = NULL;
	char clk_name[8];
	int ret = 0;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		pr_err("platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires) {
		ret = -ENODEV;
		dev_err(&pdev->dev,
			"platform_get_resource IORESOURCE_IRQ error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		pr_err("ioremap error.\n");
		goto clean_up;
	}

	if (dev_get_platdata(&pdev->dev) == NULL) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -ENODEV;
		goto clean_up;
	}

	/* initialize ucd */
	m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
	if (m66592 == NULL) {
		ret = -ENOMEM;
		goto clean_up;
	}

	m66592->pdata = dev_get_platdata(&pdev->dev);
	m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	spin_lock_init(&m66592->lock);
	platform_set_drvdata(pdev, m66592);

	m66592->gadget.ops = &m66592_gadget_ops;
	m66592->gadget.max_speed = USB_SPEED_HIGH;
	m66592->gadget.name = udc_name;

	timer_setup(&m66592->timer, m66592_timer, 0);
	m66592->reg = reg;

	ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
			udc_name, m66592);
	if (ret < 0) {
		pr_err("request_irq error (%d)\n", ret);
		goto clean_up;
	}

	if (m66592->pdata->on_chip) {
		/* on-chip controllers gate their own "usbfN" clock */
		snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id);
		m66592->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(m66592->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(m66592->clk);
			goto clean_up2;
		}
		clk_enable(m66592->clk);
	}

	INIT_LIST_HEAD(&m66592->gadget.ep_list);
	m66592->gadget.ep0 = &m66592->ep[0].ep;
	INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list);
	for (i = 0; i < M66592_MAX_NUM_PIPE; i++) {
		struct m66592_ep *ep = &m66592->ep[i];

		if (i != 0) {
			/* ep0 is kept off the gadget's endpoint list */
			INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list);
			list_add_tail(&m66592->ep[i].ep.ep_list,
					&m66592->gadget.ep_list);
		}
		ep->m66592 = m66592;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = m66592_ep_name[i];
		ep->ep.ops = &m66592_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep, 512);

		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	/* ep0 uses the shared control FIFO with a 64-byte limit */
	usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64);
	m66592->ep[0].pipenum = 0;
	m66592->ep[0].fifoaddr = M66592_CFIFO;
	m66592->ep[0].fifosel = M66592_CFIFOSEL;
	m66592->ep[0].fifoctr = M66592_CFIFOCTR;
	m66592->ep[0].fifotrn = 0;
	m66592->ep[0].pipectr = get_pipectr_addr(0);
	m66592->pipenum2ep[0] = &m66592->ep[0];
	m66592->epaddr2ep[0] = &m66592->ep[0];

	m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
	if (m66592->ep0_req == NULL) {
		ret = -ENOMEM;
		goto clean_up3;
	}
	m66592->ep0_req->complete = nop_completion;

	init_controller(m66592);

	ret = usb_add_gadget_udc(&pdev->dev, &m66592->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

err_add_udc:
	m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
	m66592->ep0_req = NULL;	/* avoid double free in clean_up below */
clean_up3:
	if (m66592->pdata->on_chip) {
		clk_disable(m66592->clk);
		clk_put(m66592->clk);
	}
clean_up2:
	free_irq(ires->start, m66592);
clean_up:
	if (m66592) {
		if (m66592->ep0_req)
			m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
		kfree(m66592);
	}
	if (reg)
		iounmap(reg);

	return ret;
}
/*-------------------------------------------------------------------------*/
/* Platform glue; probe is passed to module_platform_driver_probe(). */
static struct platform_driver m66592_driver = {
	.remove_new = m66592_remove,
	.driver		= {
		.name =	udc_name,
	},
};

module_platform_driver_probe(m66592_driver, m66592_probe);
| linux-master | drivers/usb/gadget/udc/m66592-udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the PLX NET2280 USB device controller.
* Specs and errata are available from <http://www.plxtech.com>.
*
* PLX Technology Inc. (formerly NetChip Technology) supported the
* development of this driver.
*
*
* CODE STATUS HIGHLIGHTS
*
* This driver should work well with most "gadget" drivers, including
* the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
* as well as Gadget Zero and Gadgetfs.
*
* DMA is enabled by default.
*
* MSI is enabled by default. The legacy IRQ is used if MSI couldn't
* be enabled.
*
* Note that almost all the errata workarounds here are only needed for
* rev1 chips. Rev1a silicon (0110) fixes almost all of them.
*/
/*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 PLX Technology, Inc.
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
*
* Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
* with 2282 chip
*
* Modified Ricardo Ribalda Qtechnology AS to provide compatibility
* with usb 338x chip. Based on PLX driver
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#define DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define DRIVER_VERSION		"2005 Sept 27/v3.0"

/* erratum 0119: this endpoint number must stay unused */
#define EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */

static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;

/* usb338x: pciirqenb0 bit index for each endpoint number */
static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
/* Build a name + capabilities initializer for the tables below. */
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

/* Two endpoint name/caps tables: legacy default layout, and the
 * direction-fixed layout used by usb3380 "advance" (enhanced) mode. */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info_dft[] = { /* Default endpoint configuration */
	EP_INFO(ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-a",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-b",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-c",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-d",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-e",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-f",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-g",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep-h",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
}, ep_info_adv[] = { /* Endpoints for usb3380 advance mode */
	EP_INFO(ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2in",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4in",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
};

#undef EP_INFO
/* FIFO partitioning between the four legacy endpoints:
 * mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280. Otherwise,
 * USB suspend requests will be ignored. This is acceptable for
 * self-powered devices
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

/* Human-readable name for an endpoint's transfer type. */
static char *type_string(u8 bmAttributes)
{
	u8 xfer = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (xfer == USB_ENDPOINT_XFER_BULK)
		return "bulk";
	if (xfer == USB_ENDPOINT_XFER_ISOC)
		return "iso";
	if (xfer == USB_ENDPOINT_XFER_INT)
		return "intr";
	return "control";
}
#include "net2280.h"
#define valid_bit cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
static void ep_clear_seqnum(struct net2280_ep *ep);
static void stop_activity(struct net2280 *dev,
struct usb_gadget_driver *driver);
static void ep0_start(struct net2280 *dev);
/*-------------------------------------------------------------------------*/
/*
 * Unmask the PCI interrupt for @ep in pciirqenb0. Legacy (228x) parts
 * index the register by endpoint number; 338x parts use the ep_bit[]
 * mapping. (Dropped the redundant trailing "return;".)
 */
static inline void enable_pciirqenb(struct net2280_ep *ep)
{
	u32 tmp = readl(&ep->dev->regs->pciirqenb0);

	if (ep->dev->quirks & PLX_LEGACY)
		tmp |= BIT(ep->num);
	else
		tmp |= BIT(ep_bit[ep->num]);
	writel(tmp, &ep->dev->regs->pciirqenb0);
}
/*
 * usb_ep_ops.enable: validate @desc against chip errata and mode
 * restrictions, program ep_cfg, and unmask the per-endpoint (pio) or
 * per-channel (dma) interrupts. Register writes here are ordered
 * deliberately; see the erratum notes inline.
 */
static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max;
	u32 tmp = 0;
	u32 type;
	unsigned long		flags;
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
	int ret = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_err("%s: failed at line=%d\n", __func__, __LINE__);
		return -EINVAL;
	}
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		ret = -ESHUTDOWN;
		goto print_err;
	}

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
		ret = -EDOM;
		goto print_err;
	}

	if (dev->quirks & PLX_PCIE) {
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
			ret = -EDOM;
			goto print_err;
		}
		ep->is_in = !!usb_endpoint_dir_in(desc);
		/* enhanced mode fixes each ep's direction (see ep_info_adv) */
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
			ret = -EINVAL;
			goto print_err;
		}
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc);
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
		ret = -ERANGE;
		goto print_err;
	}

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);

	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
		tmp = readl(&ep->cfg->ep_cfg);
		/* If USB ep number doesn't match hardware ep number */
		if ((tmp & 0xf) != usb_endpoint_num(desc)) {
			ret = -EINVAL;
			spin_unlock_irqrestore(&dev->lock, flags);
			goto print_err;
		}
		if (ep->is_in)
			tmp &= ~USB3380_EP_CFG_MASK_IN;
		else
			tmp &= ~USB3380_EP_CFG_MASK_OUT;
	}
	type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (type == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (type == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			ret = -ERANGE;
			goto print_err;
		}
	}
	ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp |= type << ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp |= type << IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
		} else {
			tmp |= type << OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		if (!dev->enhanced_mode)
			tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp*/
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	if (dev->quirks & PLX_PCIE)
		ep_clear_seqnum(ep);
	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}
/*
 * Poll *ptr until (*ptr & mask) == done, for at most @usec microseconds.
 * Returns 0 on success, -ENODEV when the register reads back all-ones
 * (device unplugged), or the poll helper's timeout error.
 */
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;
	int	ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					((result & mask) == done ||
					 result == U32_MAX),
					1, usec);
	if (result == U32_MAX)		/* device unplugged */
		return -ENODEV;

	return ret;
}
static const struct usb_ep_ops net2280_ep_ops;
/*
 * Return a net2280/net2282 endpoint to its quiescent post-reset
 * state: detach any dma channel, mask its interrupts, restore default
 * NAK behavior and scrub/flush the status and fifo bits.
 */
static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32		tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
		BIT(SET_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}
/*
 * usb338x flavor of endpoint reset: quiesce the dma channel (with a
 * workaround for a stuck 0x5002 dmastat), mask interrupts, scrub
 * status bits and clear the mode-specific half of ep_cfg.
 */
static void ep_reset_338x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
			       dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);

	tmp = readl(&ep->cfg->ep_cfg);
	if (ep->is_in)
		tmp &= ~USB3380_EP_CFG_MASK_IN;
	else
		tmp &= ~USB3380_EP_CFG_MASK_OUT;
	writel(tmp, &ep->cfg->ep_cfg);
}
static void nuke(struct net2280_ep *);
/*
 * net2280_disable - usb_ep_ops.disable; shut one endpoint down.
 * Completes every queued request with -ESHUTDOWN (via nuke) and resets
 * the endpoint hardware.  ep0 may not be disabled this way.
 * Returns 0, or -EINVAL for a bad endpoint.
 */
static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	unsigned long flags;

	/* container_of() is pointer arithmetic only, so computing it
	 * before the NULL check is safe; _ep isn't dereferenced first.
	 */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || _ep->name == ep0name) {
		pr_err("%s: Invalid ep=%p\n", __func__, _ep);
		return -EINVAL;
	}
	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_PCIE)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	/* give eps 1..4 that were running PIO their DMA channel back */
	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * net2280_alloc_request - usb_ep_ops.alloc_request.
 * Allocates a request wrapper; for DMA-capable endpoints it also takes
 * one descriptor from the device's dma_pool.  Returns NULL on failure.
 */
static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	if (!_ep) {
		pr_err("%s: Invalid ep\n", __func__);
		return NULL;
	}
	ep = container_of(_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma *td;

		td = dma_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}
/*
 * net2280_free_request - usb_ep_ops.free_request.
 * Returns a request's dma descriptor (if any) to the pool and frees the
 * wrapper.  The request must not still be queued.
 *
 * Fix: the old error path called dev_err(&ep->dev->pdev->dev, ...) with
 * "ep" derived from a possibly-NULL _ep, dereferencing an invalid
 * pointer in the very check meant to reject it.  Validate both
 * arguments before touching either, and report via pr_err().
 */
static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	if (!_ep || !_req) {
		pr_err("%s: Invalid ep=%p or req=%p\n", __func__, _ep, _req);
		return;
	}
	ep = container_of(_ep, struct net2280_ep, ep);
	req = container_of(_req, struct net2280_request, req);

	/* freeing a request that is still queued is a caller bug */
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
		dma_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}
/*-------------------------------------------------------------------------*/
/* load a packet into the fifo we use for usb IN transfers.
* works for all endpoints.
*
* NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
* at a time, but this code is simpler because it knows it only writes
* one packet. ep-a..ep-d should use dma instead.
*/
/*
 * write_fifo - stuff (at most) one packet of an IN transfer into the
 * endpoint's fifo, starting at req->actual.  A NULL req writes a
 * zero-length packet.  See the block comment above for the single-
 * packet rationale.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf;
	u32 tmp;
	unsigned count, total;

	/* INVARIANT: fifo is currently empty. (testable) */
	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);

	/* whole 32-bit fifo lines first */
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
/* work around erratum 0106: PCI and USB race over the OUT fifo.
* caller guarantees chiprev 0100, out endpoint is NAKing, and
* there's no real data in the fifo.
*
* NOTE: also used in cases where that erratum doesn't apply:
* where the host wrote "too much" data to us.
*/
/*
 * out_flush - discard any stale contents of an OUT fifo while the
 * endpoint NAKs.  See the erratum 0106 block comment above for when
 * callers invoke this.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32 __iomem *statp;
	u32 tmp;

	statp = &ep->regs->ep_stat;

	tmp = readl(statp);
	/* the caller promised NAK_OUT_PACKETS was already set; warn and
	 * force it if not, since flushing a filling fifo is unsafe
	 */
	if (tmp & BIT(NAK_OUT_PACKETS)) {
		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
			ep->ep.name, __func__, tmp);
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	}

	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that statp is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned usec;

		usec = 50;	/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
/* unload packet(s) from the fifo we use for usb OUT transfers.
* returns true iff the request completed, because of short packet
* or the request buffer having filled with full packets.
*
* for ep-a..ep-d this will read multiple packets out when they
* have been accepted.
*/
/*
 * read_fifo - drain available OUT fifo bytes into req's buffer.
 * Returns nonzero iff the request completed (short packet seen, or the
 * buffer filled with full packets); see the block comment above.
 * Overflow beyond the buffer is flushed and reported as -EOVERFLOW.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf = req->req.buf + req->req.actual;
	unsigned count, tmp, is_short;
	unsigned cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected. not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	/* clamp to the space left in the request buffer */
	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	/* zero-length or partial final packet ends the request */
	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	/* whole 32-bit fifo lines first */
	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || req->req.actual == req->req.length;
}
/* fill out dma descriptor to match a given request */
/*
 * The descriptor's dmacount encodes byte count plus control flags;
 * writing the count (with VALID_BIT when @valid) is what hands the
 * descriptor to the hardware, hence the wmb() ordering below.
 */
static void fill_dma_desc(struct net2280_ep *ep,
					struct net2280_request *req, int valid)
{
	struct net2280_dma *td = req->td;
	u32 dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
					!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	td->dmacount = cpu_to_le32(dmacount);
}
/* default dmactl value: scatter/gather with VALID-bit polling; the two
 * erratum 0116 workaround pieces are noted inline.
 */
static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);
/* busy-wait (up to 50 us) until the channel reports DMA_ENABLE clear */
static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}
/* request channel stop, then wait for the hardware to acknowledge it */
static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}
/*
 * start_queue - point the endpoint's DMA channel at descriptor @td_dma
 * and kick it with control word @dmactl.
 */
static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs __iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	/* write-back clears any stale dma status */
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_PCIE)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3: pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);
}
/*
 * start_dma - begin a DMA transfer on an idle channel for @req.
 * For OUT endpoints with data already buffered in the fifo, a direct
 * (non-scatter/gather) transfer drains it; otherwise the request is
 * launched through the descriptor queue via start_queue().
 */
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32 tmp;
	struct net2280_dma_regs __iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
		stop_out_naking(ep);
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
							req->req.zero)){
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}
/*
 * queue_dma - append @req to an already-running DMA chain by swapping
 * the endpoint's dummy descriptor with the request's, then filling it;
 * with @valid set, the hardware can pick it up immediately.
 */
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	/* swap new dummy for old, link; fill and maybe activate */
	swap(ep->dummy, req->td);
	swap(ep->td_dma, req->td_dma);

	req->td->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc(ep, req, valid);
}
/*
 * done - retire one request: unlink it, record status, unmap any DMA
 * mapping, and invoke the gadget's completion callback with the device
 * lock dropped.  Caller holds dev->lock.  ep->stopped is raised around
 * the callback so the completion handler can't advance this queue
 * underneath us, and restored afterwards.
 */
static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280 *dev;
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	/* keep any status set earlier (e.g. -EOVERFLOW) */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
/*
 * net2280_queue - usb_ep_ops.queue; submit one request.
 * On an idle, unhalted endpoint the transfer is started immediately —
 * by DMA when the endpoint has a channel, else by PIO.  Otherwise the
 * request is chained onto ep->queue for the irq handler / DMA engine.
 * Returns 0 on success, or a negative errno (logged) for bad
 * arguments, driver shutdown, or a ZLP on a DMA endpoint (unsupported).
 *
 * Fix: the PIO OUT path used to evaluate read_fifo() twice, in
 * "if (read_fifo() && ep->num == 0) ... else if (read_fifo() &&
 * ep->num != 0) ...".  read_fifo() drains the hardware fifo, so the
 * second evaluation re-read it (and could spuriously complete the
 * request as a zero-length read).  It is now called exactly once.
 */
static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;
	int			ret = 0;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	req = container_of(_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf ||
				!list_empty(&req->queue)) {
		ret = -EINVAL;
		goto print_err;
	}
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
		ret = -EDOM;
		goto print_err;
	}
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		ret = -ESHUTDOWN;
		goto print_err;
	}

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0) {
		ret = -EOPNOTSUPP;
		goto print_err;
	}

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			goto print_err;
	}

	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped &&
		!((dev->quirks & PLX_PCIE) && ep->dma &&
		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {

		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma(ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status(ep);
				done(ep, req, 0);
				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo(ep, _req);
			else {
				u32 s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl(&ep->regs->ep_stat);
				if ((s & BIT(FIFO_EMPTY)) == 0) {
					/* note: _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo(ep, req)) {
						done(ep, req, 0);
						/* ep0 also acks status */
						if (ep->num == 0)
							allow_status(ep);
						/* don't queue it */
						req = NULL;
					} else {
						s = readl(&ep->regs->ep_stat);
					}
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & BIT(NAK_OUT_PACKETS)))
					writel(BIT(CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket));
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma(ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail(&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}
/*
 * dma_done - retire a DMA request: derive bytes transferred from the
 * descriptor's residue counter (bytes NOT transferred), then complete
 * the request via done().
 */
static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	u32 residue = dmacount & DMA_BYTE_COUNT_MASK;

	req->req.actual = req->req.length - residue;
	done(ep, req, status);
}
/*
 * scan_dma_completions - retire every request at the head of ep->queue
 * whose descriptor the hardware has finished (VALID_BIT cleared).
 * Returns the number of requests completed.  Caller holds dev->lock.
 */
static int scan_dma_completions(struct net2280_ep *ep)
{
	int num_completed = 0;

	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty(&ep->queue)) {
		struct net2280_request *req;
		u32 req_dma_count;

		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		/* order the VALID_BIT read after the hardware's writes */
		rmb();
		req_dma_count = le32_to_cpup(&req->td->dmacount);
		if ((req_dma_count & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely(req->td->dmadesc == 0)) {
			/* paranoia */
			u32 const ep_dmacount = readl(&ep->dma->dmacount);

			if (ep_dmacount & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done(ep, req, req_dma_count, 0);
			num_completed++;
			break;
		} else if (!ep->is_in &&
			   (req->req.length % ep->ep.maxpacket) &&
			   !(ep->dev->quirks & PLX_PCIE)) {

			u32 const ep_stat = readl(&ep->regs->ep_stat);
			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver. That helps avoids errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
				ep_warn(ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				u32 const ep_avail = readl(&ep->regs->ep_avail);
				if (ep_avail) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					ep_dbg(ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, ep_avail,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		dma_done(ep, req, req_dma_count, 0);
		num_completed++;
	}

	return num_completed;
}
/*
 * restart_dma - (re)start DMA for the request at the head of ep->queue.
 * NOTE(review): callers must guarantee the queue is non-empty;
 * list_entry() on an empty list would yield a bogus request.
 */
static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request *req;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	start_dma(ep, req);
}
/*
 * abort_dma - stop the endpoint's DMA channel (aborting any in-flight
 * transfer when requests are queued) and retire whatever descriptors
 * the hardware already finished.
 */
static void abort_dma(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}
/* dequeue ALL requests */
/*
 * Stops the endpoint (and its DMA channel, if any), then completes
 * every queued request with -ESHUTDOWN.  Called with dev->lock held.
 */
static void nuke(struct net2280_ep *ep)
{
	ep->stopped = 1;
	if (ep->dma)
		abort_dma(ep);

	/* done() unlinks each request, so the head keeps advancing */
	while (!list_empty(&ep->queue)) {
		struct net2280_request *req;

		req = list_first_entry(&ep->queue,
				struct net2280_request, queue);
		done(ep, req, -ESHUTDOWN);
	}
}
/* dequeue JUST ONE request */
/*
 * net2280_dequeue - usb_ep_ops.dequeue; abort one queued request.
 * DMA is quiesced while the queue is patched; the queue head (possibly
 * partially transferred) gets special handling.  Returns 0, or -EINVAL
 * if the request is not queued on this endpoint.
 */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req = NULL;
	struct net2280_request *iter;
	unsigned long flags;
	u32 dmactl;
	int stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
		pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
						__func__, _ep, _req);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ep->stopped = stopped;
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			/* abort_dma() may already have retired it */
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode*/
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;
	}
	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
static int net2280_fifo_status(struct usb_ep *_ep);
/*
 * net2280_set_halt_and_wedge - set or clear endpoint halt; with
 * @wedged, also latch the wedge so only SetInterface/reset clears it.
 * Refused (-EAGAIN) while requests are queued or an IN fifo still
 * holds data; -EINVAL for ISO endpoints.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep *ep;
	unsigned long flags;
	int retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		retval = -ESHUTDOWN;
		goto print_err;
	}
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC) {
		retval = -EINVAL;
		goto print_err;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue)) {
		retval = -EAGAIN;
		goto print_unlock;
	} else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
		retval = -EAGAIN;
		goto print_unlock;
	} else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			/* ep0 halts via the protocol-stall mechanism */
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			/* NOTE(review): the queue was verified empty above
			 * and the lock is held throughout, so this
			 * !list_empty() looks like it can never be true and
			 * restart_dma() appears unreachable — confirm.
			 */
			if (ep->dev->quirks & PLX_PCIE &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;

print_unlock:
	spin_unlock_irqrestore(&ep->dev->lock, flags);
print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
	return retval;
}
/* usb_ep_ops.set_halt: halt without wedging */
static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
/* usb_ep_ops.set_wedge: halt and latch until reset; not valid on ep0 */
static int net2280_set_wedge(struct usb_ep *_ep)
{
	if (_ep && _ep->name != ep0name)
		return net2280_set_halt_and_wedge(_ep, 1, 1);

	pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
	return -EINVAL;
}
/*
 * net2280_fifo_status - usb_ep_ops.fifo_status.
 * Returns bytes in the fifo for OUT endpoints, or bytes of free space
 * for IN endpoints; negative errno on bad endpoint, shutdown, or a
 * hardware count exceeding the fifo size.
 */
static int net2280_fifo_status(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	u32 avail;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -ENODEV;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(&ep->dev->pdev->dev,
			"%s: Invalid driver=%p or speed=%d\n",
			__func__, ep->dev->driver, ep->dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* ep_avail is a 12-bit byte count */
	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
	if (avail > ep->fifo_size) {
		dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
		return -EOVERFLOW;
	}
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}
/*
 * net2280_fifo_flush - usb_ep_ops.fifo_flush; discard fifo contents.
 * The trailing ep_rsp read forces the posted PCI write to complete.
 */
static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep *ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(&ep->dev->pdev->dev,
			"%s: Invalid driver=%p or speed=%d\n",
			__func__, ep->dev->driver, ep->dev->gadget.speed);
		return;
	}

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl(&ep->regs->ep_rsp);
}
/* per-endpoint operations exposed to the gadget core */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/*
 * net2280_get_frame - usb_gadget_ops.get_frame.
 * Returns the current 10-bit USB frame number, or -ENODEV.
 */
static int net2280_get_frame(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;
	u16 retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore(&dev->lock, flags);
	return retval;
}
/*
 * net2280_wakeup - usb_gadget_ops.wakeup; issue remote wakeup, but only
 * when the host has enabled it in usbctl.  Always returns 0.
 */
static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	if (readl(&dev->usb->usbctl) & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
/*
 * net2280_set_selfpowered - usb_gadget_ops.set_selfpowered.
 * Mirrors @value into the SELF_POWERED_STATUS hardware bit and the
 * gadget's is_selfpowered flag.  Always returns 0.
 */
static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280 *dev;
	u32 usbctl;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	usbctl = readl(&dev->usb->usbctl);
	if (value)
		usbctl |= BIT(SELF_POWERED_STATUS);
	else
		usbctl &= ~BIT(SELF_POWERED_STATUS);
	writel(usbctl, &dev->usb->usbctl);
	_gadget->is_selfpowered = value ? 1 : 0;
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * net2280_pullup - usb_gadget_ops.pullup; software connect/disconnect
 * via USB_DETECT_ENABLE.  Connect re-arms ep0 first; disconnect stops
 * all activity after dropping the pullup.
 */
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on) {
		ep0_start(dev);
		writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
	} else {
		writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
		stop_activity(dev, NULL);
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * net2280_match_ep - usb_gadget_ops.match_ep; steer descriptor types to
 * suitable hardware endpoints (PIO eps for interrupt, DMA-capable eps
 * for bulk/isoc), falling back to a name built from the descriptor's
 * address.  Returns NULL when nothing matches.
 */
static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	char name[8];
	struct usb_ep *ep;

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
		/* ep-e, ep-f are PIO with only 64 byte fifos */
		ep = gadget_find_ep_by_name(_gadget, "ep-e");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
		ep = gadget_find_ep_by_name(_gadget, "ep-f");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
	}

	/* USB3380: Only first four endpoints have DMA channels. Allocate
	 * slower interrupt endpoints from PIO hw endpoints, to allow bulk/isoc
	 * endpoints use DMA hw endpoints.
	 */
	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
	    usb_endpoint_dir_in(desc)) {
		ep = gadget_find_ep_by_name(_gadget, "ep2in");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
		ep = gadget_find_ep_by_name(_gadget, "ep4in");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
	} else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
		   !usb_endpoint_dir_in(desc)) {
		ep = gadget_find_ep_by_name(_gadget, "ep1out");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
		ep = gadget_find_ep_by_name(_gadget, "ep3out");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
	} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
		   usb_endpoint_dir_in(desc)) {
		ep = gadget_find_ep_by_name(_gadget, "ep1in");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
		ep = gadget_find_ep_by_name(_gadget, "ep3in");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
	} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
		   !usb_endpoint_dir_in(desc)) {
		ep = gadget_find_ep_by_name(_gadget, "ep2out");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
		ep = gadget_find_ep_by_name(_gadget, "ep4out");
		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
			return ep;
	}

	/* USB3380: use same address for usb and hardware endpoints */
	snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
			usb_endpoint_dir_in(desc) ? "in" : "out");
	ep = gadget_find_ep_by_name(_gadget, name);
	if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
		return ep;

	return NULL;
}
static int net2280_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);
static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable);
/* controller-level operations exposed to the gadget core */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
	.udc_async_callbacks = net2280_async_callbacks,
	.match_ep	= net2280_match_ep,
};
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
/* FIXME move these into procfs, and use seq_file.
* Sysfs _still_ doesn't behave for arbitrarily sized files,
* and also doesn't help products using this with 2.4 kernels.
*/
/* "function" sysfs attribute */
/* "function" sysfs attribute: name of the bound gadget function, or
 * nothing when no driver is bound (or its name won't fit the page).
 */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
		char *buf)
{
	struct net2280 *dev = dev_get_drvdata(_dev);
	const char *fn;

	if (!dev->driver)
		return 0;
	fn = dev->driver->function;
	if (!fn || strlen(fn) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", fn);
}
static DEVICE_ATTR_RO(function);
/*
 * "registers" sysfs attribute: dump the main control, USB control,
 * per-endpoint, and per-dma-channel registers plus irq counters into
 * one page.  Registers are read under dev->lock for a consistent view.
 */
static ssize_t registers_show(struct device *_dev,
				struct device_attribute *attr, char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	int i;
	u32 t1, t2;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
			s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		/* only dump endpoints that are configured (ep0 always is) */
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf(next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl(&ep->dma->dmactl),
				readl(&ep->dma->dmastat),
				readl(&ep->dma->dmacount),
				readl(&ep->dma->dmaaddr),
				readl(&ep->dma->dmadesc));
		size -= t;
		next += t;
	}

	/* Indexed Registers (none yet) */

	/* Statistics */
	t = scnprintf(next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf(next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);
/*
 * sysfs "queues" attribute: for every endpoint, print its descriptor
 * summary followed by each queued request (and, for DMA endpoints, the
 * request's descriptor fields).  Holds dev->lock for a consistent
 * snapshot; output is clipped to one page.
 */
static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
		char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size;
	unsigned long flags;
	int i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];
		struct net2280_request *req;
		int t;

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			/* unconfigured endpoints are skipped */
			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d),
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		/* stop early once the page buffer is exhausted */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* flag the request the DMA engine is working on */
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma *td;

				/* in-memory descriptor contents */
				td = req->td;
				t = scnprintf(next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);
#else
#define device_create_file(a, b) (0)
#define device_remove_file(a, b) do { } while (0)
#endif
/*-------------------------------------------------------------------------*/
/* another driver-specific mode might be a request type doing dma
* to/from another device fifo instead of to/from memory.
*/
/*
 * Partition the configurable FIFO RAM and rebuild the gadget's ep_list
 * to expose only the endpoints usable in the chosen layout:
 *   mode 0: ep[1..4] each with a 1KB fifo;
 *   mode 1: ep[1..2] each with a 2KB fifo (ep[3..4] unavailable);
 *   mode 2: ep[1] 2KB, ep[2] 1KB, ep[3] 1KB (ep[4] unavailable).
 */
static void set_fifo_mode(struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 1:
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 2048;
		dev->ep[2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
/*
 * defect7374_disable_data_eps - undo the defect-7374 enable phase
 *
 * Restores GPEP config registers to their endpoint numbers, zeroes the
 * dedicated-endpoint configs, and walks the packet-layer endpoint-select
 * space clearing the tolerate-bad-direction bit and re-marking each
 * affected endpoint as initialized.
 */
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *    returing ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(i, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		/* these selector values are skipped (not data endpoints) */
		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
					ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
/*
 * defect7374_enable_data_eps_zero - arm the defect-7374 workaround
 *
 * Programs the GPEPs with endpoint number zero (enabled, direction and
 * type bits set per enhanced mode), enables the CSRIN/PCIIN/STATIN/RCIN
 * dedicated endpoints, tweaks the packet-layer per-endpoint settings,
 * and advances the SCRATCH-register FSM to "waiting for control read"
 * so defect7374_workaround() runs on the next setup packet.
 */
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	/* FSM must not already be past the control-read state */
	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
				== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/*GPEPs:*/
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
			 BIT(ENDPOINT_ENABLE)));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN*/
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/*Implemented for development and debug.
	 * Can be refined/tuned later.*/
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			tmp =
				(readl(&dev->plregs->pl_ep_ctrl) |
				 BIT(CLEAR_ACK_ERROR_CODE) | 0);
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		/* these selector values are skipped */
		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18  || ep_sel == 20)
			continue;

		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);

	}

	/* Set FSM to focus on the first Control Read:
	 * - Tip: Connection speed is known upon the first
	 * setup request.*/
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);

}
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*
* most of the work to support multiple net2280 controllers would
* be to associate this gadget driver (yes?) with all of them, or
* perhaps to bind specific drivers to specific devices.
*/
/*
 * usb_reset_228x - put a 228x family controller into a quiescent state
 *
 * Disables automatic protocol responses and all irq enables, aborts any
 * in-flight DMA, acks latched irq status (leaving the suspend-request
 * change pending in irqstat1), soft-resets the usb/fifo/8051 logic, and
 * re-applies the configured fifo layout via set_fifo_mode().
 */
static void usb_reset_228x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl(&dev->usb->usbctl);
	net2280_led_init(dev);

	/* disable automatic responses, and irqs */
	writel(0, &dev->usb->stdrsp);
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	/*
	 * Ack pending irq status.  These used to be one expression with
	 * comma operators; plain statements behave identically and read
	 * correctly.
	 */
	writel(~0, &dev->regs->irqstat0);
	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);

	/* reset, and enable pci */
	tmp = readl(&dev->regs->devinit) |
		BIT(PCI_ENABLE) |
		BIT(FIFO_SOFT_RESET) |
		BIT(USB_SOFT_RESET) |
		BIT(M8051_RESET);
	writel(tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
/*
 * usb_reset_338x - put a 338x family controller into a quiescent state
 *
 * Mirrors usb_reset_228x() for the PCIe parts: masks automatic
 * responses and irqs (only once the defect-7374 workaround has run),
 * aborts or force-stops each DMA channel, acks latched irq status,
 * optionally soft-resets the core, and relinks every endpoint onto the
 * gadget's ep_list.
 */
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);
	net2280_led_init(dev);

	if (dev->bug7734_patched) {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		struct net2280_dma_regs __iomem *dma;

		if (ep->dma) {
			abort_dma(ep);
		} else {
			/*
			 * No endpoint owns this channel; stop it
			 * directly through its registers.
			 */
			dma = &dev->dma[tmp];
			writel(BIT(DMA_ABORT), &dma->dmastat);
			writel(0, &dma->dmactl);
		}
	}

	/*
	 * Ack all pending irq status.  Formerly two writels joined by a
	 * comma operator; separate statements behave identically.
	 */
	writel(~0, &dev->regs->irqstat0);
	writel(~0, &dev->regs->irqstat1);

	if (dev->bug7734_patched) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);

}
/* Reset dispatcher: legacy 228x parts vs. PCIe 338x parts. */
static void usb_reset(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		usb_reset_228x(dev);
	else
		usb_reset_338x(dev);
}
/*
 * usb_reinit_228x - rebuild per-endpoint software state after a reset
 *
 * Re-initializes names, capabilities, fifo sizes and register pointers
 * for ep0 plus the six configurable endpoints, restores ep0 as the
 * gadget's control endpoint, and parks the dedicated endpoints.
 */
static void usb_reinit_228x(struct net2280 *dev)
{
	u32 tmp;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp];

		ep->ep.name = ep_info_dft[tmp].name;
		ep->ep.caps = ep_info_dft[tmp].caps;
		ep->dev = dev;
		ep->num = tmp;

		if (tmp > 0 && tmp <= 4) {
			/* ep[1..4] get 1KB fifos and a DMA channel */
			ep->fifo_size = 1024;
			ep->dma = &dev->dma[tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs[tmp];
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
}
/*
 * usb_reinit_338x - rebuild per-endpoint state and apply 338x errata
 *
 * Re-initializes every endpoint's software state (in enhanced mode the
 * register blocks are shared per the ne[]/ep_reg_addr[] maps), restores
 * ep0, clears U1/U2/LTM enables once the defect-7374 workaround has run,
 * programs several link-layer errata workarounds, and disables the
 * dedicated endpoints.
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int i;
	u32 tmp, val;
	/* map ep index to its hardware endpoint number / register offset */
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
						   ep_info_dft[i].name;
		ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
						   ep_info_dft[i].caps;
		ep->dev = dev;
		ep->num = i;

		if (i > 0 && i <= 4)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			/*
			 * Set USB endpoint number, hardware allows same number
			 * in both directions.
			 */
			if (i > 0 && i < 5)
				writel(ne[i], &ep->cfg->ep_cfg);
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void __iomem *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
		}

		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	if (dev->bug7734_patched) {
		tmp = readl(&dev->usb_ext->usbctl2) &
		    ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->llregs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->llregs->ll_lfps_5);

	val = readl(&dev->llregs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->llregs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->llregs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->llregs->ll_tsn_counters_2);

	val = readl(&dev->llregs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->llregs->ll_tsn_counters_3);

	/*
	 * AB errata. Errata 11. Workaround for Default Duration of LFPS
	 * Handshake Signaling for Device-Initiated U1 Exit is too short.
	 * Without this, various enumeration failures observed with
	 * modern superspeed hosts.
	 */
	val = readl(&dev->llregs->ll_lfps_timers_2);
	writel((val & 0xffff0000) | LFPS_TIMERS_2_WORKAROUND_VALUE,
	       &dev->llregs->ll_lfps_timers_2);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->llregs->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->llregs->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
/* Reinit dispatcher: legacy 228x parts vs. PCIe 338x parts. */
static void usb_reinit(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		usb_reinit_228x(dev);
	else
		usb_reinit_338x(dev);
}
/*
 * ep0_start_228x - arm ep0 and core irq enables on 228x parts
 *
 * Clears stale ep0 response state, lets the chip auto-answer a few
 * standard requests, sets usbctl (host detection gated by
 * dev->softconnect), and unmasks the setup/ep0 irqs plus the general
 * pci/vbus/root-port-reset/suspend irqs.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS),
		&dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl(&dev->usb->usbctl);
}
/*
 * ep0_start_338x - arm ep0 and core irq enables on 338x parts
 *
 * Like ep0_start_228x() but with the 338x register layout: the ep0
 * response tweak runs only once the defect-7374 workaround is done,
 * SET_SEL/SET_ISOCHRONOUS_DELAY are auto-answered too, and remote
 * wakeup is enabled by default (dev->wakeup_enable tracks it).
 */
static void ep0_start_338x(struct net2280 *dev)
{

	if (dev->bug7734_patched)
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
				BIT(SET_EP_HIDE_STATUS_PHASE),
				&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
		&dev->usb->stdrsp);
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
/* ep0 startup dispatcher: legacy 228x parts vs. PCIe 338x parts. */
static void ep0_start(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		ep0_start_228x(dev);
	else
		ep0_start_338x(dev);
}
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
/*
 * net2280_start - bind a gadget driver to the controller
 *
 * Requires a high-speed-capable driver with a setup() callback (forcing
 * full-speed mode "must not be used in normal operation").  Resets the
 * per-endpoint irq counters, records the driver, creates the "function"
 * and "queues" sysfs attributes, runs the defect-7374 enable phase on
 * PCIe parts that still need it, and starts ep0 so host detection can
 * proceed.  Returns 0, or a negative errno from sysfs file creation
 * (with the partial state unwound).
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
			!driver->setup)
		return -EINVAL;

	dev = container_of(_gadget, struct net2280, gadget);

	for (i = 0; i < dev->n_ep; i++)
		dev->ep[i].irqs = 0;

	/* hook up the driver ... */
	dev->driver = driver;

	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
	if (retval)
		goto err_unbind;
	retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
	if (retval)
		goto err_func;

	/* enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active(dev, 1);

	if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
		defect7374_enable_data_eps_zero(dev);

	ep0_start(dev);

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file(&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
/*
 * stop_activity - reset hardware and kill all queued requests
 *
 * Called with dev->lock held.  Resets the chip (which also prevents new
 * submissions), nuke()s every endpoint's queue, reports disconnect to
 * the gadget driver when one is bound and a connection was up (the lock
 * is dropped around the callback), then re-initializes endpoint state.
 */
static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int			i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset(dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (dev->async_callbacks && driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit(dev);
}
/*
 * net2280_stop - unbind the gadget driver
 *
 * Quiesces all activity under the lock (driver == NULL suppresses the
 * disconnect callback in stop_activity()), turns the activity LED off,
 * removes the sysfs attributes created in net2280_start(), and forgets
 * the driver.  Always returns 0.
 */
static int net2280_stop(struct usb_gadget *_gadget)
{
	struct net2280	*dev;
	unsigned long	flags;

	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2280_led_active(dev, 0);

	device_remove_file(&dev->pdev->dev, &dev_attr_function);
	device_remove_file(&dev->pdev->dev, &dev_attr_queues);

	dev->driver = NULL;

	return 0;
}
/* Gate delivery of gadget-driver callbacks (setup/disconnect etc.). */
static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable)
{
	struct net2280 *dev;

	dev = container_of(_gadget, struct net2280, gadget);

	/* flip the flag under the device lock so irq paths see it atomically */
	spin_lock_irq(&dev->lock);
	dev->async_callbacks = enable;
	spin_unlock_irq(&dev->lock);
}
/*-------------------------------------------------------------------------*/
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
* also works for dma-capable endpoints, in pio mode or just
* to manually advance the queue after short OUT transfers.
*/
/*
 * Service one non-DMA-sized endpoint interrupt: ack the endpoint
 * status, run the ep0 token-level state machine (stall reporting and
 * status-stage synchronization), advance DMA queues after a short OUT
 * packet (erratum 0112 workaround), and move data through the fifo for
 * PIO endpoints.
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely(ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				if (!req)
					allow_status(ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo(ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
					req &&
					req->req.actual == req->req.length) ||
					(ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt(ep);
				ep->stopped = 1;
				if (req)
					done(ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely(!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely(ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			struct net2280_request *stuck_req = NULL;
			int	stopped = ep->stopped;
			int	num_completed;
			int	stuck = 0;
			u32	count;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				num_completed = scan_dma_completions(ep);
				if (unlikely(list_empty(&ep->queue) ||
						ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl(&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl(&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}

				/* Escape loop if no dma transfers completed
				 * after few retries.
				 */
				if (num_completed == 0) {
					if (stuck_req == req &&
					    readl(&ep->dma->dmadesc) !=
						  req->td_dma && stuck++ > 5) {
						count = readl(
							&ep->dma->dmacount);
						count &= DMA_BYTE_COUNT_MASK;
						req = NULL;
						ep_dbg(ep->dev, "%s escape stuck %d, count %u\n",
							ep->ep.name, stuck,
							count);
						break;
					} else if (stuck_req != req) {
						stuck_req = req;
						stuck = 0;
					}
				} else {
					stuck_req = NULL;
					stuck = 0;
				}

				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma(ep->dma);

			if (likely(req)) {
				req->td->dmacount = 0;
				t = readl(&ep->regs->ep_avail);
				dma_done(ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely(ep->out_overflow ||
					(ep->dev->chiprev == 0x0100 &&
					ep->dev->gadget.speed
					== USB_SPEED_FULL))) {
				out_flush(ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty(&ep->queue))
				restart_dma(ep);
		} else
			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo(ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		/* send zlps until the status stage */
		if ((req->req.actual == req->req.length) &&
			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
				mode = 2;

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done(ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status(ep);
			req = NULL;
		} else {
			if (!list_empty(&ep->queue) && !ep->stopped)
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking(ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo(ep, &req->req);
	}
}
/*
 * Map a wIndex value from a control request to the matching configured
 * endpoint.  Endpoint number zero always resolves to ep0; otherwise the
 * gadget's ep_list is scanned for a configured endpoint whose
 * descriptor matches both the direction bit and the low endpoint-number
 * nibble.  Returns NULL when nothing matches.
 */
static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
{
	struct net2280_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 addr;

		if (!ep->desc)
			continue;
		addr = ep->desc->bEndpointAddress;
		if (((wIndex ^ addr) & USB_DIR_IN) == 0 &&
		    (wIndex & 0x0f) == (addr & 0x0f))
			return ep;
	}
	return NULL;
}
/*
 * defect7374_workaround - observe the first Control Read after connect
 *
 * Runs only while the SCRATCH-register FSM says we are waiting for a
 * control read and the request is IN-directed.  On a non-SuperSpeed
 * connection it just advances the FSM; on SuperSpeed it polls the
 * packet-layer status for the host's data-phase ACK (bounded by
 * DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS), then restores the data
 * endpoints and writes the new FSM state back to SCRATCH.
 */
static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
{
	u32 scratch, fsmvalue;
	u32 ack_wait_timeout, state;

	/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
	scratch = get_idx_reg(dev->regs, SCRATCH);
	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
				(r.bRequestType & USB_DIR_IN)))
		return;

	/* This is the first Control Read for this connection: */
	if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
		/*
		 * Connection is NOT SS:
		 * - Connection must be FS or HS.
		 * - This FSM state should allow workaround software to
		 * run after the next USB connection.
		 */
		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
		dev->bug7734_patched = 1;
		goto restore_data_eps;
	}

	/* Connection is SS: */
	for (ack_wait_timeout = 0;
			ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
			ack_wait_timeout++) {

		state =	readl(&dev->plregs->pl_ep_status_1)
			& (0xff << STATE);
		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
			(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
			dev->bug7734_patched = 1;
			break;
		}

		/*
		 * We have not yet received host's Data Phase ACK
		 * - Wait and try again.
		 */
		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
	}


	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
		ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
			"to detect SS host's data phase ACK.");
		ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
			"got 0x%2.2x.\n", state >> STATE);
	} else {
		ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
			"%duSec for Control Read Data Phase ACK\n",
			DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
	}

restore_data_eps:
	/*
	 * Restore data EPs to their pre-workaround settings (disabled,
	 * initialized, and other details).
	 */
	defect7374_disable_data_eps(dev);

	set_idx_reg(dev->regs, SCRATCH, scratch);

	return;
}
/*
 * ep_clear_seqnum - reset the SuperSpeed sequence number of an endpoint
 *
 * Selects the endpoint in the packet-layer control register (using the
 * hardware's select encoding in ep_pl[]) and then writes the selection
 * again with SEQUENCE_NUMBER_RESET set.  Used by the Clear-Halt path
 * because the halt-clear bit alone does not reset the sequence number.
 *
 * Dropped a redundant trailing "return;" from this void function.
 */
static void ep_clear_seqnum(struct net2280_ep *ep)
{
	struct net2280 *dev = ep->dev;
	u32 val;
	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };

	val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
	val |= ep_pl[ep->num];
	writel(val, &dev->plregs->pl_ep_ctrl);
	val |= BIT(SEQUENCE_NUMBER_RESET);
	writel(val, &dev->plregs->pl_ep_ctrl);
}
/*
 * Handle an ep0 setup packet while connected at SuperSpeed: implements
 * GET_STATUS, CLEAR_FEATURE and SET_FEATURE (U1/U2/LTM enables, remote
 * wakeup, endpoint halt) in the driver, and delegates everything else
 * to the gadget driver's setup() callback (dev->lock is dropped around
 * it).  A negative setup() result stalls ep0 via the do_stall3 path.
 */
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	struct net2280_ep *e;
	u16 status;
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
	case USB_REQ_SET_CONFIGURATION:
		/* a zero config value means we return to "addressed" */
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* self-powered / U1 / U2 / LTM bits per USB3 ch9 */
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->gadget.is_selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
							dev->ltm_enable << 4);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			status = readl(&e->regs->ep_rsp) &
						BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM can only change while configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			ep_clear_seqnum(e);
			clear_halt(e);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM can only change while configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			/*
			 * NOTE(review): this arm halts "ep" (the control
			 * endpoint passed in) rather than the looked-up
			 * endpoint "e" - looks suspicious; confirm the
			 * intended target before changing anything.
			 */
			ep->stopped = 1;
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else {
				if (ep->dma)
					abort_dma(ep);
				set_halt(ep);
			}
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		ep->responded = 0;
		if (dev->async_callbacks) {
			spin_unlock(&dev->lock);
			tmp = dev->driver->setup(&dev->gadget, &r);
			spin_lock(&dev->lock);
		}
	}
do_stall3:
	if (tmp < 0) {
		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		set_halt(ep);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
/*
 * Dispatch 338x endpoint irq status: every set bit in stat0 (mapped via
 * the ep_bit[] table) gets its endpoint serviced by handle_ep_small().
 * The scan stops early once all pending bits have been consumed.
 */
static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
{
	u32 i;

	for (i = 0; stat0 && i < ARRAY_SIZE(ep_bit); i++) {
		u32 mask = BIT(ep_bit[i]);

		if (stat0 & mask) {
			stat0 &= ~mask;
			handle_ep_small(&dev->ep[i]);
		}
	}
}
/*
 * handle_stat0_irqs - service the irqstat0 sources: SETUP packets and
 * per-endpoint (PIO) data interrupts.  Called with dev->lock held from
 * the top-level irq handler; may temporarily drop the lock around the
 * gadget driver's ->setup() callback.
 */
static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat0 %04x\n", stat); */

	/* starting a control request? */
	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw[2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		/* first SETUP since reset/connect: latch the negotiated speed
		 * from usbstat and size ep0 accordingly.
		 */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			u32 val = readl(&dev->usb->usbstat);
			if (val & BIT(SUPER_SPEED)) {
				dev->gadget.speed = USB_SPEED_SUPER;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_SS_MAX_PACKET_SIZE);
			} else if (val & BIT(HIGH_SPEED)) {
				dev->gadget.speed = USB_SPEED_HIGH;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			} else {
				dev->gadget.speed = USB_SPEED_FULL;
				/* NOTE(review): full speed reuses the HS ep0
				 * limit; both are 64 bytes, so presumably
				 * intentional — confirm.
				 */
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			}
			net2280_led_speed(dev, dev->gadget.speed);
			ep_dbg(dev, "%s\n",
					usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			done(ep, req, (req->req.actual == req->req.length)
					? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		/* non-338x parts: ack all stale ep0 status bits up front */
		if (!(dev->quirks & PLX_PCIE)) {
			if (ep->dev->quirks & PLX_2280)
				tmp = BIT(FIFO_OVERFLOW) |
				    BIT(FIFO_UNDERFLOW);
			else
				tmp = 0;

			writel(tmp | BIT(TIMEOUT) |
				   BIT(USB_STALL_SENT) |
				   BIT(USB_IN_NAK_SENT) |
				   BIT(USB_IN_ACK_RCVD) |
				   BIT(USB_OUT_PING_NAK_SENT) |
				   BIT(USB_OUT_ACK_SENT) |
				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				   BIT(DATA_IN_TOKEN_INTERRUPT),
				   &ep->regs->ep_stat);
		}

		/* read the 8-byte SETUP packet from the two setup registers */
		u.raw[0] = readl(&dev->usb->setup0123);
		u.raw[1] = readl(&dev->usb->setup4567);

		cpu_to_le32s(&u.raw[0]);
		cpu_to_le32s(&u.raw[1]);

		if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
			defect7374_workaround(dev, u.r);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
		stat ^= BIT(SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking(ep);
		} else
			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
		writel(scratch, &dev->epregs[0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;

		/* SS requests have their own handler; skip the HS/FS switch */
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			handle_stat0_irqs_superspeed(dev, ep, u.r);
			goto next_endpoints;
		}

		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			e = get_ep_by_addr(dev, w_index);
			if (!e || w_length > 2)
				goto do_stall;

			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
				status = cpu_to_le32(1);
			else
				status = cpu_to_le32(0);

			/* don't bother with a request object! */
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, w_length);
			writel((__force u32)status, &dev->epregs[0].ep_data);
			allow_status(ep);
			ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				ep_vdbg(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				ep_vdbg(dev, "%s clear halt\n", e->ep.name);
				clear_halt(e);
				/* 338x: kick DMA back into gear if requests
				 * were queued while halted.
				 */
				if ((ep->dev->quirks & PLX_PCIE) &&
					!list_empty(&e->queue) && e->td_dma)
						restart_dma(e);
			}
			allow_status(ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			/* ep0 itself may not be halted this way */
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt(e);
			if ((dev->quirks & PLX_PCIE) && e->dma)
				abort_dma(e);
			allow_status(ep);
			ep_vdbg(dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));
			ep->responded = 0;
			/* hand the request to the gadget driver, lock dropped */
			if (dev->async_callbacks) {
				spin_unlock(&dev->lock);
				tmp = dev->driver->setup(&dev->gadget, &u.r);
				spin_lock(&dev->lock);
			}
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	/* 338x enhanced mode routes endpoint irqs through dedicated bits */
	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
		u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
			USB3380_IRQSTAT0_EP_INTR_MASK_IN |
			USB3380_IRQSTAT0_EP_INTR_MASK_OUT);

		if (stat & mask) {
			usb338x_handle_ep_intr(dev, stat & mask);
			stat &= ~mask;
		}
	} else {
		/* endpoint data irq ? */
		scratch = stat & 0x7f;
		stat &= ~0x7f;
		for (num = 0; scratch; num++) {
			u32		t;

			/* do this endpoint's FIFO and queue need tending? */
			t = BIT(num);
			if ((scratch & t) == 0)
				continue;
			scratch ^= t;

			ep = &dev->ep[num];
			handle_ep_small(ep);
		}
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
}
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
BIT(DMA_C_INTERRUPT) | \
BIT(DMA_B_INTERRUPT) | \
BIT(DMA_A_INTERRUPT))
#define PCI_ERROR_INTERRUPTS ( \
BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
BIT(PCI_RETRY_ABORT_INTERRUPT))
/*
 * handle_stat1_irqs - service the irqstat1 sources: VBUS/reset changes,
 * suspend/resume, per-channel DMA completion, and PCI errors.  Called
 * with dev->lock held; drops it around gadget driver callbacks (hence
 * the sparse __releases/__acquires annotations).
 */
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
__releases(dev->lock)
__acquires(dev->lock)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		bool	reset = false;
		bool	disconnect = false;

		/*
		 * Ignore disconnects and resets if the speed hasn't been set.
		 * VBUS can bounce and there's always an initial reset.
		 */
		writel(tmp, &dev->regs->irqstat1);
		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
			if ((stat & BIT(VBUS_INTERRUPT)) &&
					(readl(&dev->usb->usbctl) &
						BIT(VBUS_PIN)) == 0) {
				disconnect = true;
				ep_dbg(dev, "disconnect %s\n",
						dev->driver->driver.name);
			} else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
					(readl(&dev->usb->usbstat) & mask)
						== 0) {
				reset = true;
				ep_dbg(dev, "reset %s\n",
						dev->driver->driver.name);
			}

			if (disconnect || reset) {
				stop_activity(dev, dev->driver);
				ep0_start(dev);
				/* notify the gadget driver, lock dropped */
				if (dev->async_callbacks) {
					spin_unlock(&dev->lock);
					if (reset)
						usb_gadget_udc_reset(&dev->gadget, dev->driver);
					else
						(dev->driver->disconnect)(&dev->gadget);
					spin_lock(&dev->lock);
				}
				return;
			}
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel(tmp, &dev->regs->irqstat1);
		spin_unlock(&dev->lock);
		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->async_callbacks && dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			if (!enable_suspend)
				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->async_callbacks && dev->driver->resume)
				dev->driver->resume(&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		spin_lock(&dev->lock);
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel(stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->quirks & PLX_2280)
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(SUSPEND_REQUEST_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_INTERRUPT));
	else
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_DOWN_INTERRUPT) |
			  BIT(SOF_INTERRUPT));

	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat1 %08x\n", stat);*/

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	/* align DMA_A..D status bits to bit 0 so BIT(num) indexes channels */
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = BIT(num);
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* DMA channel num serves ep[num + 1] (ep0 has no DMA) */
		ep = &dev->ep[num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl(&dma->dmastat);
		writel(tmp, &dma->dmastat);

		/* dma sync*/
		if (dev->quirks & PLX_PCIE) {
			u32 r_dmacount = readl(&dma->dmacount);
			if (!ep->is_in &&  (r_dmacount & 0x00FFFFFF) &&
			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
				continue;
		}

		if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
			ep_dbg(ep->dev, "%s no xact done? %08x\n",
				ep->ep.name, tmp);
			continue;
		}
		stop_dma(ep->dma);

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions(ep);

		/* disable dma on inactive queues; else maybe restart */
		if (!list_empty(&ep->queue)) {
			tmp = readl(&dma->dmactl);
			restart_dma(ep);
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ep_err(dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity(dev, dev->driver);
		ep0_start(dev);
		stat = 0;
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
}
static irqreturn_t net2280_irq(int irq, void *_dev)
{
struct net2280 *dev = _dev;
/* shared interrupt, not ours */
if ((dev->quirks & PLX_LEGACY) &&
(!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
return IRQ_NONE;
spin_lock(&dev->lock);
/* handle disconnect, dma, and more */
handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
/* control requests and PIO */
handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
if (dev->quirks & PLX_PCIE) {
/* re-enable interrupt to trigger any possible new interrupt */
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
writel(pciirqenb1, &dev->regs->pciirqenb1);
}
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
 * Release callback for the gadget's embedded struct device: the gadget
 * device owns the whole net2280 allocation, so free it here when the
 * last reference is dropped.
 */
static void gadget_release(struct device *_dev)
{
	kfree(container_of(_dev, struct net2280, gadget.dev));
}
/* tear down the binding between this driver and the pci device */
static void net2280_remove(struct pci_dev *pdev)
{
	struct net2280 *dev = pci_get_drvdata(pdev);

	/* unregister from the gadget core first, if probe got that far */
	if (dev->added)
		usb_del_gadget(&dev->gadget);

	/* a bound gadget driver must have been unbound by now */
	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe() */
	if (dev->requests) {
		int		i;
		/* dummy descriptors were allocated for ep[1..4] only */
		for (i = 1; i < 5; i++) {
			if (!dev->ep[i].dummy)
				continue;
			dma_pool_free(dev->requests, dev->ep[i].dummy,
					dev->ep[i].td_dma);
		}
		dma_pool_destroy(dev->requests);
	}
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	if (dev->quirks & PLX_PCIE)
		pci_disable_msi(pdev);
	if (dev->regs) {
		net2280_led_shutdown(dev);
		iounmap(dev->regs);
	}
	if (dev->region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->enabled)
		pci_disable_device(pdev);
	device_remove_file(&pdev->dev, &dev_attr_registers);

	ep_info(dev, "unbind\n");
	/*
	 * Drops the reference taken by usb_initialize_gadget() in probe;
	 * when it is the last one, gadget_release() frees 'dev', so 'dev'
	 * must not be touched after this call.
	 */
	usb_put_gadget(&dev->gadget);
}
/* wrap this driver around the specified device, but
* don't respond over USB until a gadget driver binds to us.
*/
/*
 * net2280_probe - wrap this driver around the specified PCI device, but
 * don't respond over USB until a gadget driver binds to us.
 *
 * On any failure the partially-initialized state is torn down through
 * net2280_remove(), which drops the gadget reference; gadget_release()
 * then frees 'dev'.  (The old error path additionally called kfree(dev)
 * after net2280_remove(), which double-freed the allocation — fixed.)
 *
 * Returns 0 on success or a negative errno.
 */
static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* alloc, and start init */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata(pdev, dev);
	usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release);
	spin_lock_init(&dev->lock);
	dev->quirks = id->driver_data;
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
				USB_SPEED_SUPER : USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device(pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		ep_dbg(dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */

	base = ioremap(resource, len);
	if (base == NULL) {
		ep_dbg(dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	if (dev->quirks & PLX_PCIE) {
		u32 fsmvalue;
		u32 usbstat;

		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
							(base + 0x00b4);
		dev->llregs = (struct usb338x_ll_regs __iomem *)
							(base + 0x0700);
		dev->plregs = (struct usb338x_pl_regs __iomem *)
							(base + 0x0800);
		usbstat = readl(&dev->usb->usbstat);
		dev->enhanced_mode = !!(usbstat & BIT(11));
		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;

		/* put into initial config, link up all endpoints */
		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
					(0xf << DEFECT7374_FSM_FIELD);
		/* See if firmware needs to set up for workaround: */
		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
			dev->bug7734_patched = 1;
			writel(0, &dev->usb->usbctl);
		} else
			dev->bug7734_patched = 0;
	} else {
		dev->enhanced_mode = 0;
		dev->n_ep = 7;
		/* put into initial config, link up all endpoints */
		writel(0, &dev->usb->usbctl);
	}

	usb_reset(dev);
	usb_reinit(dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ep_err(dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (dev->quirks & PLX_PCIE)
		if (pci_enable_msi(pdev))
			ep_err(dev, "Failed to enable MSI mode\n");

	if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
			driver_name, dev)) {
		ep_err(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = dma_pool_create("requests", &pdev->dev,
		sizeof(struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		ep_dbg(dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* pre-allocate one dummy descriptor per DMA-capable endpoint */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = dma_pool_alloc(dev->requests, GFP_KERNEL,
				&dev->ep[i].td_dma);
		if (!td) {
			ep_dbg(dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		dev->ep[i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	if (dev->quirks & PLX_LEGACY)
		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
			/*
			 * 256 write retries may not be enough...
			   BIT(PCI_RETRY_ABORT_ENABLE) |
			*/
			BIT(DMA_READ_MULTIPLE_ENABLE) |
			BIT(DMA_READ_LINE_ENABLE),
			&dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	ep_info(dev, "%s\n", driver_desc);
	ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
			dev->enhanced_mode ? "enhanced mode" : "legacy mode");
	retval = device_create_file(&pdev->dev, &dev_attr_registers);
	if (retval)
		goto done;

	retval = usb_add_gadget(&dev->gadget);
	if (retval)
		goto done;
	dev->added = 1;
	return 0;

done:
	if (dev) {
		net2280_remove(pdev);
		/*
		 * No kfree(dev) here: net2280_remove() drops the final
		 * gadget reference and gadget_release() already freed it.
		 */
	}
	return retval;
}
/* make sure the board is quiescent; otherwise it will continue
* generating IRQs across the upcoming reboot.
*/
static void net2280_shutdown(struct pci_dev *pdev)
{
	struct net2280 *udc = pci_get_drvdata(pdev);

	/* mask both interrupt enable registers first ... */
	writel(0, &udc->regs->pciirqenb0);
	writel(0, &udc->regs->pciirqenb1);

	/* ... then drop the pullup so the host thinks we're gone */
	writel(0, &udc->usb->usbctl);
}
/*-------------------------------------------------------------------------*/
/* PCI devices this driver binds to; driver_data carries the quirk flags. */
static const struct pci_device_id pci_ids[] = { {
	/* NET2280: original part, extra FIFO status quirks */
	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_LEGACY | PLX_2280,
	}, {
	/* NET2282 */
	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_LEGACY,
	},
	{
	/* USB2380: PCIe variant, high speed only */
	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x2380,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_PCIE,
	 },
	{
	/*
	 * USB3380: superspeed-capable.  NOTE(review): matches programming
	 * interface 0xfe instead of PCI_CLASS_SERIAL_USB_DEVICE like the
	 * other entries — presumably how this part advertises itself;
	 * confirm against the USB3380 data book before "normalizing".
	 */
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3380,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_PCIE | PLX_SUPERSPEED,
	 },
	{
	/* USB3382: superspeed-capable */
	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3382,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_PCIE | PLX_SUPERSPEED,
	 },
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,

	/* FIXME add power management support */
};

module_pci_driver(net2280_pci_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/net2280.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* NVIDIA Tegra XUSB device mode controller
*
* Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>
/* XUSB_DEV registers */
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define DB_STREAMID_MASK GENMASK(31, 16)
#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define EREPLO_ECS BIT(0)
#define EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define CTRL_RUN BIT(0)
#define CTRL_LSE BIT(1)
#define CTRL_IE BIT(4)
#define CTRL_SMI_EVT BIT(5)
#define CTRL_SMI_DSE BIT(6)
#define CTRL_EWE BIT(7)
#define CTRL_DEVADDR_MASK GENMASK(30, 24)
#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define CTRL_ENABLE BIT(31)
#define ST 0x034
#define ST_RC BIT(0)
#define ST_IP BIT(4)
#define RT_IMOD 0x038
#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define PORTSC_CCS BIT(0)
#define PORTSC_PED BIT(1)
#define PORTSC_PR BIT(4)
#define PORTSC_PLS_SHIFT 5
#define PORTSC_PLS_MASK GENMASK(8, 5)
#define PORTSC_PLS_U0 0x0
#define PORTSC_PLS_U2 0x2
#define PORTSC_PLS_U3 0x3
#define PORTSC_PLS_DISABLED 0x4
#define PORTSC_PLS_RXDETECT 0x5
#define PORTSC_PLS_INACTIVE 0x6
#define PORTSC_PLS_RESUME 0xf
#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define PORTSC_PS_SHIFT 10
#define PORTSC_PS_MASK GENMASK(13, 10)
#define PORTSC_PS_UNDEFINED 0x0
#define PORTSC_PS_FS 0x1
#define PORTSC_PS_LS 0x2
#define PORTSC_PS_HS 0x3
#define PORTSC_PS_SS 0x4
#define PORTSC_LWS BIT(16)
#define PORTSC_CSC BIT(17)
#define PORTSC_WRC BIT(19)
#define PORTSC_PRC BIT(21)
#define PORTSC_PLC BIT(22)
#define PORTSC_CEC BIT(23)
#define PORTSC_WPR BIT(30)
#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define MFINDEX_FRAME_SHIFT 3
#define MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define PORTPM_L1S_MASK GENMASK(1, 0)
#define PORTPM_L1S_DROP 0x0
#define PORTPM_L1S_ACCEPT 0x1
#define PORTPM_L1S_NYET 0x2
#define PORTPM_L1S_STALL 0x3
#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define PORTPM_RWE BIT(3)
#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define PORTPM_FLA BIT(24)
#define PORTPM_VBA BIT(25)
#define PORTPM_WOC BIT(26)
#define PORTPM_WOD BIT(27)
#define PORTPM_U1E BIT(28)
#define PORTPM_U2E BIT(29)
#define PORTPM_FRWE BIT(30)
#define PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define DEVNOTIF_LO_TRIG BIT(0)
#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define PORTHALT_HALT_LTSSM BIT(0)
#define PORTHALT_HALT_REJECT BIT(1)
#define PORTHALT_STCHG_REQ BIT(20)
#define PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
#define BLCG 0x840
#define SSPX_CORE_CNT0 0x610
#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_CNT56 0x6fc
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
#define SSPX_CORE_CNT57 0x700
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
#define SSPX_CORE_CNT65 0x720
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
#define SSPX_CORE_CNT66 0x724
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
#define SSPX_CORE_CNT67 0x728
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
#define SSPX_CORE_CNT72 0x73c
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define BLCG_DFPCI BIT(0)
#define BLCG_UFPCI BIT(1)
#define BLCG_FE BIT(2)
#define BLCG_COREPLL_PWRDN BIT(8)
#define BLCG_IOPLL_0_PWRDN BIT(9)
#define BLCG_IOPLL_1_PWRDN BIT(10)
#define BLCG_IOPLL_2_PWRDN BIT(11)
#define BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014
/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
/*
 * Hardware endpoint context for the XUSB device controller.  All words
 * are little-endian as seen by the controller; the individual bitfields
 * are defined by the BUILD_EP_CONTEXT_RW() accessors below rather than
 * C bitfields.
 */
struct tegra_xudc_ep_context {
	__le32 info0;	/* state / mult / max_pstreams / lsa / interval */
	__le32 info1;	/* cerr / type / hid / max burst / max packet size */
	__le32 deq_lo;	/* dequeue cycle state + dequeue pointer, low word */
	__le32 deq_hi;	/* dequeue pointer, high word */
	__le32 tx_info;	/* average TRB length / max ESIT payload */
	__le32 rsvd[11];	/* remaining fields; see accessors for layout */
};
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISCOH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7
/*
 * BUILD_EP_CONTEXT_RW() - generate ep_ctx_read_<name>() and
 * ep_ctx_write_<name>() accessors for a bitfield of width 'mask' at bit
 * position 'shift' inside the little-endian context word 'member'.
 * Reads return the field in CPU byte order; writes perform a
 * read-modify-write of the containing word.
 */
#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{ \
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
} \
static inline void \
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	ctx->member = cpu_to_le32(tmp); \
}
/* Endpoint-context bitfield accessors: (field, word, bit shift, mask). */
BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
/*
 * Reassemble the full 64-bit dequeue pointer from the context's split
 * hi/lo fields; deq_lo holds bits 31:4 (16-byte-aligned pointer).
 */
static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	u64 hi = ep_ctx_read_deq_hi(ctx);
	u64 lo = ep_ctx_read_deq_lo(ctx);

	return (hi << 32) | (lo << 4);
}
/*
 * Store a 64-bit dequeue pointer into the context's split hi/lo fields;
 * the low word is shifted so deq_lo carries bits 31:4 of the address.
 */
static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
}
/*
 * Transfer Request Block: 16-byte little-endian descriptor exchanged
 * with the controller.  Field layouts are defined by the BUILD_TRB_RW()
 * accessors below and depend on the TRB type.
 */
struct tegra_xudc_trb {
	__le32 data_lo;		/* data buffer pointer, low word */
	__le32 data_hi;		/* data buffer pointer, high word */
	__le32 status;		/* transfer length / completion code / seq */
	__le32 control;		/* cycle, type, IOC/ISP/chain flags, etc. */
};
#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63
#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
/*
 * BUILD_TRB_RW() - generate trb_read_<name>()/trb_write_<name>()
 * accessors for a bitfield of width 'mask' at bit position 'shift'
 * inside the little-endian TRB word 'member'.  Writes perform a
 * read-modify-write of the containing word.
 */
#define BUILD_TRB_RW(name, member, shift, mask) \
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
{ \
	return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
} \
static inline void \
trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	trb->member = cpu_to_le32(tmp); \
}
/*
 * TRB bitfield accessors: (field, word, bit shift, mask).  Note some
 * fields overlay the same bits and are interpreted per TRB type (e.g.
 * stream_id/endpoint_id/tlbpc/data_stage_dir all live in control[16..]).
 */
BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)
/* Reassemble the 64-bit data buffer pointer from the TRB's hi/lo words. */
static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	u64 hi = trb_read_data_hi(trb);
	u64 lo = trb_read_data_lo(trb);

	return (hi << 32) | lo;
}
/* Split a 64-bit data buffer pointer into the TRB's hi/lo words. */
static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_hi(trb, upper_32_bits(addr));
	trb_write_data_lo(trb, lower_32_bits(addr));
}
/* Driver-private wrapper around a usb_request queued on an endpoint. */
struct tegra_xudc_request {
	struct usb_request usb_req;	/* must be first: container_of() use */

	size_t buf_queued;		/* bytes already mapped onto TRBs */
	unsigned int trbs_queued;	/* TRBs already placed on the ring */
	unsigned int trbs_needed;	/* total TRBs this request requires */
	bool need_zlp;			/* append a zero-length packet */

	struct tegra_xudc_trb *first_trb;	/* first TRB of this request */
	struct tegra_xudc_trb *last_trb;	/* last TRB of this request */

	struct list_head list;		/* entry on tegra_xudc_ep.queue */
};
/* Per-endpoint state: gadget-facing usb_ep plus the hardware ring/context. */
struct tegra_xudc_ep {
	struct tegra_xudc *xudc;	/* owning controller */
	struct usb_ep usb_ep;		/* gadget-framework endpoint */
	unsigned int index;		/* hardware endpoint index */
	char name[8];			/* name exposed via usb_ep.name */

	struct tegra_xudc_ep_context *context;	/* hw endpoint context */

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;	/* TRB ring (CPU view) */
	dma_addr_t transfer_ring_phys;		/* TRB ring (DMA address) */

	unsigned int enq_ptr;		/* producer index into the ring */
	unsigned int deq_ptr;		/* consumer index into the ring */
	bool pcs;			/* producer cycle state bit */
	bool ring_full;			/* no free TRBs left */
	bool stream_rejected;		/* host rejected the stream */

	struct list_head queue;		/* pending tegra_xudc_request list */
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};
/* U1/U2 system exit latency values from SET_SEL (USB 3.x LPM). */
struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};
/* Control-transfer (EP0) state machine. */
enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,		/* idle, expecting a SETUP packet */
	DATA_STAGE_XFER,	/* data stage, device-to-host (IN) */
	DATA_STAGE_RECV,	/* data stage, host-to-device (OUT) */
	STATUS_STAGE_XFER,	/* status stage, device-to-host */
	STATUS_STAGE_RECV,	/* status stage, host-to-device */
};
/* A received SETUP packet plus its hardware sequence number. */
struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};
/* Registers preserved across controller powergating. */
struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};
/* Top-level driver state for one XUSB device controller instance. */
struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;	/* protects controller/endpoint state */

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	/* Event rings: ring 0 + ring 1, consumed as one logical ring. */
#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;
	bool ccs;		/* consumer cycle state */

	/* Endpoint contexts and transfer rings. */
#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	/* Control (EP0) transfer state. */
	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	u16 status_buf;		/* buffer for GET_STATUS replies */
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	/* Role switching: current PHY selection and deferred work. */
	bool device_mode;
	struct work_struct usb_role_sw_work;

	struct phy **usb3_phy;
	struct phy *curr_usb3_phy;
	struct phy **utmi_phy;
	struct phy *curr_utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct usb_phy **usbphy;
	struct usb_phy *curr_usbphy;
	struct notifier_block vbus_nb;

	struct completion disconnect_complete;

	bool selfpowered;

	/* Workarounds for hardware quirks (see soc flags below). */
#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;
	bool wait_csc;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};
/* Largest buffer a single TRB can describe (64 KiB). */
#define XUDC_TRB_MAX_BUFFER_SIZE 65536
/* Maximum number of simultaneously enabled isochronous endpoints. */
#define XUDC_MAX_ISOCH_EPS 4
/* Interrupt moderation interval, in microseconds (0 = disabled). */
#define XUDC_INTERRUPT_MODERATION_US 0

/* Default endpoint-0 descriptor: control endpoint, 64-byte packets. */
static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};
/* Per-SoC configuration: resources and hardware quirk flags. */
struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	unsigned int num_phys;
	bool u1_enable;		/* allow U1 link power state */
	bool u2_enable;		/* allow U2 link power state */
	bool lpm_enable;	/* allow USB 2.0 LPM */
	bool invalid_seq_num;	/* HW reports bogus setup sequence numbers */
	bool pls_quirk;		/* needs forced PLS->U0 on disconnect */
	bool port_reset_quirk;	/* needs port-reset workaround work */
	bool port_speed_quirk;	/* needs gen1 speed limiting in device mode */
	bool has_ipfs;		/* has an IPFS register aperture */
};
static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
return readl(xudc->fpci + offset);
}
/* Write a 32-bit register in the FPCI aperture. */
static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	void __iomem *addr = xudc->fpci + offset;

	writel(val, addr);
}
static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
return readl(xudc->ipfs + offset);
}
/* Write a 32-bit register in the IPFS aperture. */
static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	void __iomem *addr = xudc->ipfs + offset;

	writel(val, addr);
}
static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
return readl(xudc->base + offset);
}
/* Write a 32-bit register in the XUDC core aperture. */
static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	void __iomem *addr = xudc->base + offset;

	writel(val, addr);
}
/*
 * Poll an XUDC core register until (reg & mask) == val, sampling every
 * microsecond for up to 100 us. Returns 0 on success, -ETIMEDOUT on
 * timeout (from readl_poll_timeout_atomic()).
 */
static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	void __iomem *addr = xudc->base + offset;
	u32 regval;
	int err;

	err = readl_poll_timeout_atomic(addr, regval,
					(regval & mask) == val, 1, 100);
	return err;
}
/* The usb_gadget is embedded in tegra_xudc; recover the container. */
static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}
/* The usb_ep is embedded in tegra_xudc_ep; recover the container. */
static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}
/* The usb_request is embedded in tegra_xudc_request; recover it. */
static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}
/*
 * Log a TRB's four raw words at debug level.
 *
 * NOTE(review): the TRB words are stored little-endian (see
 * le32_to_cpu() in the accessors above) but are printed here with %#x
 * without conversion, so on a big-endian kernel the logged values
 * would appear byte-swapped. Debug-only; confirm before changing.
 */
static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}
/*
 * Limit the superspeed port to gen 1 by programming reduced SCD
 * throughput/timeout values (port_speed_quirk workaround).
 *
 * NOTE(review): the third read-modify-write below reads
 * SSPX_CORE_CNT65 but writes the result to SSPX_CORE_CNT66 — this
 * looks like a typo (every other sequence writes back to the register
 * it read), but it matches the shipped code and the hardware effect
 * cannot be verified here. Confirm against the TRM before changing.
 */
static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* limit port speed to gen 1 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);	/* see NOTE above */

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}
/*
 * Restore the gen 2 SCD throughput/timeout values undone by
 * tegra_xudc_limit_port_speed() (port_speed_quirk workaround).
 *
 * NOTE(review): as in tegra_xudc_limit_port_speed(), the third
 * sequence reads SSPX_CORE_CNT65 but writes SSPX_CORE_CNT66 —
 * probable typo, kept as shipped; confirm against the TRM.
 */
static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* restore port speed to gen2 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);	/* see NOTE above */

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}
/*
 * Enter device mode: take a runtime-PM reference, power on the UTMI
 * pad and both PHYs, then switch the UTMI PHY into OTG device role.
 * PHY power-on failures are logged but not fatal.
 */
static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	tegra_phy_xusb_utmi_pad_power_on(xudc->curr_utmi_phy);

	err = phy_power_on(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI power on failed: %d\n", err);

	err = phy_power_on(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
			 USB_ROLE_DEVICE);
}
/*
 * Leave device mode: drop the role, wait for the disconnect event if a
 * host was attached, power down the PHYs and release the runtime-PM
 * reference. Applies the PLS and port-speed quirk workarounds first.
 */
static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	/* Sample connect state before tearing the role down. */
	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_restore_port_speed(xudc);

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	tegra_phy_xusb_utmi_pad_power_down(xudc->curr_utmi_phy);

	err = phy_power_off(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);

	err = phy_power_off(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);

	pm_runtime_put(xudc->dev);
}
/* Deferred role-switch handler: enter or leave device mode. */
static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (!xudc->device_mode) {
		tegra_xudc_device_mode_off(xudc);
		return;
	}

	tegra_xudc_device_mode_on(xudc);
}
/*
 * Map a shared USB PHY back to its index in the xudc->usbphy[] table.
 *
 * Returns the index on success, or -1 when @usbphy does not belong to
 * this controller.
 */
static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
				    struct usb_phy *usbphy)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
			return i;
	}

	/* Fix: the log message was missing its trailing newline. */
	dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");

	return -1;
}
/*
 * React to a VBUS event on a shared PHY: ignore no-op role changes,
 * record the new role, select the matching UTMI/USB3 PHY pair, and
 * schedule the role-switch work. Skipped while suspended.
 */
static void tegra_xudc_update_data_role(struct tegra_xudc *xudc,
					struct usb_phy *usbphy)
{
	int phy_index;

	/* Already in the role this event indicates? Nothing to do. */
	if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
	    (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
		dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
			xudc->device_mode);
		return;
	}

	xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS) ? true :
							     false;

	phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
	dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
		phy_index);

	if (!xudc->suspended && phy_index != -1) {
		xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
		xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
		xudc->curr_usbphy = usbphy;
		schedule_work(&xudc->usb_role_sw_work);
	}
}
/* VBUS notifier callback: forward the event to the role logic. */
static int tegra_xudc_vbus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct usb_phy *usbphy = data;
	struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
					       vbus_nb);

	dev_dbg(xudc->dev, "%s(): event is %d\n", __func__,
		usbphy->last_event);

	tegra_xudc_update_data_role(xudc, usbphy);

	return NOTIFY_OK;
}
/*
 * Delayed-work handler for the PLC (port link change) workaround:
 * if the link ended up in the Inactive state, toggle VBUS by cycling
 * the UTMI PHY role NONE -> DEVICE to recover the port.
 */
static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_NONE);
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_DEVICE);

			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}
/*
 * Delayed-work handler for the port-reset workaround: if a secondary
 * port-reset completion never arrived and the port is Disabled,
 * ask the padctl driver to toggle VBUS via a UTMI port reset.
 */
static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->device_mode && xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC doesn't complete in 100ms, toggle the vbus */
			ret = tegra_phy_xusb_utmi_port_reset(
				xudc->curr_utmi_phy);
			if (ret == 1)
				xudc->wait_for_sec_prc = 0;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}
/*
 * Translate a transfer-ring TRB pointer into its DMA bus address.
 * Returns 0 (and warns) if the TRB is outside the ring.
 */
static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned int idx = trb - ep->transfer_ring;

	if (WARN_ON(idx >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return ep->transfer_ring_phys + idx * sizeof(*trb);
}
/*
 * Translate a DMA bus address back into a transfer-ring TRB pointer.
 * Returns NULL (and warns) if the address is outside the ring.
 */
static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	unsigned int idx;

	idx = (addr - ep->transfer_ring_phys) /
		sizeof(struct tegra_xudc_trb);
	if (WARN_ON(idx >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	return &ep->transfer_ring[idx];
}
/*
 * Make the controller re-read endpoint @ep's context from memory and
 * wait for the hardware to clear the reload bit.
 */
static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}
/*
 * Pause endpoint @ep: set its EP_PAUSE bit, wait for the state-change
 * bit in EP_STCHG, then write the bit back to acknowledge the change.
 * No-op if the endpoint is already paused.
 */
static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}
/*
 * Unpause endpoint @ep: clear its EP_PAUSE bit, wait for EP_STCHG,
 * then acknowledge the change. No-op if the endpoint is not paused.
 */
static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}
/*
 * Unpause every endpoint at once: clear EP_PAUSE, wait for the
 * state-change bits of all previously-paused endpoints, and
 * acknowledge them.
 */
static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}
/*
 * Halt endpoint @ep: set its EP_HALT bit, wait for EP_STCHG, then
 * acknowledge the change. No-op if the endpoint is already halted.
 */
static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}
/*
 * Clear the halt on endpoint @ep, wait for EP_STCHG, then acknowledge
 * the change. No-op if the endpoint is not halted.
 */
static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}
/*
 * Clear the halt on every endpoint at once, then wait for and
 * acknowledge the state changes of all previously-halted endpoints.
 */
static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!val)
		return;
	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}
/*
 * Wait until endpoint @ep reports stopped, then acknowledge the
 * EP_STOPPED bit by writing it back.
 */
static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}
/* Wait until the DMA thread for endpoint @ep is no longer active. */
static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}
/*
 * Complete @req on @ep with @status, unmap it for DMA and give it
 * back to the gadget driver.
 *
 * Called with xudc->lock held; the lock is dropped around the
 * ->complete() callback (usb_gadget_giveback_request()) because the
 * gadget driver may re-enter this driver from it. For control
 * endpoints the unmap direction comes from the EP0 setup state rather
 * than the descriptor.
 */
static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}
/*
 * Complete every queued request on @ep with @status. Re-reads the
 * list head each iteration because tegra_xudc_req_done() drops the
 * lock around the gadget completion callback.
 */
static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *next;

	while (!list_empty(&ep->queue)) {
		next = list_first_entry(&ep->queue,
					struct tegra_xudc_request, list);
		tegra_xudc_req_done(ep, next, status);
	}
}
/*
 * Number of TRBs that can still be enqueued on @ep's transfer ring
 * (accounting for the link TRB and the full/empty distinction).
 */
static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	unsigned int used;

	if (ep->ring_full)
		return 0;

	if (ep->enq_ptr < ep->deq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	used = ep->enq_ptr - ep->deq_ptr;

	return XUDC_TRANSFER_RING_SIZE - used - 2;
}
/*
 * Fill in one transfer TRB for @req on @ep.
 *
 * Writes the buffer pointer and length (up to XUDC_TRB_MAX_BUFFER_SIZE
 * per TRB), the remaining-TD-size field, chain/IOC/ISP flags, the
 * type-specific control fields, and finally the cycle bit (last, so
 * the hardware only sees a complete TRB). Updates the request's
 * queued-TRB and queued-byte counters.
 */
static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;	/* zero-length TRB */

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	/* Last TRB of a TD (or of the data before a standalone ZLP TD). */
	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	/* Interrupt on short packet for all receive directions. */
	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	/* Hand the TRB to hardware by writing the producer cycle bit. */
	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}
/*
 * Queue as many of @req's remaining TRBs as fit on @ep's ring.
 *
 * Marks the ring full when there is not enough room, handles wrapping
 * through the link TRB (flipping the producer cycle state), and stops
 * early after an IOC TRB when a control ZLP must wait for the previous
 * TD's transfer event. Returns the number of TRBs queued.
 */
static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}

	/*
	 * To generate zero-length packet on USB bus, SW needs schedule a
	 * standalone zero-length TD. According to HW's behavior, SW needs
	 * to schedule TDs in different ways for different endpoint types.
	 *
	 * For control endpoint:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */

	if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
		wait_td = true;

	if (!req->first_trb)
		req->first_trb = &ep->transfer_ring[ep->enq_ptr];

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = false;

		if ((i == count - 1) || (wait_td && i == count - 2))
			ioc = true;

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		/* Reached the link TRB: hand it over and wrap around. */
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}

		if (ioc)
			break;
	}

	return count;
}
/*
 * Ring the hardware doorbell for @ep. For control endpoints the
 * doorbell carries the current setup sequence number; for stream
 * (bulk streams) endpoints it carries the stream ID of the first
 * queued request, unless the stream was rejected by the host.
 */
static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;
	u32 val;

	if (list_empty(&ep->queue))
		return;

	val = DB_TARGET(ep->index);
	if (usb_endpoint_xfer_control(ep->desc)) {
		val |= DB_STREAMID(xudc->setup_seq_num);
	} else if (usb_ss_max_streams(ep->comp_desc) > 0) {
		struct tegra_xudc_request *req;

		/* Don't ring doorbell if the stream has been rejected. */
		if (ep->stream_rejected)
			return;

		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		val |= DB_STREAMID(req->usb_req.stream_id);
	}

	dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
	xudc_writel(xudc, val, DB);
}
/*
 * Push as many pending requests as fit onto @ep's transfer ring and,
 * if any TRBs were queued, ring the doorbell.
 */
static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc_request *req;
	bool queued_any = false;

	list_for_each_entry(req, &ep->queue, list) {
		if (ep->ring_full)
			break;

		if (tegra_xudc_queue_trbs(ep, req) > 0)
			queued_any = true;
	}

	if (queued_any)
		tegra_xudc_ep_ring_doorbell(ep);
}
/*
 * Queue @req on @ep with xudc->lock held.
 *
 * Maps the request for DMA (for EP0 the direction follows the setup
 * state), computes how many TRBs the transfer needs — plus one for a
 * zero-length request and one extra standalone ZLP TD when
 * usb_req.zero is set and the length is a maxpacket multiple — then
 * kicks the transfer ring. Returns 0 or a negative error.
 */
static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	int err;

	/* Only one control transfer may be outstanding at a time. */
	if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "control EP has pending transfers\n");
		return -EINVAL;
	}

	if (usb_endpoint_xfer_control(ep->desc)) {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     (xudc->setup_state ==
					      DATA_STAGE_XFER));
	} else {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     usb_endpoint_dir_in(ep->desc));
	}

	if (err < 0) {
		dev_err(xudc->dev, "failed to map request: %d\n", err);
		return err;
	}

	req->first_trb = NULL;
	req->last_trb = NULL;
	req->buf_queued = 0;
	req->trbs_queued = 0;
	req->need_zlp = false;
	req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
					XUDC_TRB_MAX_BUFFER_SIZE);
	/* A zero-length request still needs one (zero-length) TRB. */
	if (req->usb_req.length == 0)
		req->trbs_needed++;

	/* Standalone ZLP TD when the transfer ends on a packet boundary. */
	if (!usb_endpoint_xfer_isoc(ep->desc) &&
	    req->usb_req.zero && req->usb_req.length &&
	    ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
		req->trbs_needed++;
		req->need_zlp = true;
	}

	req->usb_req.status = -EINPROGRESS;
	req->usb_req.actual = 0;

	list_add_tail(&req->list, &ep->queue);

	tegra_xudc_ep_kick_queue(ep);

	return 0;
}
/*
 * usb_ep_ops.queue: validate arguments, take the controller lock, and
 * hand the request to __tegra_xudc_ep_queue(). Returns -ESHUTDOWN if
 * the controller is powergated or the endpoint is disabled.
 */
static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc_request *req;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int err;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc)
		err = -ESHUTDOWN;
	else
		err = __tegra_xudc_ep_queue(ep, req);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return err;
}
/*
 * Remove a cancelled request's TRBs (and everything queued after it)
 * from @ep's transfer ring.
 *
 * Each cleared TRB gets the inverse of its previous cycle bit so the
 * hardware will not consume it; the enqueue pointer and producer cycle
 * state are rewound to the start of the cancelled request, and all
 * later requests on the queue are reset so they can be re-queued.
 */
static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs part of or after the cancelled request,
	 * and must correct trb cycle bit to the last un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		/* Wrap around when the link TRB is reached. */
		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first trb of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;

	/* Reset every request queued after the cancelled one. */
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;
		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}
/*
* Determine if the given TRB is in the range [first trb, last trb] for the
* given request.
*/
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);

	/* Non-wrapped span, or wrapped span with trb after first_trb. */
	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	/* Wrapped span with trb in the low part, before last_trb. */
	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}
/*
* Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
* for the given endpoint and request.
*/
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	/* trb below first_trb: in range unless enq is between them. */
	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	/* Wrapped range: enq .. end-of-ring .. first_trb. */
	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}
/*
 * Cancel @req on @ep with xudc->lock held.
 *
 * If the hardware has not yet consumed the request's TRBs, the ring is
 * squeezed and the request completed with -ECONNRESET; when it was
 * partially transferred, the endpoint context (EDTLA, partial-TD state,
 * dequeue pointer) is rewound and the context reloaded. Requests whose
 * completion event is already pending are completed but return -EINVAL.
 */
static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r = NULL, *iter;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(iter, &ep->queue, list) {
		if (iter != req)
			continue;
		r = iter;
		break;
	}

	if (!r)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		ret = -EINVAL;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}
/*
 * usb_ep_ops.dequeue: validate arguments, take the controller lock,
 * and delegate to __tegra_xudc_ep_dequeue(). Returns -ESHUTDOWN if
 * the controller is powergated or the endpoint is disabled.
 */
static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc_request *req;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int err;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc)
		err = -ESHUTDOWN;
	else
		err = __tegra_xudc_ep_dequeue(ep, req);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return err;
}
/*
 * Set or clear the halt state of @ep with xudc->lock held.
 *
 * Halting is a single EP_HALT operation; clearing the halt requires
 * cycling the endpoint context through DISABLED, zeroing its transfer
 * state (partial TD, split state, sequence number), reloading it, and
 * re-ringing the doorbell for any pending requests. Isochronous
 * endpoints cannot be halted. Returns 0 or a negative error.
 */
static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isochronous EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_rsvd(ep->context, 0);
		ep_ctx_write_partial_td(ep->context, 0);
		ep_ctx_write_splitxstate(ep->context, 0);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}
/*
 * usb_ep_ops.set_halt: validate arguments, refuse to halt an IN
 * endpoint with requests pending, and delegate to
 * __tegra_xudc_ep_set_halt().
 */
static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	/*
	 * Bug fix: usb_endpoint_dir_in(ep->desc) below dereferenced
	 * ep->desc before __tegra_xudc_ep_set_halt() performed its own
	 * NULL check, so halting a disabled endpoint could oops. Bail
	 * out early with the same -EINVAL that helper would return.
	 */
	if (!ep->desc) {
		ret = -EINVAL;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
/*
 * Initialize @ep's hardware endpoint context from its descriptors:
 * max packet size, burst, ESIT payload, stream/mult parameters for
 * superspeed, the hardware endpoint type, the transfer-ring dequeue
 * pointer, and a per-type average TRB length hint.
 */
static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			/* High-bandwidth: extra transactions per microframe. */
			maxburst = usb_endpoint_maxp_mult(desc) - 1;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	/* OUT endpoints use the raw type; IN/control are offset by 4. */
	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}
/*
 * Turn the ring's final TRB into a link back to the ring start, with
 * the toggle-cycle bit set so the producer cycle state flips on wrap.
 */
static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_toggle_cycle(trb, 1);
}
/*
 * Disable @ep with xudc->lock held: put its context into DISABLED,
 * reload it, fail all pending requests with -ESHUTDOWN, and clear the
 * endpoint's pause/halt/stopped state. When this was the last
 * non-control endpoint of a configuration, drop back to the ADDRESS
 * device state and stop the controller (clear CTRL_RUN).
 */
static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_err(xudc->dev, "endpoint %u already disabled\n",
			ep->index);
		return -EINVAL;
	}

	ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

	ep_reload(xudc, ep->index);

	tegra_xudc_ep_nuke(ep, -ESHUTDOWN);

	xudc->nr_enabled_eps--;
	if (usb_endpoint_xfer_isoc(ep->desc))
		xudc->nr_isoch_eps--;

	ep->desc = NULL;
	ep->comp_desc = NULL;

	memset(ep->context, 0, sizeof(*ep->context));

	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);
	if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
		xudc_writel(xudc, BIT(ep->index), EP_STOPPED);

	/*
	 * If this is the last endpoint disabled in a de-configure request,
	 * switch back to address state.
	 */
	if ((xudc->device_state == USB_STATE_CONFIGURED) &&
	    (xudc->nr_enabled_eps == 1)) {
		u32 val;

		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);

		val = xudc_readl(xudc, CTRL);
		val &= ~CTRL_RUN;
		xudc_writel(xudc, val, CTRL);
	}

	dev_info(xudc->dev, "ep %u disabled\n", ep->index);

	return 0;
}
/*
 * usb_ep_ops.disable: validate the argument, take the controller
 * lock, and delegate to __tegra_xudc_ep_disable(). Returns -ESHUTDOWN
 * while the controller is powergated.
 */
static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int err;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated)
		err = -ESHUTDOWN;
	else
		err = __tegra_xudc_ep_disable(ep);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return err;
}
/*
 * Enable endpoint @ep according to @desc: reset its transfer ring,
 * program the endpoint context and bring the EP out of pause/halt.
 * Caller must hold xudc->lock.
 *
 * Returns 0 on success, -EINVAL if a SuperSpeed non-control endpoint
 * lacks a companion descriptor, or -EBUSY when the isochronous budget
 * is exhausted.
 */
static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc *xudc = ep->xudc;
	unsigned int i;
	u32 val;

	/* SuperSpeed endpoints (other than EP0) need a companion descriptor. */
	if (xudc->gadget.speed == USB_SPEED_SUPER &&
	    !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
		return -EINVAL;

	/* Disable the EP if it is not disabled */
	if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
		__tegra_xudc_ep_disable(ep);

	ep->desc = desc;
	ep->comp_desc = ep->usb_ep.comp_desc;

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * NOTE(review): the limit is tested before the increment, so
		 * nr_isoch_eps can reach XUDC_MAX_ISOCH_EPS + 1 — looks like
		 * an off-by-one ('>=' intended); confirm against the hardware
		 * budget. Also note ep->desc stays set on this error path.
		 */
		if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
			dev_err(xudc->dev, "too many isochronous endpoints\n");
			return -EBUSY;
		}
		xudc->nr_isoch_eps++;
	}

	/* Start from an empty transfer ring terminated by a link TRB. */
	memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
	       sizeof(*ep->transfer_ring));
	setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);

	ep->enq_ptr = 0;
	ep->deq_ptr = 0;
	ep->pcs = true;
	ep->ring_full = false;
	xudc->nr_enabled_eps++;

	tegra_xudc_ep_context_setup(ep);

	/*
	 * No need to reload and un-halt EP0. This will be done automatically
	 * once a valid SETUP packet is received.
	 */
	if (usb_endpoint_xfer_control(desc))
		goto out;

	/*
	 * Transition to configured state once the first non-control
	 * endpoint is enabled.
	 */
	if (xudc->device_state == USB_STATE_ADDRESS) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_RUN;
		xudc_writel(xudc, val, CTRL);

		xudc->device_state = USB_STATE_CONFIGURED;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * Pause all bulk endpoints when enabling an isoch endpoint
		 * to ensure the isoch endpoint is allocated enough bandwidth.
		 */
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_pause(xudc, i);
		}
	}

	ep_reload(xudc, ep->index);
	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);

	/* Resume the bulk endpoints paused above. */
	if (usb_endpoint_xfer_isoc(desc)) {
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_unpause(xudc, i);
		}
	}

out:
	dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
		 usb_ep_type_string(usb_endpoint_type(ep->desc)),
		 usb_endpoint_dir_in(ep->desc) ? "in" : "out");

	return 0;
}
/*
 * usb_ep_ops.enable: validate the arguments, then run
 * __tegra_xudc_ep_enable() under the device lock. Fails with
 * -ESHUTDOWN while the controller is power-gated.
 */
static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int err;

	if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated)
		err = -ESHUTDOWN;
	else
		err = __tegra_xudc_ep_enable(ep, desc);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return err;
}
/*
 * usb_ep_ops.alloc_request: allocate a zeroed request wrapper and hand
 * back the embedded struct usb_request, or NULL on allocation failure.
 */
static struct usb_request *
tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
{
	struct tegra_xudc_request *req = kzalloc(sizeof(*req), gfp);

	if (req) {
		INIT_LIST_HEAD(&req->list);
		return &req->usb_req;
	}

	return NULL;
}
/* usb_ep_ops.free_request: release a request from alloc_request above. */
static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
				       struct usb_request *usb_req)
{
	kfree(to_xudc_req(usb_req));
}
/* Endpoint operations for all non-control endpoints (ep1..epN). */
static const struct usb_ep_ops tegra_xudc_ep_ops = {
	.enable = tegra_xudc_ep_enable,
	.disable = tegra_xudc_ep_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};
/*
 * EP0 is enabled by the driver itself (see tegra_xudc_gadget_start());
 * reject attempts by the gadget core to enable it.
 */
static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	return -EBUSY;
}
/* EP0 is managed internally; reject disable requests from the core. */
static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
{
	return -EBUSY;
}
/*
 * Endpoint operations for EP0. Identical to tegra_xudc_ep_ops except
 * that enable/disable are stubbed out — EP0 lifetime is driver-internal.
 */
static const struct usb_ep_ops tegra_xudc_ep0_ops = {
	.enable = tegra_xudc_ep0_enable,
	.disable = tegra_xudc_ep0_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};
/*
 * usb_gadget_ops.get_frame: return the current frame index from the
 * MFINDEX register, or -ESHUTDOWN while the controller is power-gated.
 */
static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int frame;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated)
		frame = -ESHUTDOWN;
	else
		frame = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
			MFINDEX_FRAME_SHIFT;
	spin_unlock_irqrestore(&xudc->lock, flags);

	return frame;
}
/*
 * Bring the link back to U0, restore the pre-suspend device state and
 * re-ring every endpoint's doorbell so paused transfers resume.
 * Caller must hold xudc->lock.
 */
static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	ep_unpause_all(xudc);

	/* Direct link to U0. */
	val = xudc_readl(xudc, PORTSC);
	if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
		/* Clear change bits so none are acknowledged by accident. */
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	if (xudc->device_state == USB_STATE_SUSPENDED) {
		xudc->device_state = xudc->resume_state;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
		xudc->resume_state = 0;
	}

	/*
	 * Doorbells may be dropped if they are sent too soon (< ~200ns)
	 * after unpausing the endpoint. Wait for 500ns just to be safe.
	 */
	ndelay(500);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
}
/*
 * usb_gadget_ops.wakeup: initiate remote wakeup, but only if the host
 * has armed it (PORTPM_RWE at high speed and below, PORTPM_FRWE at
 * SuperSpeed). At SuperSpeed a FUNCTION_WAKE device notification is
 * also sent. Returns -ESHUTDOWN while power-gated, 0 otherwise.
 */
static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	val = xudc_readl(xudc, PORTPM);
	dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
		val, gadget->speed);

	/* Silently do nothing when the host has not enabled remote wakeup. */
	if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
	     (val & PORTPM_RWE)) ||
	    ((xudc->gadget.speed == USB_SPEED_SUPER) &&
	     (val & PORTPM_FRWE))) {
		tegra_xudc_resume_device_state(xudc);

		/* Send Device Notification packet. */
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
			      | DEVNOTIF_LO_TRIG;
			/*
			 * HI is written first; presumably the LO write (with
			 * the TRIG bit) is what actually sends the packet.
			 */
			xudc_writel(xudc, 0, DEVNOTIF_HI);
			xudc_writel(xudc, val, DEVNOTIF_LO);
		}
	}

unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
/*
 * usb_gadget_ops.pullup: connect to / disconnect from the bus by
 * toggling CTRL_ENABLE. The register is only touched when the desired
 * state differs from the cached one; always succeeds.
 */
static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 ctrl;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->pullup != is_on) {
		ctrl = xudc_readl(xudc, CTRL);
		if (is_on)
			ctrl |= CTRL_ENABLE;
		else
			ctrl &= ~CTRL_ENABLE;
		xudc_writel(xudc, ctrl, CTRL);
	}
	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}
/*
 * usb_gadget_ops.udc_start: bind @driver, enable EP0, unmask interrupts
 * and, if a pullup was requested earlier, connect to the bus.
 *
 * Returns 0 on success, -EINVAL for a NULL driver, -EBUSY when a driver
 * is already bound, or the error from enabling EP0.
 */
static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;
	unsigned int i;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	/* Unmask interrupts; CTRL_LSE presumably enables link state events. */
	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	/* Honor a pullup requested before the driver was bound. */
	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, gadget);

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}
/*
 * usb_gadget_ops.udc_stop: detach from the PHYs, mask interrupts, pull
 * off the bus, disable EP0 and drop the driver binding. Always succeeds.
 */
static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, NULL);

	/* Mask interrupts and disconnect from the bus. */
	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}
/*
 * usb_gadget_ops.vbus_draw: report the configured current draw to the
 * PHY, but only for standard downstream ports (SDP charger type).
 */
static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
				       unsigned int m_a)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	int ret = 0;

	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);

	if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);

	return ret;
}
static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
struct tegra_xudc *xudc = to_xudc(gadget);
dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
xudc->selfpowered = !!is_on;
return 0;
}
/* usb_gadget_ops dispatch table registered with the UDC core. */
static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.vbus_draw = tegra_xudc_gadget_vbus_draw,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};
/* Completion callback for EP0 stages that need no post-processing. */
static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}
static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
void (*cmpl)(struct usb_ep *, struct usb_request *))
{
xudc->ep0_req->usb_req.buf = NULL;
xudc->ep0_req->usb_req.dma = 0;
xudc->ep0_req->usb_req.length = 0;
xudc->ep0_req->usb_req.complete = cmpl;
xudc->ep0_req->usb_req.context = xudc;
return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}
/*
 * Queue a data-stage transfer of @len bytes at @buf on EP0, reusing the
 * driver-owned ep0_req; @cmpl runs when the stage completes.
 */
static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	struct usb_request *usb_req = &xudc->ep0_req->usb_req;

	usb_req->buf = buf;
	usb_req->length = len;
	usb_req->complete = cmpl;
	usb_req->context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}
/*
 * Advance the EP0 control-transfer state machine after a stage finishes:
 * a completed data stage is followed by the opposite-direction status
 * stage; anything else returns to waiting for the next SETUP packet.
 */
static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	if (xudc->setup_state == DATA_STAGE_XFER) {
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
	} else if (xudc->setup_state == DATA_STAGE_RECV) {
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
	} else {
		xudc->setup_state = WAIT_FOR_SETUP;
	}
}
/*
 * Forward a control request to the gadget driver's setup() callback.
 * The lock is dropped across the call, presumably because the driver
 * may re-enter this driver (e.g. to queue a request) and would deadlock
 * otherwise — confirm callers tolerate the state changing meanwhile.
 */
static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}
/*
 * Status-stage completion for SET_FEATURE: if a TEST_MODE selector was
 * latched, program it into PORT_TM only now — after the status stage —
 * and clear the latch.
 */
static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if (!xudc->test_mode_pattern)
		return;

	xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
	xudc->test_mode_pattern = 0;
}
/*
 * Handle standard SET_FEATURE / CLEAR_FEATURE (@set selects which) for
 * device, interface and endpoint recipients, then queue the status
 * stage. Returns a negative errno for malformed or state-inappropriate
 * requests; FUNC_SUSPEND is partially delegated to the gadget driver.
 */
static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	u32 feature = le16_to_cpu(ctrl->wValue);
	u32 index = le16_to_cpu(ctrl->wIndex);
	u32 val, ep;
	int ret;

	/* SET/CLEAR_FEATURE carries no data stage. */
	if (le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (feature) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* RWE only applies below SuperSpeed, once addressed. */
			if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
			    (xudc->device_state == USB_STATE_DEFAULT))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if (set)
				val |= PORTPM_RWE;
			else
				val &= ~PORTPM_RWE;
			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
			/* U1/U2 are SuperSpeed, configured-state features. */
			if ((xudc->device_state != USB_STATE_CONFIGURED) ||
			    (xudc->gadget.speed != USB_SPEED_SUPER))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if ((feature == USB_DEVICE_U1_ENABLE) &&
			    xudc->soc->u1_enable) {
				if (set)
					val |= PORTPM_U1E;
				else
					val &= ~PORTPM_U1E;
			}

			if ((feature == USB_DEVICE_U2_ENABLE) &&
			    xudc->soc->u2_enable) {
				if (set)
					val |= PORTPM_U2E;
				else
					val &= ~PORTPM_U2E;
			}

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_TEST_MODE:
			if (xudc->gadget.speed != USB_SPEED_HIGH)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			/*
			 * Test selector lives in the high byte of wIndex;
			 * it is applied after the status stage completes
			 * (see set_feature_complete()).
			 */
			xudc->test_mode_pattern = index >> 8;
			break;
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_INTERFACE:
		if (xudc->device_state != USB_STATE_CONFIGURED)
			return -EINVAL;

		switch (feature) {
		case USB_INTRF_FUNC_SUSPEND:
			if (set) {
				val = xudc_readl(xudc, PORTPM);

				if (index & USB_INTRF_FUNC_SUSPEND_RW)
					val |= PORTPM_FRWE;
				else
					val &= ~PORTPM_FRWE;

				xudc_writel(xudc, val, PORTPM);
			}

			/* Let the gadget driver see the request too. */
			return tegra_xudc_ep0_delegate_req(xudc, ctrl);
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_ENDPOINT:
		/* Map (ep number, direction) to the internal EP index. */
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);

		if ((xudc->device_state == USB_STATE_DEFAULT) ||
		    ((xudc->device_state == USB_STATE_ADDRESS) &&
		     (index != 0)))
			return -EINVAL;

		ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
}
/*
 * Handle standard GET_STATUS for device, interface and endpoint
 * recipients. The 16-bit status word is transferred from
 * xudc->status_buf, a driver member that outlives the data stage
 * (a stack buffer would not).
 */
static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
				     struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep_context *ep_ctx;
	u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
	u16 status = 0;

	/* GET_STATUS is always an IN transfer of exactly two bytes. */
	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 2))
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		val = xudc_readl(xudc, PORTPM);

		if (xudc->selfpowered)
			status |= BIT(USB_DEVICE_SELF_POWERED);

		if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		    (val & PORTPM_RWE))
			status |= BIT(USB_DEVICE_REMOTE_WAKEUP);

		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			if (val & PORTPM_U1E)
				status |= BIT(USB_DEV_STAT_U1_ENABLED);
			if (val & PORTPM_U2E)
				status |= BIT(USB_DEV_STAT_U2_ENABLED);
		}
		break;
	case USB_RECIP_INTERFACE:
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			status |= USB_INTRF_STAT_FUNC_RW_CAP;
			val = xudc_readl(xudc, PORTPM);
			if (val & PORTPM_FRWE)
				status |= USB_INTRF_STAT_FUNC_RW;
		}
		break;
	case USB_RECIP_ENDPOINT:
		/* Map (ep number, direction) to the internal EP index. */
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);
		ep_ctx = &xudc->ep_context[ep];

		if ((xudc->device_state != USB_STATE_CONFIGURED) &&
		    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
			return -EINVAL;

		if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
			return -EINVAL;

		if (xudc_readl(xudc, EP_HALT) & BIT(ep))
			status |= BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	xudc->status_buf = cpu_to_le16(status);

	return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
					 sizeof(xudc->status_buf),
					 no_op_complete);
}
/* Data-stage completion for SET_SEL; the received values are ignored. */
static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with SEL values */
}
/*
 * Handle standard SET_SEL: validate the request shape (OUT, device
 * recipient, 6-byte payload, device addressed) and receive the SEL
 * timing values into xudc->sel_timing. The checks have no side effects,
 * so they are folded into one -EINVAL condition.
 */
static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
				  struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD) ||
	    xudc->device_state == USB_STATE_DEFAULT ||
	    le16_to_cpu(ctrl->wIndex) != 0 ||
	    le16_to_cpu(ctrl->wValue) != 0 ||
	    le16_to_cpu(ctrl->wLength) != 6)
		return -EINVAL;

	return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
					 sizeof(xudc->sel_timing),
					 set_sel_complete);
}
/* Status-stage completion for SET_ISOCH_DELAY; nothing further to do. */
static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with isoch delay */
}
/*
 * Handle standard SET_ISOCH_DELAY: validate the request, cache the
 * delay value and queue the status stage.
 */
static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
					  struct usb_ctrlrequest *ctrl)
{
	u32 delay = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	/*
	 * NOTE: @delay comes from a 16-bit field, so the > 65535 test can
	 * never fire; it is kept for parity with the original checks.
	 */
	if (delay > 65535 || le16_to_cpu(ctrl->wIndex) != 0 ||
	    le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	xudc->isoch_delay = delay;

	return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
}
static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
{
struct tegra_xudc *xudc = req->context;
if ((xudc->device_state == USB_STATE_DEFAULT) &&
(xudc->dev_addr != 0)) {
xudc->device_state = USB_STATE_ADDRESS;
usb_gadget_set_state(&xudc->gadget, xudc->device_state);
} else if ((xudc->device_state == USB_STATE_ADDRESS) &&
(xudc->dev_addr == 0)) {
xudc->device_state = USB_STATE_DEFAULT;
usb_gadget_set_state(&xudc->gadget, xudc->device_state);
}
}
/*
 * Handle standard SET_ADDRESS: validate the request, program the new
 * address into both the CTRL register and the EP0 context, then queue
 * the status stage. The device-state transition happens afterwards in
 * set_address_complete().
 */
static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u32 val, addr = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	/* USB addresses are 7 bits; wIndex/wLength must be zero. */
	if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	/* SET_ADDRESS is invalid in the configured state. */
	if (xudc->device_state == USB_STATE_CONFIGURED)
		return -EINVAL;

	dev_dbg(xudc->dev, "set address: %u\n", addr);

	xudc->dev_addr = addr;
	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_DEVADDR_MASK);
	val |= CTRL_DEVADDR(addr);
	xudc_writel(xudc, val, CTRL);

	ep_ctx_write_devaddr(ep0->context, addr);

	return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
}
/*
 * Dispatch a standard control request to its handler; anything not
 * handled here (including SET_CONFIGURATION) is delegated to the
 * gadget driver's setup() callback.
 */
static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
		ret = tegra_xudc_ep0_get_status(xudc, ctrl);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = tegra_xudc_ep0_set_address(xudc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
		ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
		ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
		ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
		/*
		 * In theory we need to clear RUN bit before status stage of
		 * deconfig request sent, but this seems to be causing problems.
		 * Clear RUN once all endpoints are disabled instead.
		 */
		fallthrough;
	default:
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
		break;
	}

	return ret;
}
/*
 * Process one SETUP packet: record its sequence number, derive the next
 * control-transfer stage from wLength/direction and hand the request to
 * the standard handler or the gadget driver. On any failure EP0 is
 * halted (a protocol STALL) until the next SETUP arrives.
 */
static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
					       struct usb_ctrlrequest *ctrl,
					       u16 seq_num)
{
	int ret;

	xudc->setup_seq_num = seq_num;

	/* Ensure EP0 is unhalted. */
	ep_unhalt(xudc, 0);

	/*
	 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
	 * are invalid. Halt EP0 until we get a valid packet.
	 */
	if (xudc->soc->invalid_seq_num &&
	    (seq_num == 0xfffe || seq_num == 0xffff)) {
		dev_warn(xudc->dev, "invalid sequence number detected\n");
		ep_halt(xudc, 0);
		return;
	}

	/* A non-zero wLength implies a data stage; its direction follows
	 * bRequestType. Otherwise we go straight to the status stage. */
	if (ctrl->wLength)
		xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
				     DATA_STAGE_XFER : DATA_STAGE_RECV;
	else
		xudc->setup_state = STATUS_STAGE_XFER;

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
	else
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);

	if (ret < 0) {
		dev_warn(xudc->dev, "setup request failed: %d\n", ret);
		xudc->setup_state = WAIT_FOR_SETUP;
		ep_halt(xudc, 0);
	}
}
/*
 * Handle a SETUP-packet event TRB. When no control transfer is in
 * flight, the packet is processed immediately; otherwise it is stashed
 * and replayed once the controller reports a sequence-number error for
 * the superseded transfer.
 */
static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
					struct tegra_xudc_trb *event)
{
	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
	u16 seq_num = trb_read_seq_num(event);

	if (xudc->setup_state == WAIT_FOR_SETUP) {
		tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
		return;
	}

	/* A previous setup request is still being handled: queue this one. */
	memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
	xudc->setup_packet.seq_num = seq_num;
	xudc->queued_setup_packet = true;
}
/*
 * Find the queued request whose TRBs contain @trb, or NULL if none
 * does. Requests are queued in ring order, so the first request with no
 * TRBs posted marks the end of the searchable region.
 */
static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_request *req;

	list_for_each_entry(req, &ep->queue, list) {
		if (!req->trbs_queued)
			return NULL;

		if (trb_in_request(ep, req, trb))
			return req;
	}

	return NULL;
}
/*
 * Complete the request a transfer event refers to, if its TD has
 * finished: either a short packet arrived or the completed TRB is the
 * TD's last (CHAIN clear and all TRBs posted). Afterwards, kick the
 * queue so any waiting requests get TRBs.
 */
static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
						  struct tegra_xudc_ep *ep,
						  struct tegra_xudc_trb *event)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_trb *trb;
	bool short_packet;

	short_packet = (trb_read_cmpl_code(event) ==
			TRB_CMPL_CODE_SHORT_PACKET);

	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	req = trb_to_request(ep, trb);

	/*
	 * TDs are complete on short packet or when the completed TRB is the
	 * last TRB in the TD (the CHAIN bit is unset).
	 */
	if (req && (short_packet || (!trb_read_chain(trb) &&
		    (req->trbs_needed == req->trbs_queued)))) {
		struct tegra_xudc_trb *last = req->last_trb;
		unsigned int residual;

		/* Event reports bytes NOT transferred; convert to actual. */
		residual = trb_read_transfer_len(event);
		req->usb_req.actual = req->usb_req.length - residual;

		dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
			req->usb_req.actual, req->usb_req.length);

		tegra_xudc_req_done(ep, req, 0);

		if (ep->desc && usb_endpoint_xfer_control(ep->desc))
			tegra_xudc_ep0_req_done(xudc);

		/*
		 * Advance the dequeue pointer past the end of the current TD
		 * on short packet completion.
		 */
		if (short_packet) {
			ep->deq_ptr = (last - ep->transfer_ring) + 1;
			/* Slot SIZE-1 is the link TRB, so wrap to 0. */
			if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
				ep->deq_ptr = 0;
		}
	} else if (!req) {
		dev_warn(xudc->dev, "transfer event on dequeued request\n");
	}

	if (ep->desc)
		tegra_xudc_ep_kick_queue(ep);
}
/*
 * Handle a transfer event TRB: advance the endpoint's dequeue pointer,
 * then act on the completion code (normal completion, stream handling,
 * babble, sequence-number error, stop, or a hard error that halts the
 * endpoint).
 */
static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
					     struct tegra_xudc_trb *event)
{
	unsigned int ep_index = trb_read_endpoint_id(event);
	struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
	struct tegra_xudc_trb *trb;
	u16 comp_code;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
			 ep_index);
		return;
	}

	/* Update transfer ring dequeue pointer. */
	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	comp_code = trb_read_cmpl_code(event);
	/* On babble the ring is resynchronized below instead. */
	if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
		ep->deq_ptr = (trb - ep->transfer_ring) + 1;

		/* Slot SIZE-1 is the link TRB, so wrap to 0. */
		if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
			ep->deq_ptr = 0;
		ep->ring_full = false;
	}

	switch (comp_code) {
	case TRB_CMPL_CODE_SUCCESS:
	case TRB_CMPL_CODE_SHORT_PACKET:
		tegra_xudc_handle_transfer_completion(xudc, ep, event);
		break;
	case TRB_CMPL_CODE_HOST_REJECTED:
		dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);

		ep->stream_rejected = true;
		break;
	case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
		dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);

		if (ep->stream_rejected) {
			ep->stream_rejected = false;
			/*
			 * An EP is stopped when a stream is rejected. Wait
			 * for the EP to report that it is stopped and then
			 * un-stop it.
			 */
			ep_wait_for_stopped(xudc, ep_index);
		}
		tegra_xudc_ep_ring_doorbell(ep);
		break;
	case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
		/*
		 * Wait for the EP to be stopped so the controller stops
		 * processing doorbells.
		 */
		ep_wait_for_stopped(xudc, ep_index);
		/* Resync producer with consumer and abort everything. */
		ep->enq_ptr = ep->deq_ptr;
		tegra_xudc_ep_nuke(ep, -EIO);
		fallthrough;
	case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
	case TRB_CMPL_CODE_CTRL_DIR_ERR:
	case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
	case TRB_CMPL_CODE_RING_UNDERRUN:
	case TRB_CMPL_CODE_RING_OVERRUN:
	case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
	case TRB_CMPL_CODE_USB_TRANS_ERR:
	case TRB_CMPL_CODE_TRB_ERR:
		dev_err(xudc->dev, "completion error %#x on EP %u\n",
			comp_code, ep_index);

		ep_halt(xudc, ep_index);
		break;
	case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
		dev_info(xudc->dev, "sequence number error\n");

		/*
		 * Kill any queued control request and skip to the last
		 * setup packet we received.
		 */
		tegra_xudc_ep_nuke(ep, -EINVAL);
		xudc->setup_state = WAIT_FOR_SETUP;
		if (!xudc->queued_setup_packet)
			break;

		tegra_xudc_handle_ep0_setup_packet(xudc,
						   &xudc->setup_packet.ctrl_req,
						   xudc->setup_packet.seq_num);
		xudc->queued_setup_packet = false;
		break;
	case TRB_CMPL_CODE_STOPPED:
		dev_dbg(xudc->dev, "stop completion code on EP %u\n",
			ep_index);

		/* Disconnected. */
		tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
		break;
	default:
		dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
			comp_code, ep_index);
		break;
	}
}
/*
 * Reset software state after a bus reset or disconnect: abort all
 * in-flight transfers, flush EP0's transfer ring and sequence state,
 * and return the device to the default (un-addressed) state.
 */
static void tegra_xudc_reset(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	dma_addr_t deq_ptr;
	unsigned int i;

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ep_unpause_all(xudc);

	/* Complete every pending request on every endpoint with -ESHUTDOWN. */
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);

	/*
	 * Reset sequence number and dequeue pointer to flush the transfer
	 * ring.
	 */
	ep0->deq_ptr = ep0->enq_ptr;
	ep0->ring_full = false;

	xudc->setup_seq_num = 0;
	xudc->queued_setup_packet = false;

	ep_ctx_write_rsvd(ep0->context, 0);
	ep_ctx_write_partial_td(ep0->context, 0);
	ep_ctx_write_splitxstate(ep0->context, 0);
	ep_ctx_write_seq_num(ep0->context, 0);

	deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);

	/* Only program the context when the DMA address is valid. */
	if (!dma_mapping_error(xudc->dev, deq_ptr)) {
		ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
		ep_ctx_write_dcs(ep0->context, ep0->pcs);
	}

	ep_unhalt_all(xudc);
	ep_reload(xudc, 0);
	ep_unpause(xudc, 0);
}
/*
 * Handle a port connect: read the negotiated speed from PORTSC, size
 * EP0's max packet accordingly (512 for SuperSpeed, 64 otherwise) and
 * apply the SoC's U1/U2/LPM policy.
 */
static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u16 maxpacket;
	u32 val;

	val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
	switch (val) {
	case PORTSC_PS_LS:
		xudc->gadget.speed = USB_SPEED_LOW;
		break;
	case PORTSC_PS_FS:
		xudc->gadget.speed = USB_SPEED_FULL;
		break;
	case PORTSC_PS_HS:
		xudc->gadget.speed = USB_SPEED_HIGH;
		break;
	case PORTSC_PS_SS:
		xudc->gadget.speed = USB_SPEED_SUPER;
		break;
	default:
		xudc->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}

	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	xudc->setup_state = WAIT_FOR_SETUP;

	/* EP0 max packet: 512 at SuperSpeed, 64 at all other speeds. */
	if (xudc->gadget.speed == USB_SPEED_SUPER)
		maxpacket = 512;
	else
		maxpacket = 64;

	ep_ctx_write_max_packet_size(ep0->context, maxpacket);
	tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
	usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);

	/* Disable U1/U2 entry when the SoC doesn't support it. */
	if (!xudc->soc->u1_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U1TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (!xudc->soc->u2_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U2TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	/* Below SuperSpeed, accept or NYET LPM L1 requests per SoC policy. */
	if (xudc->gadget.speed <= USB_SPEED_HIGH) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_L1S_MASK);
		if (xudc->soc->lpm_enable)
			val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
		else
			val |= PORTPM_L1S(PORTPM_L1S_NYET);
		xudc_writel(xudc, val, PORTPM);
	}

	/* Ack a pending run-change status bit, if set. */
	val = xudc_readl(xudc, ST);
	if (val & ST_RC)
		xudc_writel(xudc, ST_RC, ST);
}
/*
 * Handle a port disconnect: reset controller software state, notify the
 * gadget driver and mark the device as not attached. Wakes up anyone
 * waiting on disconnect_complete.
 */
static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver && xudc->driver->disconnect) {
		/* Drop the lock: the callback may presumably re-enter us. */
		spin_unlock(&xudc->lock);
		xudc->driver->disconnect(&xudc->gadget);
		spin_lock(&xudc->lock);
	}

	xudc->device_state = USB_STATE_NOTATTACHED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	complete(&xudc->disconnect_complete);
}
/*
 * Handle a port (bus) reset: reset software state, notify the gadget
 * driver via the UDC core, then re-read the connection parameters.
 */
static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver) {
		/* Drop the lock across the core callback. */
		spin_unlock(&xudc->lock);
		usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
		spin_lock(&xudc->lock);
	}

	tegra_xudc_port_connect(xudc);
}
/*
 * Handle a port suspend (link entered U3): remember the current device
 * state for resume and notify the gadget driver.
 *
 * NOTE(review): xudc->driver is dereferenced without a NULL check here
 * (unlike tegra_xudc_port_disconnect()) — confirm this path cannot run
 * before udc_start binds a driver.
 */
static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port suspend\n");

	xudc->resume_state = xudc->device_state;
	xudc->device_state = USB_STATE_SUSPENDED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	if (xudc->driver->suspend) {
		/* Drop the lock across the gadget-driver callback. */
		spin_unlock(&xudc->lock);
		xudc->driver->suspend(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}
/*
 * Handle a port resume: restore the pre-suspend device state and notify
 * the gadget driver.
 *
 * NOTE(review): like tegra_xudc_port_suspend(), xudc->driver is used
 * without a NULL check — confirm a driver is always bound here.
 */
static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port resume\n");

	tegra_xudc_resume_device_state(xudc);

	if (xudc->driver->resume) {
		/* Drop the lock across the gadget-driver callback. */
		spin_unlock(&xudc->lock);
		xudc->driver->resume(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}
/*
 * Acknowledge the PORTSC change bit(s) in @flag. All other change bits
 * are masked out of the read-back value first so that only the
 * requested change is acknowledged (presumably write-1-to-clear).
 */
static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
{
	u32 portsc = xudc_readl(xudc, PORTSC);

	xudc_writel(xudc, (portsc & ~PORTSC_CHANGE_MASK) | flag, PORTSC);
}
/*
 * Service one round of port status changes: LTSSM halt requests, port
 * reset / warm reset changes, connect status changes, link state
 * changes and config errors. PORTSC is re-read between groups because
 * the handlers above may change it.
 */
static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	u32 portsc, porthalt;

	porthalt = xudc_readl(xudc, PORTHALT);
	/* Release a halted LTSSM when the hardware requests a state change. */
	if ((porthalt & PORTHALT_STCHG_REQ) &&
	    (porthalt & PORTHALT_HALT_LTSSM)) {
		dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
		porthalt &= ~PORTHALT_HALT_LTSSM;
		xudc_writel(xudc, porthalt, PORTHALT);
	}

	portsc = xudc_readl(xudc, PORTSC);
	/* Reset change while reset still asserted: arm the quirk workaround. */
	if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
#define TOGGLE_VBUS_WAIT_MS 100
		if (xudc->soc->port_reset_quirk) {
			schedule_delayed_work(&xudc->port_reset_war_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_for_sec_prc = 1;
		}
	}

	/* Reset change with reset de-asserted: the bus reset completed. */
	if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
		tegra_xudc_port_reset(xudc);
		cancel_delayed_work(&xudc->port_reset_war_work);
		xudc->wait_for_sec_prc = 0;
	}

	portsc = xudc_readl(xudc, PORTSC);
	/* Warm reset change; treat as a reset once WPR de-asserts. */
	if (portsc & PORTSC_WRC) {
		dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
		if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
			tegra_xudc_port_reset(xudc);
	}

	portsc = xudc_readl(xudc, PORTSC);
	/* Connect status change: CCS tells connect from disconnect. */
	if (portsc & PORTSC_CSC) {
		dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CSC);

		if (portsc & PORTSC_CCS)
			tegra_xudc_port_connect(xudc);
		else
			tegra_xudc_port_disconnect(xudc);

		if (xudc->wait_csc) {
			cancel_delayed_work(&xudc->plc_reset_work);
			xudc->wait_csc = false;
		}
	}

	portsc = xudc_readl(xudc, PORTSC);
	/* Port link state change: suspend/resume and inactive recovery. */
	if (portsc & PORTSC_PLC) {
		u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;

		dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PLC);
		switch (pls) {
		case PORTSC_PLS_U3:
			tegra_xudc_port_suspend(xudc);
			break;
		case PORTSC_PLS_U0:
			if (xudc->gadget.speed < USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_RESUME:
			if (xudc->gadget.speed == USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_INACTIVE:
			schedule_delayed_work(&xudc->plc_reset_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_csc = true;
			break;
		default:
			break;
		}
	}

	/* Config error change: log it and just acknowledge. */
	if (portsc & PORTSC_CEC) {
		dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CEC);
	}

	dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
}
static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
(xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
__tegra_xudc_handle_port_status(xudc);
}
/*
 * Dispatch a single event-ring TRB by its type; unknown types are
 * logged and ignored.
 */
static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
				    struct tegra_xudc_trb *event)
{
	u32 type = trb_read_type(event);

	dump_trb(xudc, "EVENT", event);

	switch (type) {
	case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
		tegra_xudc_handle_port_status(xudc);
		break;
	case TRB_TYPE_TRANSFER_EVENT:
		tegra_xudc_handle_transfer_event(xudc, event);
		break;
	case TRB_TYPE_SETUP_PACKET_EVENT:
		tegra_xudc_handle_ep0_event(xudc, event);
		break;
	default:
		dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
		break;
	}
}
/*
 * Drain the event ring: an event TRB is valid while its cycle bit
 * matches the consumer cycle state (ccs), which toggles each time all
 * ring segments have been traversed. Finally write the new dequeue
 * pointer back to ERDP with the event-handler-busy bit.
 */
static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
{
	struct tegra_xudc_trb *event;
	dma_addr_t erdp;

	while (true) {
		event = xudc->event_ring[xudc->event_ring_index] +
			xudc->event_ring_deq_ptr;

		/* Cycle bit mismatch: no more events produced yet. */
		if (trb_read_cycle(event) != xudc->ccs)
			break;

		tegra_xudc_handle_event(xudc, event);

		/* Advance within the segment, then across segments. */
		xudc->event_ring_deq_ptr++;
		if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
			xudc->event_ring_deq_ptr = 0;
			xudc->event_ring_index++;
		}

		/* Wrapped past the last segment: flip the cycle state. */
		if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
			xudc->event_ring_index = 0;
			xudc->ccs = !xudc->ccs;
		}
	}

	erdp = xudc->event_ring_phys[xudc->event_ring_index] +
		xudc->event_ring_deq_ptr * sizeof(*event);

	xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
	xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
}
/*
 * Interrupt handler: if the interrupt-pending bit is set, acknowledge
 * it (presumably write-1-to-clear) and drain the event ring under the
 * device lock; otherwise report IRQ_NONE for shared-IRQ bookkeeping.
 */
static irqreturn_t tegra_xudc_irq(int irq, void *data)
{
	struct tegra_xudc *xudc = data;
	unsigned long flags;
	u32 val;

	val = xudc_readl(xudc, ST);
	if (!(val & ST_IP))
		return IRQ_NONE;
	xudc_writel(xudc, ST_IP, ST);

	spin_lock_irqsave(&xudc->lock, flags);
	tegra_xudc_process_event_ring(xudc);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return IRQ_HANDLED;
}
/*
 * Allocate and initialize per-endpoint state for endpoint @index.
 *
 * Even indices are OUT endpoints and odd indices are IN endpoints;
 * index 0 is the bi-directional control endpoint and index 1 is
 * therefore unused. Each used endpoint gets a transfer ring from the
 * DMA pool.
 *
 * Returns 0 on success, or -ENOMEM if the transfer ring allocation
 * fails.
 */
static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	ep->xudc = xudc;
	ep->index = index;
	ep->context = &xudc->ep_context[index];
	INIT_LIST_HEAD(&ep->queue);

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return 0;

	ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
					   GFP_KERNEL,
					   &ep->transfer_ring_phys);
	if (!ep->transfer_ring)
		return -ENOMEM;

	if (index) {
		snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
			 (index % 2 == 0) ? "out" : "in");
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.max_streams = 16;
		ep->usb_ep.ops = &tegra_xudc_ep_ops;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		if (index & 1)
			ep->usb_ep.caps.dir_in = true;
		else
			ep->usb_ep.caps.dir_out = true;
		list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
	} else {
		/*
		 * Fix: the original strscpy(ep->name, "ep0", 3) only leaves
		 * room for "ep" plus the terminator, silently truncating the
		 * endpoint name to "ep" (strscpy copies at most size-1
		 * characters and always NUL-terminates). Size the copy to
		 * the destination buffer so the full "ep0" fits.
		 */
		strscpy(ep->name, "ep0", sizeof(ep->name));
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
		ep->usb_ep.ops = &tegra_xudc_ep0_ops;
		ep->usb_ep.caps.type_control = true;
		ep->usb_ep.caps.dir_in = true;
		ep->usb_ep.caps.dir_out = true;
	}

	return 0;
}
/*
 * Release the transfer ring of endpoint @index. EP1 never received one:
 * EP0 is bi-directional, so EP1, its nominal IN counterpart, is unused.
 */
static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	if (index != 1)
		dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
			      ep->transfer_ring_phys);
}
/*
 * Allocate everything the endpoints need: the shared endpoint context
 * array, a DMA pool for per-endpoint transfer rings, each endpoint's
 * state, and the preallocated request used for ep0 control transfers.
 * On failure, already-allocated endpoints and the context array are
 * released; the pool is device-managed (dmam_pool_create) and needs no
 * explicit cleanup here.
 */
static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
{
	struct usb_request *req;
	unsigned int i;
	int err;

	xudc->ep_context =
		dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
				   sizeof(*xudc->ep_context),
				   &xudc->ep_context_phys, GFP_KERNEL);
	if (!xudc->ep_context)
		return -ENOMEM;

	xudc->transfer_ring_pool =
		dmam_pool_create(dev_name(xudc->dev), xudc->dev,
				 XUDC_TRANSFER_RING_SIZE *
				 sizeof(struct tegra_xudc_trb),
				 sizeof(struct tegra_xudc_trb), 0);
	if (!xudc->transfer_ring_pool) {
		err = -ENOMEM;
		goto free_ep_context;
	}

	INIT_LIST_HEAD(&xudc->gadget.ep_list);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
		err = tegra_xudc_alloc_ep(xudc, i);
		if (err < 0)
			goto free_eps;
	}

	req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_eps;
	}
	xudc->ep0_req = to_xudc_req(req);

	return 0;

free_eps:
	/* Unwind only the endpoints that were successfully allocated. */
	for (; i > 0; i--)
		tegra_xudc_free_ep(xudc, i - 1);
free_ep_context:
	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
	return err;
}
static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
{
xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
}
/*
 * Release all endpoint resources: the preallocated ep0 request, every
 * endpoint's transfer ring, and the shared endpoint context array.
 */
static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
{
	unsigned int i = ARRAY_SIZE(xudc->ep);

	tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
				   &xudc->ep0_req->usb_req);

	while (i--)
		tegra_xudc_free_ep(xudc, i);

	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
}
/*
 * Allocate coherent DMA memory for every event ring segment; on failure,
 * free the segments allocated so far and return -ENOMEM.
 */
static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		xudc->event_ring[i] =
			dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
					   sizeof(*xudc->event_ring[i]),
					   &xudc->event_ring_phys[i],
					   GFP_KERNEL);
		if (!xudc->event_ring[i])
			goto free_dma;
	}

	return 0;

free_dma:
	/* i is the index of the failed allocation; unwind below it. */
	while (i--)
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i]),
				  xudc->event_ring[i],
				  xudc->event_ring_phys[i]);

	return -ENOMEM;
}
/*
 * Program the event ring segment table and reset the driver's consumer
 * state. Called whenever the controller is (re)initialized, including
 * when exiting ELPG via tegra_xudc_unpowergate().
 */
static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
		       sizeof(*xudc->event_ring[i]));

		/* Segment i: size and base address. */
		val = xudc_readl(xudc, ERSTSZ);
		val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
		val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
		xudc_writel(xudc, val, ERSTSZ);

		xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBALO(i));
		xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBAHI(i));
	}

	/* Dequeue and enqueue pointers both start at segment 0; the
	 * enqueue pointer write also carries the EREPLO_ECS bit
	 * (presumably the enqueue cycle state -- NOTE(review)). */
	val = lower_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPLO);
	val |= EREPLO_ECS;
	xudc_writel(xudc, val, EREPLO);

	val = upper_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPHI);
	xudc_writel(xudc, val, EREPHI);

	/* Software consumer state: start at segment 0, slot 0, ccs = 1. */
	xudc->ccs = true;
	xudc->event_ring_index = 0;
	xudc->event_ring_deq_ptr = 0;
}
static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
sizeof(*xudc->event_ring[i]),
xudc->event_ring[i],
xudc->event_ring_phys[i]);
}
}
/*
 * Configure the FPCI (and, where present, IPFS) wrapper so the
 * controller's registers and DMA are reachable: enable the FPCI bridge
 * (IPFS SoCs only), turn on IO/memory space and bus mastering, program
 * the BAR0 window with the controller's physical MMIO base, and unmask
 * the wrapper-level interrupt.
 */
static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
{
	u32 val;

	if (xudc->soc->has_ipfs) {
		val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
		val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
		ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);

		usleep_range(10, 15);
	}

	/* Enable bus master */
	val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
	      XUSB_DEV_CFG_1_BUS_MASTER_EN;
	fpci_writel(xudc, val, XUSB_DEV_CFG_1);

	/* Program BAR0 space (low bits in CFG_4, high bits in CFG_5). */
	val = fpci_readl(xudc, XUSB_DEV_CFG_4);
	val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
	val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);

	fpci_writel(xudc, val, XUSB_DEV_CFG_4);
	fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);

	usleep_range(100, 200);

	if (xudc->soc->has_ipfs) {
		/* Enable interrupt assertion */
		val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
		val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
		ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
	}
}
/*
 * Program SoC- and quirk-dependent controller parameters (timers, link
 * training workarounds, interrupt moderation). Runs before the
 * controller is started, from the tegra_xudc_unpowergate() path.
 */
static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
{
	u32 val, imod;

	if (xudc->soc->has_ipfs) {
		/* BLCG configuration; the listed blocks are kept out of the
		 * "all" gating mask and the IO PLL power-down bits are set
		 * explicitly. */
		val = xudc_readl(xudc, BLCG);
		val |= BLCG_ALL;
		val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
				BLCG_COREPLL_PWRDN);
		val |= BLCG_IOPLL_0_PWRDN;
		val |= BLCG_IOPLL_1_PWRDN;
		val |= BLCG_IOPLL_2_PWRDN;
		xudc_writel(xudc, val, BLCG);
	}

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_limit_port_speed(xudc);

	/* Set a reasonable U3 exit timer value. */
	val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
	val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
	val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
	xudc_writel(xudc, val, SSPX_CORE_PADCTL4);

	/* Default ping LFPS tBurst is too large. */
	val = xudc_readl(xudc, SSPX_CORE_CNT0);
	val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
	val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
	xudc_writel(xudc, val, SSPX_CORE_CNT0);

	/* Default tPortConfiguration timeout is too small. */
	val = xudc_readl(xudc, SSPX_CORE_CNT30);
	val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
	val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
	xudc_writel(xudc, val, SSPX_CORE_CNT30);

	if (xudc->soc->lpm_enable) {
		/* Set L1 resume duration to 95 us. */
		val = xudc_readl(xudc, HSFSPI_COUNT13);
		val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
		val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
		xudc_writel(xudc, val, HSFSPI_COUNT13);
	}

	/*
	 * Compliance suite appears to be violating polling LFPS tBurst max
	 * of 1.4us. Send 1.45us instead.
	 */
	val = xudc_readl(xudc, SSPX_CORE_CNT32);
	val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
	val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
	xudc_writel(xudc, val, SSPX_CORE_CNT32);

	/* Direct HS/FS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Direct SS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Restore port instance. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	xudc_writel(xudc, val, CFG_DEV_FE);

	/*
	 * Enable INFINITE_SS_RETRY to prevent device from entering
	 * Disabled.Error when attached to buggy SuperSpeed hubs.
	 */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val |= CFG_DEV_FE_INFINITE_SS_RETRY;
	xudc_writel(xudc, val, CFG_DEV_FE);

	/* Set interrupt moderation. */
	imod = XUDC_INTERRUPT_MODERATION_US * 4;
	val = xudc_readl(xudc, RT_IMOD);
	val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
	val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
	xudc_writel(xudc, val, RT_IMOD);

	/* increase SSPI transaction timeout from 32us to 512us */
	val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
	val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
	val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
	xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}
/*
 * Look up the UTMI (USB2) PHY, its optional USB3 companion, and the
 * legacy usb-phy handle for every PHY lane the SoC supports. All lookups
 * use devm_* getters, so no explicit put is needed. A lane without a
 * UTMI PHY skips the usb-phy and USB3 lookups (devm_phy_optional_get()
 * returns NULL when the PHY is absent). On error, the stored handles are
 * cleared so later teardown paths see only NULLs.
 */
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
	int err = 0, usb3;
	unsigned int i;

	xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->utmi_phy), GFP_KERNEL);
	if (!xudc->utmi_phy)
		return -ENOMEM;

	xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->usb3_phy), GFP_KERNEL);
	if (!xudc->usb3_phy)
		return -ENOMEM;

	xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				    sizeof(*xudc->usbphy), GFP_KERNEL);
	if (!xudc->usbphy)
		return -ENOMEM;

	xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		/* Template sized to hold "usbN-M" names built below. */
		char phy_name[] = "usb.-.";

		/* Get USB2 phy */
		snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
		xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->utmi_phy[i])) {
			err = PTR_ERR(xudc->utmi_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb2-%d PHY\n", i);
			goto clean_up;
		} else if (xudc->utmi_phy[i]) {
			/* Get usb-phy, if utmi phy is available */
			xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
						xudc->utmi_phy[i]->dev.of_node,
						NULL);
			if (IS_ERR(xudc->usbphy[i])) {
				err = PTR_ERR(xudc->usbphy[i]);
				dev_err_probe(xudc->dev, err,
					      "failed to get usbphy-%d\n", i);
				goto clean_up;
			}
		} else if (!xudc->utmi_phy[i]) {
			/* if utmi phy is not available, ignore USB3 phy get */
			continue;
		}

		/* Get USB3 phy */
		usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
		if (usb3 < 0)
			continue;

		snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
		xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->usb3_phy[i])) {
			err = PTR_ERR(xudc->usb3_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb3-%d PHY\n", usb3);
			goto clean_up;
		} else if (xudc->usb3_phy[i])
			dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
	}

	return err;

clean_up:
	/* Drop all stored handles (devm still owns the references). */
	for (i = 0; i < xudc->soc->num_phys; i++) {
		xudc->usb3_phy[i] = NULL;
		xudc->utmi_phy[i] = NULL;
		xudc->usbphy[i] = NULL;
	}

	return err;
}
/* Tear down every USB3 and UTMI PHY previously passed to phy_init(). */
static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
	unsigned int i = xudc->soc->num_phys;

	while (i--) {
		phy_exit(xudc->usb3_phy[i]);
		phy_exit(xudc->utmi_phy[i]);
	}
}
/*
 * Initialize every UTMI (USB2) PHY and its USB3 companion. On failure,
 * all lanes are passed to tegra_xudc_phy_exit(); NOTE(review): this
 * relies on phy_exit() being harmless for PHYs that were never
 * initialized (or are NULL) -- confirm against the phy core.
 */
static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
{
	int err;
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		err = phy_init(xudc->utmi_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}

		err = phy_init(xudc->usb3_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}
	}

	return 0;

exit_phy:
	tegra_xudc_phy_exit(xudc);
	return err;
}
static const char * const tegra210_xudc_supply_names[] = {
"hvdd-usb",
"avddio-usb",
};
static const char * const tegra210_xudc_clock_names[] = {
"dev",
"ss",
"ss_src",
"hs_src",
"fs_src",
};
static const char * const tegra186_xudc_clock_names[] = {
"dev",
"ss",
"ss_src",
"fs_src",
};
/* Tegra210: only SoC with IPFS wrapper and explicit supply names. */
static struct tegra_xudc_soc tegra210_xudc_soc_data = {
	.supply_names = tegra210_xudc_supply_names,
	.num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
	.clock_names = tegra210_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = false,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = true,
	.pls_quirk = true,
	.port_reset_quirk = true,
	.port_speed_quirk = false,
	.has_ipfs = true,
};

/* Tegra186: no IPFS, no regulator supplies, fewer quirks. */
static struct tegra_xudc_soc tegra186_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.port_speed_quirk = false,
	.has_ipfs = false,
};

/* Tegra194: like Tegra186 plus LPM and the port-speed quirk. */
static struct tegra_xudc_soc tegra194_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = true,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.port_speed_quirk = true,
	.has_ipfs = false,
};

/* Tegra234: like Tegra194 without the port-speed quirk. */
static struct tegra_xudc_soc tegra234_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = true,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.has_ipfs = false,
};
/* Device-tree match table: selects the per-SoC parameter block above. */
static const struct of_device_id tegra_xudc_of_match[] = {
	{
		.compatible = "nvidia,tegra210-xudc",
		.data = &tegra210_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra186-xudc",
		.data = &tegra186_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra194-xudc",
		.data = &tegra194_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra234-xudc",
		.data = &tegra234_xudc_soc_data
	},
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
/*
 * Undo tegra_xudc_powerdomain_init(). Safe to call after a partial
 * initialization: each link/domain handle is checked before use (xudc is
 * zero-allocated in probe, so unset handles are NULL).
 */
static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
{
	if (xudc->genpd_dl_ss)
		device_link_del(xudc->genpd_dl_ss);
	if (xudc->genpd_dl_device)
		device_link_del(xudc->genpd_dl_device);
	if (xudc->genpd_dev_ss)
		dev_pm_domain_detach(xudc->genpd_dev_ss, true);
	if (xudc->genpd_dev_device)
		dev_pm_domain_detach(xudc->genpd_dev_device, true);
}
/*
 * Attach the "dev" and "ss" power domains by name and create stateless
 * runtime-PM device links to them. Partial failures are cleaned up by
 * the caller via tegra_xudc_powerdomain_remove(), which checks each
 * handle individually.
 */
static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
{
	struct device *dev = xudc->dev;
	int err;

	xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
	if (IS_ERR(xudc->genpd_dev_device)) {
		err = PTR_ERR(xudc->genpd_dev_device);
		dev_err(dev, "failed to get device power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
	if (IS_ERR(xudc->genpd_dev_ss)) {
		err = PTR_ERR(xudc->genpd_dev_ss);
		dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
						DL_FLAG_PM_RUNTIME |
						DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_device) {
		dev_err(dev, "failed to add USB device link\n");
		return -ENODEV;
	}

	xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
					    DL_FLAG_PM_RUNTIME |
					    DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_ss) {
		dev_err(dev, "failed to add SuperSpeed device link\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Bind the XUSB device-mode controller: map the "base", "fpci" and
 * (where present) "ipfs" apertures, request the IRQ, clocks, regulators
 * and PHYs, attach power domains, allocate the event ring and endpoints,
 * and register the UDC with the gadget core. Non-devm resources are
 * released in reverse order through the goto chain on failure.
 */
static int tegra_xudc_probe(struct platform_device *pdev)
{
	struct tegra_xudc *xudc;
	struct resource *res;
	unsigned int i;
	int err;

	xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
	if (!xudc)
		return -ENOMEM;

	xudc->dev = &pdev->dev;
	platform_set_drvdata(pdev, xudc);

	xudc->soc = of_device_get_match_data(&pdev->dev);
	if (!xudc->soc)
		return -ENODEV;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	xudc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xudc->base))
		return PTR_ERR(xudc->base);
	/* Physical base is programmed into the FPCI BAR0 window later. */
	xudc->phys_base = res->start;

	xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
	if (IS_ERR(xudc->fpci))
		return PTR_ERR(xudc->fpci);

	if (xudc->soc->has_ipfs) {
		xudc->ipfs = devm_platform_ioremap_resource_byname(pdev,
								   "ipfs");
		if (IS_ERR(xudc->ipfs))
			return PTR_ERR(xudc->ipfs);
	}

	xudc->irq = platform_get_irq(pdev, 0);
	if (xudc->irq < 0)
		return xudc->irq;

	err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
			       dev_name(&pdev->dev), xudc);
	if (err < 0) {
		dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
			err);
		return err;
	}

	/* Build the clk_bulk table from the SoC's clock name list. */
	xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
				  GFP_KERNEL);
	if (!xudc->clks)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_clks; i++)
		xudc->clks[i].id = xudc->soc->clock_names[i];

	err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request clocks\n");
		return err;
	}

	/* Likewise for the regulator bulk table (may be empty). */
	xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
				      sizeof(*xudc->supplies), GFP_KERNEL);
	if (!xudc->supplies)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_supplies; i++)
		xudc->supplies[i].supply = xudc->soc->supply_names[i];

	err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
				      xudc->supplies);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request regulators\n");
		return err;
	}

	xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
	if (IS_ERR(xudc->padctl))
		return PTR_ERR(xudc->padctl);

	err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
	if (err) {
		dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
		goto put_padctl;
	}

	err = tegra_xudc_phy_get(xudc);
	if (err)
		goto disable_regulator;

	err = tegra_xudc_powerdomain_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_phy_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_alloc_event_ring(xudc);
	if (err)
		goto disable_phy;

	err = tegra_xudc_alloc_eps(xudc);
	if (err)
		goto free_event_ring;

	spin_lock_init(&xudc->lock);
	init_completion(&xudc->disconnect_complete);

	INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
	INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
	INIT_DELAYED_WORK(&xudc->port_reset_war_work,
			  tegra_xudc_port_reset_war_work);

	pm_runtime_enable(&pdev->dev);

	xudc->gadget.ops = &tegra_xudc_gadget_ops;
	xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
	xudc->gadget.name = "tegra-xudc";
	xudc->gadget.max_speed = USB_SPEED_SUPER;

	err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
	if (err) {
		dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
		goto free_eps;
	}

	/* Register for VBUS notifications and pick up the initial role. */
	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (!xudc->usbphy[i])
			continue;

		usb_register_notifier(xudc->usbphy[i], &xudc->vbus_nb);
		tegra_xudc_update_data_role(xudc, xudc->usbphy[i]);
	}

	return 0;

free_eps:
	pm_runtime_disable(&pdev->dev);
	tegra_xudc_free_eps(xudc);
free_event_ring:
	tegra_xudc_free_event_ring(xudc);
disable_phy:
	tegra_xudc_phy_exit(xudc);
put_powerdomains:
	tegra_xudc_powerdomain_remove(xudc);
disable_regulator:
	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
put_padctl:
	tegra_xusb_padctl_put(xudc->padctl);

	return err;
}
/*
 * Unbind the controller: cancel deferred work, unregister the gadget,
 * and release resources in reverse order of probe. The device is held
 * powered (pm_runtime_get_sync) across the teardown.
 */
static void tegra_xudc_remove(struct platform_device *pdev)
{
	struct tegra_xudc *xudc = platform_get_drvdata(pdev);
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	cancel_delayed_work_sync(&xudc->plc_reset_work);
	cancel_work_sync(&xudc->usb_role_sw_work);

	usb_del_gadget_udc(&xudc->gadget);

	tegra_xudc_free_eps(xudc);
	tegra_xudc_free_event_ring(xudc);

	tegra_xudc_powerdomain_remove(xudc);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_power_off(xudc->utmi_phy[i]);
		phy_power_off(xudc->usb3_phy[i]);
	}

	tegra_xudc_phy_exit(xudc);

	pm_runtime_disable(xudc->dev);
	pm_runtime_put(xudc->dev);

	tegra_xusb_padctl_put(xudc->padctl);
}
/*
 * Enter ELPG: under the lock, save the registers restored on exit
 * (CTRL, PORTPM) and stop the controller by clearing CTRL; then drop
 * clocks and supplies. Counterpart of tegra_xudc_unpowergate().
 */
static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
{
	unsigned long flags;

	dev_dbg(xudc->dev, "entering ELPG\n");

	spin_lock_irqsave(&xudc->lock, flags);

	xudc->powergated = true;
	xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
	xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
	xudc_writel(xudc, 0, CTRL);

	spin_unlock_irqrestore(&xudc->lock, flags);

	clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	dev_dbg(xudc->dev, "entering ELPG done\n");
	return 0;
}
/*
 * Exit ELPG: re-enable supplies and clocks, reprogram the FPCI/IPFS
 * windows and device parameters, reinitialize the event ring and
 * endpoint context pointer, then restore the CTRL/PORTPM state saved by
 * tegra_xudc_powergate().
 *
 * Returns 0 on success or a negative error code.
 */
static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
{
	unsigned long flags;
	int err;

	dev_dbg(xudc->dev, "exiting ELPG\n");

	err = regulator_bulk_enable(xudc->soc->num_supplies,
			xudc->supplies);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
	if (err < 0) {
		/* Don't leave the supplies enabled if the clocks failed. */
		regulator_bulk_disable(xudc->soc->num_supplies,
				       xudc->supplies);
		return err;
	}

	tegra_xudc_fpci_ipfs_init(xudc);

	tegra_xudc_device_params_init(xudc);

	tegra_xudc_init_event_ring(xudc);

	tegra_xudc_init_eps(xudc);

	xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
	xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->powergated = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	dev_dbg(xudc->dev, "exiting ELPG done\n");
	return 0;
}
/*
 * System-sleep suspend: mark the device suspended (read under the lock
 * elsewhere), wait for role-switch work to finish, and if the controller
 * is not already runtime-suspended, force a disconnect and powergate it.
 * Runtime PM stays disabled until tegra_xudc_resume().
 */
static int __maybe_unused tegra_xudc_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = true;
	spin_unlock_irqrestore(&xudc->lock, flags);

	flush_work(&xudc->usb_role_sw_work);

	if (!pm_runtime_status_suspended(dev)) {
		/* Forcibly disconnect before powergating. */
		tegra_xudc_device_mode_off(xudc);
		tegra_xudc_powergate(xudc);
	}

	pm_runtime_disable(dev);

	return 0;
}
/*
 * System-sleep resume: bring the controller out of ELPG, clear the
 * suspended flag, kick the role-switch work to re-evaluate the current
 * data role, and re-enable runtime PM.
 */
static int __maybe_unused tegra_xudc_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	err = tegra_xudc_unpowergate(xudc);
	if (err < 0)
		return err;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	schedule_work(&xudc->usb_role_sw_work);

	pm_runtime_enable(dev);

	return 0;
}
/* Runtime-PM suspend: simply enter ELPG. */
static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
{
	return tegra_xudc_powergate(dev_get_drvdata(dev));
}
/* Runtime-PM resume: simply exit ELPG. */
static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
{
	return tegra_xudc_unpowergate(dev_get_drvdata(dev));
}
/* System-sleep and runtime PM hooks (all __maybe_unused above). */
static const struct dev_pm_ops tegra_xudc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
	SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
			   tegra_xudc_runtime_resume, NULL)
};

static struct platform_driver tegra_xudc_driver = {
	.probe = tegra_xudc_probe,
	.remove_new = tegra_xudc_remove,
	.driver = {
		.name = "tegra-xudc",
		.pm = &tegra_xudc_pm_ops,
		.of_match_table = tegra_xudc_of_match,
	},
};
module_platform_driver(tegra_xudc_driver);

MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
MODULE_AUTHOR("Andrew Bresticker <[email protected]>");
MODULE_AUTHOR("Hui Fu <[email protected]>");
MODULE_AUTHOR("Nagarjuna Kristam <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/gadget/udc/tegra-xudc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* udc.c - Core UDC Framework
*
* Copyright (C) 2010 Texas Instruments
* Author: Felipe Balbi <[email protected]>
*/
#define pr_fmt(fmt) "UDC core: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb.h>
#include "trace.h"
/* IDA handing out a unique numeric id per registered gadget. */
static DEFINE_IDA(gadget_id_numbers);

/* Bus type for gadget devices; defined later in this file. */
static const struct bus_type gadget_bus_type;
/**
 * struct usb_udc - describes one usb device controller
 * @driver: the gadget driver pointer. For use by the class code
 * @dev: the child device to the actual controller
 * @gadget: the gadget. For use by the class code
 * @list: for use by the udc class driver
 * @vbus: for udcs who care about vbus status, this value is real vbus status;
 * for udcs who do not care about vbus status, this value is always true
 * @started: the UDC's started state. True if the UDC had started.
 * @allow_connect: Indicates whether UDC is allowed to be pulled up.
 * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
 * unbound.
 * @vbus_work: work routine to handle VBUS status change notifications.
 * @connect_lock: protects udc->started, gadget->connect,
 * udc->allow_connect and gadget->deactivate. The routines
 * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
 * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
 * usb_gadget_udc_stop_locked() are called with this lock held.
 *
 * This represents the internal data structure which is used by the UDC-class
 * to hold information about udc driver and gadget together.
 */
struct usb_udc {
	struct usb_gadget_driver	*driver;
	struct usb_gadget		*gadget;
	struct device			dev;
	struct list_head		list;
	bool				vbus;
	bool				started;
	bool				allow_connect;
	struct work_struct		vbus_work;
	struct mutex			connect_lock;
};
/* The udc device class; defined later in this file. */
static const struct class udc_class;

/* All registered UDCs, in registration order. */
static LIST_HEAD(udc_list);

/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
static DEFINE_MUTEX(udc_lock);
/* ------------------------------------------------------------------------- */
/**
 * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
 * @ep:the endpoint being configured
 * @maxpacket_limit:value of maximum packet size limit
 *
 * For use by UDC drivers while setting up endpoints (usually in their
 * probe function): records the hardware's packet-size ceiling and
 * initializes the current maxpacket to the same value.
 */
void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
		unsigned maxpacket_limit)
{
	ep->maxpacket = maxpacket_limit;
	ep->maxpacket_limit = maxpacket_limit;

	trace_usb_ep_set_maxpacket_limit(ep, 0);
}
EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
/**
 * usb_ep_enable - configure endpoint, making it usable
 * @ep:the endpoint being configured. may not be the endpoint named "ep0".
 * drivers discover endpoints through the ep_list of a usb_gadget.
 *
 * Gadget drivers call this when configurations are set or interface
 * settings change, using the descriptor previously stored in @ep->desc.
 * While enabled, the endpoint may be used for i/o until the driver
 * receives a disconnect() from the host or disables the endpoint again.
 * Enabling an already-enabled endpoint succeeds without side effects.
 * Descriptors with a zero maxpacket value are rejected with -EINVAL,
 * since UDC drivers can't handle them.
 *
 * The ep0 implementation (which calls this routine) must ensure that the
 * hardware capabilities of each endpoint match the descriptor provided
 * for it; endpoints may be fully configurable or restricted to certain
 * transfer types and numbers. (For USB, "in" means "towards the host".)
 *
 * This routine may be called in an atomic (interrupt) context.
 *
 * returns zero, or a negative error code.
 */
int usb_ep_enable(struct usb_ep *ep)
{
	int ret = 0;

	if (!ep->enabled) {
		if (usb_endpoint_maxp(ep->desc) == 0) {
			/*
			 * Would log an error here, but dev_err() can't be
			 * used: the gadget isn't reachable given only @ep.
			 */
			ret = -EINVAL;
		} else {
			ret = ep->ops->enable(ep, ep->desc);
			if (ret == 0)
				ep->enabled = true;
		}
	}

	trace_usb_ep_enable(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_enable);
/**
 * usb_ep_disable - endpoint is no longer usable
 * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
 *
 * No other task may be using this endpoint when this is called. Any
 * pending and uncompleted requests complete with status indicating
 * disconnect (-ESHUTDOWN) before this call returns. Gadget drivers must
 * call usb_ep_enable() again before queueing requests to the endpoint.
 * Disabling an already-disabled endpoint succeeds without side effects.
 *
 * This routine may be called in an atomic (interrupt) context.
 *
 * returns zero, or a negative error code.
 */
int usb_ep_disable(struct usb_ep *ep)
{
	int ret = 0;

	if (ep->enabled) {
		ret = ep->ops->disable(ep);
		if (ret == 0)
			ep->enabled = false;
	}

	trace_usb_ep_disable(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_disable);
/**
 * usb_ep_alloc_request - allocate a request object to use with this endpoint
 * @ep:the endpoint to be used with with the request
 * @gfp_flags:GFP_* flags to use
 *
 * Requests must be allocated through this call since they normally need
 * controller-specific setup and may even need endpoint-specific
 * resources such as DMA descriptors. Submit them with usb_ep_queue(),
 * receive a single completion callback, and free them with
 * usb_ep_free_request() when no longer needed.
 *
 * Returns the request, or null if one could not be allocated.
 */
struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
						       gfp_t gfp_flags)
{
	struct usb_request *req = ep->ops->alloc_request(ep, gfp_flags);

	trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);

	return req;
}
EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
/**
 * usb_ep_free_request - frees a request object
 * @ep:the endpoint associated with the request
 * @req:the request being freed
 *
 * Reverses the effect of usb_ep_alloc_request().
 * Caller guarantees the request is not queued, and that it will
 * no longer be requeued (or otherwise used).
 */
void usb_ep_free_request(struct usb_ep *ep,
				       struct usb_request *req)
{
	/* Trace first, while @req is still valid. */
	trace_usb_ep_free_request(ep, req, 0);
	ep->ops->free_request(ep, req);
}
EXPORT_SYMBOL_GPL(usb_ep_free_request);
/**
* usb_ep_queue - queues (submits) an I/O request to an endpoint.
* @ep:the endpoint associated with the request
* @req:the request being submitted
* @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
* pre-allocate all necessary memory with the request.
*
* This tells the device controller to perform the specified request through
* that endpoint (reading or writing a buffer). When the request completes,
* including being canceled by usb_ep_dequeue(), the request's completion
* routine is called to return the request to the driver. Any endpoint
* (except control endpoints like ep0) may have more than one transfer
* request queued; they complete in FIFO order. Once a gadget driver
* submits a request, that request may not be examined or modified until it
* is given back to that driver through the completion callback.
*
* Each request is turned into one or more packets. The controller driver
* never merges adjacent requests into the same packet. OUT transfers
* will sometimes use data that's already buffered in the hardware.
* Drivers can rely on the fact that the first byte of the request's buffer
* always corresponds to the first byte of some USB packet, for both
* IN and OUT transfers.
*
* Bulk endpoints can queue any amount of data; the transfer is packetized
* automatically. The last packet will be short if the request doesn't fill it
* out completely. Zero length packets (ZLPs) should be avoided in portable
* protocols since not all usb hardware can successfully handle zero length
* packets. (ZLPs may be explicitly written, and may be implicitly written if
* the request 'zero' flag is set.) Bulk endpoints may also be used
* for interrupt transfers; but the reverse is not true, and some endpoints
* won't support every interrupt transfer. (Such as 768 byte packets.)
*
* Interrupt-only endpoints are less functional than bulk endpoints, for
* example by not supporting queueing or not handling buffers that are
* larger than the endpoint's maxpacket size. They may also treat data
* toggle differently.
*
* Control endpoints ... after getting a setup() callback, the driver queues
* one response (even if it would be zero length). That enables the
* status ack, after transferring data as specified in the response. Setup
* functions may return negative error codes to generate protocol stalls.
* (Note that some USB device controllers disallow protocol stall responses
* in some cases.) When control responses are deferred (the response is
* written after the setup callback returns), then usb_ep_set_halt() may be
* used on ep0 to trigger protocol stalls. Depending on the controller,
* it may not be possible to trigger a status-stage protocol stall when the
* data stage is over, that is, from within the response's completion
* routine.
*
* For periodic endpoints, like interrupt or isochronous ones, the usb host
* arranges to poll once per interval, and the gadget driver usually will
* have queued some data to transfer at that time.
*
* Note that @req's ->complete() callback must never be called from
* within usb_ep_queue() as that can create deadlock situations.
*
* This routine may be called in interrupt context.
*
* Returns zero, or a negative error code. Endpoints that are not enabled
* report errors; errors will also be
* reported when the usb peripheral is disconnected.
*
* If and only if @req is successfully queued (the return value is zero),
* @req->complete() will be called exactly once, when the Gadget core and
* UDC are finished with the request. When the completion function is called,
* control of the request is returned to the device driver which submitted it.
* The completion handler may then immediately free or reuse @req.
*/
int usb_ep_queue(struct usb_ep *ep,
			       struct usb_request *req, gfp_t gfp_flags)
{
	int ret;

	/* Queueing on a disabled non-ep0 endpoint is a caller bug. */
	if (WARN_ON_ONCE(!ep->enabled && ep->address))
		ret = -ESHUTDOWN;
	else
		ret = ep->ops->queue(ep, req, gfp_flags);

	trace_usb_ep_queue(ep, req, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_queue);
/**
 * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
 * @ep:the endpoint associated with the request
 * @req:the request being canceled
 *
 * If the request is still active on the endpoint, it is dequeued and
 * eventually its completion routine is called (with status -ECONNRESET);
 * else a negative error code is returned.  This routine is asynchronous,
 * that is, it may return before the completion routine runs.
 *
 * Note that some hardware can't clear out write fifos (to unlink the request
 * at the head of the queue) except as part of disconnecting from usb.  Such
 * restrictions prevent drivers from supporting configuration changes,
 * even to configuration zero (a "chapter 9" requirement).
 *
 * This routine may be called in interrupt context.
 */
int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	int status;

	/* The controller driver performs the actual cancellation. */
	status = ep->ops->dequeue(ep, req);
	trace_usb_ep_dequeue(ep, req, status);

	return status;
}
EXPORT_SYMBOL_GPL(usb_ep_dequeue);
/**
 * usb_ep_set_halt - sets the endpoint halt feature.
 * @ep: the non-isochronous endpoint being stalled
 *
 * Use this to stall an endpoint, perhaps as an error report.
 * Except for control endpoints,
 * the endpoint stays halted (will not stream any data) until the host
 * clears this feature; drivers may need to empty the endpoint's request
 * queue first, to make sure no inappropriate transfers happen.
 *
 * Note that while an endpoint CLEAR_FEATURE will be invisible to the
 * gadget driver, a SET_INTERFACE will not be.  To reset endpoints for the
 * current altsetting, see usb_ep_clear_halt().  When switching altsettings,
 * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero, or a negative error code.  On success, this call sets
 * underlying hardware state that blocks data transfers.
 * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
 * transfer requests are still queued, or if the controller hardware
 * (usually a FIFO) still holds bytes that the host hasn't collected.
 */
int usb_ep_set_halt(struct usb_ep *ep)
{
	int status;

	/* value==1 requests the stall. */
	status = ep->ops->set_halt(ep, 1);
	trace_usb_ep_set_halt(ep, status);

	return status;
}
EXPORT_SYMBOL_GPL(usb_ep_set_halt);
/**
 * usb_ep_clear_halt - clears endpoint halt, and resets toggle
 * @ep:the bulk or interrupt endpoint being reset
 *
 * Use this when responding to the standard usb "set interface" request,
 * for endpoints that aren't reconfigured, after clearing any other state
 * in the endpoint's i/o queue.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero, or a negative error code.  On success, this call clears
 * the underlying hardware state reflecting endpoint halt and data toggle.
 * Note that some hardware can't support this request (like pxa2xx_udc),
 * and accordingly can't correctly implement interface altsettings.
 */
int usb_ep_clear_halt(struct usb_ep *ep)
{
	int status;

	/* value==0 clears the stall (and resets data toggle). */
	status = ep->ops->set_halt(ep, 0);
	trace_usb_ep_clear_halt(ep, status);

	return status;
}
EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
/**
 * usb_ep_set_wedge - sets the halt feature and ignores clear requests
 * @ep: the endpoint being wedged
 *
 * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
 * requests.  If the gadget driver clears the halt status, it will
 * automatically unwedge the endpoint.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero on success, else negative errno.
 */
int usb_ep_set_wedge(struct usb_ep *ep)
{
	/* Fall back to a plain halt when the UDC has no wedge support. */
	int ret = ep->ops->set_wedge ? ep->ops->set_wedge(ep)
				     : ep->ops->set_halt(ep, 1);

	trace_usb_ep_set_wedge(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
/**
 * usb_ep_fifo_status - returns number of bytes in fifo, or error
 * @ep: the endpoint whose fifo status is being checked.
 *
 * FIFO endpoints may have "unclaimed data" in them in certain cases,
 * such as after aborted transfers.  Hosts may not have collected all
 * the IN data written by the gadget driver (and reported by a request
 * completion).  The gadget driver may not have collected all the data
 * written OUT to it by the host.  Drivers that need precise handling for
 * fault reporting or recovery may need to use this call.
 *
 * This routine may be called in interrupt context.
 *
 * This returns the number of such bytes in the fifo, or a negative
 * errno if the endpoint doesn't use a FIFO or doesn't support such
 * precise handling.
 */
int usb_ep_fifo_status(struct usb_ep *ep)
{
	/* Optional op: report -EOPNOTSUPP when the UDC lacks it. */
	int ret = ep->ops->fifo_status ? ep->ops->fifo_status(ep)
				       : -EOPNOTSUPP;

	trace_usb_ep_fifo_status(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
/**
* usb_ep_fifo_flush - flushes contents of a fifo
* @ep: the endpoint whose fifo is being flushed.
*
* This call may be used to flush the "unclaimed data" that may exist in
* an endpoint fifo after abnormal transaction terminations. The call
* must never be used except when endpoint is not being used for any
* protocol translation.
*
* This routine may be called in interrupt context.
*/
void usb_ep_fifo_flush(struct usb_ep *ep)
{
if (ep->ops->fifo_flush)
ep->ops->fifo_flush(ep);
trace_usb_ep_fifo_flush(ep, 0);
}
EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);
/* ------------------------------------------------------------------------- */
/**
 * usb_gadget_frame_number - returns the current frame number
 * @gadget: controller that reports the frame number
 *
 * Returns the usb frame number, normally eleven bits from a SOF packet,
 * or negative errno if this device doesn't support this capability.
 */
int usb_gadget_frame_number(struct usb_gadget *gadget)
{
	int frame;

	frame = gadget->ops->get_frame(gadget);
	trace_usb_gadget_frame_number(gadget, frame);

	return frame;
}
EXPORT_SYMBOL_GPL(usb_gadget_frame_number);
/**
 * usb_gadget_wakeup - tries to wake up the host connected to this gadget
 * @gadget: controller used to wake up the host
 *
 * Returns zero on success, else negative error code if the hardware
 * doesn't support such attempts, or its support has not been enabled
 * by the usb host.  Drivers must return device descriptors that report
 * their ability to support this, or hosts won't enable it.
 *
 * This may also try to use SRP to wake the host and start enumeration,
 * even if OTG isn't otherwise in use.  OTG devices may also start
 * remote wakeup even when hosts don't explicitly enable it.
 */
int usb_gadget_wakeup(struct usb_gadget *gadget)
{
	int ret;

	/* Optional op: not all controllers can signal remote wakeup. */
	if (gadget->ops->wakeup)
		ret = gadget->ops->wakeup(gadget);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_wakeup(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
/**
 * usb_gadget_set_remote_wakeup - configures the device remote wakeup feature.
 * @gadget:the device being configured for remote wakeup
 * @set:value to be configured.
 *
 * set to one to enable remote wakeup feature and zero to disable it.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
{
	int ret;

	/* Optional op: not all controllers track this feature. */
	if (gadget->ops->set_remote_wakeup)
		ret = gadget->ops->set_remote_wakeup(gadget, set);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_set_remote_wakeup(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_set_remote_wakeup);
/**
 * usb_gadget_set_selfpowered - sets the device selfpowered feature.
 * @gadget:the device being declared as self-powered
 *
 * this affects the device status reported by the hardware driver
 * to reflect that it now has a local power supply.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
{
	int ret;

	/* Optional op; value 1 marks the gadget self-powered. */
	if (gadget->ops->set_selfpowered)
		ret = gadget->ops->set_selfpowered(gadget, 1);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_set_selfpowered(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);
/**
 * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
 * @gadget:the device being declared as bus-powered
 *
 * this affects the device status reported by the hardware driver.
 * some hardware may not support bus-powered operation, in which
 * case this feature's value can never change.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
{
	int ret;

	/* Optional op; value 0 marks the gadget bus-powered. */
	if (gadget->ops->set_selfpowered)
		ret = gadget->ops->set_selfpowered(gadget, 0);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_clear_selfpowered(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
/**
 * usb_gadget_vbus_connect - Notify controller that VBUS is powered
 * @gadget:The device which now has VBUS power.
 * Context: can sleep
 *
 * This call is used by a driver for an external transceiver (or GPIO)
 * that detects a VBUS power session starting.  Common responses include
 * resuming the controller, activating the D+ (or D-) pullup to let the
 * host detect that a USB device is attached, and starting to draw power
 * (8mA or possibly more, especially after SET_CONFIGURATION).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_connect(struct usb_gadget *gadget)
{
	int ret;

	/* Optional op; value 1 signals session start. */
	if (gadget->ops->vbus_session)
		ret = gadget->ops->vbus_session(gadget, 1);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_vbus_connect(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);
/**
 * usb_gadget_vbus_draw - constrain controller's VBUS power usage
 * @gadget:The device whose VBUS usage is being described
 * @mA:How much current to draw, in milliAmperes.  This should be twice
 *	the value listed in the configuration descriptor bMaxPower field.
 *
 * This call is used by gadget drivers during SET_CONFIGURATION calls,
 * reporting how much power the device may consume.  For example, this
 * could affect how quickly batteries are recharged.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	int ret = -EOPNOTSUPP;

	if (gadget->ops->vbus_draw) {
		ret = gadget->ops->vbus_draw(gadget, mA);
		/* Remember the accepted budget only on success. */
		if (!ret)
			gadget->mA = mA;
	}

	trace_usb_gadget_vbus_draw(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);
/**
 * usb_gadget_vbus_disconnect - notify controller about VBUS session end
 * @gadget:the device whose VBUS supply is being described
 * Context: can sleep
 *
 * This call is used by a driver for an external transceiver (or GPIO)
 * that detects a VBUS power session ending.  Common responses include
 * reversing everything done in usb_gadget_vbus_connect().
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
{
	int ret;

	/* Optional op; value 0 signals session end. */
	if (gadget->ops->vbus_session)
		ret = gadget->ops->vbus_session(gadget, 0);
	else
		ret = -EOPNOTSUPP;

	trace_usb_gadget_vbus_disconnect(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
/*
 * usb_gadget_connect_locked - enable the pullup, or record the wish to
 * @gadget: the peripheral being connected
 *
 * Caller must hold gadget->udc->connect_lock.  When the gadget is not
 * currently usable (deactivated, not yet bound, or the UDC not started),
 * only ->connected is recorded so the connect happens automatically later.
 *
 * Fix: use the boolean literal consistently for ->connected (the success
 * path assigned the integer 1 while the deferred path used true).
 */
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
	__must_hold(&gadget->udc->connect_lock)
{
	int ret = 0;

	if (!gadget->ops->pullup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
		/*
		 * If the gadget isn't usable (because it is deactivated,
		 * unbound, or not yet started), we only save the new state.
		 * The gadget will be connected automatically when it is
		 * activated/bound/started.
		 */
		gadget->connected = true;
		goto out;
	}

	ret = gadget->ops->pullup(gadget, 1);
	if (!ret)
		gadget->connected = true;

out:
	trace_usb_gadget_connect(gadget, ret);
	return ret;
}
/**
 * usb_gadget_connect - software-controlled connect to USB host
 * @gadget:the peripheral being connected
 *
 * Enables the D+ (or potentially D-) pullup.  The host will start
 * enumerating this gadget when the pullup is active and a VBUS session
 * is active (the link is powered).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_connect(struct usb_gadget *gadget)
{
	int ret;

	/* Serialize against deactivate/activate and vbus work. */
	mutex_lock(&gadget->udc->connect_lock);
	ret = usb_gadget_connect_locked(gadget);
	mutex_unlock(&gadget->udc->connect_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);
/*
 * usb_gadget_disconnect_locked - disable the pullup, or record the wish to
 * @gadget: the peripheral being disconnected
 *
 * Caller must hold gadget->udc->connect_lock.  A no-op when already
 * disconnected.  When the gadget is deactivated or the UDC is stopped,
 * only ->connected is updated so the gadget stays disconnected after
 * (re)activation.
 *
 * Fix: use the boolean literal consistently for ->connected (the success
 * path assigned the integer 0 while the deferred path used false).
 */
static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
	__must_hold(&gadget->udc->connect_lock)
{
	int ret = 0;

	if (!gadget->ops->pullup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!gadget->connected)
		goto out;

	if (gadget->deactivated || !gadget->udc->started) {
		/*
		 * If gadget is deactivated we only save new state.
		 * Gadget will stay disconnected after activation.
		 */
		gadget->connected = false;
		goto out;
	}

	ret = gadget->ops->pullup(gadget, 0);
	if (!ret)
		gadget->connected = false;

	/*
	 * Notify the gadget driver under udc_lock; note this runs even if
	 * the pullup op reported an error (existing behavior, preserved).
	 */
	mutex_lock(&udc_lock);
	if (gadget->udc->driver)
		gadget->udc->driver->disconnect(gadget);
	mutex_unlock(&udc_lock);

out:
	trace_usb_gadget_disconnect(gadget, ret);
	return ret;
}
/**
 * usb_gadget_disconnect - software-controlled disconnect from USB host
 * @gadget:the peripheral being disconnected
 *
 * Disables the D+ (or potentially D-) pullup, which the host may see
 * as a disconnect (when a VBUS session is active).  Not all systems
 * support software pullup controls.
 *
 * Following a successful disconnect, invoke the ->disconnect() callback
 * for the current gadget driver so that UDC drivers don't need to.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_disconnect(struct usb_gadget *gadget)
{
	int ret;

	/* Serialize against connect/activate and vbus work. */
	mutex_lock(&gadget->udc->connect_lock);
	ret = usb_gadget_disconnect_locked(gadget);
	mutex_unlock(&gadget->udc->connect_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
/**
 * usb_gadget_deactivate - deactivate function which is not ready to work
 * @gadget: the peripheral being deactivated
 *
 * This routine may be used during the gadget driver bind() call to prevent
 * the peripheral from ever being visible to the USB host, unless later
 * usb_gadget_activate() is called.  For example, user mode components may
 * need to be activated before the system can talk to hosts.
 *
 * This routine may sleep; it must not be called in interrupt context
 * (such as from within a gadget driver's disconnect() callback).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_deactivate(struct usb_gadget *gadget)
{
	int ret = 0;

	mutex_lock(&gadget->udc->connect_lock);
	/* Already deactivated: nothing to do. */
	if (gadget->deactivated)
		goto unlock;
	if (gadget->connected) {
		ret = usb_gadget_disconnect_locked(gadget);
		if (ret)
			goto unlock;
		/*
		 * If gadget was being connected before deactivation, we want
		 * to reconnect it in usb_gadget_activate().
		 */
		gadget->connected = true;
	}
	gadget->deactivated = true;
unlock:
	mutex_unlock(&gadget->udc->connect_lock);
	trace_usb_gadget_deactivate(gadget, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
/**
 * usb_gadget_activate - activate function which is not ready to work
 * @gadget: the peripheral being activated
 *
 * This routine activates gadget which was previously deactivated with
 * usb_gadget_deactivate() call.  It calls usb_gadget_connect() if needed.
 *
 * This routine may sleep; it must not be called in interrupt context.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_activate(struct usb_gadget *gadget)
{
	int ret = 0;

	mutex_lock(&gadget->udc->connect_lock);
	/* Not deactivated: nothing to undo. */
	if (!gadget->deactivated)
		goto unlock;
	gadget->deactivated = false;
	/*
	 * If gadget has been connected before deactivation, or became connected
	 * while it was being deactivated, we call usb_gadget_connect().
	 */
	if (gadget->connected)
		ret = usb_gadget_connect_locked(gadget);
unlock:
	mutex_unlock(&gadget->udc->connect_lock);
	trace_usb_gadget_activate(gadget, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_activate);
/* ------------------------------------------------------------------------- */
#ifdef CONFIG_HAS_DMA
/*
 * Map a usb_request's buffer (or scatterlist) for DMA on behalf of @dev.
 * @is_in selects the transfer direction (IN => device-to-host => DMA_TO_DEVICE).
 * Returns 0 on success or -EFAULT when the buffer cannot be mapped.
 */
int usb_gadget_map_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	/* Zero-length requests carry no data to map. */
	if (req->length == 0)
		return 0;
	if (req->num_sgs) {
		int mapped;
		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (mapped == 0) {
			dev_err(dev, "failed to map SGs\n");
			return -EFAULT;
		}
		/* Remember how many entries were actually mapped for unmap. */
		req->num_mapped_sgs = mapped;
	} else {
		/* vmalloc and stack memory are not DMA-able. */
		if (is_vmalloc_addr(req->buf)) {
			dev_err(dev, "buffer is not dma capable\n");
			return -EFAULT;
		} else if (object_is_on_stack(req->buf)) {
			dev_err(dev, "buffer is on stack\n");
			return -EFAULT;
		}
		req->dma = dma_map_single(dev, req->buf, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, req->dma)) {
			dev_err(dev, "failed to map buffer\n");
			return -EFAULT;
		}
		/* Flag so the unmap path knows this mapping was made here. */
		req->dma_mapped = 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
/* Convenience wrapper: map using the controller device (gadget's parent). */
int usb_gadget_map_request(struct usb_gadget *gadget,
		struct usb_request *req, int is_in)
{
	return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request);
/*
 * Undo usb_gadget_map_request_by_dev(): unmap the scatterlist or the
 * single buffer, whichever was mapped, and clear the bookkeeping flags.
 */
void usb_gadget_unmap_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	/* Nothing was mapped for zero-length requests. */
	if (req->length == 0)
		return;
	if (req->num_mapped_sgs) {
		dma_unmap_sg(dev, req->sg, req->num_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->num_mapped_sgs = 0;
	} else if (req->dma_mapped) {
		dma_unmap_single(dev, req->dma, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->dma_mapped = 0;
	}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
/* Convenience wrapper: unmap using the controller device (gadget's parent). */
void usb_gadget_unmap_request(struct usb_gadget *gadget,
		struct usb_request *req, int is_in)
{
	usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
#endif /* CONFIG_HAS_DMA */
/* ------------------------------------------------------------------------- */
/**
 * usb_gadget_giveback_request - give the request back to the gadget layer
 * @ep: the endpoint to be used with the request
 * @req: the request being given back
 *
 * This is called by device controller drivers in order to return the
 * completed request back to the gadget layer.
 */
void usb_gadget_giveback_request(struct usb_ep *ep,
		struct usb_request *req)
{
	/* Blink the activity LED only for successfully completed requests. */
	if (likely(req->status == 0))
		usb_led_activity(USB_LED_EVENT_GADGET);
	trace_usb_gadget_giveback_request(ep, req, 0);
	/* Hand control of @req back to its submitter. */
	req->complete(ep, req);
}
EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
/* ------------------------------------------------------------------------- */
/**
 * gadget_find_ep_by_name - returns ep whose name is the same as string passed
 *	in second parameter or NULL if searched endpoint not found
 * @g: gadget whose endpoint list is searched
 * @name: name of searched endpoint
 */
struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
{
	struct usb_ep *ep;

	/* Linear scan over the gadget's endpoint list. */
	gadget_for_each_ep(ep, g) {
		if (!strcmp(ep->name, name))
			return ep;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
/* ------------------------------------------------------------------------- */
/*
 * usb_gadget_ep_match_desc - check whether @ep can implement @desc
 * @gadget: the controller the endpoint belongs to
 * @ep: candidate endpoint
 * @desc: endpoint descriptor being matched
 * @ep_comp: SuperSpeed companion descriptor, may be NULL
 *
 * Returns 1 when the endpoint satisfies the descriptor's direction,
 * transfer type, maxpacket and (for SS bulk) stream requirements;
 * 0 otherwise.  Already-claimed endpoints never match.
 */
int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
		struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	u8		type;
	u16		max;
	int		num_req_streams = 0;

	/* endpoint already claimed? */
	if (ep->claimed)
		return 0;

	type = usb_endpoint_type(desc);
	max = usb_endpoint_maxp(desc);

	/* Direction capability must match the descriptor's direction. */
	if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
		return 0;
	if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
		return 0;

	if (max > ep->maxpacket_limit)
		return 0;

	/* "high bandwidth" works only at high speed */
	if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
		return 0;

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* only support ep0 for portable CONTROL traffic */
		return 0;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->caps.type_iso)
			return 0;
		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
		if (!gadget_is_dualspeed(gadget) && max > 1023)
			return 0;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (!ep->caps.type_bulk)
			return 0;
		if (ep_comp && gadget_is_superspeed(gadget)) {
			/* Get the number of required streams from the
			 * EP companion descriptor and see if the EP
			 * matches it
			 */
			num_req_streams = ep_comp->bmAttributes & 0x1f;
			if (num_req_streams > ep->max_streams)
				return 0;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Bulk endpoints handle interrupt transfers,
		 * except the toggle-quirky iso-synch kind
		 */
		if (!ep->caps.type_int && !ep->caps.type_bulk)
			return 0;
		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
		if (!gadget_is_dualspeed(gadget) && max > 64)
			return 0;
		break;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
/**
 * usb_gadget_check_config - checks if the UDC can support the bound
 *	configuration
 * @gadget: controller to check the USB configuration
 *
 * Ensure that a UDC is able to support the requested resources by a
 * configuration, and that there are no resource limitations, such as
 * internal memory allocated to all requested endpoints.
 *
 * Returns zero on success, else a negative errno.
 */
int usb_gadget_check_config(struct usb_gadget *gadget)
{
	/* Optional op: UDCs without resource limits skip the check. */
	if (gadget->ops->check_config)
		return gadget->ops->check_config(gadget);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_check_config);
/* ------------------------------------------------------------------------- */
/* Workqueue body: notify sysfs watchers that the "state" attribute changed. */
static void usb_gadget_state_work(struct work_struct *work)
{
	struct usb_gadget *gadget = work_to_gadget(work);
	struct usb_udc *udc = gadget->udc;

	/* The gadget may not (or no longer) be bound to a UDC. */
	if (udc)
		sysfs_notify(&udc->dev.kobj, NULL, "state");
}
/**
 * usb_gadget_set_state - update the gadget's USB device state
 * @gadget: the gadget whose state is changing
 * @state: the new state (e.g. USB_STATE_CONFIGURED)
 *
 * Records the new state and schedules gadget->work, which notifies the
 * sysfs "state" attribute asynchronously (callable from atomic context).
 */
void usb_gadget_set_state(struct usb_gadget *gadget,
		enum usb_device_state state)
{
	gadget->state = state;
	schedule_work(&gadget->work);
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);
/* ------------------------------------------------------------------------- */
/* Acquire connect_lock before calling this function. */
/* Apply the cached vbus state: connect when powered, disconnect otherwise. */
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
	if (udc->vbus)
		usb_gadget_connect_locked(udc->gadget);
	else
		usb_gadget_disconnect_locked(udc->gadget);
}
/*
 * Workqueue body for usb_udc_vbus_handler(): runs the connect/disconnect
 * decision in process context under connect_lock (pullup ops may sleep).
 */
static void vbus_event_work(struct work_struct *work)
{
	struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);

	mutex_lock(&udc->connect_lock);
	usb_udc_connect_control_locked(udc);
	mutex_unlock(&udc->connect_lock);
}
/**
 * usb_udc_vbus_handler - updates the udc core vbus status, and try to
 * connect or disconnect gadget
 * @gadget: The gadget which vbus change occurs
 * @status: The vbus status
 *
 * The udc driver calls it when it wants to connect or disconnect gadget
 * according to vbus status.
 *
 * This function can be invoked from interrupt context by irq handlers of
 * the gadget drivers, however, usb_udc_connect_control() has to run in
 * non-atomic context due to the following:
 * a. Some of the gadget driver implementations expect the ->pullup
 * callback to be invoked in non-atomic context.
 * b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
 * Hence offload invocation of usb_udc_connect_control() to workqueue.
 */
void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
	struct usb_udc *udc = gadget->udc;

	/* Gadget may not be bound to a UDC yet; then there is nothing to do. */
	if (udc) {
		udc->vbus = status;
		schedule_work(&udc->vbus_work);
	}
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
/**
 * usb_gadget_udc_reset - notifies the udc core that bus reset occurs
 * @gadget: The gadget which bus reset occurs
 * @driver: The gadget driver we want to notify
 *
 * If the udc driver has bus reset handler, it needs to call this when the bus
 * reset occurs, it notifies the gadget driver that the bus reset occurs as
 * well as updates gadget state.
 */
void usb_gadget_udc_reset(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	/* Notify the gadget driver first, then reflect the bus state. */
	driver->reset(gadget);
	usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
}
EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
/**
 * usb_gadget_udc_start_locked - tells usb device controller to start up
 * @udc: The UDC to be started
 *
 * This call is issued by the UDC Class driver when it's about
 * to register a gadget driver to the device controller, before
 * calling gadget driver's bind() method.
 *
 * It allows the controller to be powered off until strictly
 * necessary to have it powered on.
 *
 * Returns zero on success, else negative errno.
 *
 * Caller should acquire connect_lock before invoking this function.
 */
static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
	__must_hold(&udc->connect_lock)
{
	struct usb_gadget *gadget = udc->gadget;
	int ret;

	/* Double-start is a bug in the caller. */
	if (udc->started) {
		dev_err(&udc->dev, "UDC had already started\n");
		return -EBUSY;
	}

	ret = gadget->ops->udc_start(gadget, udc->driver);
	if (ret == 0)
		udc->started = true;

	return ret;
}
/**
 * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
 * @udc: The UDC to be stopped
 *
 * This call is issued by the UDC Class driver after calling
 * gadget driver's unbind() method.
 *
 * The details are implementation specific, but it can go as
 * far as powering off UDC completely and disable its data
 * line pullups.
 *
 * Caller should acquire connect lock before invoking this function.
 */
static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
	__must_hold(&udc->connect_lock)
{
	struct usb_gadget *gadget = udc->gadget;

	/* Double-stop is a bug in the caller. */
	if (!udc->started) {
		dev_err(&udc->dev, "UDC had already stopped\n");
		return;
	}

	gadget->ops->udc_stop(gadget);
	udc->started = false;
}
/**
* usb_gadget_udc_set_speed - tells usb device controller speed supported by
* current driver
* @udc: The device we want to set maximum speed
* @speed: The maximum speed to allowed to run
*
* This call is issued by the UDC Class driver before calling
* usb_gadget_udc_start() in order to make sure that we don't try to
* connect on speeds the gadget driver doesn't support.
*/
static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
enum usb_device_speed speed)
{
struct usb_gadget *gadget = udc->gadget;
enum usb_device_speed s;
if (speed == USB_SPEED_UNKNOWN)
s = gadget->max_speed;
else
s = min(speed, gadget->max_speed);
if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
else if (gadget->ops->udc_set_speed)
gadget->ops->udc_set_speed(gadget, s);
}
/**
* usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
* @udc: The UDC which should enable async callbacks
*
* This routine is used when binding gadget drivers. It undoes the effect
* of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
* (if necessary) and resume issuing callbacks.
*
* This routine will always be called in process context.
*/
static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
{
struct usb_gadget *gadget = udc->gadget;
if (gadget->ops->udc_async_callbacks)
gadget->ops->udc_async_callbacks(gadget, true);
}
/**
* usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
* @udc: The UDC which should disable async callbacks
*
* This routine is used when unbinding gadget drivers. It prevents a race:
* The UDC driver doesn't know when the gadget driver's ->unbind callback
* runs, so unless it is told to disable asynchronous callbacks, it might
* issue a callback (such as ->disconnect) after the unbind has completed.
*
* After this function runs, the UDC driver must suppress all ->suspend,
* ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
* until async callbacks are again enabled. A simple-minded but effective
* way to accomplish this is to tell the UDC hardware not to generate any
* more IRQs.
*
* Request completion callbacks must still be issued. However, it's okay
* to defer them until the request is cancelled, since the pull-up will be
* turned off during the time period when async callbacks are disabled.
*
* This routine will always be called in process context.
*/
static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
{
struct usb_gadget *gadget = udc->gadget;
if (gadget->ops->udc_async_callbacks)
gadget->ops->udc_async_callbacks(gadget, false);
}
/**
 * usb_udc_release - release the usb_udc struct
 * @dev: the dev member within usb_udc
 *
 * This is called by driver's core in order to free memory once the last
 * reference is released.
 */
static void usb_udc_release(struct device *dev)
{
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);

	dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
	kfree(udc);
}
static const struct attribute_group *usb_udc_attr_groups[];
/* Default ->release for gadget devices embedded in a longer-lived object. */
static void usb_udc_nop_release(struct device *dev)
{
	dev_vdbg(dev, "%s\n", __func__);
}
/**
 * usb_initialize_gadget - initialize a gadget and its embedded struct device
 * @parent: the parent device to this udc. Usually the controller driver's
 * device.
 * @gadget: the gadget to be initialized.
 * @release: a gadget release function.  May be NULL, in which case a no-op
 * release is installed (device_initialize() requires one).
 */
void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
		void (*release)(struct device *dev))
{
	INIT_WORK(&gadget->work, usb_gadget_state_work);
	gadget->dev.parent = parent;
	/* Release must be set before device_initialize() takes the ref. */
	if (release)
		gadget->dev.release = release;
	else
		gadget->dev.release = usb_udc_nop_release;
	device_initialize(&gadget->dev);
	gadget->dev.bus = &gadget_bus_type;
}
EXPORT_SYMBOL_GPL(usb_initialize_gadget);
/**
 * usb_add_gadget - adds a new gadget to the udc class driver list
 * @gadget: the gadget to be added to the list.
 *
 * Returns zero on success, negative errno otherwise.
 * Does not do a final usb_put_gadget() if an error occurs.
 */
int usb_add_gadget(struct usb_gadget *gadget)
{
	struct usb_udc *udc;
	int ret = -ENOMEM;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		goto error;
	device_initialize(&udc->dev);
	udc->dev.release = usb_udc_release;
	udc->dev.class = &udc_class;
	udc->dev.groups = usb_udc_attr_groups;
	udc->dev.parent = gadget->dev.parent;
	/* The udc device is named after the controller (gadget's parent). */
	ret = dev_set_name(&udc->dev, "%s",
			kobject_name(&gadget->dev.parent->kobj));
	if (ret)
		goto err_put_udc;
	/* Link gadget and udc both ways before publishing either. */
	udc->gadget = gadget;
	gadget->udc = udc;
	mutex_init(&udc->connect_lock);
	udc->started = false;
	mutex_lock(&udc_lock);
	list_add_tail(&udc->list, &udc_list);
	mutex_unlock(&udc_lock);
	INIT_WORK(&udc->vbus_work, vbus_event_work);
	ret = device_add(&udc->dev);
	if (ret)
		goto err_unlist_udc;
	usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
	/* Assume vbus is present until the UDC driver reports otherwise. */
	udc->vbus = true;
	ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL);
	if (ret < 0)
		goto err_del_udc;
	gadget->id_number = ret;
	dev_set_name(&gadget->dev, "gadget.%d", ret);
	ret = device_add(&gadget->dev);
	if (ret)
		goto err_free_id;
	return 0;
	/* Error unwinding mirrors the setup steps above, in reverse order. */
 err_free_id:
	ida_free(&gadget_id_numbers, gadget->id_number);
 err_del_udc:
	/* Finish any pending state notification before tearing down sysfs. */
	flush_work(&gadget->work);
	device_del(&udc->dev);
 err_unlist_udc:
	mutex_lock(&udc_lock);
	list_del(&udc->list);
	mutex_unlock(&udc_lock);
 err_put_udc:
	put_device(&udc->dev);
 error:
	return ret;
}
EXPORT_SYMBOL_GPL(usb_add_gadget);
/**
 * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
 * @parent: the parent device to this udc. Usually the controller driver's
 * device.
 * @gadget: the gadget to be added to the list.
 * @release: a gadget release function.
 *
 * Returns zero on success, negative errno otherwise.
 * Calls the gadget release function in the latter case.
 */
int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
		void (*release)(struct device *dev))
{
	int	ret;

	usb_initialize_gadget(parent, gadget, release);
	ret = usb_add_gadget(gadget);
	/* Drop the initialization reference on failure; @release runs then. */
	if (ret)
		usb_put_gadget(gadget);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
/**
 * usb_get_gadget_udc_name - get the name of the first UDC controller
 * This function returns the name of the first UDC controller in the system.
 * Please note that this interface is useful only for legacy drivers which
 * assume that there is only one UDC controller in the system and they need to
 * get its name before initialization.  There is no guarantee that the UDC
 * of the returned name will be still available, when gadget driver registers
 * itself.
 *
 * Returns pointer to string with UDC controller name on success, NULL
 * otherwise.  Caller should kfree() returned string.
 */
char *usb_get_gadget_udc_name(void)
{
	struct usb_udc *udc;
	char *name = NULL;

	/* For now we take the first available UDC */
	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list) {
		/* Skip UDCs that already have a gadget driver bound. */
		if (!udc->driver) {
			name = kstrdup(udc->gadget->name, GFP_KERNEL);
			break;
		}
	}
	mutex_unlock(&udc_lock);
	return name;
}
EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
/**
* usb_add_gadget_udc - adds a new gadget to the udc class driver list
* @parent: the parent device to this udc. Usually the controller
* driver's device.
* @gadget: the gadget to be added to the list
*
* Returns zero on success, negative errno otherwise.
*/
int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
{
	/* No custom release callback: use the framework's default. */
	int ret = usb_add_gadget_udc_release(parent, gadget, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
/**
* usb_del_gadget - deletes a gadget and unregisters its udc
* @gadget: the gadget to be deleted.
*
* This will unbind @gadget, if it is bound.
* It will not do a final usb_put_gadget().
*/
void usb_del_gadget(struct usb_gadget *gadget)
{
	struct usb_udc *udc = gadget->udc;

	/* Nothing to do if this gadget was never registered with a UDC. */
	if (!udc)
		return;

	dev_vdbg(gadget->dev.parent, "unregistering gadget\n");

	/* Remove the UDC from the global list so no new driver can match. */
	mutex_lock(&udc_lock);
	list_del(&udc->list);
	mutex_unlock(&udc_lock);

	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
	/* Let any pending gadget state-change work finish before teardown. */
	flush_work(&gadget->work);
	device_del(&gadget->dev);
	ida_free(&gadget_id_numbers, gadget->id_number);
	/* Stop deferred vbus handling before the udc device goes away. */
	cancel_work_sync(&udc->vbus_work);
	device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);
/**
* usb_del_gadget_udc - unregisters a gadget
* @gadget: the gadget to be unregistered.
*
* Calls usb_del_gadget() and does a final usb_put_gadget().
*/
void usb_del_gadget_udc(struct usb_gadget *gadget)
{
	/* Unregister first, then drop the final reference on the gadget. */
	usb_del_gadget(gadget);
	usb_put_gadget(gadget);
}
EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
/* ------------------------------------------------------------------------- */
static int gadget_match_driver(struct device *dev, struct device_driver *drv)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_udc *udc = gadget->udc;
	struct usb_gadget_driver *driver = container_of(drv,
			struct usb_gadget_driver, driver);

	/* A driver pinned to a specific UDC name matches only that UDC. */
	if (driver->udc_name && strcmp(driver->udc_name, dev_name(&udc->dev)))
		return 0;

	/* A gadget driver can drive at most one UDC at a time. */
	return driver->is_bound ? 0 : 1;
}
/*
 * Bus probe callback: bind a gadget driver to this UDC, start the
 * controller, and (if allowed) connect to the host.
 */
static int gadget_bind_driver(struct device *dev)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_udc *udc = gadget->udc;
	struct usb_gadget_driver *driver = container_of(dev->driver,
			struct usb_gadget_driver, driver);
	int ret = 0;

	/* Claim the driver and publish it under udc_lock. */
	mutex_lock(&udc_lock);
	if (driver->is_bound) {
		mutex_unlock(&udc_lock);
		return -ENXIO;		/* Driver binds to only one gadget */
	}
	driver->is_bound = true;
	udc->driver = driver;
	mutex_unlock(&udc_lock);

	dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function);

	usb_gadget_udc_set_speed(udc, driver->max_speed);

	ret = driver->bind(udc->gadget, driver);
	if (ret)
		goto err_bind;

	/* Start and connect under connect_lock to serialize with sysfs. */
	mutex_lock(&udc->connect_lock);
	ret = usb_gadget_udc_start_locked(udc);
	if (ret) {
		mutex_unlock(&udc->connect_lock);
		goto err_start;
	}
	usb_gadget_enable_async_callbacks(udc);
	udc->allow_connect = true;
	usb_udc_connect_control_locked(udc);
	mutex_unlock(&udc->connect_lock);

	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
	return 0;

 err_start:
	driver->unbind(udc->gadget);

 err_bind:
	/* NOTE(review): -EISNAM appears to be a "silent failure" sentinel
	 * from bind() — no error is logged for it; confirm against callers.
	 */
	if (ret != -EISNAM)
		dev_err(&udc->dev, "failed to start %s: %d\n",
			driver->function, ret);

	/* Roll back the claim made at the top. */
	mutex_lock(&udc_lock);
	udc->driver = NULL;
	driver->is_bound = false;
	mutex_unlock(&udc_lock);

	return ret;
}
/*
 * Bus remove callback: disconnect from the host, unbind the gadget
 * driver, and stop the controller — in that strict order.
 */
static void gadget_unbind_driver(struct device *dev)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_udc *udc = gadget->udc;
	struct usb_gadget_driver *driver = udc->driver;

	dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);

	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);

	/* Block further connects and flush deferred vbus work first. */
	udc->allow_connect = false;
	cancel_work_sync(&udc->vbus_work);
	mutex_lock(&udc->connect_lock);
	usb_gadget_disconnect_locked(gadget);
	usb_gadget_disable_async_callbacks(udc);
	/* Ensure no controller interrupt handler is still running. */
	if (gadget->irq)
		synchronize_irq(gadget->irq);
	mutex_unlock(&udc->connect_lock);

	udc->driver->unbind(gadget);

	mutex_lock(&udc->connect_lock);
	usb_gadget_udc_stop_locked(udc);
	mutex_unlock(&udc->connect_lock);

	/* Finally release the driver<->udc association. */
	mutex_lock(&udc_lock);
	driver->is_bound = false;
	udc->driver = NULL;
	mutex_unlock(&udc_lock);
}
/* ------------------------------------------------------------------------- */
/*
 * Register a gadget driver on the gadget bus.  Binding to a UDC (if one
 * is available) happens synchronously inside driver_register() via the
 * bus probe path; afterwards is_bound tells us whether it succeeded.
 */
int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
		struct module *owner, const char *mod_name)
{
	int ret;

	if (!driver || !driver->bind || !driver->setup)
		return -EINVAL;

	driver->driver.bus = &gadget_bus_type;
	driver->driver.owner = owner;
	driver->driver.mod_name = mod_name;
	ret = driver_register(&driver->driver);
	if (ret) {
		pr_warn("%s: driver registration failed: %d\n",
				driver->function, ret);
		return ret;
	}

	/* Check under udc_lock whether probe managed to bind us. */
	mutex_lock(&udc_lock);
	if (!driver->is_bound) {
		if (driver->match_existing_only) {
			pr_warn("%s: couldn't find an available UDC or it's busy\n",
					driver->function);
			ret = -EBUSY;
		} else {
			/* Stay registered and wait for a UDC to appear. */
			pr_info("%s: couldn't find an available UDC\n",
					driver->function);
			ret = 0;
		}
	}
	mutex_unlock(&udc_lock);

	if (ret)
		driver_unregister(&driver->driver);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	/* A removable gadget driver must provide an unbind callback. */
	if (!driver || !driver->unbind)
		return -EINVAL;

	driver_unregister(&driver->driver);

	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
/* ------------------------------------------------------------------------- */
static ssize_t srp_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);

	/* Only the literal "1" triggers a wakeup; anything else is ignored. */
	if (!sysfs_streq(buf, "1"))
		return n;

	usb_gadget_wakeup(udc->gadget);
	return n;
}
static DEVICE_ATTR_WO(srp);
/*
 * sysfs "soft_connect" attribute: writing "connect" starts the UDC and
 * enables the pullup; "disconnect" does the reverse.  Requires a bound
 * gadget driver.
 */
static ssize_t soft_connect_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
	ssize_t ret;

	/* The gadget device lock serializes against driver unbind. */
	device_lock(&udc->gadget->dev);
	if (!udc->driver) {
		dev_err(dev, "soft-connect without a gadget driver\n");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sysfs_streq(buf, "connect")) {
		mutex_lock(&udc->connect_lock);
		/* NOTE(review): start's return value is ignored here. */
		usb_gadget_udc_start_locked(udc);
		usb_gadget_connect_locked(udc->gadget);
		mutex_unlock(&udc->connect_lock);
	} else if (sysfs_streq(buf, "disconnect")) {
		mutex_lock(&udc->connect_lock);
		usb_gadget_disconnect_locked(udc->gadget);
		usb_gadget_udc_stop_locked(udc);
		mutex_unlock(&udc->connect_lock);
	} else {
		dev_err(dev, "unsupported command '%s'\n", buf);
		ret = -EINVAL;
		goto out;
	}

	ret = n;
out:
	device_unlock(&udc->gadget->dev);
	return ret;
}
static DEVICE_ATTR_WO(soft_connect);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
struct usb_gadget *gadget = udc->gadget;
return sprintf(buf, "%s\n", usb_state_string(gadget->state));
}
static DEVICE_ATTR_RO(state);
/* sysfs "function" attribute: name of the bound gadget driver, if any. */
static ssize_t function_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
	struct usb_gadget_driver *drv;
	int rc = 0;

	/* udc->driver may be cleared by an unbind at any time, so sample
	 * it under udc_lock; an unbound UDC reads back as empty.
	 */
	mutex_lock(&udc_lock);
	drv = udc->driver;
	if (drv && drv->function)
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
	mutex_unlock(&udc_lock);
	return rc;
}
static DEVICE_ATTR_RO(function);
/*
 * Generate a read-only sysfs attribute that reports a gadget speed
 * field (struct usb_gadget member named by @param) as a string.
 */
#define USB_UDC_SPEED_ATTR(name, param)					\
ssize_t name##_show(struct device *dev,					\
		struct device_attribute *attr, char *buf)		\
{									\
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);	\
	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
			usb_speed_string(udc->gadget->param));		\
}									\
static DEVICE_ATTR_RO(name)

static USB_UDC_SPEED_ATTR(current_speed, speed);
static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);

/*
 * Generate a read-only sysfs attribute that reports a boolean/integer
 * struct usb_gadget member of the same name as a decimal value.
 */
#define USB_UDC_ATTR(name)					\
ssize_t name##_show(struct device *dev,				\
		struct device_attribute *attr, char *buf)	\
{								\
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev); \
	struct usb_gadget	*gadget = udc->gadget;		\
								\
	return scnprintf(buf, PAGE_SIZE, "%d\n", gadget->name);	\
}								\
static DEVICE_ATTR_RO(name)

static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
static USB_UDC_ATTR(a_hnp_support);
static USB_UDC_ATTR(a_alt_hnp_support);
static USB_UDC_ATTR(is_selfpowered);
/* All sysfs attributes exposed under /sys/class/udc/<name>/. */
static struct attribute *usb_udc_attrs[] = {
	&dev_attr_srp.attr,
	&dev_attr_soft_connect.attr,
	&dev_attr_state.attr,
	&dev_attr_function.attr,
	&dev_attr_current_speed.attr,
	&dev_attr_maximum_speed.attr,

	&dev_attr_is_otg.attr,
	&dev_attr_is_a_peripheral.attr,
	&dev_attr_b_hnp_enable.attr,
	&dev_attr_a_hnp_support.attr,
	&dev_attr_a_alt_hnp_support.attr,
	&dev_attr_is_selfpowered.attr,
	NULL,
};

static const struct attribute_group usb_udc_attr_group = {
	.attrs = usb_udc_attrs,
};

/* NULL-terminated group list passed to device_add() for udc devices. */
static const struct attribute_group *usb_udc_attr_groups[] = {
	&usb_udc_attr_group,
	NULL,
};
/*
 * Add USB_UDC_NAME (always) and USB_UDC_DRIVER (when a gadget driver is
 * bound) environment variables to udc-class uevents.
 */
static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
	int ret;

	ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
	if (ret) {
		dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
		return ret;
	}

	/* udc->driver must be sampled under udc_lock (may be unbinding). */
	mutex_lock(&udc_lock);
	if (udc->driver)
		ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
				udc->driver->function);
	mutex_unlock(&udc_lock);
	if (ret) {
		dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
		return ret;
	}

	return 0;
}
/* The "udc" device class: one device per registered controller. */
static const struct class udc_class = {
	.name		= "udc",
	.dev_uevent	= usb_udc_uevent,
};

/* The "gadget" bus: matches gadget drivers to UDC gadget devices. */
static const struct bus_type gadget_bus_type = {
	.name = "gadget",
	.probe = gadget_bind_driver,
	.remove = gadget_unbind_driver,
	.match = gadget_match_driver,
};
static int __init usb_udc_init(void)
{
	int rc;

	rc = class_register(&udc_class);
	if (rc)
		return rc;

	rc = bus_register(&gadget_bus_type);
	if (rc) {
		/* Roll back the class registration on bus failure. */
		class_unregister(&udc_class);
		return rc;
	}

	return 0;
}
subsys_initcall(usb_udc_init);
static void __exit usb_udc_exit(void)
{
	/* Tear down in reverse order of usb_udc_init(). */
	bus_unregister(&gadget_bus_type);
	class_unregister(&udc_class);
}
module_exit(usb_udc_exit);

MODULE_DESCRIPTION("UDC Framework");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
/* ===== linux-master: drivers/usb/gadget/udc/core.c ends here; drivers/usb/gadget/udc/net2272.c begins below ===== */
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for PLX NET2272 USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "net2272.h"
#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

/* Identification strings reported in logs and module info. */
static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

static const char ep0name[] = "ep0";
/* Endpoint names, indexed by NET2272 endpoint number. */
static const char * const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c",
};
#ifdef CONFIG_USB_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 */
static bool use_dma = false;
module_param(use_dma, bool, 0644);

/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);
#else
/* Without CONFIG_USB_NET2272_DMA the knobs collapse to constants (PIO). */
#define use_dma 0
#define dma_ep 1
#define dma_mode 2
#endif

/*
 * fifo_mode: net2272 buffer configuration:
 *	mode 0 == ep-{a,b,c} 512db each
 *	mode 1 == ep-a 1k, ep-{b,c} 512db
 *	mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *	mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus powered devices set this to 1.
 */
static ushort enable_suspend;
module_param(enable_suspend, ushort, 0644);
/*
 * Debug helper: verify that an OUT endpoint is NAKing packets; if it
 * is not, log the condition and re-arm NAKing.  Compiles to an early
 * return (no register access) in non-DEBUG builds.
 */
static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
	u8 tmp;

#ifndef DEBUG
	return;
#endif

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
/* Stop NAKing OUT packets on @ep so the host may fill the rx fifo. */
static void stop_out_naking(struct net2272_ep *ep)
{
	u8 tmp = net2272_ep_read(ep, EP_STAT0);

	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}

/* Direction string ("in"/"out") for an endpoint address, for logging. */
#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
/* Short human-readable name for the transfer type in bmAttributes. */
static char *type_string(u8 bmAttributes)
{
	u8 xfer = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (xfer == USB_ENDPOINT_XFER_BULK)
		return "bulk";
	if (xfer == USB_ENDPOINT_XFER_ISOC)
		return "iso";
	if (xfer == USB_ENDPOINT_XFER_INT)
		return "intr";
	return "control";
}
/* Short human-readable name for an endpoint buffer state. */
static char *buf_state_string(unsigned state)
{
	if (state == BUFF_FREE)
		return "free";
	if (state == BUFF_VALID)
		return "valid";
	if (state == BUFF_LCL)
		return "local";
	if (state == BUFF_USB)
		return "usb";
	return "unknown";
}
/* Describe the active transfer mode ("PIO" or the DMA DREQ mode). */
static char *dma_mode_string(void)
{
	static char * const mode_names[] = {
		"SLOW DREQ", "FAST DREQ", "BURST",
	};

	if (!use_dma)
		return "PIO";
	if (dma_mode > 2)
		return "invalid";
	return mode_names[dma_mode];
}
/* Forward declarations for helpers defined later in this file. */
static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);

static const struct usb_ep_ops net2272_ep_ops;
/*---------------------------------------------------------------------------*/
/*
 * usb_ep_ops.enable: configure a data endpoint (never ep0) according to
 * its USB descriptor and enable its interrupts.  Returns -EINVAL on a
 * bad descriptor, -ESHUTDOWN with no driver/speed, -ERANGE on a bogus
 * bulk maxpacket for the current speed.
 */
static int
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2272 *dev;
	struct net2272_ep *ep;
	u32 max;
	u8 tmp;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	max = usb_endpoint_maxp(desc);

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max;
	ep->desc = desc;

	/* net2272_ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	/* Assemble the EP_CFG register value: type, number, dir, enable. */
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (!ep->is_in)
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	/* enable irqs */
	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
/*
 * Restore an endpoint to its quiescent software and hardware defaults:
 * no descriptor, empty queue, irqs off, OUT NAKing armed, status bits
 * and fifo state cleared.  Fifo sizing is configured elsewhere.
 */
static void net2272_ep_reset(struct net2272_ep *ep)
{
	u8 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
	if (ep->num != 0)
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
			(1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
			(1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP)
			| (1 << BUFFER_FLUSH));

	/* fifo size is handled separately */
}
/*
 * usb_ep_ops.disable: fail all queued requests with -ESHUTDOWN and
 * return the endpoint to its reset defaults.  ep0 cannot be disabled.
 */
static int net2272_disable(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*---------------------------------------------------------------------------*/
/*
 * usb_ep_ops.alloc_request: allocate a zeroed request wrapper and hand
 * back the embedded struct usb_request.  Returns NULL on bad ep or OOM.
 */
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2272_request *req;

	if (!_ep)
		return NULL;
	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/*
 * usb_ep_ops.free_request: release a request allocated by
 * net2272_alloc_request().  Freeing a still-queued request is a bug.
 */
static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/*
 * Retire @req with @status: unlink it, unmap DMA if used, and invoke
 * the completion callback.  Called with dev->lock held; the lock is
 * dropped around the callback, so queue heads may change under us —
 * ep->stopped blocks re-entrant queue processing meanwhile.
 */
static void
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
{
	struct net2272 *dev;
	unsigned stopped = ep->stopped;

	if (ep->num == 0) {
		if (ep->dev->protocol_stall) {
			ep->stopped = 1;
			set_halt(ep);
		}
		allow_status(ep);
	}

	list_del_init(&req->queue);

	/* Preserve an earlier error status; only overwrite -EINPROGRESS. */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (use_dma && ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req,
				ep->is_in);

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*
 * Write up to @max bytes of @req into the endpoint fifo, 16 bits at a
 * time.  An odd trailing byte is written by briefly dropping the chip
 * into 8-bit local-bus mode.  Returns the number of bytes written.
 */
static int
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	u16 *bufp;
	unsigned length, count;
	u8 tmp;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	count = length;
	bufp = (u16 *)buf;

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);
		count -= 2;
	}
	buf = (u8 *)bufp;

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		net2272_write(ep->dev, LOCCTL, tmp);
	}
	return length;
}
/* returns: 0: still running, 1: completed, negative: errno */
static int
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned count, max;
	int status;

	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

	/*
	 * Keep loading the endpoint until the final packet is loaded,
	 * or the endpoint buffer is full.
	 */
 top:
	/*
	 * Clear interrupt status
	 *  - Packet Transmitted interrupt will become set again when the
	 *    host successfully takes another packet
	 */
	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
		buf = req->req.buf + req->req.actual;
		prefetch(buf);

		/* force pagesel */
		net2272_ep_read(ep, EP_STAT0);

		/* Available space, re-read to settle (see pagesel above). */
		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
			(net2272_ep_read(ep, EP_AVAIL0));

		if (max < ep->ep.maxpacket)
			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
				| (net2272_ep_read(ep, EP_AVAIL0));

		count = net2272_write_packet(ep, buf, req, max);
		/* see if we are done */
		if (req->req.length == req->req.actual) {
			/* validate short or zlp packet */
			if (count < ep->ep.maxpacket)
				set_fifo_bytecount(ep, 0);
			net2272_done(ep, req, 0);

			/* Try to start the next queued request via DMA;
			 * fall back to pio by looping if the buffer drained.
			 */
			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct net2272_request,
						queue);
				status = net2272_kick_dma(ep, req);

				if (status < 0)
					if ((net2272_ep_read(ep, EP_STAT0)
							& (1 << BUFFER_EMPTY)))
						goto top;
			}
			return 1;
		}
		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	}
	return 0;
}
/* Discard any pending OUT data: clear rx interrupts and flush the fifo. */
static void
net2272_out_flush(struct net2272_ep *ep)
{
	ASSERT_OUT_NAKING(ep);

	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}
/*
 * Read @avail bytes from the endpoint fifo into @buf, 16 bits at a
 * time (an odd count is rounded up to drain the final byte).  Returns
 * nonzero if this was a short packet (avail < maxpacket).
 */
static int
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned is_short;
	u16 *bufp;

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);
		return is_short;
	}

	/* Ensure we get the final byte */
	if (unlikely(avail % 2))
		avail++;
	bufp = (u16 *)buf;

	do {
		*bufp++ = readw(ep_data);
		avail -= 2;
	} while (avail);

	/*
	 * To avoid false endpoint available race condition must read
	 * ep stat0 twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);

	return is_short;
}
/*
 * Drain the OUT fifo into @req until the buffer is empty or the request
 * completes (full, short packet, or overflow).  Returns 1 once a request
 * was retired, 0 while it is still in progress.  Called with dev->lock
 * held.
 */
static int
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned is_short;
	int count;
	int tmp;
	int cleanup = 0;

	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

 top:
	do {
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);

		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

		/* More data in the fifo than the request can hold:
		 * flag an overflow unless it lines up on a packet boundary.
		 */
		if (count > tmp) {
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
				cleanup = 1;
			}
			count = (tmp > 0) ? tmp : 0;
		}

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				req->req.actual == req->req.length)) {

			if (cleanup) {
				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
			} else
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);
			}

			if (!list_empty(&ep->queue)) {
				int status;

				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
				if ((status < 0) &&
				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
					goto top;
			}
			return 1;
		}
	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
	return 0;
}
/* Advance the queue's head request by pio in the endpoint's direction. */
static void
net2272_pio_advance(struct net2272_ep *ep)
{
	struct net2272_request *head;

	if (list_empty(&ep->queue))
		return;

	head = list_entry(ep->queue.next, struct net2272_request, queue);
	if (ep->is_in)
		net2272_write_fifo(ep, head);
	else
		net2272_read_fifo(ep, head);
}
/* returns 0 on success, else negative errno */
static int
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
{
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
		ep, buf, len, dir);

	/* The NET2272 only supports a single dma channel */
	if (dev->dma_busy)
		return -EBUSY;
	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))
		return -EINVAL;

	dev->dma_busy = 1;

	/* initialize platform's dma */
#ifdef CONFIG_USB_PCI
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DEMAND_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

		break;
	}
#endif

	/* Arm the NET2272's own DMA request logic for this endpoint pair. */
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	/* Dummy read to flush posted writes. */
	(void) net2272_read(dev, SCRATCH);

	return 0;
}
/* Kick off the platform's external DMA controller (PLX 9054 on RDK1). */
static void
net2272_start_dma(struct net2272 *dev)
{
	/* start platform's dma controller */
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif
}
/* returns 0 on success, else negative errno */
static int
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
{
	unsigned size;
	u8 tmp;

	/* DMA is only usable on ep-a/ep-b with an assigned dma channel. */
	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
		return -EINVAL;

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)
		return -EINVAL;

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)
		return -EBUSY;

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;
	size &= ~1;

	/* device-to-host transfer */
	if (ep->is_in) {
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;
		req->req.actual += size;

	/* host-to-device transfer */
	} else {
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;

		if (!(tmp & (1 << BUFFER_EMPTY)))
			ep->not_empty = 1;
		else
			ep->not_empty = 0;


		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			/* deassert dreq */
			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

			return -EBUSY;
		}
	}

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);

	return 0;
}
/*
 * Abort any in-flight external DMA transfer and mark the single DMA
 * channel free again.  Busy-waits for the controller to acknowledge
 * the abort before clearing its interrupt.
 */
static void net2272_cancel_dma(struct net2272 *dev)
{
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif

	dev->dma_busy = 0;
}
/*---------------------------------------------------------------------------*/
/*
 * usb_ep_ops.queue: submit a request.  If the endpoint is idle the
 * transfer is kicked immediately — DMA when possible, otherwise pio —
 * and may even complete inline (ep0 status ack, pending short packet);
 * otherwise the request is appended to the endpoint queue.
 */
static int
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2272_request *req;
	struct net2272_ep *ep;
	struct net2272 *dev;
	unsigned long flags;
	int status = -1;
	u8 s;

	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (status)
			return status;
	}

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
			goto done;
		}

		/* Return zlp, don't let it block subsequent packets */
		s = net2272_ep_read(ep, EP_STAT0);
		if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty check for a blocking zlp, handle it */
			if ((s & (1 << NAK_OUT_PACKETS)) &&
			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
				/*
				 * Request is going to terminate with a short packet ...
				 * hope the client is ready for it!
				 */
				status = net2272_read_fifo(ep, req);
				/* clear short packet naking */
				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
				goto done;
			}
		}

		/* try dma first */
		status = net2272_kick_dma(ep, req);

		if (status < 0) {
			/* dma failed (most likely in use by another endpoint)
			 * fallback to pio
			 */
			status = 0;

			if (ep->is_in)
				status = net2272_write_fifo(ep, req);
			else {
				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);
			}

			/* A positive/negative status means the request was
			 * already retired inline; don't queue it again.
			 */
			if (unlikely(status != 0)) {
				if (status > 0)
					status = 0;
				req = NULL;
			}
		}
	}
	if (likely(req))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

 done:
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* dequeue ALL requests */
static void
net2272_dequeue_all(struct net2272_ep *ep)
{
	struct net2272_request *req;

	/* called with spinlock held */
	/* Stop queue processing; net2272_done() drops/retakes the lock. */
	ep->stopped = 1;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2272_request,
				queue);
		net2272_done(ep, req, -ESHUTDOWN);
	}
}
/* dequeue JUST ONE request */
static int
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req = NULL, *iter;
	unsigned long flags;
	int stopped;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	/* quiesce the endpoint while we unlink; restored before returning */
	stopped = ep->stopped;
	ep->stopped = 1;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ep->stopped = stopped;
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	}
	/* NOTE(review): a matched request that is NOT at the queue head is
	 * left on the queue here (only the head is completed) — confirm this
	 * is the intended behavior for mid-queue dequeue.
	 */
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*---------------------------------------------------------------------------*/
/* Set or clear an endpoint halt; optionally "wedge" it so that only a real
 * SET_CONFIGURATION/SET_INTERFACE (not CLEAR_FEATURE) can un-halt it.
 * Returns -EAGAIN while requests are pending or an IN fifo is non-empty,
 * since halting then would corrupt in-flight data.
 */
static int
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2272_ep *ep;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	/* isochronous endpoints have no halt feature */
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
		ret = -EAGAIN;
	else {
		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");
		/* set/clear */
		if (value) {
			/* ep0 stalls via the protocol_stall flag instead of
			 * the per-endpoint halt bit
			 */
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			ep->wedged = 0;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return ret;
}
static int
net2272_set_halt(struct usb_ep *_ep, int value)
{
return net2272_set_halt_and_wedge(_ep, value, 0);
}
/* usb_ep_ops.set_wedge: halt an endpoint and latch it against
 * CLEAR_FEATURE(HALT).  Not permitted on ep0.
 */
static int
net2272_set_wedge(struct usb_ep *_ep)
{
	/* pointer comparison — assumes ep0's name is always the ep0name
	 * literal, as set up at init (TODO confirm against net2272_usb_reinit)
	 */
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2272_set_halt_and_wedge(_ep, 1, 1);
}
/* Report FIFO occupancy: bytes waiting to be read (OUT), or bytes of free
 * space remaining (IN).  Negative errno on invalid or shut-down endpoints.
 */
static int
net2272_fifo_status(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	u16 count;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* 16-bit avail count: high byte first, then low byte */
	count = net2272_ep_read(ep, EP_AVAIL1) << 8;
	count |= net2272_ep_read(ep, EP_AVAIL0);

	if (count > ep->fifo_size)
		return -EOVERFLOW;
	return ep->is_in ? ep->fifo_size - count : count;
}
/* Discard any data sitting in the endpoint FIFO by pulsing BUFFER_FLUSH. */
static void
net2272_fifo_flush(struct usb_ep *_ep)
{
	struct net2272_ep *ep = container_of(_ep, struct net2272_ep, ep);

	/* validate the endpoint and device state before touching hardware */
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}
/* Per-endpoint operations exported to the gadget core for all endpoints. */
static const struct usb_ep_ops net2272_ep_ops = {
	.enable = net2272_enable,
	.disable = net2272_disable,
	.alloc_request = net2272_alloc_request,
	.free_request = net2272_free_request,
	.queue = net2272_queue,
	.dequeue = net2272_dequeue,
	.set_halt = net2272_set_halt,
	.set_wedge = net2272_set_wedge,
	.fifo_status = net2272_fifo_status,
	.fifo_flush = net2272_fifo_flush,
};
/*---------------------------------------------------------------------------*/
/* usb_gadget_ops.get_frame: return the current USB frame number. */
static int
net2272_get_frame(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;
	u16 frame;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	/* read high byte first, then low byte, as the hardware expects */
	frame = net2272_read(dev, FRAME1) << 8;
	frame |= net2272_read(dev, FRAME0);
	spin_unlock_irqrestore(&dev->lock, flags);

	return frame;
}
/* usb_gadget_ops.wakeup: request remote wakeup, but only if the host has
 * armed it (IO_WAKEUP_ENABLE set in USBCTL0).
 */
static int
net2272_wakeup(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	/* NOTE(review): returns 0 rather than -ENODEV on a NULL gadget,
	 * unlike the sibling ops here — remote wakeup is best-effort.
	 */
	if (!_gadget)
		return 0;

	dev = container_of(_gadget, struct net2272, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	if (tmp & (1 << IO_WAKEUP_ENABLE))
		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* usb_gadget_ops.set_selfpowered: record the flag GET_STATUS(device)
 * reports back to the host.
 */
static int
net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	if (!_gadget)
		return -ENODEV;

	_gadget->is_selfpowered = !!value;
	return 0;
}
/* usb_gadget_ops.pullup: software connect/disconnect by gating the
 * USB_DETECT_ENABLE bit in USBCTL0; remembers the state in softconnect.
 */
static int
net2272_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct net2272, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	/* read-modify-write so the other USBCTL0 bits are preserved */
	tmp = net2272_read(dev, USBCTL0);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	net2272_write(dev, USBCTL0, tmp);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* forward declarations — definitions follow later in this file */
static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver);
static int net2272_stop(struct usb_gadget *_gadget);
static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);

/* Controller-level operations handed to the UDC core. */
static const struct usb_gadget_ops net2272_ops = {
	.get_frame = net2272_get_frame,
	.wakeup = net2272_wakeup,
	.set_selfpowered = net2272_set_selfpowered,
	.pullup = net2272_pullup,
	.udc_start = net2272_start,
	.udc_stop = net2272_stop,
	.udc_async_callbacks = net2272_async_callbacks,
};
/*---------------------------------------------------------------------------*/
/* sysfs "registers" attribute: dump the main control, DMA, USB and
 * per-endpoint registers into @buf for debugging.  Returns the number of
 * bytes written (bounded by PAGE_SIZE via scnprintf).
 */
static ssize_t
registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2272 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	u8 t1, t2;
	int i;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version %s,"
		"chiprev %02x, locctl %02x\n"
		"irqenb0 %02x irqenb1 %02x "
		"irqstat0 %02x irqstat1 %02x\n",
		driver_name, driver_vers, dev->chiprev,
		net2272_read(dev, LOCCTL),
		net2272_read(dev, IRQENB0),
		net2272_read(dev, IRQENB1),
		net2272_read(dev, IRQSTAT0),
		net2272_read(dev, IRQSTAT1));
	size -= t;
	next += t;

	/* DMA */
	t1 = net2272_read(dev, DMAREQ);
	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
		t1, ep_name[(t1 & 0x01) + 1],
		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
		t1 & (1 << DMA_REQUEST) ? "req " : "",
		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = net2272_read(dev, USBCTL1);
	if (t1 & (1 << VBUS_PIN)) {
		if (t1 & (1 << USB_HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
	} else
		s = "not attached";
	t = scnprintf(next, size,
		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
		net2272_read(dev, USBCTL0), t1,
		net2272_read(dev, OURADDR), s);
	size -= t;
	next += t;

	/* Endpoint Registers */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep;

		ep = &dev->ep[i];
		/* skip endpoints that are not configured (ep0 always shown) */
		if (i && !ep->desc)
			continue;

		t1 = net2272_ep_read(ep, EP_CFG);
		t2 = net2272_ep_read(ep, EP_RSPSET);
		t = scnprintf(next, size,
			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
			"irqenb %02x\n",
			ep->ep.name, t1, t2,
			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
			net2272_ep_read(ep, EP_IRQENB));
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tstat0 %02x stat1 %02x avail %04x "
			"(ep%d%s-%s)%s\n",
			net2272_ep_read(ep, EP_STAT0),
			net2272_ep_read(ep, EP_STAT1),
			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
			t1 & 0x0f,
			ep->is_in ? "in" : "out",
			type_string(t1 >> 5),
			ep->stopped ? "*" : "");
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tep_transfer %06x\n",
			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
		size -= t;
		next += t;

		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
		t = scnprintf(next, size,
			"\tbuf-a %s buf-b %s\n",
			buf_state_string(t1),
			buf_state_string(t2));
		size -= t;
		next += t;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);
/*---------------------------------------------------------------------------*/
/* Program the FIFO allocation mode (LOCCTL bits 7:6) and rebuild the
 * gadget's ep_list to match how much FIFO each endpoint gets:
 *   mode 0: ep-a 512, ep-b 512   mode 1: ep-a 1024, ep-b 512
 *   mode 2: ep-a 1024, ep-b 1024 mode 3: ep-a 1024, ep-b unavailable
 * ep-c always keeps two 512-byte buffers.
 */
static void
net2272_set_fifo_mode(struct net2272 *dev, int mode)
{
	u8 tmp;

	tmp = net2272_read(dev, LOCCTL) & 0x3f;
	tmp |= (mode << 6);
	net2272_write(dev, LOCCTL, tmp);

	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* always ep-a, ep-c ... maybe not ep-b */
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);

	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
		break;
	case 1:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 1024;
		dev->ep[2].fifo_size = 512;
		break;
	case 2:
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 3:
		/* ep-b gets no FIFO in mode 3, so it is left off ep_list */
		dev->ep[1].fifo_size = 1024;
		break;
	}

	/* ep-c is always 2 512 byte buffers */
	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
	dev->ep[3].fifo_size = 512;
}
/*---------------------------------------------------------------------------*/
/* Bring the chip to a quiescent, known state: cancel DMA, mask and clear
 * interrupts, park the DMA request register, and reprogram FIFO mode,
 * 16-bit data width and the DMA mode bits.
 */
static void
net2272_usb_reset(struct net2272 *dev)
{
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	net2272_cancel_dma(dev);

	net2272_write(dev, IRQENB0, 0);
	net2272_write(dev, IRQENB1, 0);

	/* clear irq state */
	net2272_write(dev, IRQSTAT0, 0xff);
	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));

	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(0 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));

	net2272_cancel_dma(dev);
	/* fall back to mode 0 if the module parameter is out of range */
	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);

	/* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
	 * note that the higher level gadget drivers are expected to convert data to little endian.
	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
	 */
	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
}
/* Re-initialize the driver-side endpoint state (names, fifo sizes, caps)
 * without touching most hardware; leaves ep0 ready as the control endpoint.
 */
static void
net2272_usb_reinit(struct net2272 *dev)
{
	int i;

	/* basic endpoint init */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;
		ep->not_empty = 0;

		/* only the module-parameter-selected endpoint may use DMA */
		if (use_dma && ep->num == dma_ep)
			ep->dma = 1;

		if (i > 0 && i <= 3)
			ep->fifo_size = 512;
		else
			ep->fifo_size = 64;
		net2272_ep_reset(ep);

		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
/* Arm ep0 and the chip-level interrupts needed to receive SETUP packets:
 * enables host detection (per softconnect), setup/ep0/DMA-done interrupts,
 * and VBUS/reset/suspend change notifications.
 */
static void
net2272_ep0_start(struct net2272 *dev)
{
	struct net2272_ep *ep0 = &dev->ep[0];

	net2272_ep_write(ep0, EP_RSPSET,
		(1 << NAK_OUT_PACKETS_MODE) |
		(1 << ALT_NAK_OUT_PACKETS));
	net2272_ep_write(ep0, EP_RSPCLR,
		(1 << HIDE_STATUS_PHASE) |
		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
	net2272_write(dev, USBCTL0,
		(dev->softconnect << USB_DETECT_ENABLE) |
		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
		(1 << IO_WAKEUP_ENABLE));
	net2272_write(dev, IRQENB0,
		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
		(1 << DMA_DONE_INTERRUPT_ENABLE));
	net2272_write(dev, IRQENB1,
		(1 << VBUS_INTERRUPT_ENABLE) |
		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
}
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
/* usb_gadget_ops.udc_start: bind a gadget driver.  Only high-speed-capable
 * drivers with a setup() callback are accepted; then ep0 and host detection
 * are enabled so enumeration can begin.
 */
static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver)
{
	struct net2272 *dev;
	unsigned i;

	if (!driver || !driver->setup ||
	    driver->max_speed != USB_SPEED_HIGH)
		return -EINVAL;

	dev = container_of(_gadget, struct net2272, gadget);

	/* reset the per-endpoint irq statistics */
	for (i = 0; i < 4; ++i)
		dev->ep[i].irqs = 0;
	/* hook up the driver ... */
	dev->softconnect = 1;
	dev->driver = driver;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2272_ep0_start(dev);

	return 0;
}
/* Quiesce the controller: reset the chip, fail all queued requests with
 * -ESHUTDOWN, and (if connected) notify the gadget driver of disconnect.
 * Called with dev->lock held; the lock is dropped around the driver's
 * disconnect callback.
 */
static void
stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	net2272_usb_reset(dev);
	for (i = 0; i < 4; ++i)
		net2272_dequeue_all(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (dev->async_callbacks && driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	net2272_usb_reinit(dev);
}
/* usb_gadget_ops.udc_stop: unbind the gadget driver after quiescing the
 * hardware and killing outstanding I/O.
 */
static int net2272_stop(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	return 0;
}
/* usb_gadget_ops.udc_async_callbacks: gate whether the interrupt paths may
 * invoke the gadget driver's setup/disconnect/suspend/resume callbacks.
 */
static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
{
	struct net2272 *dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irq(&dev->lock);
	dev->async_callbacks = enable;
	spin_unlock_irq(&dev->lock);
}
/*---------------------------------------------------------------------------*/
/* handle ep-a/ep-b dma completions
 *
 * De-asserts DREQ, re-enables the per-packet interrupts, then either retires
 * the head request and restarts the queue (IN) or accounts the received
 * byte count and drains any leftover fifo data via pio (OUT).
 * Called with dev->lock held from interrupt context.
 */
static void
net2272_handle_dma(struct net2272_ep *ep)
{
	struct net2272_request *req;
	unsigned len;
	int status;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct net2272_request, queue);
	else
		req = NULL;

	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);

	/* Ensure DREQ is de-asserted */
	net2272_write(ep->dev, DMAREQ,
		(0 << DMA_BUFFER_VALID)
		| (0 << DMA_REQUEST_ENABLE)
		| (1 << DMA_CONTROL_DACK)
		| (ep->dev->dma_eot_polarity << EOT_POLARITY)
		| (ep->dev->dma_dack_polarity << DACK_POLARITY)
		| (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
		| (ep->dma << DMA_ENDPOINT_SELECT));

	ep->dev->dma_busy = 0;

	net2272_ep_write(ep, EP_IRQENB,
		(1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB));

	/* Guard against a completion arriving with an empty queue: the code
	 * below dereferences req unconditionally (req->req.length and
	 * req->req.actual), which would oops on a spurious interrupt.
	 */
	if (!req)
		return;

	/* device-to-host transfer completed */
	if (ep->is_in) {
		/* validate a short packet or zlp if necessary */
		if ((req->req.length % ep->ep.maxpacket != 0) ||
				req->req.zero)
			set_fifo_bytecount(ep, 0);

		net2272_done(ep, req, 0);
		if (!list_empty(&ep->queue)) {
			/* keep the queue moving: try dma, fall back to pio */
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			status = net2272_kick_dma(ep, req);
			if (status < 0)
				net2272_pio_advance(ep);
		}

	/* host-to-device transfer completed */
	} else {
		/* terminated with a short packet? */
		if (net2272_read(ep->dev, IRQSTAT0) &
				(1 << DMA_DONE_INTERRUPT)) {
			/* abort system dma */
			net2272_cancel_dma(ep->dev);
		}

		/* EP_TRANSFER will contain the number of bytes
		 * actually received.
		 * NOTE: There is no overflow detection on EP_TRANSFER:
		 * We can't deal with transfers larger than 2^24 bytes!
		 */
		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
			| (net2272_ep_read(ep, EP_TRANSFER0));

		if (ep->not_empty)
			len += 4;

		req->req.actual += len;

		/* get any remaining data */
		net2272_pio_advance(ep);
	}
}
/*---------------------------------------------------------------------------*/
/* Per-endpoint interrupt service: acknowledge the status bits and advance
 * the pio state machine on packet received (OUT) or transmitted (IN).
 * Called with dev->lock held.
 */
static void
net2272_handle_ep(struct net2272_ep *ep)
{
	struct net2272_request *req;
	u8 stat0, stat1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2272_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	stat0 = net2272_ep_read(ep, EP_STAT0);
	stat1 = net2272_ep_read(ep, EP_STAT1);
	ep->irqs++;

	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
		ep->ep.name, stat0, stat1, req ? &req->req : NULL);

	/* NAK_OUT and short-packet bits are left set for the pio code */
	net2272_ep_write(ep, EP_STAT0, stat0 &
		~((1 << NAK_OUT_PACKETS)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
	net2272_ep_write(ep, EP_STAT1, stat1);

	/* data packet(s) received (in the fifo, OUT)
	 * direction must be validated, otherwise control read status phase
	 * could be interpreted as a valid packet
	 */
	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
		net2272_pio_advance(ep);
	/* data packet(s) transmitted (IN) */
	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
		net2272_pio_advance(ep);
}
/* Map a wIndex endpoint selector (number plus USB_DIR_IN bit) to the
 * matching net2272_ep, or NULL if no configured endpoint matches.
 * Endpoint number zero always resolves to ep0, either direction.
 */
static struct net2272_ep *
net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
{
	struct net2272_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 addr;

		if (!ep->desc)
			continue;
		addr = ep->desc->bEndpointAddress;
		/* both direction bit and endpoint number must match */
		if (((wIndex ^ addr) & USB_DIR_IN) == 0 &&
		    (wIndex & 0x0f) == (addr & 0x0f))
			return ep;
	}
	return NULL;
}
/*
* USB Test Packet:
* JKJKJKJK * 9
* JJKKJJKK * 8
* JJJJKKKK * 8
* JJJJJJJKKKKKKK * 8
* JJJJJJJK * 8
* {JKKKKKKK * 10}, JK
*/
/* Loaded byte-by-byte into the fifo by net2272_set_test_mode() when the
 * host selects TEST_PACKET mode.
 */
static const u8 net2272_test_packet[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
};
/* Enter a USB electrical test mode.  Masks all interrupts (only a power
 * cycle exits test mode), forces high speed, completes the status phase,
 * then programs USBTEST; for TEST_PACKET it also loads the fixed pattern.
 */
static void
net2272_set_test_mode(struct net2272 *dev, int mode)
{
	int i;

	/* Disable all net2272 interrupts:
	 * Nothing but a power cycle should stop the test.
	 */
	net2272_write(dev, IRQENB0, 0x00);
	net2272_write(dev, IRQENB1, 0x00);

	/* Force transceiver to high-speed */
	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);

	net2272_write(dev, PAGESEL, 0);
	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
	net2272_write(dev, EP_RSPCLR,
			(1 << CONTROL_STATUS_PHASE_HANDSHAKE)
			| (1 << HIDE_STATUS_PHASE));
	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);

	/* wait for status phase to complete */
	while (!(net2272_read(dev, EP_STAT0) &
				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
		;

	/* Enable test mode */
	net2272_write(dev, USBTEST, mode);

	/* load test packet */
	if (mode == USB_TEST_PACKET) {
		/* switch to 8 bit mode */
		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
				~(1 << DATA_WIDTH));

		for (i = 0; i < sizeof(net2272_test_packet); ++i)
			net2272_write(dev, EP_DATA, net2272_test_packet[i]);

		/* Validate test packet */
		net2272_write(dev, EP_TRANSFER0, 0);
	}
}
/* Service IRQSTAT0: SETUP packets (with in-driver handling of the standard
 * GET_STATUS / CLEAR_FEATURE / SET_FEATURE / SET_ADDRESS requests and
 * delegation of everything else to the gadget driver), DMA-done events,
 * and per-endpoint data interrupts.  Called with dev->lock held.
 */
static void
net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
{
	struct net2272_ep *ep;
	u8 num, scratch;

	/* starting a control request? */
	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u8 raw[8];
			struct usb_ctrlrequest r;
		} u;
		int tmp = 0;
		struct net2272_request *req;

		/* first SETUP after connect fixes the negotiated speed */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			dev_dbg(dev->dev, "%s\n",
				usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover interrupt state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		/* a new SETUP supersedes any in-flight ep0 requests */
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			net2272_done(ep, req,
				(req->req.actual == req->req.length) ? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		net2272_ep_write(ep, EP_STAT0,
			    (1 << DATA_IN_TOKEN_INTERRUPT)
			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
		net2272_ep_write(ep, EP_STAT1,
			    (1 << TIMEOUT)
			  | (1 << USB_OUT_ACK_SENT)
			  | (1 << USB_OUT_NAK_SENT)
			  | (1 << USB_IN_ACK_RCVD)
			  | (1 << USB_IN_NAK_SENT)
			  | (1 << USB_STALL_SENT)
			  | (1 << LOCAL_OUT_ZLP));

		/*
		 * Ensure Control Read pre-validation setting is beyond maximum size
		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
		 *    an EP0 transfer following the Control Write is a Control Read,
		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
		 *    pre-validation count.
		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation
		 */
		net2272_write(dev, PAGESEL, 0);
		net2272_write(dev, EP_TRANSFER2, 0xff);
		net2272_write(dev, EP_TRANSFER1, 0xff);
		net2272_write(dev, EP_TRANSFER0, 0xff);

		/* read the 8-byte SETUP packet out of the chip */
		u.raw[0] = net2272_read(dev, SETUP0);
		u.raw[1] = net2272_read(dev, SETUP1);
		u.raw[2] = net2272_read(dev, SETUP2);
		u.raw[3] = net2272_read(dev, SETUP3);
		u.raw[4] = net2272_read(dev, SETUP4);
		u.raw[5] = net2272_read(dev, SETUP5);
		u.raw[6] = net2272_read(dev, SETUP6);
		u.raw[7] = net2272_read(dev, SETUP7);
		/*
		 * If you have a big endian cpu make sure le16_to_cpus
		 * performs the proper byte swapping here...
		 */
		le16_to_cpus(&u.r.wValue);
		le16_to_cpus(&u.r.wIndex);
		le16_to_cpus(&u.r.wLength);

		/* ack the irq */
		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status phase happen.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
			stop_out_naking(ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
		net2272_ep_write(ep, EP_IRQENB, scratch);

		/* class/vendor requests always go to the gadget driver */
		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			goto delegate;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2272_ep *e;
			u16 status = 0;

			switch (u.r.bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_ENDPOINT:
				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
				if (!e || u.r.wLength > 2)
					goto do_stall;
				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
					status = cpu_to_le16(1);
				else
					status = cpu_to_le16(0);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "%s stat %02x\n",
					ep->ep.name, status);
				goto next_endpoints;
			case USB_RECIP_DEVICE:
				if (u.r.wLength > 2)
					goto do_stall;
				if (dev->gadget.is_selfpowered)
					status = (1 << USB_DEVICE_SELF_POWERED);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "device stat %02x\n", status);
				goto next_endpoints;
			case USB_RECIP_INTERFACE:
				if (u.r.wLength > 2)
					goto do_stall;

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "interface status %02x\n", status);
				goto next_endpoints;
			}

			break;
		}
		case USB_REQ_CLEAR_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			/* a wedged endpoint stays halted until reconfigured */
			if (e->wedged) {
				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
					ep->ep.name);
			} else {
				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
				clear_halt(e);
			}
			allow_status(ep);
			goto next_endpoints;
		}
		case USB_REQ_SET_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType == USB_RECIP_DEVICE) {
				/* test-mode selector arrives in wIndex high byte */
				if (u.r.wIndex != NORMAL_OPERATION)
					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
				allow_status(ep);
				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
				goto next_endpoints;
			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			set_halt(e);
			allow_status(ep);
			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
		}
		case USB_REQ_SET_ADDRESS: {
			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
			allow_status(ep);
			break;
		}
		default:
 delegate:
			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				u.r.wValue, u.r.wIndex,
				net2272_ep_read(ep, EP_CFG));
			/* lock dropped around the driver callback, per the
			 * gadget API contract
			 */
			if (dev->async_callbacks) {
				spin_unlock(&dev->lock);
				tmp = dev->driver->setup(&dev->gadget, &u.r);
				spin_lock(&dev->lock);
			}
		}

		/* stall ep0 on error */
		if (tmp < 0) {
 do_stall:
			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
				u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}
	/* endpoint dma irq? */
	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
		net2272_cancel_dma(dev);
		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
		stat &= ~(1 << DMA_DONE_INTERRUPT);
		/* DMA_ENDPOINT_SELECT tells us whether ep-a or ep-b finished */
		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
			? 2 : 1;

		ep = &dev->ep[num];
		net2272_handle_dma(ep);
	}

 next_endpoints:
	/* endpoint data irq? */
	scratch = stat & 0x0f;
	stat &= ~0x0f;
	for (num = 0; scratch; num++) {
		u8 t;

		/* does this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep[num];
		net2272_handle_ep(ep);
	}

	/* some interrupts we can just ignore */
	stat &= ~(1 << SOF_INTERRUPT);

	if (stat)
		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
}
/* Service IRQSTAT1: VBUS/root-port-reset (connect, disconnect, bus reset)
 * and suspend/resume change notifications.  Called with dev->lock held;
 * the lock is dropped around gadget-driver callbacks.
 */
static void
net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
{
	u8 tmp, mask;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);

	if (stat & tmp) {
		bool	reset = false;
		bool	disconnect = false;

		/*
		 * Ignore disconnects and resets if the speed hasn't been set.
		 * VBUS can bounce and there's always an initial reset.
		 */
		net2272_write(dev, IRQSTAT1, tmp);
		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
			if ((stat & (1 << VBUS_INTERRUPT)) &&
					(net2272_read(dev, USBCTL1) &
						(1 << VBUS_PIN)) == 0) {
				disconnect = true;
				dev_dbg(dev->dev, "disconnect %s\n",
					dev->driver->driver.name);
			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
					(net2272_read(dev, USBCTL1) & mask)
						== 0) {
				reset = true;
				dev_dbg(dev->dev, "reset %s\n",
					dev->driver->driver.name);
			}

			if (disconnect || reset) {
				stop_activity(dev, dev->driver);
				net2272_ep0_start(dev);
				if (dev->async_callbacks) {
					spin_unlock(&dev->lock);
					if (reset)
						usb_gadget_udc_reset(&dev->gadget, dev->driver);
					else
						(dev->driver->disconnect)(&dev->gadget);
					spin_lock(&dev->lock);
				}
				return;
			}
		}
		stat &= ~tmp;

		if (!stat)
			return;
	}

	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		net2272_write(dev, IRQSTAT1, tmp);
		/* SUSPEND_REQUEST_INTERRUPT tells suspend from resume */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->async_callbacks && dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			if (!enable_suspend) {
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
			}
		} else {
			if (dev->async_callbacks && dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		net2272_write(dev, IRQSTAT1, stat);

	/* some status we can just ignore */
	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			| (1 << SUSPEND_REQUEST_INTERRUPT)
			| (1 << RESUME_INTERRUPT));
	if (!stat)
		return;
	else
		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
}
/* Top-level interrupt handler.  On the PLX RDK evaluation boards the chip
 * interrupt is routed through a PCI bridge/FPGA, so those builds first
 * qualify and acknowledge the bridge-level interrupt before dispatching to
 * the stat0/stat1 handlers (and, on RDK1, the board DMA channel).
 */
static irqreturn_t net2272_irq(int irq, void *_dev)
{
	struct net2272 *dev = _dev;
#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
	u32 intcsr;
#endif
#if defined(PLX_PCI_RDK)
	u8 dmareq;
#endif
	spin_lock(&dev->lock);
#if defined(PLX_PCI_RDK)
	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);

	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
		/* mask PCI interrupts while servicing, re-enable after */
		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
				dev->rdk1.plx9054_base_addr + INTCSR);
		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
	}
	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
				dev->rdk1.plx9054_base_addr + DMACSR0);

		/* DMAREQ bit 0 selects which endpoint the DMA served */
		dmareq = net2272_read(dev, DMAREQ);
		if (dmareq & 0x01)
			net2272_handle_dma(&dev->ep[2]);
		else
			net2272_handle_dma(&dev->ep[1]);
	}
#endif
#if defined(PLX_PCI_RDK2)
	/* see if PCI int for us by checking irqstat */
	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}
	/* check dma interrupts */
#endif
	/* Platform/device interrupt handler */
#if !defined(PLX_PCI_RDK)
	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
#endif
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
/* Sanity-check that a NET2272 is actually wired up and reachable.
 * Returns 0 on success, -EINVAL when the register tests fail (bad wiring,
 * or the chip is something else, e.g. a NET2270).
 */
static int net2272_present(struct net2272 *dev)
{
	/*
	 * Quick test to see if CPU can communicate properly with the NET2272.
	 * Verifies connection using writes and reads to write/read and
	 * read-only registers.
	 *
	 * This routine is strongly recommended especially during early bring-up
	 * of new hardware, however for designs that do not apply Power On System
	 * Tests (POST) it may discarded (or perhaps minimized).
	 */
	unsigned int ii;
	u8 val, refval;

	/* Verify NET2272 write/read SCRATCH register can write and read */
	refval = net2272_read(dev, SCRATCH);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, SCRATCH, ii);

		val = net2272_read(dev, SCRATCH);
		if (val != ii) {
			dev_dbg(dev->dev,
				"%s: write/read SCRATCH register test failed: "
				"wrote:0x%2.2x, read:0x%2.2x\n",
				__func__, ii, val);
			return -EINVAL;
		}
	}
	/* To be nice, we write the original SCRATCH value back: */
	net2272_write(dev, SCRATCH, refval);

	/* Verify NET2272 CHIPREV register is read-only: */
	refval = net2272_read(dev, CHIPREV_2272);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, CHIPREV_2272, ii);

		val = net2272_read(dev, CHIPREV_2272);
		if (val != refval) {
			dev_dbg(dev->dev,
				"%s: write/read CHIPREV register test failed: "
				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
				__func__, ii, val, refval);
			return -EINVAL;
		}
	}

	/*
	 * Verify NET2272's "NET2270 legacy revision" register
	 *  - NET2272 has two revision registers. The NET2270 legacy revision
	 *    register should read the same value, regardless of the NET2272
	 *    silicon revision.  The legacy register applies to NET2270
	 *    firmware being applied to the NET2272.
	 */
	val = net2272_read(dev, CHIPREV_LEGACY);
	if (val != NET2270_LEGACY_REV) {
		/*
		 * Unexpected legacy revision value
		 *  - Perhaps the chip is a NET2270?
		 */
		dev_dbg(dev->dev,
			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
			__func__, NET2270_LEGACY_REV, val);
		return -EINVAL;
	}

	/*
	 * Verify NET2272 silicon revision
	 *  - This revision register is appropriate for the silicon version
	 *    of the NET2272
	 */
	val = net2272_read(dev, CHIPREV_2272);
	switch (val) {
	case CHIPREV_NET2272_R1:
		/*
		 * NET2272 Rev 1 has DMA related errata:
		 *  - Newer silicon (Rev 1A or better) required
		 */
		dev_dbg(dev->dev,
			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
			__func__);
		break;
	case CHIPREV_NET2272_R1A:
		break;
	default:
		/* NET2272 silicon version *may* not work with this firmware */
		dev_dbg(dev->dev,
			"%s: unexpected silicon revision register value: "
			" CHIPREV_2272: 0x%2.2x\n",
			__func__, val);
		/*
		 * Return Success, even though the chip rev is not an expected value
		 *  - Older, pre-built firmware can attempt to operate on newer silicon
		 *  - Often, new silicon is perfectly compatible
		 */
	}

	/* Success: NET2272 checks out OK */
	return 0;
}
/*
 * Final release callback for the gadget device.
 * Invoked by the driver core once the last reference to the embedded
 * gadget.dev is dropped; frees the whole net2272 state it is embedded in.
 */
static void
net2272_gadget_release(struct device *_dev)
{
	kfree(container_of(_dev, struct net2272, gadget.dev));
}
/*---------------------------------------------------------------------------*/
/*
 * Common unbind path shared by the PCI and platform bus glue.
 * Undoes what net2272_probe_fin() set up: the registered gadget (only if
 * registration succeeded, tracked by dev->added), the IRQ handler, the
 * MMIO mapping and the sysfs "registers" attribute.
 * Bus-specific resources (PCI BARs, platform mem region) are released
 * by the respective caller afterwards.
 */
static void
net2272_remove(struct net2272 *dev)
{
	if (dev->added)
		usb_del_gadget(&dev->gadget);
	free_irq(dev->irq, dev);
	iounmap(dev->base_addr);
	device_remove_file(dev->dev, &dev_attr_registers);
	dev_info(dev->dev, "unbind\n");
}
/*
 * Allocate and begin initializing the per-controller state.
 * @dev: parent device (PCI or platform)
 * @irq: interrupt line; 0 is rejected as "no IRQ".
 * Returns the new net2272 or an ERR_PTR.  The caller finishes setup with
 * net2272_probe_fin() and must balance with usb_put_gadget() on failure,
 * since usb_initialize_gadget() takes the initial reference here.
 */
static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
{
	struct net2272 *ret;
	if (!irq) {
		dev_dbg(dev, "No IRQ!\n");
		return ERR_PTR(-ENODEV);
	}
	/* alloc, and start init */
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	spin_lock_init(&ret->lock);
	ret->irq = irq;
	ret->dev = dev;
	ret->gadget.ops = &net2272_ops;
	ret->gadget.max_speed = USB_SPEED_HIGH;
	/* the "gadget" abstracts/virtualizes the controller */
	ret->gadget.name = driver_name;
	/* ties gadget.dev lifetime to net2272_gadget_release() */
	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
	return ret;
}
/*
 * Finish probing once the bus glue has mapped the registers and stored
 * the IRQ in @dev: verify the chip responds, reset and reinitialize it,
 * install the interrupt handler and register the gadget.
 * @irqflags is passed through to request_irq().
 * On failure the error path unwinds in strict reverse order of setup.
 */
static int
net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
{
	int ret;
	/* See if there... */
	if (net2272_present(dev)) {
		dev_warn(dev->dev, "2272 not found!\n");
		ret = -ENODEV;
		goto err;
	}
	net2272_usb_reset(dev);
	net2272_usb_reinit(dev);
	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
	if (ret) {
		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
		goto err;
	}
	/* cache the silicon revision for later feature checks */
	dev->chiprev = net2272_read(dev, CHIPREV_2272);
	/* done */
	dev_info(dev->dev, "%s\n", driver_desc);
	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
		dev->irq, dev->base_addr, dev->chiprev,
		dma_mode_string());
	dev_info(dev->dev, "version: %s\n", driver_vers);
	ret = device_create_file(dev->dev, &dev_attr_registers);
	if (ret)
		goto err_irq;
	ret = usb_add_gadget(&dev->gadget);
	if (ret)
		goto err_add_udc;
	/* remembered so net2272_remove() only deletes a registered gadget */
	dev->added = 1;
	return 0;
err_add_udc:
	device_remove_file(dev->dev, &dev_attr_registers);
err_irq:
	free_irq(dev->irq, dev);
err:
	return ret;
}
#ifdef CONFIG_USB_PCI
/*
 * wrap this driver around the specified device, but
 * don't respond over USB until a gadget driver binds to us
 */
/*
 * Map and configure a PLX RDK1 evaluation board.
 * Maps BARs 0, 2 and 3 (BAR1 is unused), programs the PLX 9054 bridge
 * for a 16-bit local bus, enables bridge interrupts and pulses the
 * NET2272 reset line through the EPLD.  On error, all regions mapped so
 * far are unwound in reverse.
 */
static int
net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len, tmp;
	void __iomem *mem_mapped_addr[4];
	int ret, i;
	/*
	 * BAR 0 holds PLX 9054 config registers
	 * BAR 1 is i/o memory; unused here
	 * BAR 2 holds EPLD config registers
	 * BAR 3 holds NET2272 registers
	 */
	/* Find and map all address spaces */
	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */
		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);
		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}
		mem_mapped_addr[i] = ioremap(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}
	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
	dev->base_addr = mem_mapped_addr[3];
	/* Set PLX 9054 bus width (16 bits) */
	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
		dev->rdk1.plx9054_base_addr + LBRD1);
	/* Enable PLX 9054 Interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
		(1 << PCI_INTERRUPT_ENABLE) |
		(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);
	/* ack any stale DMA channel 0 interrupt, leave the channel disabled */
	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
		dev->rdk1.plx9054_base_addr + DMACSR0);
	/* reset */
	writeb((1 << EPLD_DMA_ENABLE) |
		(1 << DMA_CTL_DACK) |
		(1 << DMA_TIMEOUT_ENABLE) |
		(1 << USER) |
		(0 << MPX_MODE) |
		(1 << BUSWIDTH) |
		(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
	/* make sure the reset assertion reached the device before releasing */
	mb();
	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
		~(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
	udelay(200);
	return 0;
err:
	/* unwind only the BARs mapped before the failure */
	while (--i >= 0) {
		if (i == 1)
			continue;	/* BAR1 unused */
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}
	return ret;
}
/*
 * Map and configure a PLX RDK2 evaluation board.
 * Maps BAR0 (FPGA registers) and BAR1 (NET2272 registers), resets the
 * chip through the FPGA, selects 16-bit bus width and enables the
 * FPGA-routed PCI interrupt.
 */
static int
net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len;
	void __iomem *mem_mapped_addr[2];
	int ret, i;
	/*
	 * BAR 0 holds FGPA config registers
	 * BAR 1 holds NET2272 registers
	 */
	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
	for (i = 0; i < 2; ++i) {
		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);
		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}
		mem_mapped_addr[i] = ioremap(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}
	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
	dev->base_addr = mem_mapped_addr[1];
	mb();
	/* Set 2272 bus width (16 bits) and reset */
	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	udelay(200);
	/* releasing CHIP_RESET while selecting the 16-bit bus */
	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	/* Print fpga version number */
	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
	/* Enable FPGA Interrupts */
	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
	return 0;
err:
	/* unwind only the BARs mapped before the failure */
	while (--i >= 0) {
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}
	return ret;
}
/*
 * PCI probe entry: allocate controller state, enable the PCI device and
 * hand off to the board-specific (RDK1/RDK2) setup, then finish with
 * net2272_probe_fin().
 *
 * Fix: the return value of pci_enable_device() was previously discarded
 * and collapsed to -ENODEV; propagate the real error code instead.
 */
static int
net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2272 *dev;
	int ret;

	dev = net2272_probe_init(&pdev->dev, pdev->irq);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	dev->dev_id = pdev->device;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err_put;

	pci_set_master(pdev);

	/* board-specific BAR mapping and bridge/FPGA setup */
	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
	default: BUG();
	}
	if (ret)
		goto err_pci;

	ret = net2272_probe_fin(dev, 0);
	if (ret)
		goto err_pci;

	pci_set_drvdata(pdev, dev);
	return 0;

err_pci:
	pci_disable_device(pdev);
err_put:
	usb_put_gadget(&dev->gadget);
	return ret;
}
/*
 * Tear down RDK1 board resources: mask the PLX 9054 interrupt, then
 * unmap and release the three BARs claimed in net2272_rdk1_probe()
 * (BAR1 was never mapped).  dev->base_addr itself (BAR3) is unmapped
 * by net2272_remove() before this runs.
 */
static void
net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;
	/* disable PLX 9054 interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
		~(1 << PCI_INTERRUPT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);
	/* clean up resources allocated during probe() */
	iounmap(dev->rdk1.plx9054_base_addr);
	iounmap(dev->rdk1.epld_base_addr);
	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}
}
/*
 * Tear down RDK2 board resources: unmap the FPGA BAR and release both
 * memory regions.  dev->base_addr (BAR1) is unmapped by net2272_remove().
 * NOTE(review): the commented-out snippet below references the RDK1
 * plx9054 registers and looks like a copy/paste leftover; FPGA
 * interrupts are never explicitly disabled here — confirm intended.
 */
static void
net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;
	/* disable fpga interrupts
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
			~(1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
	*/
	/* clean up resources allocated during probe() */
	iounmap(dev->rdk2.fpga_base_addr);
	for (i = 0; i < 2; ++i)
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
}
/*
 * PCI remove entry: common unbind first (gadget, IRQ, MMIO, sysfs),
 * then board-specific BAR teardown, then disable the PCI device and
 * drop the gadget reference taken in net2272_probe_init().
 */
static void
net2272_pci_remove(struct pci_dev *pdev)
{
	struct net2272 *dev = pci_get_drvdata(pdev);
	net2272_remove(dev);
	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
	default: BUG();
	}
	pci_disable_device(pdev);
	usb_put_gadget(&dev->gadget);
}
/*
 * Table of matching PCI IDs (PLX RDK1/RDK2 evaluation boards).
 * Made const: the table is read-only data and struct pci_driver's
 * id_table field takes a const pointer.
 */
static const struct pci_device_id pci_ids[] = {
	{ /* RDK 1 card */
		.class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.class_mask = 0,
		.vendor = PCI_VENDOR_ID_PLX,
		.device = PCI_DEVICE_ID_RDK1,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ /* RDK 2 card */
		.class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.class_mask = 0,
		.vendor = PCI_VENDOR_ID_PLX,
		.device = PCI_DEVICE_ID_RDK2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
/* PCI driver glue binding the RDK boards to the probe/remove callbacks */
static struct pci_driver net2272_pci_driver = {
	.name = driver_name,
	.id_table = pci_ids,
	.probe = net2272_pci_probe,
	.remove = net2272_pci_remove,
};
/* Register the PCI driver; stubbed out when CONFIG_USB_PCI is not set. */
static int net2272_pci_register(void)
{
	return pci_register_driver(&net2272_pci_driver);
}
/* Unregister the PCI driver; stubbed out when CONFIG_USB_PCI is not set. */
static void net2272_pci_unregister(void)
{
	pci_unregister_driver(&net2272_pci_driver);
}
#else
/* No PCI support configured: PCI registration becomes a successful no-op. */
static inline int net2272_pci_register(void) { return 0; }
static inline void net2272_pci_unregister(void) { }
#endif
/*---------------------------------------------------------------------------*/
/*
 * Platform bus probe: requires an IRQ resource and a MEM resource; an
 * optional BUS resource supplies the register address shift.  Maps the
 * register window and finishes with net2272_probe_fin().
 */
static int
net2272_plat_probe(struct platform_device *pdev)
{
	struct net2272 *dev;
	int ret;
	unsigned int irqflags;
	resource_size_t base, len;
	struct resource *iomem, *iomem_bus, *irq_res;
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
	if (!irq_res || !iomem) {
		dev_err(&pdev->dev, "must provide irq/base addr");
		return -EINVAL;
	}
	dev = net2272_probe_init(&pdev->dev, irq_res->start);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	/* translate resource trigger flags into request_irq() flags */
	irqflags = 0;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irqflags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irqflags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irqflags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irqflags |= IRQF_TRIGGER_LOW;
	base = iomem->start;
	len = resource_size(iomem);
	if (iomem_bus)
		dev->base_shift = iomem_bus->start;
	if (!request_mem_region(base, len, driver_name)) {
		dev_dbg(dev->dev, "get request memory region!\n");
		ret = -EBUSY;
		goto err;
	}
	dev->base_addr = ioremap(base, len);
	if (!dev->base_addr) {
		dev_dbg(dev->dev, "can't map memory\n");
		ret = -EFAULT;
		goto err_req;
	}
	/*
	 * NOTE(review): irqflags is computed above but a hard-coded
	 * IRQF_TRIGGER_LOW is passed here, so the IRQ resource's trigger
	 * flags are ignored — confirm whether irqflags was intended.
	 */
	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
	if (ret)
		goto err_io;
	platform_set_drvdata(pdev, dev);
	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
	return 0;
err_io:
	iounmap(dev->base_addr);
err_req:
	release_mem_region(base, len);
err:
	usb_put_gadget(&dev->gadget);
	return ret;
}
/*
 * Platform bus remove: common unbind (which also iounmaps base_addr),
 * release the memory region claimed at probe time, drop the gadget ref.
 */
static void
net2272_plat_remove(struct platform_device *pdev)
{
	struct net2272 *dev = platform_get_drvdata(pdev);
	net2272_remove(dev);
	release_mem_region(pdev->resource[0].start,
		resource_size(&pdev->resource[0]));
	usb_put_gadget(&dev->gadget);
}
/* Platform driver glue for memory-mapped (non-PCI) NET2272 instances */
static struct platform_driver net2272_plat_driver = {
	.probe = net2272_plat_probe,
	.remove_new = net2272_plat_remove,
	.driver = {
		.name = driver_name,
	},
	/* FIXME .suspend, .resume */
};
MODULE_ALIAS("platform:net2272");
/*
 * Module init: register the PCI driver first (a no-op stub without
 * CONFIG_USB_PCI), then the platform driver; unwind the PCI
 * registration if the platform registration fails.
 */
static int __init net2272_init(void)
{
	int rc = net2272_pci_register();

	if (rc)
		return rc;

	rc = platform_driver_register(&net2272_plat_driver);
	if (rc)
		net2272_pci_unregister();

	return rc;
}
module_init(net2272_init);
/*
 * Module exit: unregister both drivers.
 * NOTE(review): this is not the reverse of net2272_init()'s registration
 * order (PCI first here as well); harmless since the two drivers are
 * independent, but reverse order would be the usual convention.
 */
static void __exit net2272_cleanup(void)
{
	net2272_pci_unregister();
	platform_driver_unregister(&net2272_plat_driver);
}
module_exit(net2272_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("PLX Technology, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/net2272.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <[email protected]>
* Neil Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>
#include "mv_udc.h"
#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
#define ep_dir(ep) (((ep)->ep_num == 0) ? \
((ep)->udc->ep0_dir) : ((ep)->direction))
/* timeout value -- usec */
#define RESET_TIMEOUT 10000
#define FLUSH_TIMEOUT 10000
#define EPSTATUS_TIMEOUT 10000
#define PRIME_TIMEOUT 10000
#define READSAFE_TIMEOUT 1000
#define LOOPS_USEC_SHIFT 1
#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
static DECLARE_COMPLETION(release_done);
static const char driver_name[] = "mv_udc";
static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 0,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.wMaxPacketSize = EP0_MAX_PKT_SIZE,
};
/*
 * (Re)initialize both halves of endpoint 0.
 * ep0 uses queue head slots 0 (RX/OUT) and 1 (TX/IN); each dQH gets the
 * control max packet size plus interrupt-on-setup, and the shared
 * epctrlx[0] register is enabled for the matching direction with the
 * control endpoint type.
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;
	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;
		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];
		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;
		/* no dTD queued yet: terminate bit set */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);
		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}
		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
/*
 * Protocol stall on ep0: halt both directions at once; the hardware
 * clears the stall automatically on the next SETUP transaction.
 * Software state returns to waiting for SETUP, OUT direction.
 */
static void ep0_stall(struct mv_udc *udc)
{
	u32 ctrl = readl(&udc->op_regs->epctrlx[0]);

	/* set TX and RX to stall in a single write */
	writel(ctrl | EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL,
		&udc->op_regs->epctrlx[0]);

	/* update ep0 software state */
	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
}
/*
 * Examine the dTD chain of @curr_req on queue head @index and collect
 * its completion status.
 * Returns 1 if any dTD is still active (transfer in flight), a negative
 * error code on a dTD error, or 0 on success with req.actual updated to
 * the number of bytes actually transferred.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;
	curr_dqh = &udc->ep_dqh[index];
	/* queue heads alternate RX/TX: odd index means the TX (IN) half */
	direction = index % 2;
	curr_dtd = curr_req->head;
	/* start from the full request length and subtract leftovers */
	actual = curr_req->req.length;
	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}
		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;
			if (remaining_length) {
				if (direction) {
					/* IN must drain completely */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short OUT packet: request done */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;
	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);
	/* busy-wait until the controller has moved past the last dTD */
	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}
	curr_req->req.actual = actual;
	return 0;
}
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 *	request is still in progress.
 *
 * Unlinks the request, frees its dTD chain back to the dma_pool,
 * unmaps the DMA buffer and hands the request back to the gadget
 * driver.  The udc lock is dropped around the completion callback
 * (annotated with __releases/__acquires) since the callback may
 * re-enter the driver; ep->stopped is forced to 1 for the duration so
 * the callback cannot restart the endpoint mid-completion.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;
	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);
	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);
	ep->stopped = 1;
	spin_unlock(&ep->udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->udc->lock);
	/* restore the endpoint's previous stopped state */
	ep->stopped = stopped;
}
/*
 * Hand the dTD chain of @req to the hardware.
 * If the endpoint queue is non-empty the new chain is linked onto the
 * tail and the ATDTW tripwire protocol is used to safely detect whether
 * the controller is still processing (so no re-prime is needed);
 * otherwise the queue head is pointed at the new chain and the endpoint
 * is primed.  Caller holds the udc lock.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;
	udc = ep->udc;
	direction = ep_dir(ep);
	/* queue heads come in RX/TX pairs per endpoint number */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		/* append: link the previous tail dTD to our head */
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
		/* link must be visible to the controller before the checks */
		wmb();
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;
		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);
			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;
			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}
		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);
		/* endpoint still active: no re-prime needed */
		if (epstatus)
			goto done;
	}
	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
	/* Ensure that updates to the QH will occur before priming. */
	wmb();
	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);
done:
	return retval;
}
/*
 * Allocate and fill one dTD covering the next slice of @req.
 * @length: out, number of bytes this dTD will transfer
 * @dma: out, DMA address of the dTD itself
 * @is_last: out, set when this dTD finishes the request (including a
 *	needed zero-length packet when req.zero is set)
 * Returns the dTD, or NULL on dma_pool exhaustion.  Advances
 * req->req.actual by the slice length as a side effect.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;
	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);
	udc = req->ep->udc;
	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;
	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	/* remaining pointers step through the following 4 KiB pages */
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
	req->req.actual += *length;
	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;
	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;
	temp |= mult << 10;
	dtd->size_ioc_sts = temp;
	/* dTD contents must be visible before it is linked/primed */
	mb();
	return dtd;
}
/*
 * generate dTD linked list for a request
 * Builds dTDs with build_dtd() until the whole request is covered,
 * chaining them by both DMA address (for the controller) and virtual
 * pointer (for the driver), and terminates the last one.
 * Returns 0 on success or -ENOMEM; on failure, dTDs allocated so far
 * are freed later by done().
 */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	dma_addr_t dma;
	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;
		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			/* link previous dTD to this one (DMA + virtual) */
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);
	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;
	req->tail = dtd;
	return 0;
}
/*
 * usb_ep_ops.enable: configure an endpoint from its descriptor.
 * Programs the dQH (max packet, mult, interrupt-on-setup) and the
 * epctrlx register for the endpoint's direction, and per guideline
 * GL# USB-7 forces the unused direction's type to bulk.
 *
 * Fix: validate _ep/desc *before* dereferencing them — the previous
 * code ran container_of(_ep) and ep->udc ahead of the NULL checks,
 * dereferencing a NULL pointer when _ep was NULL.
 *
 * Returns 0, -EINVAL on bad arguments or a primed/unsupported endpoint,
 * or -ESHUTDOWN when no gadget driver is bound.
 */
static int  mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	const unsigned char zlt = 1;
	unsigned char ios, mult;
	unsigned long flags;

	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);
	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}
	/* Set the max packet length, interrupt on Setup and Mult fields */
	ios = 0;
	mult = 0;
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = usb_endpoint_maxp_mult(desc);
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}
	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;
	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;
	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
en_done:
	return -EINVAL;
}
/*
 * usb_ep_ops.disable: shut an endpoint down.
 * Clears the dQH max-packet field, disables the direction in epctrlx,
 * then retires every pending request with -ESHUTDOWN via nuke().
 */
static int  mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 epctrlx, direction;
	unsigned long flags;
	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;
	udc = ep->udc;
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	spin_lock_irqsave(&udc->lock, flags);
	direction = ep_dir(ep);
	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;
	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);
	ep->ep.desc = NULL;
	ep->stopped = 1;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * usb_ep_ops.alloc_request: allocate a driver-private request wrapper.
 * DMA address starts out invalid so the core knows mapping is needed.
 */
static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	req->req.dma = DMA_ADDR_INVALID;

	return &req->req;
}
/*
 * usb_ep_ops.free_request: release a request from mv_alloc_request().
 * Fix: guard against a NULL _req before applying container_of() to it,
 * instead of computing an offset from NULL and checking afterwards.
 */
static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req;

	if (!_req)
		return;

	req = container_of(_req, struct mv_req, req);
	kfree(req);
}
/*
 * usb_ep_ops.fifo_flush: flush an endpoint's FIFO.
 * Writes the endpoint's bit(s) to ENDPTFLUSH and waits for completion;
 * retries (outer loop) while ENDPTSTATUS still shows the endpoint
 * active, with bounded timeouts on both loops.  ep0 flushes both
 * directions at once.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;
	if (!_ep)
		return;
	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;
	udc = ep->udc;
	direction = ep_dir(ep);
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;	/* both halves of ep0 */
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);
	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);
		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
/* queues (submits) an I/O request to an endpoint */
/*
 * Maps the request for DMA, builds its dTD chain and hands it to the
 * controller, then links it onto the endpoint queue for the IRQ handler.
 *
 * Fix: the endpoint is validated *before* ep->udc is dereferenced — the
 * previous code read udc (used for dev_err) at declaration time, ahead
 * of the !_ep check; it also reassigned udc redundantly.
 *
 * Returns 0, -EINVAL on bad arguments, -ESHUTDOWN when unbound, or a
 * mapping/allocation error.
 */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep;
	struct mv_req *req;
	struct mv_udc *udc;
	unsigned long flags;
	int retval;

	/* validate the endpoint before any dereference through it */
	if (unlikely(!_ep))
		return -EINVAL;
	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (unlikely(!ep->ep.desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}

	/* catch various bogus parameters */
	req = _req ? container_of(_req, struct mv_req, req) : NULL;
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
	if (retval)
		return retval;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
			goto err_unmap_dma;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
		retval = -ENOMEM;
		goto err_unmap_dma;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_unmap_dma:
	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
	return retval;
}
/*
 * Point the endpoint's queue head at @req's dTD chain and prime the
 * endpoint.  Used by dequeue to restart the queue at the next request.
 * Caller holds the udc lock.
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;
	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
	/* Ensure that updates to the QH will occur before priming. */
	wmb();
	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
/*
 * Temporarily disables the endpoint while unlinking.  If the request is
 * at the head of the queue (in flight), the FIFO is flushed and the
 * queue head is re-pointed at the next request (or reset if none);
 * otherwise the previous request's tail dTD is patched to skip it.
 * Returns 0, or -EINVAL if the request is not queued on this endpoint.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = NULL, *iter;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;
	if (!_ep || !_req)
		return -EINVAL;
	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;
	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}
	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */
		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;
			next_req = list_entry(req->queue.next,
				struct mv_req, queue);
			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			/* queue empties: reset the queue head */
			struct mv_dqh *qh;
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}
		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		/* splice this request out of the dTD chain */
		writel(readl(&req->tail->dtd_next),
			&prev_req->tail->dtd_next);
	}
	done(ep, req, -ECONNRESET);
	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/*
 * Set or clear the stall bit for one direction of an endpoint.
 * Clearing a stall also resets the data toggle, as the spec requires.
 */
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 ctrl = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		ctrl |= (direction == EP_DIR_IN) ?
			EPCTRL_TX_EP_STALL : EPCTRL_RX_EP_STALL;
	} else if (direction == EP_DIR_IN) {
		ctrl &= ~EPCTRL_TX_EP_STALL;
		ctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
	} else {
		ctrl &= ~EPCTRL_RX_EP_STALL;
		ctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
	}

	writel(ctrl, &udc->op_regs->epctrlx[ep_num]);
}
/* Report whether the given direction of an endpoint is currently stalled. */
static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 mask = (direction == EP_DIR_OUT) ?
		EPCTRL_RX_EP_STALL : EPCTRL_TX_EP_STALL;

	return (readl(&udc->op_regs->epctrlx[ep_num]) & mask) ? 1 : 0;
}
/*
 * Common implementation behind set_halt and set_wedge.
 * @halt: 1 to stall the endpoint, 0 to clear the stall
 * @wedge: with halt, also latch the wedge flag so the host cannot
 *	clear the stall with CLEAR_FEATURE
 *
 * Fix: _ep is validated before ep->udc is dereferenced — the previous
 * code read udc ahead of the !_ep check (NULL dereference).
 *
 * Returns 0, -EINVAL on bad arguments, -EOPNOTSUPP for isochronous
 * endpoints, or -EAGAIN when halting an IN ep with queued transfers.
 */
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags;
	int status = 0;
	struct mv_udc *udc;

	if (!_ep) {
		status = -EINVAL;
		goto out;
	}

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Attempt to halt IN ep will fail if any transfer requests
	 * are still queue
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	/* stalling ep0 resets the control state machine */
	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}
/* usb_ep_ops.set_halt: halt/un-halt without wedging. */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

/* usb_ep_ops.set_wedge: halt and wedge (host cannot clear the stall). */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
/* Endpoint operations exported to the gadget core for every mv_ep. */
static const struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,
	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,
	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,
	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
/* Prepare and enable the single UDC functional clock. */
static int udc_clock_enable(struct mv_udc *udc)
{
	return clk_prepare_enable(udc->clk);
}

/* Counterpart of udc_clock_enable(). */
static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}
/*
 * udc_stop - quiesce the controller: mask its interrupt sources and clear
 * the Run/Stop bit so the hardware stops responding on the bus.
 * mv_udc_irq() bails out early once udc->stopped is set.
 */
static void udc_stop(struct mv_udc *udc)
{
	u32 val;

	/* Mask the interrupts enabled by udc_start() */
	val = readl(&udc->op_regs->usbintr);
	val &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(val, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Clear the Run/Stop bit to halt the controller */
	val = readl(&udc->op_regs->usbcmd);
	val &= ~USBCMD_RUN_STOP;
	writel(val, &udc->op_regs->usbcmd);
}
/*
 * udc_start - unmask the interrupts the ISR handles and set the Run/Stop
 * bit so the controller starts responding on the bus.
 */
static void udc_start(struct mv_udc *udc)
{
	/* Enable interrupts */
	writel(USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND,
		&udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
/*
 * udc_reset - reset the controller core and program it for device mode.
 *
 * Stops the controller, issues a core reset and busy-waits (bounded by
 * LOOPS(RESET_TIMEOUT)) for completion, then selects device mode with
 * setup-lockout off, programs the dQH list base address, and clears the
 * ep0 stall bits.
 *
 * Return: 0 on success, -ETIMEDOUT if the core reset never completes.
 */
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	/*
	 * Fix: the original mask was (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER),
	 * which by De Morgan equals ~(PORTSCX_W1C_BITS & PORTSCX_PORT_POWER);
	 * for non-overlapping masks that is all-ones, so the &= was a no-op.
	 * The intent is to avoid re-triggering write-1-to-clear bits and to
	 * drop port power when the controller has port power control.
	 */
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
/*
 * mv_udc_enable_internal - power up the controller: enable the clock and,
 * if the platform provides one, initialise the PHY. Idempotent via
 * udc->active. Returns 0 on success or a negative errno.
 */
static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	retval = udc_clock_enable(udc);
	if (retval)
		return retval;

	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			/* roll back the clock on PHY failure */
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;
	return 0;
}

/*
 * mv_udc_enable - enable the controller only when clock gating is in use;
 * without clock gating the clock/PHY stay on permanently, so this is a no-op.
 */
static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

/* Power down PHY and clock; inverse of mv_udc_enable_internal(). */
static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

/* Disable the controller only when clock gating is supported. */
static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
/*
 * usb_gadget_ops.get_frame: return the current USB frame number read from
 * the FRINDEX register (masked, truncated to 16 bits as before), or
 * -ENODEV if no gadget was supplied.
 */
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16 frame;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);
	frame = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return frame;
}
/*
 * mv_udc_wakeup - try to remote-wake the host connected to this gadget.
 *
 * Returns -ENOTSUPP when the host has not enabled the remote-wakeup
 * feature, 0 otherwise (including when the port is not suspended and
 * nothing needs doing).
 *
 * Consistency fix: every other access in this driver reads/writes
 * portsc[0] explicitly; the bare "&udc->op_regs->portsc" here relied on
 * array decay to the same address. Use the indexed form throughout.
 */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc[0]);

	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;

	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}
/*
 * usb_gadget_ops.vbus_session - notification that VBUS power appeared or
 * disappeared. When driver+softconnect+vbus are all present, (re)enable the
 * controller and reprogram it, since clock gating may have lost register
 * state. When VBUS drops with a connected driver, tear down activity and
 * gate the clocks again. Called with interrupts enabled; takes udc->lock.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* nothing to tear down if the controller never came up */
		if (!udc->active)
			goto out;

		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
/*
 * usb_gadget_ops.pullup - software-controlled D+ pull-up connect/disconnect.
 * Mirrors mv_udc_vbus_session(): bringing the pull-up online with driver
 * and VBUS present re-initialises the controller; dropping it while VBUS
 * is present stops all activity and gates the clocks.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
/* Forward declarations: defined after the helpers they depend on. */
static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *);

/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
/*
 * eps_init - populate the udc->eps[] array and the gadget ep_list.
 *
 * eps[0] is ep0 and serves both directions, which is why the loop below
 * starts at index 2 (index 1 is intentionally unused). For i >= 2, even
 * indices are OUT endpoints and odd indices IN endpoints of hardware
 * endpoint i/2. ep0's dqh pointer is not assigned here — presumably set up
 * in ep0_reset(); TODO confirm.
 */
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep	*ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
	ep->ep.caps.type_control = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			/* odd index -> IN direction */
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
			ep->ep.caps.dir_in = true;
		} else {
			/* even index -> OUT direction */
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
			ep->ep.caps.dir_out = true;
		}
		ep->udc = udc;
		/*
		 * NOTE(review): strncpy does not guarantee NUL termination
		 * on truncation; the generated names fit in 14 bytes for
		 * realistic endpoint counts, but strscpy would be safer.
		 */
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		/* queue head for this endpoint/direction pair */
		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}
/*
 * nuke - abort every queued request on @ep with @status.
 * Caller must hold udc->lock. Flushes the hardware FIFO first so nothing
 * completes behind our back, then retires requests until the queue drains.
 */
static void nuke(struct mv_ep *ep, int status)
{
	struct mv_req *req;

	/* called with spinlock held */
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct mv_req, queue);
		done(ep, req, status);
	}
}
/*
 * gadget_reset - abort all endpoint activity and report a bus reset to the
 * bound gadget driver. Caller holds udc->lock; the lock is dropped around
 * the usb_gadget_udc_reset() callback since it may re-enter the UDC.
 */
static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report reset; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}
}

/*
 * stop_activity - abort all endpoint activity and report a disconnect to
 * the gadget driver. Same locking contract as gadget_reset().
 */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
/*
 * usb_gadget_ops.udc_start - bind a gadget driver to this controller.
 *
 * Records the driver and resets the ep0 state machine, then registers as
 * an OTG peripheral when a transceiver is present. Finally kicks the VBUS
 * worker once, because booting with the cable already attached produces no
 * VBUS interrupt.
 *
 * NOTE(review): on otg_set_peripheral() failure only udc->driver is rolled
 * back; usb_state stays USB_STATE_ATTACHED — presumably harmless since no
 * driver is bound, but worth confirming.
 */
static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			udc->driver = NULL;
			return retval;
		}
	}

	/* When boot with cable attached, there will be no vbus irq occurred */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
/*
 * usb_gadget_ops.udc_stop - unbind the gadget driver: briefly enable the
 * controller so registers are accessible, halt it, retire all queued
 * requests, then gate the clocks again and drop the driver reference.
 */
static int mv_udc_stop(struct usb_gadget *gadget)
{
	unsigned long flags;
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}
/*
 * mv_set_ptc - write a Port Test Control value into PORTSC (the PTC field
 * starts at bit 16), switching the port into a USB electrical test mode.
 */
static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 val = readl(&udc->op_regs->portsc[0]);

	writel(val | (mode << 16), &udc->op_regs->portsc[0]);
}
/*
 * Completion handler used for the ep0 status stage of a
 * SET_FEATURE(TEST_MODE) request: the port may only be switched into the
 * test mode after the status stage has finished, so the PTC write happens
 * here rather than in the setup handler.
 */
static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = mvep->udc;
	unsigned long flags;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		/* one-shot: clear so a reused request doesn't re-trigger */
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
/*
 * udc_prime_status - queue the ep0 status (or 2-byte data) stage.
 * @udc:       controller state
 * @direction: EP_DIR_IN or EP_DIR_OUT for the stage being primed
 * @status:    16-bit payload (e.g. GET_STATUS result), used when !empty
 * @empty:     true for a zero-length status packet
 *
 * Uses the preallocated udc->status_req. Returns 0 on success, a negative
 * errno if building or queueing the dTD failed.
 *
 * NOTE(review): the buffer is mapped with dma_map_single() and
 * req->mapped = 1 here, but the error path unmaps with
 * usb_gadget_unmap_request(); whether those two bookkeeping schemes agree
 * depends on done()/req_to_dtd() internals not visible here — confirm.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the reqest structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* defer entering test mode until the status stage is done */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
/*
 * mv_udc_testmode - latch a requested USB test mode and prime the ep0
 * status stage; the actual PTC switch happens in prime_status_complete().
 * Unsupported indices are only logged.
 */
static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index > USB_TEST_FORCE_ENABLE) {
		dev_err(&udc->dev->dev,
			"This test mode(%d) is not supported\n", index);
		return;
	}

	udc->test_mode = index;
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
/*
 * ch9setaddress - handle SET_ADDRESS: latch the address now; it is written
 * to the DEVICEADDR register only after the status stage completes (see
 * ep0_req_complete()), as the USB spec requires.
 */
static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
/*
 * ch9getstatus - handle a standard GET_STATUS request on ep0 and prime the
 * two-byte IN data stage with the device/interface/endpoint status.
 *
 * Fix: the endpoint-recipient branch declared a local "u8 ep_num" that
 * shadowed the function's own ep_num parameter (a -Wshadow warning and a
 * maintenance trap); the locals are renamed to make the target endpoint
 * explicit. Behavior is unchanged.
 */
static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 target_ep, target_dir;

		/* the endpoint being queried comes from wIndex, not ep_num */
		target_ep = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		target_dir = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, target_ep, target_dir)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}
/*
 * ch9clearfeature - handle CLEAR_FEATURE for device remote-wakeup and
 * endpoint halt, then prime the zero-length status stage. A wedged
 * endpoint's halt is deliberately left in place.
 *
 * Fix: the endpoint bound check used "ep_num > udc->max_eps". Valid
 * hardware endpoints are 0..max_eps-1 and eps[] holds max_eps * 2 entries
 * indexed as ep_num * 2 + direction, so ep_num == max_eps read one pair
 * past the end of the array. The check must be ">=".
 */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num >= udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* wedged: only the driver may clear the halt */
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
/*
 * ch9setfeature - handle SET_FEATURE for device remote-wakeup, USB test
 * mode (high-speed only) and endpoint halt, then prime the zero-length
 * status stage.
 *
 * Fix: same off-by-one as ch9clearfeature — "ep_num > udc->max_eps"
 * accepted ep_num == max_eps, which indexes past the epctrlx register
 * array in ep_set_stall(); valid endpoints are 0..max_eps-1, so use ">=".
 */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/* test mode is only valid at high speed, wIndex
			 * low byte must be zero */
			if (setup->wIndex & 0xFF
				||  udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num >= udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
/*
 * handle_setup_packet - dispatch a SETUP packet received on @ep_num.
 *
 * Standard GET_STATUS / SET_ADDRESS / CLEAR_FEATURE / SET_FEATURE are
 * handled in the UDC; everything else is delegated to the gadget driver's
 * setup() callback with udc->lock dropped (the sparse annotations below
 * document that). The OUT side of the control endpoint is nuked first so
 * stale requests cannot complete against the new transfer.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
/*
 * ep0_req_complete - advance the ep0 state machine after a request on the
 * control endpoint finishes. A pending SET_ADDRESS is committed to the
 * DEVICEADDR register here, i.e. only after its status stage completed,
 * as the USB spec requires.
 */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		/* control transfer finished; expect the next SETUP */
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		/* no request should complete while waiting for SETUP */
		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
/*
 * get_setup_data - copy the 8-byte SETUP packet for @ep_num out of its OUT
 * queue head into @buffer_ptr.
 *
 * Uses the controller's setup-tripwire protocol: the tripwire bit is set,
 * the packet copied, and the copy repeated if hardware cleared the
 * tripwire meanwhile (meaning a new SETUP overwrote the buffer during the
 * read). This loop must stay exactly as written — it implements a hardware
 * handshake.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
/*
 * irq_process_tr_complete - USBSTS_INT handler: drain pending SETUP
 * packets first (they must be read ASAP, before completions), then walk
 * ENDPTCOMPLETE and retire finished requests on each endpoint queue.
 * Called from mv_udc_irq() with udc->lock held.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	/* ENDPTCOMPLETE is write-1-to-clear */
	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		/* even i = OUT (low 16 bits), odd i = IN (high 16 bits) */
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* both directions of ep0 map onto eps[0] */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
/*
 * irq_process_reset - USBSTS_RESET handler. Clears the device address,
 * setup semaphores and completion bits, waits for primes to drain and
 * flushes all endpoints. If the port is still in reset this is a genuine
 * bus reset (report to the driver via gadget_reset()); otherwise the
 * controller missed the window and is fully re-initialised instead.
 * Called from mv_udc_irq() with udc->lock held.
 */
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	/* NOTE(review): the "& 0xFFFFFFFF" mask is a no-op on a u32 read */
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		gadget_reset(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
/*
 * handle_bus_resume - restore the pre-suspend USB state and forward the
 * resume event to the gadget driver (lock dropped around the callback).
 * Called with udc->lock held.
 */
static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver) {
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}

/*
 * irq_process_suspend - USBSTS_SUSPEND handler: record the current state
 * and notify the gadget driver.
 *
 * NOTE(review): unlike handle_bus_resume(), udc->driver is dereferenced
 * here without a NULL check — presumably safe because the suspend
 * interrupt is only enabled while a driver is bound; confirm.
 */
static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
/*
 * irq_process_port_change - USBSTS_PORT_CHANGE handler: pick up the
 * negotiated bus speed once reset signalling has finished, and translate
 * PORTSC suspend-bit transitions into driver suspend/resume callbacks.
 * Called with udc->lock held.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	/* port just suspended: record state, tell the driver */
	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* suspend bit cleared while we believed we were suspended: resume */
	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
/* USBSTS_ERR handler: nothing recoverable here, just count the error. */
static void irq_process_error(struct mv_udc *udc)
{
	udc->errors++;
}
/*
 * mv_udc_irq - top-level (shared) interrupt handler. Reads USBSTS masked
 * by USBINTR, acknowledges everything (write-1-to-clear) and dispatches to
 * the per-cause helpers under udc->lock. Returns IRQ_NONE when the
 * controller is stopped or raised nothing, so a sharing device can claim
 * the interrupt.
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
/*
 * mv_udc_vbus_irq - threaded VBUS interrupt: defer the (slow) VBUS poll
 * and PHY init to the workqueue rather than doing it in IRQ context.
 */
static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and init phy may cause too much time*/
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}

/*
 * mv_udc_vbus_work - poll the platform VBUS detect and translate the level
 * into a vbus_session() start/stop of the gadget session.
 */
static void mv_udc_vbus_work(struct work_struct *work)
{
	struct mv_udc *udc;
	unsigned int vbus;

	udc = container_of(work, struct mv_udc, vbus_work);
	if (!udc->pdata->vbus)
		return;

	vbus = udc->pdata->vbus->poll();
	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);

	if (vbus == VBUS_HIGH)
		mv_udc_vbus_session(&udc->gadget, 1);
	else if (vbus == VBUS_LOW)
		mv_udc_vbus_session(&udc->gadget, 0);
}
/*
 * Device release callback: signals udc->done so mv_udc_remove(), which
 * waits on that completion, knows the gadget device is fully released.
 */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc = dev_get_drvdata(_dev);

	complete(udc->done);
}
/*
 * mv_udc_remove - platform remove: unregister the gadget, tear down the
 * workqueue and DMA resources (devm handles the rest), then wait for
 * gadget_release() to confirm the device is gone before returning.
 */
static void mv_udc_remove(struct platform_device *pdev)
{
	struct mv_udc *udc;

	udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	if (udc->qwork)
		destroy_workqueue(udc->qwork);

	/* free memory allocated in probe */
	dma_pool_destroy(udc->dtd_pool);

	if (udc->ep_dqh)
		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);

	mv_udc_disable(udc);

	/* free dev, wait for the release() finished */
	wait_for_completion(udc->done);
}
/*
 * mv_udc_probe - platform probe: map capability/PHY registers, allocate
 * the dQH array and dTD pool, set up endpoints, request IRQs and register
 * the gadget. devm_* handles most cleanup; only the dQH buffer, dTD pool
 * and workqueue need the explicit error ladder at the bottom.
 */
static int mv_udc_probe(struct platform_device *pdev)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct mv_udc *udc;
	int retval = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (udc == NULL)
		return -ENOMEM;

	udc->done = &release_done;
	udc->pdata = dev_get_platdata(&pdev->dev);
	spin_lock_init(&udc->lock);

	udc->dev = pdev;

	if (pdata->mode == MV_USB_MODE_OTG) {
		udc->transceiver = devm_usb_get_phy(&pdev->dev,
					USB_PHY_TYPE_USB2);
		if (IS_ERR(udc->transceiver)) {
			retval = PTR_ERR(udc->transceiver);

			if (retval == -ENXIO)
				return retval;

			/* PHY not ready yet: retry probe later */
			udc->transceiver = NULL;
			return -EPROBE_DEFER;
		}
	}

	/* udc only have one sysclk. */
	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		return -EBUSY;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
		return -EBUSY;
	}

	/* we will acces controller register, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		return retval;

	/* operational registers follow the capability block (CAPLENGTH) */
	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * some platform will use usb to download image, it may not disconnect
	 * usb gadget before loading kernel. So first stop udc here.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	/* one dQH per endpoint per direction, aligned per the hardware */
	size = udc->max_eps * sizeof(struct mv_dqh) *2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&pdev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&pdev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) *2;
	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (udc->eps == NULL) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
					GFP_KERNEL);
	if (!udc->status_req) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get valid address */
	udc->status_req->req.buf = devm_kzalloc(&pdev->dev, 8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_destroy_dma;
	}
	udc->irq = r->start;
	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_destroy_dma;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	udc->gadget.name = driver_name;		/* gadget name */

	eps_init(udc);

	/* VBUS detect: we can disable/enable clock on demand.*/
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = devm_request_threaded_irq(&pdev->dev,
				pdata->vbus->irq, NULL,
				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&pdev->dev,
				"Can not request irq for VBUS, "
				"disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&pdev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_destroy_dma;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported, we
	 * have to enable vbus active all the time to let controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
			gadget_release);
	if (retval)
		goto err_create_workqueue;

	platform_set_drvdata(pdev, udc);
	dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_create_workqueue:
	if (udc->qwork)
		destroy_workqueue(udc->qwork);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);

	return retval;
}
#ifdef CONFIG_PM
/*
 * System-sleep suspend callback.
 *
 * Refuses to suspend while the VBUS poll hook reports an attached cable,
 * so an active session is not torn down.  When clock gating is not in
 * use, the controller is stopped and powered down here; with clock
 * gating the hardware is already off whenever the cable is out.
 */
static int mv_udc_suspend(struct device *dev)
{
	struct mv_udc *udc;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in OTG driver*/
	if (udc->transceiver)
		return 0;

	/* Cable still plugged in: veto the suspend. */
	if (udc->pdata->vbus && udc->pdata->vbus->poll)
		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
			dev_info(&udc->dev->dev, "USB cable is connected!\n");
			return -EAGAIN;
		}

	/*
	 * only cable is unplugged, udc can suspend.
	 * So do not care about clock_gating == 1.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		/* gate clocks / PHY now that the controller is idle */
		mv_udc_disable_internal(udc);
	}

	return 0;
}
/*
 * System-sleep resume callback.
 *
 * Re-enables clocks/PHY and re-initializes the controller, but only in
 * the non-clock-gating configuration (with clock gating, power-up is
 * driven by VBUS events instead).
 */
static int mv_udc_resume(struct device *dev)
{
	struct mv_udc *udc;
	int retval;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in OTG driver*/
	if (udc->transceiver)
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		/* restart hardware only if a gadget is bound and pulled up */
		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}
/* System sleep hooks; compiled in only under CONFIG_PM. */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend = mv_udc_suspend,
	.resume = mv_udc_resume,
};
#endif
/*
 * Shutdown hook: put the controller's mode field back to IDLE so the
 * hardware is quiescent across reboot/poweroff.  The controller is
 * briefly enabled just to make the register write stick.
 */
static void mv_udc_shutdown(struct platform_device *pdev)
{
	struct mv_udc *udc;
	u32 mode;

	udc = platform_get_drvdata(pdev);
	/* reset controller mode to IDLE */
	mv_udc_enable(udc);
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3; /* clear the two mode-select bits (device/host) */
	writel(mode, &udc->op_regs->usbmode);
	mv_udc_disable(udc);
}
/* Platform-bus glue: binds to platform devices named "mv-udc". */
static struct platform_driver udc_driver = {
	.probe = mv_udc_probe,
	.remove_new = mv_udc_remove,
	.shutdown = mv_udc_shutdown,
	.driver = {
		.name = "mv-udc",
#ifdef CONFIG_PM
		.pm = &mv_udc_pm_ops,
#endif
	},
};

module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/mv_udc_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* MAX3420 Device Controller driver for USB.
*
* Author: Jaswinder Singh Brar <[email protected]>
* (C) Copyright 2019-2020 Linaro Ltd
*
* Based on:
* o MAX3420E datasheet
* https://datasheets.maximintegrated.com/en/ds/MAX3420E.pdf
* o MAX342{0,1}E Programming Guides
* https://pdfserv.maximintegrated.com/en/an/AN3598.pdf
* https://pdfserv.maximintegrated.com/en/an/AN3785.pdf
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
/* Driver-wide limits */
#define MAX3420_MAX_EPS 4
#define MAX3420_EP_MAX_PACKET 64 /* Same for all Endpoints */
#define MAX3420_EPNAME_SIZE 16 /* Buffer size for endpoint name */
/* OR'ed into an SPI command byte to also ACK the status stage */
#define MAX3420_ACKSTAT BIT(0)
#define MAX3420_SPI_DIR_RD 0 /* read register from MAX3420 */
#define MAX3420_SPI_DIR_WR 1 /* write register to MAX3420 */
/* SPI commands: */
#define MAX3420_SPI_DIR_SHIFT 1
#define MAX3420_SPI_REG_SHIFT 3
/* Chip register map (register number goes into the SPI command byte) */
#define MAX3420_REG_EP0FIFO 0
#define MAX3420_REG_EP1FIFO 1
#define MAX3420_REG_EP2FIFO 2
#define MAX3420_REG_EP3FIFO 3
#define MAX3420_REG_SUDFIFO 4
#define MAX3420_REG_EP0BC 5
#define MAX3420_REG_EP1BC 6
#define MAX3420_REG_EP2BC 7
#define MAX3420_REG_EP3BC 8
/* EPSTALLS register bits */
#define MAX3420_REG_EPSTALLS 9
	#define ACKSTAT BIT(6)
	#define STLSTAT BIT(5)
	#define STLEP3IN BIT(4)
	#define STLEP2IN BIT(3)
	#define STLEP1OUT BIT(2)
	#define STLEP0OUT BIT(1)
	#define STLEP0IN BIT(0)
/* CLRTOGS register bits (endpoint disable + data-toggle clear) */
#define MAX3420_REG_CLRTOGS 10
	#define EP3DISAB BIT(7)
	#define EP2DISAB BIT(6)
	#define EP1DISAB BIT(5)
	#define CTGEP3IN BIT(4)
	#define CTGEP2IN BIT(3)
	#define CTGEP1OUT BIT(2)
/* Endpoint IRQ status / enable (same bit layout) */
#define MAX3420_REG_EPIRQ 11
#define MAX3420_REG_EPIEN 12
	#define SUDAVIRQ BIT(5)
	#define IN3BAVIRQ BIT(4)
	#define IN2BAVIRQ BIT(3)
	#define OUT1DAVIRQ BIT(2)
	#define OUT0DAVIRQ BIT(1)
	#define IN0BAVIRQ BIT(0)
/* USB bus IRQ status / enable (same bit layout) */
#define MAX3420_REG_USBIRQ 13
#define MAX3420_REG_USBIEN 14
	#define OSCOKIRQ BIT(0)
	#define RWUDNIRQ BIT(1)
	#define BUSACTIRQ BIT(2)
	#define URESIRQ BIT(3)
	#define SUSPIRQ BIT(4)
	#define NOVBUSIRQ BIT(5)
	#define VBUSIRQ BIT(6)
	#define URESDNIRQ BIT(7)
/* USB control register bits */
#define MAX3420_REG_USBCTL 15
	#define HOSCSTEN BIT(7)
	#define VBGATE BIT(6)
	#define CHIPRES BIT(5)
	#define PWRDOWN BIT(4)
	#define CONNECT BIT(3)
	#define SIGRWU BIT(2)
#define MAX3420_REG_CPUCTL 16
	#define IE BIT(0)
/* Pin control register bits */
#define MAX3420_REG_PINCTL 17
	#define EP3INAK BIT(7)
	#define EP2INAK BIT(6)
	#define EP0INAK BIT(5)
	#define FDUPSPI BIT(4)
	#define INTLEVEL BIT(3)
	#define POSINT BIT(2)
	#define GPXB BIT(1)
	#define GPXA BIT(0)
#define MAX3420_REG_REVISION 18
#define MAX3420_REG_FNADDR 19
	#define FNADDR_MASK 0x7f
#define MAX3420_REG_IOPINS 20
#define MAX3420_REG_IOPINS2 21
#define MAX3420_REG_GPINIRQ 22
#define MAX3420_REG_GPINIEN 23
#define MAX3420_REG_GPINPOL 24
/* Host-mode registers (MAX3421); unused by this device-mode driver */
#define MAX3420_REG_HIRQ 25
#define MAX3420_REG_HIEN 26
#define MAX3420_REG_MODE 27
#define MAX3420_REG_PERADDR 28
#define MAX3420_REG_HCTL 29
#define MAX3420_REG_HXFR 30
#define MAX3420_REG_HRSL 31
/*
 * Deferred-work flags kept in udc->todo / ep->todo.  Each two-bit field
 * encodes "nothing pending" (00), "do" (01) or "undo" (11) so one mask
 * both tests for pending work and selects the direction.
 */
#define ENABLE_IRQ BIT(0)
#define IOPIN_UPDATE BIT(1)
#define REMOTE_WAKEUP BIT(2)
#define CONNECT_HOST GENMASK(4, 3)
#define HCONNECT (1 << 3)
#define HDISCONNECT (3 << 3)
#define UDC_START GENMASK(6, 5)
#define START (1 << 5)
#define STOP (3 << 5)
#define ENABLE_EP GENMASK(8, 7)
#define ENABLE (1 << 7)
#define DISABLE (3 << 7)
#define STALL_EP GENMASK(10, 9)
#define STALL (1 << 9)
#define UNSTALL (3 << 9)
/* Build an SPI command byte: register number in bits 7:3, R/W in bit 1 */
#define MAX3420_CMD(c) FIELD_PREP(GENMASK(7, 3), c)
#define MAX3420_SPI_CMD_RD(c) (MAX3420_CMD(c) | (0 << 1))
#define MAX3420_SPI_CMD_WR(c) (MAX3420_CMD(c) | (1 << 1))
/* One queued usb_request plus its link on the owning endpoint's queue. */
struct max3420_req {
	struct usb_request usb_req;
	struct list_head queue; /* linked on max3420_ep.queue */
	struct max3420_ep *ep;
};

/* Per-endpoint state; lock protects queue and the todo flags. */
struct max3420_ep {
	struct usb_ep ep_usb;
	struct max3420_udc *udc;
	struct list_head queue; /* pending max3420_req's */
	char name[MAX3420_EPNAME_SIZE];
	unsigned int maxpacket;
	spinlock_t lock;
	int halted;
	u32 todo; /* deferred ENABLE_EP / STALL_EP work for the SPI thread */
	int id;
};

/*
 * Device state.  All chip access goes over SPI and must sleep, so a
 * dedicated kthread (thread_task) performs the actual register I/O,
 * serialized by spi_bus_mutex; IRQ handlers and gadget ops only set
 * flags in 'todo' (under 'lock') and wake the thread.
 */
struct max3420_udc {
	struct usb_gadget gadget;
	struct max3420_ep ep[MAX3420_MAX_EPS];
	struct usb_gadget_driver *driver;
	struct task_struct *thread_task;
	int remote_wkp, is_selfpowered;
	bool vbus_active, softconnect;
	struct usb_ctrlrequest setup; /* last SETUP packet received */
	struct mutex spi_bus_mutex;
	struct max3420_req ep0req;
	struct spi_device *spi;
	struct device *dev;
	spinlock_t lock;
	bool suspended;
	u8 ep0buf[64];
	u32 todo; /* deferred UDC_START / ENABLE_IRQ / REMOTE_WAKEUP work */
};

#define to_max3420_req(r) container_of((r), struct max3420_req, usb_req)
#define to_max3420_ep(e) container_of((e), struct max3420_ep, ep_usb)
#define to_udc(g) container_of((g), struct max3420_udc, gadget)

#define DRIVER_DESC "MAX3420 USB Device-Mode Driver"
static const char driver_name[] = "max3420-udc";

/* Control endpoint configuration.*/
static const struct usb_endpoint_descriptor ep0_desc = {
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(MAX3420_EP_MAX_PACKET),
};
static void spi_ack_ctrl(struct max3420_udc *udc)
{
struct spi_device *spi = udc->spi;
struct spi_transfer transfer;
struct spi_message msg;
u8 txdata[1];
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
txdata[0] = MAX3420_ACKSTAT;
transfer.tx_buf = txdata;
transfer.len = 1;
spi_message_add_tail(&transfer, &msg);
spi_sync(spi, &msg);
}
/*
 * Read one chip register over SPI.  The command byte carries the
 * register number; when @actstat is set, ACKSTAT is OR'ed in so the
 * read doubles as a control-status-stage ACK.  Returns the register
 * value (second byte clocked in).
 */
static u8 spi_rd8_ack(struct max3420_udc *udc, u8 reg, int actstat)
{
	u8 tx[2], rx[2];
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = sizeof(tx),
	};
	struct spi_message m;

	tx[0] = MAX3420_SPI_CMD_RD(reg);
	if (actstat)
		tx[0] |= MAX3420_ACKSTAT;
	tx[1] = 0; /* dummy byte clocked out while the value is clocked in */

	spi_message_init(&m);
	spi_message_add_tail(&xfer, &m);
	spi_sync(udc->spi, &m);

	return rx[1];
}
/* Read one chip register without acknowledging a control status stage. */
static u8 spi_rd8(struct max3420_udc *udc, u8 reg)
{
	return spi_rd8_ack(udc, reg, 0);
}
/*
 * Write one chip register over SPI.  When @actstat is set, ACKSTAT is
 * OR'ed into the command byte so the write doubles as a control
 * status-stage ACK.
 */
static void spi_wr8_ack(struct max3420_udc *udc, u8 reg, u8 val, int actstat)
{
	u8 tx[2];
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.len = sizeof(tx),
	};
	struct spi_message m;

	tx[0] = MAX3420_SPI_CMD_WR(reg);
	if (actstat)
		tx[0] |= MAX3420_ACKSTAT;
	tx[1] = val;

	spi_message_init(&m);
	spi_message_add_tail(&xfer, &m);
	spi_sync(udc->spi, &m);
}
/* Write one chip register without acknowledging a control status stage. */
static void spi_wr8(struct max3420_udc *udc, u8 reg, u8 val)
{
	spi_wr8_ack(udc, reg, val, 0);
}
/*
 * Burst-read @len bytes from register @reg into @buf.  A full-duplex
 * bounce buffer is used because the first byte on the wire is the
 * command; the payload starts at offset 1 of the received data.
 * @len must not exceed MAX3420_EP_MAX_PACKET.
 */
static void spi_rd_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
{
	u8 bounce[MAX3420_EP_MAX_PACKET + 1] = {};
	struct spi_transfer xfer = {
		.tx_buf = bounce,
		.rx_buf = bounce,
		.len = len + 1,
	};
	struct spi_message m;

	bounce[0] = MAX3420_SPI_CMD_RD(reg);

	spi_message_init(&m);
	spi_message_add_tail(&xfer, &m);
	spi_sync(udc->spi, &m);

	memcpy(buf, &bounce[1], len);
}
/*
 * Burst-write @len bytes from @buf to register @reg.  The command byte
 * is prepended in a local bounce buffer so the whole wire frame goes
 * out as one transfer.  @len must not exceed MAX3420_EP_MAX_PACKET.
 */
static void spi_wr_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
{
	u8 bounce[MAX3420_EP_MAX_PACKET + 1] = {};
	struct spi_transfer xfer = {
		.tx_buf = bounce,
		.len = len + 1,
	};
	struct spi_message m;

	bounce[0] = MAX3420_SPI_CMD_WR(reg);
	memcpy(&bounce[1], buf, len);

	spi_message_init(&m);
	spi_message_add_tail(&xfer, &m);
	spi_sync(udc->spi, &m);
}
/*
 * Apply a deferred endpoint enable/disable request to the chip.
 * Returns true if register writes were performed (the SPI thread
 * should loop again), false if nothing was pending or the endpoint
 * is EP0 (which cannot be disabled).
 */
static int spi_max3420_enable(struct max3420_ep *ep)
{
	struct max3420_udc *udc = ep->udc;
	unsigned long flags;
	u8 epdis, epien;
	int todo;

	/* atomically consume the pending ENABLE/DISABLE request */
	spin_lock_irqsave(&ep->lock, flags);
	todo = ep->todo & ENABLE_EP;
	ep->todo &= ~ENABLE_EP;
	spin_unlock_irqrestore(&ep->lock, flags);

	if (!todo || ep->id == 0)
		return false;

	epien = spi_rd8(udc, MAX3420_REG_EPIEN);
	epdis = spi_rd8(udc, MAX3420_REG_CLRTOGS);

	if (todo == ENABLE) {
		/* clear the EPxDISAB bit, enable the endpoint's IRQ */
		epdis &= ~BIT(ep->id + 4);
		epien |= BIT(ep->id + 1);
	} else {
		epdis |= BIT(ep->id + 4);
		epien &= ~BIT(ep->id + 1);
	}

	spi_wr8(udc, MAX3420_REG_CLRTOGS, epdis);
	spi_wr8(udc, MAX3420_REG_EPIEN, epien);

	return true;
}
/*
 * Apply a deferred STALL/UNSTALL request to the chip.  Unstalling also
 * clears the endpoint's data toggle via CLRTOGS.  Returns true if the
 * chip was touched, false if nothing was pending or the endpoint is
 * EP0 (EP0 stalls are handled inline in the setup paths).
 */
static int spi_max3420_stall(struct max3420_ep *ep)
{
	struct max3420_udc *udc = ep->udc;
	unsigned long flags;
	u8 epstalls;
	int todo;

	/* atomically consume the pending STALL/UNSTALL request */
	spin_lock_irqsave(&ep->lock, flags);
	todo = ep->todo & STALL_EP;
	ep->todo &= ~STALL_EP;
	spin_unlock_irqrestore(&ep->lock, flags);

	if (!todo || ep->id == 0)
		return false;

	epstalls = spi_rd8(udc, MAX3420_REG_EPSTALLS);
	if (todo == STALL) {
		ep->halted = 1;
		epstalls |= BIT(ep->id + 1);
	} else {
		u8 clrtogs;

		ep->halted = 0;
		epstalls &= ~BIT(ep->id + 1);
		/* reset the data toggle when clearing a halt */
		clrtogs = spi_rd8(udc, MAX3420_REG_CLRTOGS);
		clrtogs |= BIT(ep->id + 1);
		spi_wr8(udc, MAX3420_REG_CLRTOGS, clrtogs);
	}
	spi_wr8(udc, MAX3420_REG_EPSTALLS, epstalls | ACKSTAT);

	return true;
}
/*
 * Perform a deferred remote-wakeup request: pulse the SIGRWU bit in
 * USBCTL for a few milliseconds.  Only acts while the bus is actually
 * suspended.  Returns true if signalling was performed.
 */
static int spi_max3420_rwkup(struct max3420_udc *udc)
{
	unsigned long flags;
	int wake_remote;
	u8 usbctl;

	/* atomically consume the pending REMOTE_WAKEUP request */
	spin_lock_irqsave(&udc->lock, flags);
	wake_remote = udc->todo & REMOTE_WAKEUP;
	udc->todo &= ~REMOTE_WAKEUP;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (!wake_remote || !udc->suspended)
		return false;

	/* Set Remote-WkUp Signal*/
	usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
	usbctl |= SIGRWU;
	spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);

	msleep_interruptible(5);

	/* Clear Remote-WkUp Signal*/
	usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
	usbctl &= ~SIGRWU;
	spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);

	udc->suspended = false;

	return true;
}
static void max3420_nuke(struct max3420_ep *ep, int status);

/*
 * Put the chip into its disconnected, powered-down state: flush every
 * non-control endpoint queue, mask the chip-to-CPU interrupt line and
 * set PWRDOWN.  The crystal driver (HOSCSTEN) is kept running only on
 * bus-powered designs.
 */
static void __max3420_stop(struct max3420_udc *udc)
{
	u8 val;
	int i;

	/* clear all pending requests */
	for (i = 1; i < MAX3420_MAX_EPS; i++)
		max3420_nuke(&udc->ep[i], -ECONNRESET);

	/* Disable IRQ to CPU */
	spi_wr8(udc, MAX3420_REG_CPUCTL, 0);

	val = spi_rd8(udc, MAX3420_REG_USBCTL);
	val |= PWRDOWN;
	if (udc->is_selfpowered)
		val &= ~HOSCSTEN;
	else
		val |= HOSCSTEN;
	spi_wr8(udc, MAX3420_REG_USBCTL, val);
}
/*
 * Bring the chip up from scratch: reset it, wait for the oscillator,
 * connect the D+ pull-up (gated on VBUS) and unmask the bus and EP0
 * interrupts.  The register sequence follows the MAX3420E programming
 * guide and must not be reordered.
 */
static void __max3420_start(struct max3420_udc *udc)
{
	u8 val;

	/* Need this delay if bus-powered,
	 * but even for self-powered it helps stability
	 */
	msleep_interruptible(250);

	/* configure SPI */
	spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);

	/* Chip Reset */
	spi_wr8(udc, MAX3420_REG_USBCTL, CHIPRES);
	msleep_interruptible(5);
	spi_wr8(udc, MAX3420_REG_USBCTL, 0);

	/* Poll for OSC to stabilize */
	while (1) {
		val = spi_rd8(udc, MAX3420_REG_USBIRQ);
		if (val & OSCOKIRQ)
			break;
		cond_resched();
	}

	/* Enable PULL-UP only when Vbus detected */
	val = spi_rd8(udc, MAX3420_REG_USBCTL);
	val |= VBGATE | CONNECT;
	spi_wr8(udc, MAX3420_REG_USBCTL, val);

	val = URESDNIRQ | URESIRQ;
	if (udc->is_selfpowered)
		val |= NOVBUSIRQ; /* self-powered designs must track VBUS loss */
	spi_wr8(udc, MAX3420_REG_USBIEN, val);

	/* Enable only EP0 interrupts */
	val = IN0BAVIRQ | OUT0DAVIRQ | SUDAVIRQ;
	spi_wr8(udc, MAX3420_REG_EPIEN, val);

	/* Enable IRQ to CPU */
	spi_wr8(udc, MAX3420_REG_CPUCTL, IE);
}
/*
 * Service a deferred UDC_START request: (re)start the chip if VBUS is
 * present and the gadget is soft-connected, otherwise stop it.
 * Returns true if the request was pending and acted upon.
 */
static int max3420_start(struct max3420_udc *udc)
{
	unsigned long flags;
	int todo;

	/* atomically consume the pending start/stop request */
	spin_lock_irqsave(&udc->lock, flags);
	todo = udc->todo & UDC_START;
	udc->todo &= ~UDC_START;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (!todo)
		return false;

	if (udc->vbus_active && udc->softconnect)
		__max3420_start(udc);
	else
		__max3420_stop(udc);

	return true;
}
/*
 * Hard-IRQ handler for the board's VBUS-change line.  The line fires
 * on every transition, so the cached state is simply toggled; the real
 * chip start/stop is deferred to the SPI thread via UDC_START.
 */
static irqreturn_t max3420_vbus_handler(int irq, void *dev_id)
{
	struct max3420_udc *udc = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* its a vbus change interrupt */
	udc->vbus_active = !udc->vbus_active;
	udc->todo |= UDC_START;
	usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
	usb_gadget_set_state(&udc->gadget, udc->vbus_active
			     ? USB_STATE_POWERED : USB_STATE_NOTATTACHED);
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->thread_task)
		wake_up_process(udc->thread_task);

	return IRQ_HANDLED;
}
/*
 * Hard-IRQ handler for the chip's interrupt line.  Servicing requires
 * SPI transfers, which sleep, so the line is masked here and the SPI
 * thread is woken; the thread re-enables the IRQ when it sees the
 * ENABLE_IRQ flag.
 */
static irqreturn_t max3420_irq_handler(int irq, void *dev_id)
{
	struct max3420_udc *udc = dev_id;
	struct spi_device *spi = udc->spi;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	if ((udc->todo & ENABLE_IRQ) == 0) {
		disable_irq_nosync(spi->irq);
		udc->todo |= ENABLE_IRQ;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->thread_task)
		wake_up_process(udc->thread_task);

	return IRQ_HANDLED;
}
/*
 * Answer a GET_STATUS control request directly from the driver:
 * device status from cached self-powered/remote-wakeup state,
 * interface status via the gadget driver's setup() hook, endpoint
 * status from the cached halt flag.  Unsupported recipients stall EP0.
 */
static void max3420_getstatus(struct max3420_udc *udc)
{
	struct max3420_ep *ep;
	u16 status = 0;

	switch (udc->setup.bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		/* Get device status */
		status = udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED;
		status |= (udc->remote_wkp << USB_DEVICE_REMOTE_WAKEUP);
		break;
	case USB_RECIP_INTERFACE:
		if (udc->driver->setup(&udc->gadget, &udc->setup) < 0)
			goto stall;
		break;
	case USB_RECIP_ENDPOINT:
		ep = &udc->ep[udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK];
		/* reject direction mismatches against the ep's capabilities */
		if (udc->setup.wIndex & USB_DIR_IN) {
			if (!ep->ep_usb.caps.dir_in)
				goto stall;
		} else {
			if (!ep->ep_usb.caps.dir_out)
				goto stall;
		}
		if (ep->halted)
			status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		goto stall;
	}

	status = cpu_to_le16(status);
	/* load the 2-byte reply and arm EP0-IN with status-stage ACK */
	spi_wr_buf(udc, MAX3420_REG_EP0FIFO, &status, 2);
	spi_wr8_ack(udc, MAX3420_REG_EP0BC, 2, 1);
	return;
stall:
	dev_err(udc->dev, "Can't respond to getstatus request\n");
	spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
}
/*
 * Handle SET_FEATURE / CLEAR_FEATURE: device remote-wakeup is tracked
 * in software; endpoint halt is queued as a deferred STALL/UNSTALL and
 * applied immediately (this runs in the SPI thread).  Anything else
 * stalls EP0.
 */
static void max3420_set_clear_feature(struct max3420_udc *udc)
{
	struct max3420_ep *ep;
	int set = udc->setup.bRequest == USB_REQ_SET_FEATURE;
	unsigned long flags;
	int id;

	/* NOTE: switches on the full bRequestType byte, so only the
	 * OUT-direction standard forms match — TODO confirm intended */
	switch (udc->setup.bRequestType) {
	case USB_RECIP_DEVICE:
		if (udc->setup.wValue != USB_DEVICE_REMOTE_WAKEUP)
			break;

		if (udc->setup.bRequest == USB_REQ_SET_FEATURE)
			udc->remote_wkp = 1;
		else
			udc->remote_wkp = 0;

		return spi_ack_ctrl(udc);

	case USB_RECIP_ENDPOINT:
		if (udc->setup.wValue != USB_ENDPOINT_HALT)
			break;

		id = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
		ep = &udc->ep[id];

		spin_lock_irqsave(&ep->lock, flags);
		ep->todo &= ~STALL_EP;
		if (set)
			ep->todo |= STALL;
		else
			ep->todo |= UNSTALL;
		spin_unlock_irqrestore(&ep->lock, flags);

		spi_max3420_stall(ep);
		return;
	default:
		break;
	}

	dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n");
	spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
}
/*
 * Fetch the 8-byte SETUP packet from the SUDFIFO and dispatch it.
 * GET_STATUS, SET_ADDRESS and SET/CLEAR_FEATURE are handled in the
 * driver; everything else is forwarded to the gadget driver, stalling
 * EP0 if it rejects the request.
 */
static void max3420_handle_setup(struct max3420_udc *udc)
{
	struct usb_ctrlrequest setup;

	spi_rd_buf(udc, MAX3420_REG_SUDFIFO, (void *)&setup, 8);

	udc->setup = setup;
	/* NOTE(review): wire data is little-endian, so le16_to_cpu() would
	 * be the semantically correct conversion here; cpu_to_le16() does
	 * the same byte swap on both endiannesses, so behavior matches —
	 * left as-is */
	udc->setup.wValue = cpu_to_le16(setup.wValue);
	udc->setup.wIndex = cpu_to_le16(setup.wIndex);
	udc->setup.wLength = cpu_to_le16(setup.wLength);

	switch (udc->setup.bRequest) {
	case USB_REQ_GET_STATUS:
		/* Data+Status phase form udc */
		if ((udc->setup.bRequestType &
				(USB_DIR_IN | USB_TYPE_MASK)) !=
				(USB_DIR_IN | USB_TYPE_STANDARD)) {
			break;
		}
		return max3420_getstatus(udc);
	case USB_REQ_SET_ADDRESS:
		/* Status phase from udc */
		if (udc->setup.bRequestType != (USB_DIR_OUT |
				USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
			break;
		}
		/* the chip latches the address itself; reading FNADDR with
		 * ACKSTAT completes the status stage */
		spi_rd8_ack(udc, MAX3420_REG_FNADDR, 1);
		dev_dbg(udc->dev, "Assigned Address=%d\n", udc->setup.wValue);
		return;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		/* Requests with no data phase, status phase from udc */
		if ((udc->setup.bRequestType & USB_TYPE_MASK)
				!= USB_TYPE_STANDARD)
			break;
		return max3420_set_clear_feature(udc);
	default:
		break;
	}

	if (udc->driver->setup(&udc->gadget, &setup) < 0) {
		/* Stall EP0 */
		spi_wr8(udc, MAX3420_REG_EPSTALLS,
			STLEP0IN | STLEP0OUT | STLSTAT);
	}
}
/*
 * Finish a request: record @status unless a status was already set
 * (first writer wins), log unexpected failures, then invoke the
 * gadget's completion callback.  Caller must have unlinked the
 * request from the endpoint queue.
 */
static void max3420_req_done(struct max3420_req *req, int status)
{
	struct max3420_ep *ep = req->ep;
	struct max3420_udc *udc = ep->udc;

	if (req->usb_req.status != -EINPROGRESS)
		status = req->usb_req.status;
	else
		req->usb_req.status = status;

	if (status && status != -ESHUTDOWN)
		dev_err(udc->dev, "%s done %p, status %d\n",
			ep->ep_usb.name, req, status);

	if (req->usb_req.complete)
		req->usb_req.complete(&ep->ep_usb, &req->usb_req);
}
/*
 * Move up to one max-packet of the head request on endpoint @ep_id
 * through the chip FIFO (@in selects IN vs OUT).  Completes the
 * request when the full length has moved or a short packet ends the
 * transfer.  Returns true if there was a request to work on.
 */
static int max3420_do_data(struct max3420_udc *udc, int ep_id, int in)
{
	struct max3420_ep *ep = &udc->ep[ep_id];
	struct max3420_req *req;
	int done, length, psz;
	void *buf;

	if (list_empty(&ep->queue))
		return false;

	req = list_first_entry(&ep->queue, struct max3420_req, queue);
	buf = req->usb_req.buf + req->usb_req.actual;

	psz = ep->ep_usb.maxpacket;
	length = req->usb_req.length - req->usb_req.actual;
	length = min(length, psz);

	if (length == 0) {
		done = 1; /* nothing left to move: complete immediately */
		goto xfer_done;
	}

	done = 0;
	if (in) {
		prefetch(buf);
		spi_wr_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
		spi_wr8(udc, MAX3420_REG_EP0BC + ep_id, length);
		if (length < psz)
			done = 1; /* short packet terminates the transfer */
	} else {
		psz = spi_rd8(udc, MAX3420_REG_EP0BC + ep_id);
		length = min(length, psz);
		prefetchw(buf);
		spi_rd_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
		if (length < ep->ep_usb.maxpacket)
			done = 1;
	}

	req->usb_req.actual += length;

	if (req->usb_req.actual == req->usb_req.length)
		done = 1;

xfer_done:
	if (done) {
		unsigned long flags;

		spin_lock_irqsave(&ep->lock, flags);
		list_del_init(&req->queue);
		spin_unlock_irqrestore(&ep->lock, flags);

		/* EP0 transfers need an explicit status-stage ACK */
		if (ep_id == 0)
			spi_ack_ctrl(udc);

		max3420_req_done(req, 0);
	}

	return true;
}
/*
 * Read and service the chip's interrupt status.  Bus-level events
 * (SETUP arrival, VBUS change, reset, suspend, ...) are handled one
 * per call — each returns true so the SPI thread loops again — then
 * the per-endpoint data IRQs are drained.  Returns true if any work
 * was done.
 */
static int max3420_handle_irqs(struct max3420_udc *udc)
{
	u8 epien, epirq, usbirq, usbien, reg[4];
	bool ret = false;

	/* EPIRQ..USBIEN are consecutive registers: one burst read */
	spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 4);
	epirq = reg[0];
	epien = reg[1];
	usbirq = reg[2];
	usbien = reg[3];

	/* only consider sources that are actually enabled */
	usbirq &= usbien;
	epirq &= epien;

	if (epirq & SUDAVIRQ) {
		spi_wr8(udc, MAX3420_REG_EPIRQ, SUDAVIRQ);
		max3420_handle_setup(udc);
		return true;
	}

	if (usbirq & VBUSIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, VBUSIRQ);
		dev_dbg(udc->dev, "Cable plugged in\n");
		return true;
	}

	if (usbirq & NOVBUSIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, NOVBUSIRQ);
		dev_dbg(udc->dev, "Cable pulled out\n");
		return true;
	}

	if (usbirq & URESIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, URESIRQ);
		dev_dbg(udc->dev, "USB Reset - Start\n");
		return true;
	}

	if (usbirq & URESDNIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, URESDNIRQ);
		dev_dbg(udc->dev, "USB Reset - END\n");
		/* a bus reset wipes the enables: restore the defaults */
		spi_wr8(udc, MAX3420_REG_USBIEN, URESDNIRQ | URESIRQ);
		spi_wr8(udc, MAX3420_REG_EPIEN, SUDAVIRQ | IN0BAVIRQ
			| OUT0DAVIRQ);
		return true;
	}

	if (usbirq & SUSPIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, SUSPIRQ);
		dev_dbg(udc->dev, "USB Suspend - Enter\n");
		udc->suspended = true;
		return true;
	}

	if (usbirq & BUSACTIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, BUSACTIRQ);
		dev_dbg(udc->dev, "USB Suspend - Exit\n");
		udc->suspended = false;
		return true;
	}

	if (usbirq & RWUDNIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, RWUDNIRQ);
		dev_dbg(udc->dev, "Asked Host to wakeup\n");
		return true;
	}

	if (usbirq & OSCOKIRQ) {
		spi_wr8(udc, MAX3420_REG_USBIRQ, OSCOKIRQ);
		dev_dbg(udc->dev, "Osc stabilized, start work\n");
		return true;
	}

	/* OUT IRQs are acked here (with ACKSTAT for control transfers);
	 * IN "buffer available" IRQs clear when the FIFO is reloaded */
	if (epirq & OUT0DAVIRQ && max3420_do_data(udc, 0, 0)) {
		spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT0DAVIRQ, 1);
		ret = true;
	}

	if (epirq & IN0BAVIRQ && max3420_do_data(udc, 0, 1))
		ret = true;

	if (epirq & OUT1DAVIRQ && max3420_do_data(udc, 1, 0)) {
		spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT1DAVIRQ, 1);
		ret = true;
	}

	if (epirq & IN2BAVIRQ && max3420_do_data(udc, 2, 1))
		ret = true;

	if (epirq & IN3BAVIRQ && max3420_do_data(udc, 3, 1))
		ret = true;

	return ret;
}
/*
 * Dedicated worker thread: all chip access happens here because SPI
 * transfers sleep.  Each pass services one category of deferred work
 * (start/stop, chip IRQs, remote wakeup, endpoint enables/stalls);
 * when a pass finds nothing it re-enables the chip IRQ line and
 * sleeps, with a 250 ms poll as a safety net.
 */
static int max3420_thread(void *dev_id)
{
	struct max3420_udc *udc = dev_id;
	struct spi_device *spi = udc->spi;
	int i, loop_again = 1;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (!loop_again) {
			ktime_t kt = ns_to_ktime(1000 * 1000 * 250); /* 250ms */

			set_current_state(TASK_INTERRUPTIBLE);

			/* no work left: let the hard IRQ fire again */
			spin_lock_irqsave(&udc->lock, flags);
			if (udc->todo & ENABLE_IRQ) {
				enable_irq(spi->irq);
				udc->todo &= ~ENABLE_IRQ;
			}
			spin_unlock_irqrestore(&udc->lock, flags);

			schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		}
		loop_again = 0;

		mutex_lock(&udc->spi_bus_mutex);

		/* If bus-vbus_active and disconnected */
		if (!udc->vbus_active || !udc->softconnect)
			goto loop;

		if (max3420_start(udc)) {
			loop_again = 1;
			goto loop;
		}

		if (max3420_handle_irqs(udc)) {
			loop_again = 1;
			goto loop;
		}

		if (spi_max3420_rwkup(udc)) {
			loop_again = 1;
			goto loop;
		}

		max3420_do_data(udc, 0, 1); /* get done with the EP0 ZLP */

		for (i = 1; i < MAX3420_MAX_EPS; i++) {
			struct max3420_ep *ep = &udc->ep[i];

			if (spi_max3420_enable(ep))
				loop_again = 1;
			if (spi_max3420_stall(ep))
				loop_again = 1;
		}
loop:
		mutex_unlock(&udc->spi_bus_mutex);
	}

	set_current_state(TASK_RUNNING);
	dev_info(udc->dev, "SPI thread exiting\n");
	return 0;
}
/*
 * usb_ep_ops.set_halt: queue a STALL or UNSTALL for this endpoint and
 * kick the SPI thread, which performs the actual register writes.
 */
static int max3420_ep_set_halt(struct usb_ep *_ep, int stall)
{
	struct max3420_ep *ep = to_max3420_ep(_ep);
	struct max3420_udc *udc = ep->udc;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	/* replace any pending stall request with the new direction */
	ep->todo = (ep->todo & ~STALL_EP) | (stall ? STALL : UNSTALL);
	spin_unlock_irqrestore(&ep->lock, flags);

	wake_up_process(udc->thread_task);

	dev_dbg(udc->dev, "%sStall %s\n", stall ? "" : "Un", ep->name);
	return 0;
}
/*
 * Record the endpoint descriptor and queue a deferred ENABLE for the
 * SPI thread.  Always succeeds.
 */
static int __max3420_ep_enable(struct max3420_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	ep->ep_usb.desc = desc;
	ep->ep_usb.maxpacket = usb_endpoint_maxp(desc);
	/* replace any pending enable/disable request with ENABLE */
	ep->todo = (ep->todo & ~ENABLE_EP) | ENABLE;
	spin_unlock_irqrestore(&ep->lock, flags);

	return 0;
}
/* usb_ep_ops.enable: record the descriptor and wake the SPI thread. */
static int max3420_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct max3420_ep *ep = to_max3420_ep(_ep);
	struct max3420_udc *udc = ep->udc;

	__max3420_ep_enable(ep, desc);

	wake_up_process(udc->thread_task);

	return 0;
}
/*
 * Complete every queued request on @ep with @status.  The lock is
 * dropped around each completion callback because gadget completion
 * handlers may re-enter the driver (e.g. queue another request);
 * list_for_each_entry_safe tolerates the deletion of the current node.
 */
static void max3420_nuke(struct max3420_ep *ep, int status)
{
	struct max3420_req *req, *r;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);

	list_for_each_entry_safe(req, r, &ep->queue, queue) {
		list_del_init(&req->queue);

		spin_unlock_irqrestore(&ep->lock, flags);
		max3420_req_done(req, status);
		spin_lock_irqsave(&ep->lock, flags);
	}

	spin_unlock_irqrestore(&ep->lock, flags);
}
/*
 * Drop the endpoint descriptor and queue a deferred DISABLE for the
 * SPI thread.
 */
static void __max3420_ep_disable(struct max3420_ep *ep)
{
	struct max3420_udc *udc = ep->udc;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	ep->ep_usb.desc = NULL;
	/* replace any pending enable/disable request with DISABLE */
	ep->todo = (ep->todo & ~ENABLE_EP) | DISABLE;
	spin_unlock_irqrestore(&ep->lock, flags);

	dev_dbg(udc->dev, "Disabled %s\n", ep->name);
}
/*
 * usb_ep_ops.disable: fail all queued requests, mark the endpoint
 * disabled and wake the SPI thread to update the chip.
 */
static int max3420_ep_disable(struct usb_ep *_ep)
{
	struct max3420_ep *ep = to_max3420_ep(_ep);
	struct max3420_udc *udc = ep->udc;

	max3420_nuke(ep, -ESHUTDOWN);

	__max3420_ep_disable(ep);

	wake_up_process(udc->thread_task);

	return 0;
}
/*
 * usb_ep_ops.alloc_request: allocate a zeroed request wrapper bound to
 * this endpoint.  Returns NULL on allocation failure.
 */
static struct usb_request *max3420_alloc_request(struct usb_ep *_ep,
						 gfp_t gfp_flags)
{
	struct max3420_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	req->ep = to_max3420_ep(_ep);

	return &req->usb_req;
}
/* usb_ep_ops.free_request: release a wrapper from max3420_alloc_request. */
static void max3420_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	kfree(to_max3420_req(_req));
}
/*
 * usb_ep_ops.queue: append the request to the endpoint's list and
 * wake the SPI thread, which performs the actual FIFO transfers.
 */
static int max3420_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
			    gfp_t ignored)
{
	struct max3420_ep *ep = to_max3420_ep(_ep);
	struct max3420_req *req = to_max3420_req(_req);
	unsigned long flags;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	spin_lock_irqsave(&ep->lock, flags);
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&ep->lock, flags);

	wake_up_process(ep->udc->thread_task);
	return 0;
}
static int max3420_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct max3420_req *t = NULL;
struct max3420_req *req = to_max3420_req(_req);
struct max3420_req *iter;
struct max3420_ep *ep = to_max3420_ep(_ep);
unsigned long flags;
spin_lock_irqsave(&ep->lock, flags);
/* Pluck the descriptor from queue */
list_for_each_entry(iter, &ep->queue, queue) {
if (iter != req)
continue;
list_del_init(&req->queue);
t = iter;
break;
}
spin_unlock_irqrestore(&ep->lock, flags);
if (t)
max3420_req_done(req, -ECONNRESET);
return 0;
}
/* Endpoint operations exposed to the gadget core. */
static const struct usb_ep_ops max3420_ep_ops = {
	.enable = max3420_ep_enable,
	.disable = max3420_ep_disable,
	.alloc_request = max3420_alloc_request,
	.free_request = max3420_free_request,
	.queue = max3420_ep_queue,
	.dequeue = max3420_ep_dequeue,
	.set_halt = max3420_ep_set_halt,
};
/*
 * usb_gadget_ops.wakeup: queue remote-wakeup signalling for the SPI
 * thread.  Fails with -EINVAL if the host has not enabled remote
 * wakeup via SET_FEATURE.
 */
static int max3420_wakeup(struct usb_gadget *gadget)
{
	struct max3420_udc *udc = to_udc(gadget);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	/* Only if wakeup allowed by host */
	if (udc->remote_wkp) {
		udc->todo |= REMOTE_WAKEUP;
		ret = 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->thread_task)
		wake_up_process(udc->thread_task);
	return ret;
}
/*
 * usb_gadget_ops.udc_start: bind the gadget driver, mark the gadget
 * soft-connected and queue a deferred chip start for the SPI thread.
 */
static int max3420_udc_start(struct usb_gadget *gadget,
			     struct usb_gadget_driver *driver)
{
	struct max3420_udc *udc = to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* hook up the driver */
	udc->driver = driver;
	udc->gadget.speed = USB_SPEED_FULL;

	udc->gadget.is_selfpowered = udc->is_selfpowered;
	udc->remote_wkp = 0; /* host must re-enable remote wakeup each session */
	udc->softconnect = true;
	udc->todo |= UDC_START;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->thread_task)
		wake_up_process(udc->thread_task);

	return 0;
}
/*
 * usb_gadget_ops.udc_stop: unbind the gadget driver and queue a
 * deferred chip stop (UDC_START with softconnect == false) for the
 * SPI thread.
 */
static int max3420_udc_stop(struct usb_gadget *gadget)
{
	struct max3420_udc *udc = to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->is_selfpowered = udc->gadget.is_selfpowered;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->driver = NULL;
	udc->softconnect = false;
	udc->todo |= UDC_START;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->thread_task)
		wake_up_process(udc->thread_task);

	return 0;
}
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops max3420_udc_ops = {
	.udc_start = max3420_udc_start,
	.udc_stop = max3420_udc_stop,
	.wakeup = max3420_wakeup,
};
/*
 * Initialize the fixed endpoint layout of the MAX3420: EP0 (control,
 * bidirectional), EP1 (bulk OUT), EP2 and EP3 (bulk IN).  Only the
 * non-control endpoints are added to the gadget's ep_list.
 */
static void max3420_eps_init(struct max3420_udc *udc)
{
	int idx;

	INIT_LIST_HEAD(&udc->gadget.ep_list);

	for (idx = 0; idx < MAX3420_MAX_EPS; idx++) {
		struct max3420_ep *ep = &udc->ep[idx];

		spin_lock_init(&ep->lock);
		INIT_LIST_HEAD(&ep->queue);

		ep->udc = udc;
		ep->id = idx;
		ep->halted = 0;
		ep->maxpacket = 0;
		ep->ep_usb.name = ep->name;
		ep->ep_usb.ops = &max3420_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep_usb, MAX3420_EP_MAX_PACKET);

		if (idx == 0) { /* For EP0 */
			ep->ep_usb.desc = &ep0_desc;
			ep->ep_usb.maxpacket = usb_endpoint_maxp(&ep0_desc);
			ep->ep_usb.caps.type_control = true;
			ep->ep_usb.caps.dir_in = true;
			ep->ep_usb.caps.dir_out = true;
			snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep0");
			continue;
		}

		if (idx == 1) { /* EP1 is OUT */
			ep->ep_usb.caps.dir_in = false;
			ep->ep_usb.caps.dir_out = true;
			snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep1-bulk-out");
		} else { /* EP2 & EP3 are IN */
			ep->ep_usb.caps.dir_in = true;
			ep->ep_usb.caps.dir_out = false;
			snprintf(ep->name, MAX3420_EPNAME_SIZE,
				 "ep%d-bulk-in", idx);
		}
		ep->ep_usb.caps.type_iso = false;
		ep->ep_usb.caps.type_int = false;
		ep->ep_usb.caps.type_bulk = true;

		list_add_tail(&ep->ep_usb.ep_list,
			      &udc->gadget.ep_list);
	}
}
static int max3420_probe(struct spi_device *spi)
{
struct max3420_udc *udc;
int err, irq;
u8 reg[8];
if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
dev_err(&spi->dev, "UDC needs full duplex to work\n");
return -EINVAL;
}
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
err = spi_setup(spi);
if (err) {
dev_err(&spi->dev, "Unable to setup SPI bus\n");
return -EFAULT;
}
udc = devm_kzalloc(&spi->dev, sizeof(*udc), GFP_KERNEL);
if (!udc)
return -ENOMEM;
udc->spi = spi;
udc->remote_wkp = 0;
/* Setup gadget structure */
udc->gadget.ops = &max3420_udc_ops;
udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->gadget.ep0 = &udc->ep[0].ep_usb;
udc->gadget.name = driver_name;
spin_lock_init(&udc->lock);
mutex_init(&udc->spi_bus_mutex);
udc->ep0req.ep = &udc->ep[0];
udc->ep0req.usb_req.buf = udc->ep0buf;
INIT_LIST_HEAD(&udc->ep0req.queue);
/* setup Endpoints */
max3420_eps_init(udc);
/* configure SPI */
spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);
err = usb_add_gadget_udc(&spi->dev, &udc->gadget);
if (err)
return err;
udc->dev = &udc->gadget.dev;
spi_set_drvdata(spi, udc);
irq = of_irq_get_byname(spi->dev.of_node, "udc");
err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
"max3420", udc);
if (err < 0)
goto del_gadget;
udc->thread_task = kthread_create(max3420_thread, udc,
"max3420-thread");
if (IS_ERR(udc->thread_task)) {
err = PTR_ERR(udc->thread_task);
goto del_gadget;
}
irq = of_irq_get_byname(spi->dev.of_node, "vbus");
if (irq <= 0) { /* no vbus irq implies self-powered design */
udc->is_selfpowered = 1;
udc->vbus_active = true;
udc->todo |= UDC_START;
usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
max3420_start(udc);
} else {
udc->is_selfpowered = 0;
/* Detect current vbus status */
spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
if (reg[7] != 0xff)
udc->vbus_active = true;
err = devm_request_irq(&spi->dev, irq,
max3420_vbus_handler, 0, "vbus", udc);
if (err < 0)
goto del_gadget;
}
return 0;
del_gadget:
usb_del_gadget_udc(&udc->gadget);
return err;
}
/*
 * SPI remove: unregister the gadget and stop the worker thread.
 *
 * Fix: kthread_stop() sleeps while waiting for the thread to exit, so
 * it must not be called inside spin_lock_irqsave() as the previous
 * version did (sleeping in atomic context).  No state guarded by
 * udc->lock is touched here, so the lock is simply not taken.
 */
static void max3420_remove(struct spi_device *spi)
{
	struct max3420_udc *udc = spi_get_drvdata(spi);

	usb_del_gadget_udc(&udc->gadget);

	kthread_stop(udc->thread_task);
}
/* Devicetree match table: the MAX3421 in device mode is compatible too. */
static const struct of_device_id max3420_udc_of_match[] = {
	{ .compatible = "maxim,max3420-udc"},
	{ .compatible = "maxim,max3421-udc"},
	{},
};
MODULE_DEVICE_TABLE(of, max3420_udc_of_match);
/* SPI-bus glue and module boilerplate. */
static struct spi_driver max3420_driver = {
	.driver = {
		.name = "max3420-udc",
		.of_match_table = max3420_udc_of_match,
	},
	.probe = max3420_probe,
	.remove = max3420_remove,
};

module_spi_driver(max3420_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Jassi Brar <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/max3420_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536udc_pci.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (https://www.amd.com)
* Author: Thomas Dahlmann
*/
/*
* The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
* It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
* provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
*
* Make sure that UDC is assigned to port 4 by BIOS settings (port can also
* be used as host port) and UOC bits PAD_EN and APU are set (should be done
* by BIOS init).
*
* UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
* work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
* can be used with gadget ether.
*
* This file does pci device registration, and the core driver implementation
* is done in amd5536udc.c
*
* The driver is split so as to use the core UDC driver which is based on
* Synopsys device controller IP (different than HS OTG IP) in UDCs
* integrated to SoC platforms.
*
*/
/* Driver strings */
#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
/* system */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/pci.h>
/* udc specific */
#include "amd5536udc.h"
/* pointer to device object */
static struct udc *udc;
/* description */
static const char name[] = "amd5536udc-pci";
/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
struct udc *dev;
dev = pci_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
/* gadget driver must not be registered */
if (WARN_ON(dev->driver))
return;
/* dma pool cleanup */
free_dma_pools(dev);
/* reset controller */
writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
free_irq(pdev->irq, dev);
iounmap(dev->virt_addr);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
udc_remove(dev);
}
/* Called by pci bus driver to init pci context */
/*
 * Called by the PCI core to bind the driver to the CS5536 UDC function.
 *
 * Acquires, in order: device struct, PCI enable, MMIO region, ioremap,
 * IRQ, optional DMA pools, then runs the core udc_probe(). The error
 * labels unwind in exactly the reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc *dev;
	unsigned long resource;
	unsigned long len;
	int retval = 0;
	/* one udc only: the driver keeps a single global instance */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}
	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		retval = -ENODEV;
		goto err_pcidev;
	}
	/* PCI resource allocation: BAR 0 holds all controller registers */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		retval = -EBUSY;
		goto err_memreg;
	}
	dev->virt_addr = ioremap(resource, len);
	if (!dev->virt_addr) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		retval = -EFAULT;
		goto err_ioremap;
	}
	if (!pdev->irq) {
		dev_err(&pdev->dev, "irq not set\n");
		retval = -ENODEV;
		/* no IRQ requested yet, so err_irq (iounmap) is the right label */
		goto err_irq;
	}
	spin_lock_init(&dev->lock);
	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		retval = -EBUSY;
		goto err_irq;
	}
	pci_set_drvdata(pdev, dev);
	/* chip revision for Hs AMD5536 */
	dev->chiprev = pdev->revision;
	pci_set_master(pdev);
	/* memory-write-invalidate is best effort only; failure is ignored */
	pci_try_set_mwi(pdev);
	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->dev = &pdev->dev;
	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto err_dma;
	}
	/* general probing */
	if (udc_probe(dev)) {
		retval = -ENODEV;
		goto err_probe;
	}
	/* publish the singleton only after everything has succeeded */
	udc = dev;
	return 0;
err_probe:
	if (use_dma)
		free_dma_pools(dev);
err_dma:
	free_irq(pdev->irq, dev);
err_irq:
	iounmap(dev->virt_addr);
err_ioremap:
	release_mem_region(resource, len);
err_memreg:
	pci_disable_device(pdev);
err_pcidev:
	kfree(dev);
	return retval;
}
/* PCI device parameters */
/* PCI device parameters: match the AMD CS5536 UDC function (device 0x2096)
 * by vendor/device ID plus the full USB-device class code. */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class =	PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);
/* PCI functions */
/* PCI functions: driver registration glue for the ID table above. */
static struct pci_driver udc_pci_driver = {
	.name =	name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};
module_pci_driver(udc_pci_driver);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/amd5536udc_pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
*/
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/clk.h>
#include "mv_u3d.h"
#define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
static const char driver_name[] = "mv_u3d";
static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
static void mv_u3d_stop_activity(struct mv_u3d *u3d,
struct usb_gadget_driver *driver);
/* for endpoint 0 operations: static control-endpoint descriptor used by
 * the driver itself (ep0 never gets a descriptor from the gadget). */
static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	MV_U3D_EP0_MAX_PKT_SIZE,
};
/*
 * Re-initialize endpoint 0 (both directions).
 *
 * The EP_INIT pulse (set, short delay, clear) resets each direction's
 * state machine; afterwards the *cr1 register is programmed with max
 * packet size, burst size 1, enable bit and control type. Register write
 * order matters here - do not reorder.
 */
static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
{
	struct mv_u3d_ep *ep;
	u32 epxcr;
	int i;
	for (i = 0; i < 2; i++) {
		ep = &u3d->eps[i];
		ep->u3d = u3d;
		/* ep0 ep context, ep0 in and out share the same ep context */
		ep->ep_context = &u3d->ep_context[1];
	}
	/* reset ep state machine */
	/* reset ep0 out */
	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
	epxcr |= MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
	udelay(5);
	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
	/* reset ep0 in: mirror of the OUT sequence above */
	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
	epxcr |= MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
	udelay(5);
	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
}
/*
 * Stall endpoint 0 in both directions and rewind the ep0 state machine
 * to "wait for SETUP" (the stall is cleared by the next SETUP packet).
 */
static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
{
	u32	tmp;
	dev_dbg(u3d->dev, "%s\n", __func__);
	/* set TX and RX to stall */
	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
	tmp |= MV_U3D_EPXCR_EP_HALT;
	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
	tmp |= MV_U3D_EPXCR_EP_HALT;
	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
	/* update ep0 state */
	u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
}
/*
 * Walk a completed request's TRB list, consult the per-endpoint transfer
 * status register for each TRB, and compute req.actual.
 *
 * Returns 0 on success, 1 if a TRB is unexpectedly still owned by the
 * hardware, or -EPROTO on a transfer error.
 */
static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
	struct mv_u3d_req *curr_req)
{
	struct mv_u3d_trb	*curr_trb;
	int actual, remaining_length = 0;
	int direction, ep_num;
	int retval = 0;
	u32 tmp, status, length;
	/* index interleaves direction and endpoint number */
	direction = index % 2;
	ep_num = index / 2;
	actual = curr_req->req.length;
	while (!list_empty(&curr_req->trb_list)) {
		curr_trb = list_entry(curr_req->trb_list.next,
					struct mv_u3d_trb, trb_list);
		/* hardware clears "own" when it is done with a TRB */
		if (!curr_trb->trb_hw->ctrl.own) {
			dev_err(u3d->dev, "%s, TRB own error!\n",
				u3d->eps[index].name);
			return 1;
		}
		curr_trb->trb_hw->ctrl.own = 0;
		if (direction == MV_U3D_EP_DIR_OUT)
			tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
		else
			tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
		/* status register packs completion code and residual length */
		status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
		length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
		if (status == MV_U3D_COMPLETE_SUCCESS ||
			(status == MV_U3D_COMPLETE_SHORT_PACKET &&
			direction == MV_U3D_EP_DIR_OUT)) {
			/*
			 * NOTE(review): remaining_length accumulates across
			 * TRBs yet the *cumulative* value is subtracted from
			 * "actual" on every iteration, so earlier residuals
			 * are subtracted more than once. Verify against the
			 * controller's per-TRB status semantics before
			 * changing - left as-is here.
			 */
			remaining_length += length;
			actual -= remaining_length;
		} else {
			dev_err(u3d->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				status);
			retval = -EPROTO;
		}
		list_del_init(&curr_trb->trb_list);
	}
	if (retval)
		return retval;
	curr_req->req.actual = actual;
	return 0;
}
/*
 * mv_u3d_done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Caller holds u3d->lock; the lock is dropped around the gadget
 * completion callback (giveback may re-enter the driver) and retaken
 * before returning - hence the sparse lock annotations.
 */
static
void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
	dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
	/* Removed the req from ep queue */
	list_del_init(&req->queue);
	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	/* Free trb for the request: single TRBs come from the dma pool,
	 * chained TRB arrays were kmalloc'ed and streaming-mapped. */
	if (!req->chain)
		dma_pool_free(u3d->trb_pool,
			req->trb_head->trb_hw, req->trb_head->trb_dma);
	else {
		dma_unmap_single(ep->u3d->gadget.dev.parent,
			(dma_addr_t)req->trb_head->trb_dma,
			req->trb_count * sizeof(struct mv_u3d_trb_hw),
			DMA_BIDIRECTIONAL);
		kfree(req->trb_head->trb_hw);
	}
	kfree(req->trb_head);
	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
	if (status && (status != -ESHUTDOWN)) {
		dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);
	}
	/* drop the lock across the gadget driver's completion callback */
	spin_unlock(&ep->u3d->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->u3d->lock);
}
/*
 * Hand a request's TRB chain to the hardware: write the head TRB's DMA
 * address (with the DCS bit) into the endpoint context, then ring the
 * endpoint's doorbell. The endpoint queue must be empty.
 *
 * Returns 0 on success, -ENOMEM if the queue is unexpectedly non-empty.
 */
static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
{
	u32 tmp, direction;
	struct mv_u3d *u3d;
	struct mv_u3d_ep_context *ep_context;
	int retval = 0;
	u3d = ep->u3d;
	direction = mv_u3d_ep_dir(ep);
	/* ep0 in and out share the same ep context slot 1*/
	if (ep->ep_num == 0)
		ep_context = &(u3d->ep_context[1]);
	else
		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
	/* check if the pipe is empty or not */
	if (!list_empty(&ep->queue)) {
		dev_err(u3d->dev, "add trb to non-empty queue!\n");
		retval = -ENOMEM;
		WARN_ON(1);
	} else {
		ep_context->rsvd0 = cpu_to_le32(1);
		ep_context->rsvd1 = 0;
		/* Configure the trb address and set the DCS bit.
		 * Both DCS bit and own bit in trb should be set.
		 */
		ep_context->trb_addr_lo =
			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
		ep_context->trb_addr_hi = 0;
		/* Ensure that updates to the EP Context will
		 * occure before Ring Bell.
		 */
		wmb();
		/* ring bell the ep: doorbell value encodes ep number and
		 * direction (0x1 is reserved for ep0) */
		if (ep->ep_num == 0)
			tmp = 0x1;
		else
			tmp = ep->ep_num * 2
				+ ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
		iowrite32(tmp, &u3d->op_regs->doorbell);
	}
	return retval;
}
/*
 * Build a single TRB covering the whole (remaining) transfer of @req.
 * Used for transfers that fit in one TRB (<= MV_U3D_EP_MAX_LENGTH_TRANSFER).
 * The hardware TRB comes from the coherent dma pool; @dma receives its
 * bus address and @length the number of bytes described.
 *
 * Returns the bookkeeping TRB, or NULL on allocation failure - callers
 * must check for NULL.
 */
static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
				unsigned *length, dma_addr_t *dma)
{
	u32 temp;
	unsigned int direction;
	struct mv_u3d_trb *trb;
	struct mv_u3d_trb_hw *trb_hw;
	struct mv_u3d *u3d;
	/* how big will this transfer be? */
	*length = req->req.length - req->req.actual;
	BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
	u3d = req->ep->u3d;
	trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
	if (!trb)
		return NULL;
	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 * cannot use GFP_KERNEL in spin lock
	 */
	trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
	if (!trb_hw) {
		kfree(trb);
		dev_err(u3d->dev,
			"%s, dma_pool_alloc fail\n", __func__);
		return NULL;
	}
	trb->trb_dma = *dma;
	trb->trb_hw = trb_hw;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	trb_hw->buf_addr_lo = cpu_to_le32(temp);
	trb_hw->buf_addr_hi = 0;
	trb_hw->trb_len = cpu_to_le32(*length);
	/* hand the TRB to the controller (it clears "own" on completion) */
	trb_hw->ctrl.own = 1;
	if (req->ep->ep_num == 0)
		trb_hw->ctrl.type = TYPE_DATA;
	else
		trb_hw->ctrl.type = TYPE_NORMAL;
	req->req.actual += *length;
	direction = mv_u3d_ep_dir(req->ep);
	if (direction == MV_U3D_EP_DIR_IN)
		trb_hw->ctrl.dir = 1;
	else
		trb_hw->ctrl.dir = 0;
	/* Enable interrupt for the last trb of a request */
	if (!req->req.no_interrupt)
		trb_hw->ctrl.ioc = 1;
	trb_hw->ctrl.chain = 0;
	/* make the TRB contents visible before it is queued to hardware */
	wmb();
	return trb;
}
/*
 * Fill in one TRB of a multi-TRB chain for @req (the caller supplies the
 * bookkeeping struct with trb_hw already pointing at its slot in a
 * contiguous hardware-TRB array). @length returns the bytes described by
 * this TRB; @is_last is set when this TRB terminates the chain (including
 * an extra zero-length packet requested via req.zero).
 *
 * Always returns 0 (kept as int for symmetry with other builders).
 */
static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
		struct mv_u3d_trb *trb, int *is_last)
{
	u32 temp;
	unsigned int direction;
	struct mv_u3d *u3d;
	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
	u3d = req->ep->u3d;
	/* chained TRBs are mapped later in one go; no per-TRB dma handle */
	trb->trb_dma = 0;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
	trb->trb_hw->buf_addr_hi = 0;
	trb->trb_hw->trb_len = cpu_to_le32(*length);
	trb->trb_hw->ctrl.own = 1;
	if (req->ep->ep_num == 0)
		trb->trb_hw->ctrl.type = TYPE_DATA;
	else
		trb->trb_hw->ctrl.type = TYPE_NORMAL;
	req->req.actual += *length;
	direction = mv_u3d_ep_dir(req->ep);
	if (direction == MV_U3D_EP_DIR_IN)
		trb->trb_hw->ctrl.dir = 1;
	else
		trb->trb_hw->ctrl.dir = 0;
	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;
	/* Enable interrupt for the last trb of a request */
	if (*is_last && !req->req.no_interrupt)
		trb->trb_hw->ctrl.ioc = 1;
	if (*is_last)
		trb->trb_hw->ctrl.chain = 0;
	else {
		trb->trb_hw->ctrl.chain = 1;
		dev_dbg(u3d->dev, "chain trb\n");
	}
	/* make TRB contents visible before the chain is handed to hardware */
	wmb();
	return 0;
}
/* generate TRB linked list for a request
 * usb controller only supports continous trb chain,
 * that trb structure physical address should be continous.
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EFAULT on failure.
 *
 * Fixes:
 *  - mv_u3d_build_trb_one() returns NULL on allocation failure; the old
 *    code dereferenced the result unconditionally,
 *  - on mv_u3d_build_trb_chain() failure the kcalloc'ed trb and trb_hw
 *    arrays were leaked.
 */
static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
{
	unsigned count;
	int is_last;
	struct mv_u3d_trb	*trb;
	struct mv_u3d_trb_hw	*trb_hw;
	struct mv_u3d *u3d;
	dma_addr_t dma;
	unsigned length;
	unsigned trb_num;
	u3d = req->ep->u3d;
	INIT_LIST_HEAD(&req->trb_list);
	length = req->req.length - req->req.actual;
	/* normally the request transfer length is less than 16KB.
	 * we use buil_trb_one() to optimize it.
	 */
	if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
		trb = mv_u3d_build_trb_one(req, &count, &dma);
		if (!trb)
			return -ENOMEM;
		list_add_tail(&trb->trb_list, &req->trb_list);
		req->trb_head = trb;
		req->trb_count = 1;
		req->chain = 0;
	} else {
		/* keep the array bases so error paths can free them */
		struct mv_u3d_trb *trb_base;
		struct mv_u3d_trb_hw *trb_hw_base;
		trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
		if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
			trb_num++;
		trb_base = trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
		if (!trb)
			return -ENOMEM;
		trb_hw_base = trb_hw = kcalloc(trb_num, sizeof(*trb_hw),
						GFP_ATOMIC);
		if (!trb_hw) {
			kfree(trb);
			return -ENOMEM;
		}
		do {
			trb->trb_hw = trb_hw;
			if (mv_u3d_build_trb_chain(req, &count,
						trb, &is_last)) {
				dev_err(u3d->dev,
					"%s, mv_u3d_build_trb_chain fail\n",
					__func__);
				/* don't leak the two arrays on error */
				kfree(trb_hw_base);
				kfree(trb_base);
				return -EIO;
			}
			list_add_tail(&trb->trb_list, &req->trb_list);
			req->trb_count++;
			trb++;
			trb_hw++;
		} while (!is_last);
		req->trb_head = list_entry(req->trb_list.next,
					struct mv_u3d_trb, trb_list);
		/* map the whole contiguous hw-TRB array in one shot */
		req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
					req->trb_head->trb_hw,
					trb_num * sizeof(*trb_hw),
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(u3d->gadget.dev.parent,
					req->trb_head->trb_dma)) {
			kfree(req->trb_head->trb_hw);
			kfree(req->trb_head);
			return -EFAULT;
		}
		req->chain = 1;
	}
	return 0;
}
/*
 * Start the oldest software-queued request on @ep, unless one is already
 * being processed. Maps the request for DMA, builds its TRBs, programs
 * the endpoint context and rings the doorbell.
 *
 * Fix: when TRB building or hardware queuing failed after
 * usb_gadget_map_request() succeeded, the DMA mapping was leaked; unmap
 * it on those error paths.
 */
static int
mv_u3d_start_queue(struct mv_u3d_ep *ep)
{
	struct mv_u3d *u3d = ep->u3d;
	struct mv_u3d_req *req;
	int ret;
	if (!list_empty(&ep->req_list) && !ep->processing)
		req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
	else
		return 0;
	ep->processing = 1;
	/* set up dma mapping */
	ret = usb_gadget_map_request(&u3d->gadget, &req->req,
					mv_u3d_ep_dir(ep));
	if (ret)
		goto break_processing;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->trb_count = 0;
	/* build trbs */
	ret = mv_u3d_req_to_trb(req);
	if (ret) {
		dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
		goto unmap;
	}
	/* and push them to device queue */
	ret = mv_u3d_queue_trb(ep, req);
	if (ret)
		goto unmap;
	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	return 0;
unmap:
	/* the request was DMA-mapped above - don't leak the mapping */
	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
break_processing:
	ep->processing = 0;
	return ret;
}
/*
 * usb_ep_ops.enable: configure an endpoint from its descriptor.
 *
 * Clamps maxburst per transfer type (bulk allows up to 16, everything
 * else is forced to 1), then pulses EP_INIT and programs max packet
 * size, burst, enable bit and transfer type into the direction-specific
 * control registers.
 *
 * Returns 0 on success, -EINVAL on bad arguments or unknown transfer
 * type, -ESHUTDOWN when no gadget driver is bound.
 */
static int  mv_u3d_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_u3d *u3d;
	struct mv_u3d_ep *ep;
	u16 max = 0;
	unsigned maxburst = 0;
	u32 epxcr, direction;
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;
	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	direction = mv_u3d_ep_dir(ep);
	max = le16_to_cpu(desc->wMaxPacketSize);
	/*
	 * Set the max burst size
	 */
	if (!_ep->maxburst)
		_ep->maxburst = 1;
	maxburst = _ep->maxburst;
	/* Set the max burst size */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (maxburst > 16) {
			dev_dbg(u3d->dev,
				"max burst should not be greater "
				"than 16 on bulk ep\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		dev_dbg(u3d->dev,
			"maxburst: %d on bulk %s\n", maxburst, ep->name);
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfer only supports maxburst as one */
		maxburst = 1;
		_ep->maxburst = maxburst;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (maxburst != 1) {
			dev_dbg(u3d->dev,
				"max burst should be 1 on int ep "
				"if transfer size is not 1024\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (maxburst != 1) {
			dev_dbg(u3d->dev,
				"max burst should be 1 on isoc ep "
				"if transfer size is not 1024\n");
			maxburst = 1;
			_ep->maxburst = maxburst;
		}
		break;
	default:
		goto en_done;
	}
	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->enabled = 1;
	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	if (direction == MV_U3D_EP_DIR_OUT) {
		/* EP_INIT pulse resets the endpoint state machine */
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		epxcr |= MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		udelay(5);
		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
	} else {
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		epxcr |= MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		udelay(5);
		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
	}
	return 0;
en_done:
	return -EINVAL;
}
/*
 * usb_ep_ops.disable: abort all pending requests (with -ESHUTDOWN),
 * clear the endpoint's enable bit and transfer type in hardware, and
 * drop the cached descriptor.
 */
static int  mv_u3d_ep_disable(struct usb_ep *_ep)
{
	struct mv_u3d *u3d;
	struct mv_u3d_ep *ep;
	u32 epxcr, direction;
	unsigned long flags;
	if (!_ep)
		return -EINVAL;
	ep = container_of(_ep, struct mv_u3d_ep, ep);
	if (!ep->ep.desc)
		return -EINVAL;
	u3d = ep->u3d;
	direction = mv_u3d_ep_dir(ep);
	/* nuke all pending requests (does flush) */
	spin_lock_irqsave(&u3d->lock, flags);
	mv_u3d_nuke(ep, -ESHUTDOWN);
	spin_unlock_irqrestore(&u3d->lock, flags);
	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	if (direction == MV_U3D_EP_DIR_OUT) {
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | USB_ENDPOINT_XFERTYPE_MASK);
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
	} else {
		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
		      | USB_ENDPOINT_XFERTYPE_MASK);
		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
	}
	ep->enabled = 0;
	ep->ep.desc = NULL;
	return 0;
}
/*
 * usb_ep_ops.alloc_request: allocate a zeroed driver-private request
 * wrapper and hand back the embedded struct usb_request.
 */
static struct usb_request *
mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_u3d_req *u3d_req;

	u3d_req = kzalloc(sizeof(*u3d_req), gfp_flags);
	if (u3d_req == NULL)
		return NULL;

	INIT_LIST_HEAD(&u3d_req->queue);

	return &u3d_req->req;
}
/*
 * usb_ep_ops.free_request: release a request obtained from
 * mv_u3d_alloc_request().
 */
static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	kfree(container_of(_req, struct mv_u3d_req, req));
}
/*
 * usb_ep_ops.fifo_flush: flush the endpoint FIFO for the ep's direction.
 *
 * ep0 needs the EP_FLUSH bit set and then explicitly cleared after a
 * short delay; for other endpoints the hardware clears EP_FLUSH itself,
 * which is polled here with a bounded timeout.
 */
static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_u3d *u3d;
	u32 direction;
	struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
	unsigned int loops;
	u32 tmp;
	/* if endpoint is not enabled, cannot flush endpoint */
	if (!ep->enabled)
		return;
	u3d = ep->u3d;
	direction = mv_u3d_ep_dir(ep);
	/* ep0 need clear bit after flushing fifo. */
	if (!ep->ep_num) {
		if (direction == MV_U3D_EP_DIR_OUT) {
			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
			tmp |= MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
			udelay(10);
			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
		} else {
			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
			tmp |= MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
			udelay(10);
			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
		}
		return;
	}
	if (direction == MV_U3D_EP_DIR_OUT) {
		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		tmp |= MV_U3D_EPXCR_EP_FLUSH;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
		/* Wait until flushing completed */
		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
			MV_U3D_EPXCR_EP_FLUSH) {
			/*
			 * EP_FLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (loops == 0) {
				dev_dbg(u3d->dev,
				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
				    direction ? "in" : "out");
				return;
			}
			loops--;
			udelay(LOOPS_USEC);
		}
	} else {	/* EP_DIR_IN */
		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		tmp |= MV_U3D_EPXCR_EP_FLUSH;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
		/* Wait until flushing completed */
		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
			MV_U3D_EPXCR_EP_FLUSH) {
			/*
			 * EP_FLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (loops == 0) {
				dev_dbg(u3d->dev,
				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
				    direction ? "in" : "out");
				return;
			}
			loops--;
			udelay(LOOPS_USEC);
		}
	}
}
/* queues (submits) an I/O request to an endpoint
 *
 * Validates the request, appends it to the endpoint's software list
 * (ep->req_list, under ep->req_lock), and kicks the hardware via
 * mv_u3d_start_queue() only when the list was previously empty - the
 * IRQ path drains the rest.
 *
 * Returns 0, or -EINVAL/-EMSGSIZE/-ESHUTDOWN on bad input/state.
 */
static int
mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_u3d_ep *ep;
	struct mv_u3d_req *req;
	struct mv_u3d *u3d;
	unsigned long flags;
	int is_first_req = 0;
	if (unlikely(!_ep || !_req))
		return -EINVAL;
	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;
	req = container_of(_req, struct mv_u3d_req, req);
	/* a zero-length ep0 queue in status stage just rearms for SETUP */
	if (!ep->ep_num
		&& u3d->ep0_state == MV_U3D_STATUS_STAGE
		&& !_req->length) {
		dev_dbg(u3d->dev, "ep0 status stage\n");
		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
		return 0;
	}
	dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
			__func__, _ep->name, req);
	/* catch various bogus parameters */
	if (!req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(u3d->dev,
			"%s, bad params, _req: 0x%p,"
			"req->req.complete: 0x%p, req->req.buf: 0x%p,"
			"list_empty: 0x%x\n",
			__func__, _req,
			req->req.complete, req->req.buf,
			list_empty(&req->queue));
		return -EINVAL;
	}
	if (unlikely(!ep->ep.desc)) {
		dev_err(u3d->dev, "%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}
	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(u3d->dev,
			"bad params of driver/speed\n");
		return -ESHUTDOWN;
	}
	req->ep = ep;
	/* Software list handles usb request. */
	spin_lock_irqsave(&ep->req_lock, flags);
	is_first_req = list_empty(&ep->req_list);
	list_add_tail(&req->list, &ep->req_list);
	spin_unlock_irqrestore(&ep->req_lock, flags);
	if (!is_first_req) {
		dev_dbg(u3d->dev, "list is not empty\n");
		return 0;
	}
	dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
	spin_lock_irqsave(&u3d->lock, flags);
	mv_u3d_start_queue(ep);
	spin_unlock_irqrestore(&u3d->lock, flags);
	return 0;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Fixes:
 *  - when re-pointing the EP context at the next request, the old code
 *    wrote the *virtual* address of the TRB bookkeeping struct; the
 *    controller needs the TRB's DMA address with the DCS bit set,
 *    exactly as mv_u3d_queue_trb() programs it,
 *  - the debug message fired in the branch where the request is NOT the
 *    last one but claimed the opposite.
 */
static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_u3d_ep *ep;
	struct mv_u3d_req *req = NULL, *iter;
	struct mv_u3d *u3d;
	struct mv_u3d_ep_context *ep_context;
	struct mv_u3d_req *next_req;
	unsigned long flags;
	int ret = 0;
	if (!_ep || !_req)
		return -EINVAL;
	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;
	spin_lock_irqsave(&ep->u3d->lock, flags);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}
	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_u3d_ep_fifo_flush(_ep);
		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			dev_dbg(u3d->dev,
				"it is not the last request in this ep queue\n");
			ep_context = ep->ep_context;
			next_req = list_entry(req->queue.next,
					struct mv_u3d_req, queue);
			/* Point first TRB of next request to the EP context,
			 * using the TRB's bus address plus DCS as in
			 * mv_u3d_queue_trb(). */
			ep_context->trb_addr_lo = cpu_to_le32(
				next_req->trb_head->trb_dma | DCS_ENABLE);
			ep_context->trb_addr_hi = 0;
		} else {
			struct mv_u3d_ep_context *ep_context;
			ep_context = ep->ep_context;
			ep_context->trb_addr_lo = 0;
			ep_context->trb_addr_hi = 0;
		}
	} else
		WARN_ON(1);
	mv_u3d_done(ep, req, -ECONNRESET);
	/* remove the req from the ep req list */
	if (!list_empty(&ep->req_list)) {
		struct mv_u3d_req *curr_req;
		curr_req = list_entry(ep->req_list.next,
					struct mv_u3d_req, list);
		if (curr_req == req) {
			list_del_init(&req->list);
			ep->processing = 0;
		}
	}
out:
	spin_unlock_irqrestore(&ep->u3d->lock, flags);
	return ret;
}
/*
 * Set or clear the HALT bit for one endpoint/direction.
 *
 * Fix: the old code indexed the EP control registers through
 * "u3d->eps" (i.e. always via u3d->eps[0].ep_num, which belongs to
 * endpoint 0) and completely ignored the @ep_num argument, so stalling
 * any endpoint actually stalled/unstalled ep0's registers. Use the
 * caller-supplied ep_num directly.
 */
static void
mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
{
	u32 tmp;

	dev_dbg(u3d->dev, "%s\n", __func__);

	if (direction == MV_U3D_EP_DIR_OUT) {
		tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxoutcr0);
		if (stall)
			tmp |= MV_U3D_EPXCR_EP_HALT;
		else
			tmp &= ~MV_U3D_EPXCR_EP_HALT;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxoutcr0);
	} else {
		tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxincr0);
		if (stall)
			tmp |= MV_U3D_EPXCR_EP_HALT;
		else
			tmp &= ~MV_U3D_EPXCR_EP_HALT;
		iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxincr0);
	}
}
/*
 * Common implementation for set_halt/set_wedge: stall or unstall an
 * endpoint, tracking wedge state (a wedged ep stays halted until
 * CLEAR_FEATURE is not enough - only the driver may unwedge).
 *
 * Returns 0, -EINVAL without a descriptor, -EOPNOTSUPP for isoc eps,
 * -EAGAIN when halting an IN ep that still has queued requests.
 */
static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_u3d_ep *ep;
	unsigned long flags;
	int status = 0;
	struct mv_u3d *u3d;
	ep = container_of(_ep, struct mv_u3d_ep, ep);
	u3d = ep->u3d;
	if (!ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}
	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}
	/*
	 * Attempt to halt IN ep will fail if any transfer requests
	 * are still queue
	 */
	if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
			&& !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}
	spin_lock_irqsave(&ep->u3d->lock, flags);
	mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->u3d->lock, flags);
	/* halting ep0 rewinds its direction to OUT for the next SETUP */
	if (ep->ep_num == 0)
		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
out:
	return status;
}
/* usb_ep_ops.set_halt: (un)stall the endpoint without wedging it. */
static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_u3d_ep_set_halt_wedge(_ep, halt, /* wedge */ 0);
}
/* usb_ep_ops.set_wedge: stall the endpoint and mark it wedged. */
static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_u3d_ep_set_halt_wedge(_ep, /* halt */ 1, /* wedge */ 1);
}
/* Endpoint operations exported to the gadget core for every ep. */
static const struct usb_ep_ops mv_u3d_ep_ops = {
	.enable		= mv_u3d_ep_enable,
	.disable	= mv_u3d_ep_disable,
	.alloc_request	= mv_u3d_alloc_request,
	.free_request	= mv_u3d_free_request,
	.queue		= mv_u3d_ep_queue,
	.dequeue	= mv_u3d_ep_dequeue,
	.set_wedge	= mv_u3d_ep_set_wedge,
	.set_halt	= mv_u3d_ep_set_halt,
	.fifo_flush	= mv_u3d_ep_fifo_flush,
};
/*
 * Quiesce the controller: mask interrupts (keeping only VBUS-valid when
 * clock gating is off and VBUS detection is in use), acknowledge all
 * pending status bits, and clear the RUN bit.
 */
static void mv_u3d_controller_stop(struct mv_u3d *u3d)
{
	u32 tmp;
	if (!u3d->clock_gating && u3d->vbus_valid_detect)
		iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
				&u3d->vuc_regs->intrenable);
	else
		iowrite32(0, &u3d->vuc_regs->intrenable);
	/* write-1-to-clear all latched completion/error status */
	iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
	iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
	iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
	iowrite32(~0x0, &u3d->vuc_regs->linkchange);
	iowrite32(0x1, &u3d->vuc_regs->setuplock);
	/* Reset the RUN bit in the command register to stop USB */
	tmp = ioread32(&u3d->op_regs->usbcmd);
	tmp &= ~MV_U3D_CMD_RUN_STOP;
	iowrite32(tmp, &u3d->op_regs->usbcmd);
	dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
		ioread32(&u3d->op_regs->usbcmd));
}
/*
 * Bring the controller online: release the LTSSM link state machine,
 * unmask the working interrupt set, enable the control endpoint, and
 * set the RUN bit.
 */
static void mv_u3d_controller_start(struct mv_u3d *u3d)
{
	u32 usbintr;
	u32 temp;
	/* enable link LTSSM state machine */
	temp = ioread32(&u3d->vuc_regs->ltssm);
	temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
	iowrite32(temp, &u3d->vuc_regs->ltssm);
	/* Enable interrupts */
	usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
		MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
		MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
		(u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
	iowrite32(usbintr, &u3d->vuc_regs->intrenable);
	/* Enable ctrl ep */
	iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
	/* Set the Run bit in the command register */
	iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
	dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
		ioread32(&u3d->op_regs->usbcmd));
}
/*
 * Soft-reset the controller and re-program the endpoint context base
 * address. Polls the self-clearing reset bit with a bounded timeout.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears.
 */
static int mv_u3d_controller_reset(struct mv_u3d *u3d)
{
	unsigned int loops;
	u32 tmp;
	/* Stop the controller */
	tmp = ioread32(&u3d->op_regs->usbcmd);
	tmp &= ~MV_U3D_CMD_RUN_STOP;
	iowrite32(tmp, &u3d->op_regs->usbcmd);
	/* Reset the controller to get default values */
	iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
	/* wait for reset to complete */
	loops = LOOPS(MV_U3D_RESET_TIMEOUT);
	while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(u3d->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}
	/* Configure the Endpoint Context Address */
	iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
	iowrite32(0, &u3d->op_regs->dcbaaph);
	return 0;
}
/*
 * Power up the controller when clock gating is in use: enable the clock,
 * then run the platform PHY init hook. Without clock gating the clock is
 * always on, so only the "active" flag is set. Idempotent while active.
 *
 * Returns 0 on success or the PHY init error (clock re-disabled).
 */
static int mv_u3d_enable(struct mv_u3d *u3d)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
	int retval;
	if (u3d->active)
		return 0;
	if (!u3d->clock_gating) {
		u3d->active = 1;
		return 0;
	}
	dev_dbg(u3d->dev, "enable u3d\n");
	clk_enable(u3d->clk);
	if (pdata->phy_init) {
		retval = pdata->phy_init(u3d->phy_regs);
		if (retval) {
			dev_err(u3d->dev,
				"init phy error %d\n", retval);
			/* undo the clk_enable above on PHY failure */
			clk_disable(u3d->clk);
			return retval;
		}
	}
	u3d->active = 1;
	return 0;
}
/*
 * Counterpart of mv_u3d_enable(): deinit the PHY and gate the clock,
 * but only when clock gating is in use and the controller is active.
 */
static void mv_u3d_disable(struct mv_u3d *u3d)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
	if (u3d->clock_gating && u3d->active) {
		dev_dbg(u3d->dev, "disable u3d\n");
		if (pdata->phy_deinit)
			pdata->phy_deinit(u3d->phy_regs);
		clk_disable(u3d->clk);
		u3d->active = 0;
	}
}
/*
 * usb_gadget_ops.vbus_session: external VBUS notification. When a driver
 * is bound, soft-connect is on and VBUS appears, power the controller
 * back up and re-initialize it (register context is lost while the clock
 * is gated); on VBUS loss, stop all activity and power down.
 */
static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_u3d *u3d;
	unsigned long flags;
	int retval = 0;
	u3d = container_of(gadget, struct mv_u3d, gadget);
	spin_lock_irqsave(&u3d->lock, flags);
	u3d->vbus_active = (is_active != 0);
	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, u3d->softconnect, u3d->vbus_active);
	/*
	 * 1. external VBUS detect: we can disable/enable clock on demand.
	 * 2. UDC VBUS detect: we have to enable clock all the time.
	 * 3. No VBUS detect: we have to enable clock all the time.
	 */
	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
		retval = mv_u3d_enable(u3d);
		if (retval == 0) {
			/*
			 * after clock is disabled, we lost all the register
			 *  context. We have to re-init registers
			 */
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	} else if (u3d->driver && u3d->softconnect) {
		if (!u3d->active)
			goto out;
		/* stop all the transfer in queue*/
		mv_u3d_stop_activity(u3d, u3d->driver);
		mv_u3d_controller_stop(u3d);
		mv_u3d_disable(u3d);
	}
out:
	spin_unlock_irqrestore(&u3d->lock, flags);
	return retval;
}
/* constrain controller's VBUS power usage
 * This call is used by gadget drivers during SET_CONFIGURATION calls,
 * reporting how much power the device may consume. For example, this
 * could affect how quickly batteries are recharged.
 *
 * No hardware action is taken here; the value is only cached in
 * u3d->power.
 *
 * Returns zero on success, else negative errno.
 */
static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct mv_u3d *u3d;

	u3d = container_of(gadget, struct mv_u3d, gadget);
	u3d->power = mA;

	return 0;
}
/*
 * usb_gadget_ops.pullup: software connect/disconnect. Mirrors
 * mv_u3d_vbus_session(): with driver bound, VBUS present and soft-connect
 * turned on, the controller is (re)initialized; turning soft-connect off
 * while VBUS is present stops all activity and powers down.
 */
static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
	unsigned long flags;
	int retval = 0;
	spin_lock_irqsave(&u3d->lock, flags);
	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, u3d->softconnect, u3d->vbus_active);
	u3d->softconnect = (is_on != 0);
	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
		retval = mv_u3d_enable(u3d);
		if (retval == 0) {
			/*
			 * after clock is disabled, we lost all the register
			 *  context. We have to re-init registers
			 */
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	} else if (u3d->driver && u3d->vbus_active) {
		/* stop all the transfer in queue*/
		mv_u3d_stop_activity(u3d, u3d->driver);
		mv_u3d_controller_stop(u3d);
		mv_u3d_disable(u3d);
	}
	spin_unlock_irqrestore(&u3d->lock, flags);
	return retval;
}
/*
 * usb_gadget_ops.udc_start handler: bind a gadget driver.  Without clock
 * gating the clock (and PHY) must stay on permanently, so they are enabled
 * here.  Returns -EBUSY if a driver is already bound.
 */
static int mv_u3d_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
	unsigned long flags;
	if (u3d->driver)
		return -EBUSY;
	spin_lock_irqsave(&u3d->lock, flags);
	if (!u3d->clock_gating) {
		/* keep clock/PHY on for the whole bound lifetime */
		clk_enable(u3d->clk);
		if (pdata->phy_init)
			pdata->phy_init(u3d->phy_regs);
	}
	/* hook up the driver ... */
	u3d->driver = driver;
	u3d->ep0_dir = USB_DIR_OUT;
	spin_unlock_irqrestore(&u3d->lock, flags);
	/* arm controller-side VBUS-valid detection */
	u3d->vbus_valid_detect = 1;
	return 0;
}
/*
 * usb_gadget_ops.udc_stop handler: unbind the gadget driver.  The clock
 * is temporarily enabled so controller registers can be accessed while
 * activity is stopped, then the PHY and clock are shut down.
 */
static int mv_u3d_stop(struct usb_gadget *g)
{
	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
	unsigned long flags;
	u3d->vbus_valid_detect = 0;
	spin_lock_irqsave(&u3d->lock, flags);
	/* enable clock to access controller register */
	clk_enable(u3d->clk);
	if (pdata->phy_init)
		pdata->phy_init(u3d->phy_regs);
	mv_u3d_controller_stop(u3d);
	/* stop all usb activities */
	u3d->gadget.speed = USB_SPEED_UNKNOWN;
	/* NULL driver: no disconnect callback is issued by stop_activity */
	mv_u3d_stop_activity(u3d, NULL);
	mv_u3d_disable(u3d);
	if (pdata->phy_deinit)
		pdata->phy_deinit(u3d->phy_regs);
	clk_disable(u3d->clk);
	spin_unlock_irqrestore(&u3d->lock, flags);
	u3d->driver = NULL;
	return 0;
}
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_u3d_ops = {
	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_u3d_vbus_session,
	/* constrain controller's VBUS power usage */
	.vbus_draw	= mv_u3d_vbus_draw,
	/* software connect/disconnect (D+ pullup) */
	.pullup		= mv_u3d_pullup,
	/* bind/unbind a gadget function driver */
	.udc_start	= mv_u3d_start,
	.udc_stop	= mv_u3d_stop,
};
/*
 * Initialize the endpoint array.  eps[1] doubles as bidirectional ep0;
 * eps[i] for i >= 2 map to hardware endpoint i/2, with odd indices being
 * the IN direction and even indices the OUT direction.  Only the non-ep0
 * endpoints are linked into gadget.ep_list.  Always returns 0.
 */
static int mv_u3d_eps_init(struct mv_u3d *u3d)
{
	struct mv_u3d_ep	*ep;
	char name[14];
	int i;
	/* initialize ep0, ep0 in/out use eps[1] */
	ep = &u3d->eps[1];
	ep->u3d = u3d;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_u3d_ep_ops;
	ep->wedge = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
	ep->ep.caps.type_control = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;
	ep->ep_num = 0;
	ep->ep.desc = &mv_u3d_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);
	INIT_LIST_HEAD(&ep->req_list);
	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
	/* add ep0 ep_context */
	ep->ep_context = &u3d->ep_context[1];
	/* initialize other endpoints */
	for (i = 2; i < u3d->max_eps * 2; i++) {
		ep = &u3d->eps[i];
		if (i & 1) {
			/* odd index: IN endpoint "ep<n>in" */
			snprintf(name, sizeof(name), "ep%din", i >> 1);
			ep->direction = MV_U3D_EP_DIR_IN;
			ep->ep.caps.dir_in = true;
		} else {
			/* even index: OUT endpoint "ep<n>out" */
			snprintf(name, sizeof(name), "ep%dout", i >> 1);
			ep->direction = MV_U3D_EP_DIR_OUT;
			ep->ep.caps.dir_out = true;
		}
		ep->u3d = u3d;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;
		/* non-ep0 endpoints support iso/bulk/int transfers */
		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
		ep->ep.ops = &mv_u3d_ep_ops;
		/* no hardware limit known here; allow the maximum */
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;
		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
		INIT_LIST_HEAD(&ep->req_list);
		spin_lock_init(&ep->req_lock);
		ep->ep_context = &u3d->ep_context[i];
	}
	return 0;
}
/* delete all endpoint requests, called with spinlock held */
/*
 * Retire every request queued on @ep with @status, after flushing the
 * endpoint FIFO.  Must be called with the controller spinlock held.
 */
static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
{
	struct mv_u3d_req *req;

	/* endpoint fifo flush */
	mv_u3d_ep_fifo_flush(&ep->ep);
	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct mv_u3d_req, queue);
		mv_u3d_done(ep, req, status);
	}
}
/* stop all USB activities */
static
void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
{
struct mv_u3d_ep *ep;
mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
mv_u3d_nuke(ep, -ESHUTDOWN);
}
/* report disconnect; the driver is already quiesced */
if (driver) {
spin_unlock(&u3d->lock);
driver->disconnect(&u3d->gadget);
spin_lock(&u3d->lock);
}
}
/* Record a controller error: bump the error counter and log it. */
static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
{
	dev_err(u3d->dev, "%s\n", __func__);
	/* Increment the error count */
	u3d->errors++;
}
/*
 * Handle a link-change interrupt: read and acknowledge (write-one-to-clear)
 * the linkchange register, then update the USB state machine for each
 * asserted event bit (link up, suspend, resume, warm/hot reset, etc.).
 */
static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
{
	u32 linkchange;
	linkchange = ioread32(&u3d->vuc_regs->linkchange);
	/* write back to clear the handled bits */
	iowrite32(linkchange, &u3d->vuc_regs->linkchange);
	dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
	if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
		dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
			ioread32(&u3d->vuc_regs->ltssmstate));
		u3d->usb_state = USB_STATE_DEFAULT;
		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
		/* set speed */
		u3d->gadget.speed = USB_SPEED_SUPER;
	}
	if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
		dev_dbg(u3d->dev, "link suspend\n");
		/* remember current state so resume can restore it */
		u3d->resume_state = u3d->usb_state;
		u3d->usb_state = USB_STATE_SUSPENDED;
	}
	if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
		dev_dbg(u3d->dev, "link resume\n");
		u3d->usb_state = u3d->resume_state;
		u3d->resume_state = 0;
	}
	if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
		dev_dbg(u3d->dev, "warm reset\n");
		u3d->usb_state = USB_STATE_POWERED;
	}
	if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
		dev_dbg(u3d->dev, "hot reset\n");
		u3d->usb_state = USB_STATE_DEFAULT;
	}
	if (linkchange & MV_U3D_LINK_CHANGE_INACT)
		dev_dbg(u3d->dev, "inactive\n");
	if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
		dev_dbg(u3d->dev, "ss.disabled\n");
	if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
		dev_dbg(u3d->dev, "vbus invalid\n");
		u3d->usb_state = USB_STATE_ATTACHED;
		u3d->vbus_valid_detect = 1;
		/* if external vbus detect is not supported,
		 * we handle it here.
		 */
		if (!u3d->vbus) {
			/* vbus_session takes the lock itself; drop ours */
			spin_unlock(&u3d->lock);
			mv_u3d_vbus_session(&u3d->gadget, 0);
			spin_lock(&u3d->lock);
		}
	}
}
/*
 * Handle a USB SET_ADDRESS request.  Only legal in the DEFAULT state and
 * for addresses <= 127; otherwise ep0 is stalled.  The address is written
 * into the low 7 bits of the devaddrtiebrkr register.
 */
static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
	struct usb_ctrlrequest *setup)
{
	u32 tmp;
	if (u3d->usb_state != USB_STATE_DEFAULT) {
		dev_err(u3d->dev,
			"%s, cannot setaddr in this state (%d)\n",
			__func__, u3d->usb_state);
		goto err;
	}
	u3d->dev_addr = (u8)setup->wValue;
	dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
	if (u3d->dev_addr > 127) {
		dev_err(u3d->dev,
			"%s, u3d address is wrong (out of range)\n", __func__);
		u3d->dev_addr = 0;
		goto err;
	}
	/* update usb state */
	u3d->usb_state = USB_STATE_ADDRESS;
	/* set the new address */
	tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
	tmp &= ~0x7F;
	tmp |= (u32)u3d->dev_addr;
	iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
	return;
err:
	mv_u3d_ep0_stall(u3d);
}
/* Return 1 if @setup is a standard SET_CONFIGURATION request, else 0. */
static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
{
	return ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) &&
	       (setup->bRequest == USB_REQ_SET_CONFIGURATION);
}
/*
 * Handle a received SETUP packet on endpoint @ep_num.  SET_ADDRESS is
 * handled in-driver; everything else (including GET_STATUS and
 * CLEAR/SET_FEATURE) is delegated to the gadget driver's setup()
 * callback, with the lock dropped around the callback.  A failing
 * setup() stalls ep0.
 *
 * Fix: the sparse lock annotations referenced a nonexistent "u3c"
 * variable; they now name the actual lock that is released/reacquired.
 */
static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&u3d->lock)
	__acquires(&u3d->lock)
{
	bool delegate = false;
	/* drop any stale requests on the IN half of this endpoint */
	mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
	dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		setup->bRequestType, setup->bRequest,
		setup->wValue, setup->wIndex, setup->wLength);
	/* We process some stardard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			delegate = true;
			break;
		case USB_REQ_SET_ADDRESS:
			/* only request handled entirely in the driver */
			mv_u3d_ch9setaddress(u3d, setup);
			break;
		case USB_REQ_CLEAR_FEATURE:
			delegate = true;
			break;
		case USB_REQ_SET_FEATURE:
			delegate = true;
			break;
		default:
			delegate = true;
		}
	} else
		delegate = true;
	/* delegate USB standard requests to the gadget driver */
	if (delegate) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from u3d */
			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
			spin_unlock(&u3d->lock);
			if (u3d->driver->setup(&u3d->gadget,
				&u3d->local_setup_buff) < 0) {
				dev_err(u3d->dev, "setup error!\n");
				mv_u3d_ep0_stall(u3d);
			}
			spin_lock(&u3d->lock);
		} else {
			/* no DATA phase, STATUS phase from gadget */
			u3d->ep0_dir = MV_U3D_EP_DIR_IN;
			u3d->ep0_state = MV_U3D_STATUS_STAGE;
			spin_unlock(&u3d->lock);
			if (u3d->driver->setup(&u3d->gadget,
				&u3d->local_setup_buff) < 0)
				mv_u3d_ep0_stall(u3d);
			spin_lock(&u3d->lock);
		}
		if (mv_u3d_is_set_configuration(setup)) {
			dev_dbg(u3d->dev, "u3d configured\n");
			u3d->usb_state = USB_STATE_CONFIGURED;
		}
	}
}
/*
 * Copy the 8-byte SETUP packet for endpoint @ep_num out of its IN
 * endpoint-context area into @buffer_ptr.
 */
static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
{
	struct mv_u3d_ep_context *ctx;

	ctx = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
	/* a SETUP packet is always exactly 8 bytes */
	memcpy(buffer_ptr, (u8 *)&ctx->setup_buffer, 8);
}
/*
 * Dispatch all pending SETUP-received interrupts: for each endpoint bit
 * set in the setuplock register, fetch and handle its SETUP packet, then
 * write the bitmask back to release the setup lock.
 */
static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
{
	u32 tmp, i;
	/* Process all Setup packet received interrupts */
	tmp = ioread32(&u3d->vuc_regs->setuplock);
	if (tmp) {
		for (i = 0; i < u3d->max_eps; i++) {
			if (tmp & (1 << i)) {
				mv_u3d_get_setup_data(u3d, i,
					(u8 *)(&u3d->local_setup_buff));
				mv_u3d_handle_setup_packet(u3d, i,
					&u3d->local_setup_buff);
			}
		}
	}
	/* writing back releases the hardware setup lock */
	iowrite32(tmp, &u3d->vuc_regs->setuplock);
}
/*
 * Handle transfer-complete interrupts.  endcomplete encodes OUT endpoints
 * in bits 0..15 and IN endpoints in bits 16..31.  For each completed
 * endpoint: pop the finished request from req_list, complete queued
 * requests until one is still in flight, then restart the queue.
 */
static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_u3d_ep	*curr_ep;
	struct mv_u3d_req *curr_req, *temp_req;
	int status;
	tmp = ioread32(&u3d->vuc_regs->endcomplete);
	dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
	if (!tmp)
		return;
	/* write-one-to-clear the handled completion bits */
	iowrite32(tmp, &u3d->vuc_regs->endcomplete);
	for (i = 0; i < u3d->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;
		/* IN completions live in the upper 16 bits */
		bit_pos = 1 << (ep_num + 16 * direction);
		if (!(bit_pos & tmp))
			continue;
		/* index 0 would be ep0-OUT; ep0 lives in eps[1] */
		if (i == 0)
			curr_ep = &u3d->eps[1];
		else
			curr_ep = &u3d->eps[i];
		/* remove req out of ep request list after completion */
		dev_dbg(u3d->dev, "tr comp: check req_list\n");
		spin_lock(&curr_ep->req_lock);
		if (!list_empty(&curr_ep->req_list)) {
			struct mv_u3d_req *req;
			req = list_entry(curr_ep->req_list.next,
						struct mv_u3d_req, list);
			list_del_init(&req->list);
			curr_ep->processing = 0;
		}
		spin_unlock(&curr_ep->req_lock);
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = mv_u3d_process_ep_req(u3d, i, curr_req);
			if (status)
				break;
			/* write back status to req */
			curr_req->req.status = status;
			/* ep0 request completion */
			if (ep_num == 0) {
				mv_u3d_done(curr_ep, curr_req, 0);
				break;
			} else {
				mv_u3d_done(curr_ep, curr_req, status);
			}
		}
		dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
		mv_u3d_start_queue(curr_ep);
	}
}
/*
 * Top-level (shared) interrupt handler.  Reads the cause register masked
 * by the enable register and dispatches, in order: VBUS-valid, underrun,
 * descriptor errors, link change, TX/RX completion, and SETUP packets.
 * Returns IRQ_NONE when no enabled cause bit is set.
 */
static irqreturn_t mv_u3d_irq(int irq, void *dev)
{
	struct mv_u3d *u3d = (struct mv_u3d *)dev;
	u32 status, intr;
	u32 bridgesetting;
	u32 trbunderrun;
	spin_lock(&u3d->lock);
	status = ioread32(&u3d->vuc_regs->intrcause);
	intr = ioread32(&u3d->vuc_regs->intrenable);
	/* only consider causes that are actually enabled */
	status &= intr;
	if (status == 0) {
		spin_unlock(&u3d->lock);
		dev_err(u3d->dev, "irq error!\n");
		return IRQ_NONE;
	}
	if (status & MV_U3D_USBINT_VBUS_VALID) {
		bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
		if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
			/* write vbus valid bit of bridge setting to clear */
			bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
			iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
			dev_dbg(u3d->dev, "vbus valid\n");
			u3d->usb_state = USB_STATE_POWERED;
			u3d->vbus_valid_detect = 0;
			/* if external vbus detect is not supported,
			 * we handle it here.
			 */
			if (!u3d->vbus) {
				/* vbus_session acquires the lock itself */
				spin_unlock(&u3d->lock);
				mv_u3d_vbus_session(&u3d->gadget, 1);
				spin_lock(&u3d->lock);
			}
		} else
			dev_err(u3d->dev, "vbus bit is not set\n");
	}
	/* RX data is already in the 16KB FIFO.*/
	if (status & MV_U3D_USBINT_UNDER_RUN) {
		trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
		dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
		iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
		mv_u3d_irq_process_error(u3d);
	}
	if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
		/* write one to clear */
		iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
			| MV_U3D_USBINT_TXDESC_ERR),
			&u3d->vuc_regs->intrcause);
		dev_err(u3d->dev, "desc err 0x%x\n", status);
		mv_u3d_irq_process_error(u3d);
	}
	if (status & MV_U3D_USBINT_LINK_CHG)
		mv_u3d_irq_process_link_change(u3d);
	if (status & MV_U3D_USBINT_TX_COMPLETE)
		mv_u3d_irq_process_tr_complete(u3d);
	if (status & MV_U3D_USBINT_RX_COMPLETE)
		mv_u3d_irq_process_tr_complete(u3d);
	if (status & MV_U3D_USBINT_SETUP)
		mv_u3d_irq_process_setup(u3d);
	spin_unlock(&u3d->lock);
	return IRQ_HANDLED;
}
/*
 * Platform-driver remove: unregister the UDC and release everything
 * allocated in probe, in reverse order (TRB pool, ep contexts, endpoint
 * array, IRQ, register mapping, status request, clock, private data).
 */
static void mv_u3d_remove(struct platform_device *dev)
{
	struct mv_u3d *u3d = platform_get_drvdata(dev);
	BUG_ON(u3d == NULL);
	usb_del_gadget_udc(&u3d->gadget);
	/* free memory allocated in probe */
	dma_pool_destroy(u3d->trb_pool);
	if (u3d->ep_context)
		dma_free_coherent(&dev->dev, u3d->ep_context_size,
			u3d->ep_context, u3d->ep_context_dma);
	kfree(u3d->eps);
	/* irq == 0 means request_irq failed or was never done */
	if (u3d->irq)
		free_irq(u3d->irq, u3d);
	if (u3d->cap_regs)
		iounmap(u3d->cap_regs);
	u3d->cap_regs = NULL;
	kfree(u3d->status_req);
	clk_put(u3d->clk);
	kfree(u3d);
}
/*
 * Platform-driver probe: map the capability registers, briefly enable the
 * clock/PHY to quiesce the controller, allocate DMA areas (ep contexts,
 * TRB pool), build the endpoint array, request the IRQ and register the
 * gadget.  Error paths unwind in strict reverse order of acquisition.
 *
 * Fix: on request_irq() failure the error message was printed *after*
 * u3d->irq had been zeroed, so it always reported "irq 0".  The message
 * now prints the real IRQ number before the field is cleared (it is
 * cleared so that the remove/error paths skip free_irq()).
 */
static int mv_u3d_probe(struct platform_device *dev)
{
	struct mv_u3d *u3d;
	struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
	int retval = 0;
	struct resource *r;
	size_t size;
	if (!dev_get_platdata(&dev->dev)) {
		dev_err(&dev->dev, "missing platform_data\n");
		retval = -ENODEV;
		goto err_pdata;
	}
	u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
	if (!u3d) {
		retval = -ENOMEM;
		goto err_alloc_private;
	}
	spin_lock_init(&u3d->lock);
	platform_set_drvdata(dev, u3d);
	u3d->dev = &dev->dev;
	u3d->vbus = pdata->vbus;
	u3d->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(u3d->clk)) {
		retval = PTR_ERR(u3d->clk);
		goto err_get_clk;
	}
	r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
	if (!r) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_get_cap_regs;
	}
	u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (!u3d->cap_regs) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		retval = -EBUSY;
		goto err_map_cap_regs;
	} else {
		dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
			(unsigned long) r->start,
			(unsigned long) u3d->cap_regs);
	}
	/* we will access controller register, so enable the u3d controller */
	retval = clk_enable(u3d->clk);
	if (retval) {
		dev_err(&dev->dev, "clk_enable error %d\n", retval);
		goto err_u3d_enable;
	}
	if (pdata->phy_init) {
		retval = pdata->phy_init(u3d->phy_regs);
		if (retval) {
			dev_err(&dev->dev, "init phy error %d\n", retval);
			clk_disable(u3d->clk);
			goto err_phy_init;
		}
	}
	u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
		+ MV_U3D_USB3_OP_REGS_OFFSET);
	u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
		+ ioread32(&u3d->cap_regs->vuoff));
	u3d->max_eps = 16;
	/*
	 * some platform will use usb to download image, it may not disconnect
	 * usb gadget before loading kernel. So first stop u3d here.
	 */
	mv_u3d_controller_stop(u3d);
	iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
	if (pdata->phy_deinit)
		pdata->phy_deinit(u3d->phy_regs);
	clk_disable(u3d->clk);
	/* ep contexts: one per direction per endpoint, alignment-padded */
	size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
	size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
		& ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
	u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
					&u3d->ep_context_dma, GFP_KERNEL);
	if (!u3d->ep_context) {
		dev_err(&dev->dev, "allocate ep context memory failed\n");
		retval = -ENOMEM;
		goto err_alloc_ep_context;
	}
	u3d->ep_context_size = size;
	/* create TRB dma_pool resource */
	u3d->trb_pool = dma_pool_create("u3d_trb",
			&dev->dev,
			sizeof(struct mv_u3d_trb_hw),
			MV_U3D_TRB_ALIGNMENT,
			MV_U3D_DMA_BOUNDARY);
	if (!u3d->trb_pool) {
		retval = -ENOMEM;
		goto err_alloc_trb_pool;
	}
	size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
	u3d->eps = kzalloc(size, GFP_KERNEL);
	if (!u3d->eps) {
		retval = -ENOMEM;
		goto err_alloc_eps;
	}
	/* initialize ep0 status request structure */
	u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
	if (!u3d->status_req) {
		retval = -ENOMEM;
		goto err_alloc_status_req;
	}
	INIT_LIST_HEAD(&u3d->status_req->queue);
	/* allocate a small amount of memory to get valid address */
	u3d->status_req->req.buf = (char *)u3d->status_req
					+ sizeof(struct mv_u3d_req);
	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
	u3d->resume_state = USB_STATE_NOTATTACHED;
	u3d->usb_state = USB_STATE_ATTACHED;
	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
	u3d->remote_wakeup = 0;
	r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&dev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_get_irq;
	}
	u3d->irq = r->start;
	/* initialize gadget structure */
	u3d->gadget.ops = &mv_u3d_ops;	/* usb_gadget_ops */
	u3d->gadget.ep0 = &u3d->eps[1].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&u3d->gadget.ep_list);	/* ep_list */
	u3d->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	/* the "gadget" abstracts/virtualizes the controller */
	u3d->gadget.name = driver_name;	/* gadget name */
	mv_u3d_eps_init(u3d);
	if (request_irq(u3d->irq, mv_u3d_irq,
		IRQF_SHARED, driver_name, u3d)) {
		/* report the real IRQ number before clearing the field */
		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
			u3d->irq);
		/* irq == 0 tells the error/remove paths to skip free_irq() */
		u3d->irq = 0;
		retval = -ENODEV;
		goto err_request_irq;
	}
	/* external vbus detection */
	if (u3d->vbus) {
		u3d->clock_gating = 1;
		dev_err(&dev->dev, "external vbus detection\n");
	}
	if (!u3d->clock_gating)
		u3d->vbus_active = 1;
	/* enable usb3 controller vbus detection */
	u3d->vbus_valid_detect = 1;
	retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
	if (retval)
		goto err_unregister;
	dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
		u3d->clock_gating ? "with" : "without");
	return 0;
err_unregister:
	free_irq(u3d->irq, u3d);
err_get_irq:
err_request_irq:
	kfree(u3d->status_req);
err_alloc_status_req:
	kfree(u3d->eps);
err_alloc_eps:
	dma_pool_destroy(u3d->trb_pool);
err_alloc_trb_pool:
	dma_free_coherent(&dev->dev, u3d->ep_context_size,
		u3d->ep_context, u3d->ep_context_dma);
err_alloc_ep_context:
err_phy_init:
err_u3d_enable:
	iounmap(u3d->cap_regs);
err_map_cap_regs:
err_get_cap_regs:
	clk_put(u3d->clk);
err_get_clk:
	kfree(u3d);
err_alloc_private:
err_pdata:
	return retval;
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: when clock gating is off (clock always on), stop the
 * controller, quiesce all activity and power down.  With clock gating,
 * power is already managed by the VBUS session path, so nothing to do.
 */
static int mv_u3d_suspend(struct device *dev)
{
	struct mv_u3d *u3d = dev_get_drvdata(dev);
	/*
	 * only cable is unplugged, usb can suspend.
	 * So do not care about clock_gating == 1, it is handled by
	 * vbus session.
	 */
	if (!u3d->clock_gating) {
		mv_u3d_controller_stop(u3d);
		spin_lock_irq(&u3d->lock);
		/* stop all usb activities */
		mv_u3d_stop_activity(u3d, u3d->driver);
		spin_unlock_irq(&u3d->lock);
		mv_u3d_disable(u3d);
	}
	return 0;
}
/*
 * System resume: re-enable and, if a driver is bound with softconnect
 * on, fully re-initialize the controller (register context was lost).
 */
static int mv_u3d_resume(struct device *dev)
{
	struct mv_u3d *u3d = dev_get_drvdata(dev);
	int retval;
	if (!u3d->clock_gating) {
		retval = mv_u3d_enable(u3d);
		if (retval)
			return retval;
		if (u3d->driver && u3d->softconnect) {
			mv_u3d_controller_reset(u3d);
			mv_u3d_ep0_reset(u3d);
			mv_u3d_controller_start(u3d);
		}
	}
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
/* Shutdown hook: clear the RUN/STOP bit so the controller halts. */
static void mv_u3d_shutdown(struct platform_device *dev)
{
	struct mv_u3d *u3d = platform_get_drvdata(dev);
	u32 tmp;
	tmp = ioread32(&u3d->op_regs->usbcmd);
	tmp &= ~MV_U3D_CMD_RUN_STOP;
	iowrite32(tmp, &u3d->op_regs->usbcmd);
}
/* platform driver glue for the "mv-u3d" device */
static struct platform_driver mv_u3d_driver = {
	.probe		= mv_u3d_probe,
	.remove_new	= mv_u3d_remove,
	.shutdown	= mv_u3d_shutdown,
	.driver		= {
		.name	= "mv-u3d",
		.pm	= &mv_u3d_pm_ops,
	},
};
module_platform_driver(mv_u3d_driver);
MODULE_ALIAS("platform:mv-u3d");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Yu Xu <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/mv_u3d_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* driver/usb/gadget/fsl_qe_udc.c
*
* Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Xie Xiaobo <[email protected]>
* Li Yang <[email protected]>
* Based on bareboard code from Shlomi Gridish.
*
* Description:
* Freescle QE/CPM USB Pheripheral Controller Driver
* The controller can be found on MPC8360, MPC8272, and etc.
* MPC8360 Rev 1.1 may need QE mircocode update
*/
#undef USB_TRACE
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <soc/fsl/qe/qe.h>
#include <asm/cpm.h>
#include <asm/dma.h>
#include <asm/reg.h>
#include "fsl_qe_udc.h"
#define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
#define DRIVER_AUTHOR   "Xie XiaoBo"
#define DRIVER_VERSION  "1.0"
/* sentinel for "no DMA mapping established" */
#define DMA_ADDR_INVALID        (~(dma_addr_t)0)
static const char driver_name[] = "fsl_qe_udc";
static const char driver_desc[] = DRIVER_DESC;
/*ep name is important in gadget, it should obey the convention of ep_match()*/
static const char *const ep_name[] = {
	"ep0-control", /* everyone has ep0 */
	/* 3 configurable endpoints */
	"ep1",
	"ep2",
	"ep3",
};
/* fixed descriptor for the control endpoint (ep0) */
static const struct usb_endpoint_descriptor qe_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
};
/********************************************************************
* Internal Used Function Start
********************************************************************/
/*-----------------------------------------------------------------
* done() - retire a request; caller blocked irqs
*--------------------------------------------------------------*/
/*-----------------------------------------------------------------
 * done() - retire a request; caller blocked irqs
 * Removes @req from the endpoint queue, finalizes its DMA mapping
 * (unmap if this driver mapped it, sync otherwise) and hands it back
 * to the gadget driver with the udc lock dropped around the callback.
 *--------------------------------------------------------------*/
static void done(struct qe_ep *ep, struct qe_req *req, int status)
{
	struct qe_udc *udc = ep->udc;
	unsigned char stopped = ep->stopped;
	/* the req->queue pointer is used by ep_queue() func, in which
	 * the request will be added into a udc_ep->queue 'd tail
	 * so here the req will be dropped from the ep->queue
	 */
	list_del_init(&req->queue);
	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	if (req->mapped) {
		/* we created the mapping in ep_queue(); undo it */
		dma_unmap_single(udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			ep_is_in(ep)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		/* caller-provided mapping: just sync ownership back to CPU */
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			ep_is_in(ep)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	if (status && (status != -ESHUTDOWN))
		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);
	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
	ep->stopped = stopped;
}
/*-----------------------------------------------------------------
* nuke(): delete all requests related to this ep
*--------------------------------------------------------------*/
/*-----------------------------------------------------------------
 * nuke(): retire every request still linked on this endpoint's
 * queue with the given status code.
 *--------------------------------------------------------------*/
static void nuke(struct qe_ep *ep, int status)
{
	struct qe_req *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct qe_req, queue);
		done(ep, req, status);
	}
}
/*---------------------------------------------------------------------------*
* USB and Endpoint manipulate process, include parameter and register *
*---------------------------------------------------------------------------*/
/* @value: 1--set stall 0--clean stall */
/*
 * Update the RX handshake field of an endpoint's USEP register.
 * @value: 1 -- set stall; 0 -- clear stall (an IN endpoint then
 * ignores OUT tokens instead).  Always returns 0.
 */
static int qe_eprx_stall_change(struct qe_ep *ep, int value)
{
	struct qe_udc *udc = ep->udc;
	u8 epnum = ep->epnum;
	u16 usep;

	usep = in_be16(&udc->usb_regs->usb_usep[epnum]) & ~USB_RHS_MASK;
	if (value == 1)
		usep |= USB_RHS_STALL;
	else if (ep->dir == USB_DIR_IN)
		usep |= USB_RHS_IGNORE_OUT;
	out_be16(&udc->usb_regs->usb_usep[epnum], usep);
	return 0;
}
/*
 * Update the TX handshake field of an endpoint's USEP register.
 * @value: 1 -- set stall; 0 -- clear stall (an OUT endpoint then
 * ignores IN tokens instead).  Always returns 0.
 */
static int qe_eptx_stall_change(struct qe_ep *ep, int value)
{
	struct qe_udc *udc = ep->udc;
	u8 epnum = ep->epnum;
	u16 usep;

	usep = in_be16(&udc->usb_regs->usb_usep[epnum]) & ~USB_THS_MASK;
	if (value == 1)
		usep |= USB_THS_STALL;
	else if (ep->dir == USB_DIR_OUT)
		usep |= USB_THS_IGNORE_IN;
	out_be16(&udc->usb_regs->usb_usep[epnum], usep);
	return 0;
}
/* Stall both directions of ep0 and rearm the SETUP state machine. */
static int qe_ep0_stall(struct qe_udc *udc)
{
	struct qe_ep *ep0 = &udc->eps[0];

	qe_eptx_stall_change(ep0, 1);
	qe_eprx_stall_change(ep0, 1);
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = 0;
	return 0;
}
/*
 * Put an idle RX endpoint into NACK mode: the handshake field is set to
 * NACK and RX/busy interrupts are masked.  No-op unless the endpoint is
 * currently in EP_STATE_IDLE.  Always returns 0.
 */
static int qe_eprx_nack(struct qe_ep *ep)
{
	u8 epnum = ep->epnum;
	struct qe_udc *udc = ep->udc;
	if (ep->state == EP_STATE_IDLE) {
		/* Set the ep's nack */
		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
				USB_RHS_MASK, USB_RHS_NACK);
		/* Mask Rx and Busy interrupts */
		clrbits16(&udc->usb_regs->usb_usbmr,
				(USB_E_RXB_MASK | USB_E_BSY_MASK));
		ep->state = EP_STATE_NACK;
	}
	return 0;
}
/*
 * Return a NACK-ed endpoint to normal RX operation: restore the
 * handshake field, acknowledge and unmask RX/busy interrupts, and mark
 * the endpoint idle with no pending data.  No-op unless the endpoint is
 * in EP_STATE_NACK.  Always returns 0.
 */
static int qe_eprx_normal(struct qe_ep *ep)
{
	struct qe_udc *udc = ep->udc;
	if (ep->state == EP_STATE_NACK) {
		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
				USB_RTHS_MASK, USB_THS_IGNORE_IN);
		/* Unmask RX interrupts */
		/* write-one-to-clear any stale RX/busy events first */
		out_be16(&udc->usb_regs->usb_usber,
				USB_E_BSY_MASK | USB_E_RXB_MASK);
		setbits16(&udc->usb_regs->usb_usbmr,
				(USB_E_RXB_MASK | USB_E_BSY_MASK));
		ep->state = EP_STATE_IDLE;
		ep->has_data = 0;
	}
	return 0;
}
/*
 * Issue a STOP_TX command for this endpoint via the CPM or QE command
 * interface, depending on the SoC variant.  Always returns 0.
 */
static int qe_ep_cmd_stoptx(struct qe_ep *ep)
{
	if (ep->udc->soc_type == PORT_CPM)
		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
				CPM_USB_STOP_TX_OPCODE);
	else
		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
				ep->epnum, 0);
	return 0;
}
/*
 * Issue a RESTART_TX command for this endpoint via the CPM or QE command
 * interface, depending on the SoC variant.  Always returns 0.
 */
static int qe_ep_cmd_restarttx(struct qe_ep *ep)
{
	if (ep->udc->soc_type == PORT_CPM)
		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
	else
		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
				ep->epnum, 0);
	return 0;
}
/*
 * Flush the TX FIFO of an endpoint: stop TX, issue the FIFO-flush
 * command, reset the TX BD pointer/state parameters back to the ring
 * base, and restart TX.  Always returns 0.
 */
static int qe_ep_flushtxfifo(struct qe_ep *ep)
{
	struct qe_udc *udc = ep->udc;
	int i;
	i = (int)ep->epnum;
	qe_ep_cmd_stoptx(ep);
	out_8(&udc->usb_regs->usb_uscom,
		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
	/* rewind TX BD pointer to the ring base and clear state/count */
	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
	out_be32(&udc->ep_param[i]->tstate, 0);
	out_be16(&udc->ep_param[i]->tbcnt, 0);
	ep->c_txbd = ep->txbase;
	ep->n_txbd = ep->txbase;
	qe_ep_cmd_restarttx(ep);
	return 0;
}
static int qe_ep_filltxfifo(struct qe_ep *ep)
{
struct qe_udc *udc = ep->udc;
out_8(&udc->usb_regs->usb_uscom,
USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
return 0;
}
/*
 * Reset an endpoint's buffer-descriptor rings: rearm every RX BD as
 * empty-with-interrupt (wrap bit on the last one) and clear every TX BD
 * (wrap bit on the last one).  Always returns 0.
 */
static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
{
	struct qe_ep *ep;
	u32 bdring_len;
	struct qe_bd __iomem *bd;
	int i;
	ep = &udc->eps[pipe_num];
	if (ep->dir == USB_DIR_OUT)
		bdring_len = USB_BDRING_LEN_RX;
	else
		bdring_len = USB_BDRING_LEN;
	bd = ep->rxbase;
	for (i = 0; i < (bdring_len - 1); i++) {
		/* R_E: empty (hardware-owned), R_I: interrupt on receive */
		out_be32((u32 __iomem *)bd, R_E | R_I);
		bd++;
	}
	/* last BD gets the wrap bit so the ring closes */
	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
	bd = ep->txbase;
	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
		out_be32(&bd->buf, 0);
		out_be32((u32 __iomem *)bd, 0);
		bd++;
	}
	out_be32((u32 __iomem *)bd, T_W);
	return 0;
}
/*
 * Reset an endpoint: flush TX (where the direction has a TX side), set
 * the ignore bits for the unused direction in the USEP register, and
 * reinitialize both BD rings.  Always returns 0.
 */
static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
{
	struct qe_ep *ep;
	u16 tmpusep;
	ep = &udc->eps[pipe_num];
	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
	tmpusep &= ~USB_RTHS_MASK;
	switch (ep->dir) {
	case USB_DIR_BOTH:
		qe_ep_flushtxfifo(ep);
		break;
	case USB_DIR_OUT:
		/* OUT-only: ignore IN tokens */
		tmpusep |= USB_THS_IGNORE_IN;
		break;
	case USB_DIR_IN:
		qe_ep_flushtxfifo(ep);
		/* IN-only: ignore OUT tokens */
		tmpusep |= USB_RHS_IGNORE_OUT;
		break;
	default:
		break;
	}
	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
	qe_epbds_reset(udc, pipe_num);
	return 0;
}
/* Flip the DATA0/DATA1 toggle bit for the next packet on @ep. */
static int qe_ep_toggledata01(struct qe_ep *ep)
{
	ep->data01 = ep->data01 ^ 0x1;
	return 0;
}
/*
 * Allocate the endpoint's RX+TX buffer-descriptor rings in multi-user
 * RAM, program the ring base/pointer fields of the endpoint parameter
 * RAM, cache the ring pointers in the qe_ep, and zero all BDs (wrap bit
 * on the last BD of each ring).  Returns -ENOMEM if muram allocation
 * fails, 0 otherwise.
 */
static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
{
	struct qe_ep *ep = &udc->eps[pipe_num];
	unsigned long tmp_addr = 0;
	struct usb_ep_para __iomem *epparam;
	int i;
	struct qe_bd __iomem *bd;
	int bdring_len;
	if (ep->dir == USB_DIR_OUT)
		bdring_len = USB_BDRING_LEN_RX;
	else
		bdring_len = USB_BDRING_LEN;
	epparam = udc->ep_param[pipe_num];
	/* alloc multi-ram for BD rings and set the ep parameters */
	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
	if (IS_ERR_VALUE(tmp_addr))
		return -ENOMEM;
	/* RX ring first, TX ring immediately after it */
	out_be16(&epparam->rbase, (u16)tmp_addr);
	out_be16(&epparam->tbase, (u16)(tmp_addr +
			(sizeof(struct qe_bd) * bdring_len)));
	/* start both ring pointers at their ring base */
	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
	ep->rxbase = cpm_muram_addr(tmp_addr);
	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
				* bdring_len));
	ep->n_rxbd = ep->rxbase;
	ep->e_rxbd = ep->rxbase;
	ep->n_txbd = ep->txbase;
	ep->c_txbd = ep->txbase;
	ep->data01 = 0; /* data0 */
	/* Init TX and RX bds */
	bd = ep->rxbase;
	for (i = 0; i < bdring_len - 1; i++) {
		out_be32(&bd->buf, 0);
		out_be32((u32 __iomem *)bd, 0);
		bd++;
	}
	out_be32(&bd->buf, 0);
	out_be32((u32 __iomem *)bd, R_W);
	bd = ep->txbase;
	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
		out_be32(&bd->buf, 0);
		out_be32((u32 __iomem *)bd, 0);
		bd++;
	}
	out_be32(&bd->buf, 0);
	out_be32((u32 __iomem *)bd, T_W);
	return 0;
}
/*
 * Allocate the RX software frame and a contiguous RX buffer, establish
 * (or sync) its DMA mapping, and point every RX BD at a maxpacket-sized,
 * 4-byte-aligned slice of that buffer, armed as empty-with-interrupt.
 * Returns -EINVAL if the BD ring was never set up, -ENOMEM on allocation
 * failure, 0 on success.
 */
static int qe_ep_rxbd_update(struct qe_ep *ep)
{
	unsigned int size;
	int i;
	unsigned int tmp;
	struct qe_bd __iomem *bd;
	unsigned int bdring_len;
	if (ep->rxbase == NULL)
		return -EINVAL;
	bd = ep->rxbase;
	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
	if (!ep->rxframe)
		return -ENOMEM;
	qe_frame_init(ep->rxframe);
	if (ep->dir == USB_DIR_OUT)
		bdring_len = USB_BDRING_LEN_RX;
	else
		bdring_len = USB_BDRING_LEN;
	/* one maxpacket(+CRC+2) slot per BD, plus one spare */
	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
	if (!ep->rxbuffer) {
		kfree(ep->rxframe);
		return -ENOMEM;
	}
	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
	/* NOTE(review): virt_to_phys() yielding DMA_ADDR_INVALID looks
	 * like a "no direct mapping" probe — confirm against platform. */
	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
					ep->rxbuffer,
					size,
					DMA_FROM_DEVICE);
		ep->rxbufmap = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					ep->rxbuf_d, size,
					DMA_FROM_DEVICE);
		ep->rxbufmap = 0;
	}
	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
	tmp = ep->rxbuf_d;
	/* round the buffer address up to 4-byte alignment */
	tmp = (u32)(((tmp >> 2) << 2) + 4);
	for (i = 0; i < bdring_len - 1; i++) {
		out_be32(&bd->buf, tmp);
		out_be32((u32 __iomem *)bd, (R_E | R_I));
		tmp = tmp + size;
		bd++;
	}
	out_be32(&bd->buf, tmp);
	/* wrap bit closes the ring on the last BD */
	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
	return 0;
}
/*
 * Program the per-endpoint hardware registers from the endpoint
 * descriptor: logical endpoint number, transfer type and direction in
 * the USEP register, BD-ring function-code bytes, and the maximum RX
 * buffer length (maxpacket + CRC, rounded up to a multiple of 4).
 * Always returns 0.
 */
static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
{
	struct qe_ep *ep = &udc->eps[pipe_num];
	struct usb_ep_para __iomem *epparam;
	u16 usep, logepnum;
	u16 tmp;
	u8 rtfcr = 0;
	epparam = udc->ep_param[pipe_num];
	usep = 0;
	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	usep |= (logepnum << USB_EPNUM_SHIFT);
	switch (ep->ep.desc->bmAttributes & 0x03) {
	case USB_ENDPOINT_XFER_BULK:
		usep |= USB_TRANS_BULK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		usep |=  USB_TRANS_ISO;
		break;
	case USB_ENDPOINT_XFER_INT:
		usep |= USB_TRANS_INT;
		break;
	default:
		usep |= USB_TRANS_CTR;
		break;
	}
	switch (ep->dir) {
	case USB_DIR_OUT:
		usep |= USB_THS_IGNORE_IN;
		break;
	case USB_DIR_IN:
		usep |= USB_RHS_IGNORE_OUT;
		break;
	default:
		break;
	}
	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
	/* BD function-code byte for both RX and TX rings */
	rtfcr = 0x30;
	out_8(&epparam->rbmr, rtfcr);
	out_8(&epparam->tbmr, rtfcr);
	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
	/* MRBLR must be divisble by 4 */
	tmp = (u16)(((tmp >> 2) << 2) + 4);
	out_be16(&epparam->mrblr, tmp);
	return 0;
}
static int qe_ep_init(struct qe_udc *udc,
unsigned char pipe_num,
const struct usb_endpoint_descriptor *desc)
{
struct qe_ep *ep = &udc->eps[pipe_num];
unsigned long flags;
int reval = 0;
u16 max = 0;
max = usb_endpoint_maxp(desc);
/* check the max package size validate for this endpoint */
/* Refer to USB2.0 spec table 9-13,
*/
if (pipe_num != 0) {
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
if (strstr(ep->ep.name, "-iso")
|| strstr(ep->ep.name, "-int"))
goto en_done;
switch (udc->gadget.speed) {
case USB_SPEED_HIGH:
if ((max == 128) || (max == 256) || (max == 512))
break;
fallthrough;
default:
switch (max) {
case 4:
case 8:
case 16:
case 32:
case 64:
break;
default:
case USB_SPEED_LOW:
goto en_done;
}
}
break;
case USB_ENDPOINT_XFER_INT:
if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
goto en_done;
switch (udc->gadget.speed) {
case USB_SPEED_HIGH:
if (max <= 1024)
break;
fallthrough;
case USB_SPEED_FULL:
if (max <= 64)
break;
fallthrough;
default:
if (max <= 8)
break;
goto en_done;
}
break;
case USB_ENDPOINT_XFER_ISOC:
if (strstr(ep->ep.name, "-bulk")
|| strstr(ep->ep.name, "-int"))
goto en_done;
switch (udc->gadget.speed) {
case USB_SPEED_HIGH:
if (max <= 1024)
break;
fallthrough;
case USB_SPEED_FULL:
if (max <= 1023)
break;
fallthrough;
default:
goto en_done;
}
break;
case USB_ENDPOINT_XFER_CONTROL:
if (strstr(ep->ep.name, "-iso")
|| strstr(ep->ep.name, "-int"))
goto en_done;
switch (udc->gadget.speed) {
case USB_SPEED_HIGH:
case USB_SPEED_FULL:
switch (max) {
case 1:
case 2:
case 4:
case 8:
case 16:
case 32:
case 64:
break;
default:
goto en_done;
}
fallthrough;
case USB_SPEED_LOW:
switch (max) {
case 1:
case 2:
case 4:
case 8:
break;
default:
goto en_done;
}
default:
goto en_done;
}
break;
default:
goto en_done;
}
} /* if ep0*/
spin_lock_irqsave(&udc->lock, flags);
/* initialize ep structure */
ep->ep.maxpacket = max;
ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
ep->ep.desc = desc;
ep->stopped = 0;
ep->init = 1;
if (pipe_num == 0) {
ep->dir = USB_DIR_BOTH;
udc->ep0_dir = USB_DIR_OUT;
udc->ep0_state = WAIT_FOR_SETUP;
} else {
switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
case USB_DIR_OUT:
ep->dir = USB_DIR_OUT;
break;
case USB_DIR_IN:
ep->dir = USB_DIR_IN;
default:
break;
}
}
/* hardware special operation */
qe_ep_bd_init(udc, pipe_num);
if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
reval = qe_ep_rxbd_update(ep);
if (reval)
goto en_done1;
}
if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
if (!ep->txframe)
goto en_done2;
qe_frame_init(ep->txframe);
}
qe_ep_register_init(udc, pipe_num);
/* Now HW will be NAKing transfers to that EP,
* until a buffer is queued to it. */
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
en_done2:
kfree(ep->rxbuffer);
kfree(ep->rxframe);
en_done1:
spin_unlock_irqrestore(&udc->lock, flags);
en_done:
dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
return -ENODEV;
}
/* Enable the USB controller: set USB_MODE_EN in the USMOD register. */
static inline void qe_usb_enable(struct qe_udc *udc)
{
	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
}
/* Disable the USB controller: clear USB_MODE_EN in the USMOD register. */
static inline void qe_usb_disable(struct qe_udc *udc)
{
	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
}
/*----------------------------------------------------------------------------*
* USB and EP basic manipulate function end *
*----------------------------------------------------------------------------*/
/******************************************************************************
UDC transmit and receive process
******************************************************************************/
static void recycle_one_rxbd(struct qe_ep *ep)
{
u32 bdstatus;
bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
bdstatus = R_I | R_E | (bdstatus & R_W);
out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
if (bdstatus & R_W)
ep->e_rxbd = ep->rxbase;
else
ep->e_rxbd++;
}
static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
{
u32 bdstatus;
struct qe_bd __iomem *bd, *nextbd;
unsigned char stop = 0;
nextbd = ep->n_rxbd;
bd = ep->e_rxbd;
bdstatus = in_be32((u32 __iomem *)bd);
while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
bdstatus = R_E | R_I | (bdstatus & R_W);
out_be32((u32 __iomem *)bd, bdstatus);
if (bdstatus & R_W)
bd = ep->rxbase;
else
bd++;
bdstatus = in_be32((u32 __iomem *)bd);
if (stopatnext && (bd == nextbd))
stop = 1;
}
ep->e_rxbd = bd;
}
/*
 * Resynchronise the software recycle pointer with the controller and
 * hand back consumed rx BDs, then clear a pending busy condition and
 * re-enable reception if there is room and work queued.
 */
static void ep_recycle_rxbds(struct qe_ep *ep)
{
	struct qe_bd __iomem *bd = ep->n_rxbd;
	u32 bdstatus;
	u8 epnum = ep->epnum;
	struct qe_udc *udc = ep->udc;
	bdstatus = in_be32((u32 __iomem *)bd);
	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
		/* n_rxbd itself is already clean: derive the controller's
		 * position from the rbptr parameter-RAM register instead
		 * (rbptr-rbase is a byte offset; >>3 assumes 8-byte BDs —
		 * NOTE(review): confirm against struct qe_bd) */
		bd = ep->rxbase +
				((in_be16(&udc->ep_param[epnum]->rbptr) -
				  in_be16(&udc->ep_param[epnum]->rbase))
				 >> 3);
		bdstatus = in_be32((u32 __iomem *)bd);
		if (bdstatus & R_W)
			bd = ep->rxbase;
		else
			bd++;
		ep->e_rxbd = bd;
		recycle_rxbds(ep, 0);
		ep->e_rxbd = ep->n_rxbd;
	} else
		recycle_rxbds(ep, 1);
	/* acknowledge a pending busy event, if any */
	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
	/* nothing pending in the ring and a request waiting: resume rx */
	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
		qe_eprx_normal(ep);
	ep->localnack = 0;
}
static void setup_received_handle(struct qe_udc *udc,
struct usb_ctrlrequest *setup);
static int qe_ep_rxframe_handle(struct qe_ep *ep);
static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
/* when BD PID is setup, handle the packet */
/* Copy an 8-byte SETUP packet out of ep0's rx frame into
 * local_setup_buff and dispatch it.  Returns -EINVAL if the frame is
 * not a SETUP, the state machine is not waiting for one, or the
 * payload is not exactly 8 bytes. */
static int ep0_setup_handle(struct qe_udc *udc)
{
	struct qe_ep *ep = &udc->eps[0];
	struct qe_frame *frm = ep->rxframe;
	unsigned int len;

	if (!(frame_get_info(frm) & PID_SETUP) ||
	    udc->ep0_state != WAIT_FOR_SETUP)
		return -EINVAL;

	len = frame_get_length(frm);
	if (unlikely(len != 8))
		return -EINVAL;

	memcpy(&udc->local_setup_buff, frm->data, len);
	ep->data01 = 1;

	/* handle the usb command base on the usb_ctrlrequest */
	setup_received_handle(udc, &udc->local_setup_buff);
	return 0;
}
/*
 * Drain completed rx BDs on endpoint 0: SETUP packets go to
 * ep0_setup_handle(), data packets to qe_ep_rxframe_handle(); each BD
 * is recycled afterwards.  Returns 0, or -EINVAL if ep0 is currently
 * set up for the IN direction.
 */
static int qe_ep0_rx(struct qe_udc *udc)
{
	struct qe_ep *ep = &udc->eps[0];
	struct qe_frame *pframe;
	struct qe_bd __iomem *bd;
	u32 bdstatus, length;
	u32 vaddr;
	pframe = ep->rxframe;
	if (ep->dir == USB_DIR_IN) {
		dev_err(udc->dev, "ep0 not a control endpoint\n");
		return -EINVAL;
	}
	bd = ep->n_rxbd;
	bdstatus = in_be32((u32 __iomem *)bd);
	length = bdstatus & BD_LENGTH_MASK;
	/* a BD with R_E clear and non-zero length holds received data */
	while (!(bdstatus & R_E) && length) {
		if ((bdstatus & R_F) && (bdstatus & R_L)
				&& !(bdstatus & R_ERROR)) {
			/* CRC-only length means a zero-length packet */
			if (length == USB_CRC_SIZE) {
				udc->ep0_state = WAIT_FOR_SETUP;
				dev_vdbg(udc->dev,
					"receive a ZLP in status phase\n");
			} else {
				qe_frame_clean(pframe);
				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
				frame_set_data(pframe, (u8 *)vaddr);
				frame_set_length(pframe,
						(length - USB_CRC_SIZE));
				frame_set_status(pframe, FRAME_OK);
				switch (bdstatus & R_PID) {
				case R_PID_SETUP:
					frame_set_info(pframe, PID_SETUP);
					break;
				case R_PID_DATA1:
					frame_set_info(pframe, PID_DATA1);
					break;
				default:
					frame_set_info(pframe, PID_DATA0);
					break;
				}
				if ((bdstatus & R_PID) == R_PID_SETUP)
					ep0_setup_handle(udc);
				else
					qe_ep_rxframe_handle(ep);
			}
		} else {
			dev_err(udc->dev, "The receive frame with error!\n");
		}
		/* note: don't clear the rxbd's buffer address */
		recycle_one_rxbd(ep);
		/* Get next BD */
		if (bdstatus & R_W)
			bd = ep->rxbase;
		else
			bd++;
		bdstatus = in_be32((u32 __iomem *)bd);
		length = bdstatus & BD_LENGTH_MASK;
	}
	ep->n_rxbd = bd;
	return 0;
}
/* Copy the received frame in ep->rxframe into the request at the head
 * of the queue, completing the request on a short packet or when its
 * buffer is full.  Returns -EIO on a data-toggle mismatch, else 0. */
static int qe_ep_rxframe_handle(struct qe_ep *ep)
{
	struct qe_frame *frm = ep->rxframe;
	u8 rxpid = (frame_get_info(frm) & PID_DATA1) ? 0x1 : 0;
	unsigned int len;
	struct qe_req *req;
	u8 *dst;

	if (rxpid != ep->data01) {
		dev_err(ep->udc->dev, "the data01 error!\n");
		return -EIO;
	}

	len = frame_get_length(frm);
	if (list_empty(&ep->queue)) {
		dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name);
	} else {
		req = list_entry(ep->queue.next, struct qe_req, queue);
		dst = (u8 *)(req->req.buf) + req->req.actual;
		if (dst) {
			memcpy(dst, frm->data, len);
			req->req.actual += len;
			/* short packet or request filled: complete it */
			if (len < ep->ep.maxpacket ||
			    req->req.actual >= req->req.length) {
				if (ep->epnum == 0)
					ep0_req_complete(ep->udc, req);
				else
					done(ep, req, 0);
				if (ep->epnum != 0 && list_empty(&ep->queue))
					qe_eprx_nack(ep);
			}
		}
	}

	qe_ep_toggledata01(ep);
	return 0;
}
/*
 * Tasklet: drain received BDs for every OUT endpoint (ep1..n) whose
 * enable_tasklet flag is set; ep0 rx is handled separately in
 * qe_ep0_rx().  Runs under udc->lock.
 */
static void ep_rx_tasklet(struct tasklet_struct *t)
{
	struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
	struct qe_ep *ep;
	struct qe_frame *pframe;
	struct qe_bd __iomem *bd;
	unsigned long flags;
	u32 bdstatus, length;
	u32 vaddr, i;
	spin_lock_irqsave(&udc->lock, flags);
	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
		ep = &udc->eps[i];
		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
			dev_dbg(udc->dev,
				"This is a transmit ep or disable tasklet!\n");
			continue;
		}
		pframe = ep->rxframe;
		bd = ep->n_rxbd;
		bdstatus = in_be32((u32 __iomem *)bd);
		length = bdstatus & BD_LENGTH_MASK;
		/* a BD with R_E clear and non-zero length holds data */
		while (!(bdstatus & R_E) && length) {
			if (list_empty(&ep->queue)) {
				/* no request to copy into: NAK the host */
				qe_eprx_nack(ep);
				dev_dbg(udc->dev,
					"The rxep have noreq %d\n",
					ep->has_data);
				break;
			}
			if ((bdstatus & R_F) && (bdstatus & R_L)
				&& !(bdstatus & R_ERROR)) {
				/* complete error-free frame: wrap it and
				 * hand it to the frame handler */
				qe_frame_clean(pframe);
				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
				frame_set_data(pframe, (u8 *)vaddr);
				frame_set_length(pframe,
						(length - USB_CRC_SIZE));
				frame_set_status(pframe, FRAME_OK);
				switch (bdstatus & R_PID) {
				case R_PID_DATA1:
					frame_set_info(pframe, PID_DATA1);
					break;
				case R_PID_SETUP:
					frame_set_info(pframe, PID_SETUP);
					break;
				default:
					frame_set_info(pframe, PID_DATA0);
					break;
				}
				/* handle the rx frame */
				qe_ep_rxframe_handle(ep);
			} else {
				dev_err(udc->dev,
					"error in received frame\n");
			}
			/* note: don't clear the rxbd's buffer address */
			/*clear the length */
			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
			ep->has_data--;
			if (!(ep->localnack))
				recycle_one_rxbd(ep);
			/* Get next BD */
			if (bdstatus & R_W)
				bd = ep->rxbase;
			else
				bd++;
			bdstatus = in_be32((u32 __iomem *)bd);
			length = bdstatus & BD_LENGTH_MASK;
		}
		ep->n_rxbd = bd;
		/* if we NAKed above, recycle the ring before resuming */
		if (ep->localnack)
			ep_recycle_rxbds(ep);
		ep->enable_tasklet = 0;
	} /* for i=1 */
	spin_unlock_irqrestore(&udc->lock, flags);
}
/*
 * Rx interrupt path for non-zero OUT endpoints: compute how many BDs
 * the controller has filled, NAK if the ring is nearly full, then
 * defer the actual data copying to ep_rx_tasklet().
 */
static int qe_ep_rx(struct qe_ep *ep)
{
	struct qe_udc *udc;
	struct qe_frame *pframe;
	struct qe_bd __iomem *bd;
	u16 swoffs, ucoffs, emptybds;
	udc = ep->udc;
	pframe = ep->rxframe;
	if (ep->dir == USB_DIR_IN) {
		dev_err(udc->dev, "transmit ep in rx function\n");
		return -EINVAL;
	}
	bd = ep->n_rxbd;
	/* software consume offset vs controller produce offset, in BDs
	 * (rbptr/rbase are byte offsets; >>3 assumes 8-byte BDs —
	 * NOTE(review): confirm against struct qe_bd) */
	swoffs = (u16)(bd - ep->rxbase);
	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
	if (swoffs < ucoffs)
		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
	else
		emptybds = swoffs - ucoffs;
	if (emptybds < MIN_EMPTY_BDS) {
		/* ring nearly full: NAK until BDs have been recycled */
		qe_eprx_nack(ep);
		ep->localnack = 1;
		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
	}
	ep->has_data = USB_BDRING_LEN_RX - emptybds;
	if (list_empty(&ep->queue)) {
		qe_eprx_nack(ep);
		dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n",
			ep->has_data);
		return 0;
	}
	/* copying out of the BDs happens in the tasklet */
	tasklet_schedule(&udc->rx_tasklet);
	ep->enable_tasklet = 1;
	return 0;
}
/* send data from a frame, no matter what tx_req */
/*
 * Load @frame into the next tx BD and hand it to the controller.  Tx
 * interrupts are masked while the BD is prepared and restored before
 * returning.  Returns 0 on success, -EINVAL when called on an OUT
 * endpoint, -EBUSY when the next BD is still owned by the hardware.
 */
static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
{
	struct qe_udc *udc = ep->udc;
	struct qe_bd __iomem *bd;
	u16 saveusbmr;
	u32 bdstatus, pidmask;
	u32 paddr;
	if (ep->dir == USB_DIR_OUT) {
		dev_err(udc->dev, "receive ep passed to tx function\n");
		return -EINVAL;
	}
	/* Disable the Tx interrupt */
	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
	out_be16(&udc->usb_regs->usb_usbmr,
			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
	bd = ep->n_txbd;
	bdstatus = in_be32((u32 __iomem *)bd);
	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
		/* a zero-length frame is sent as a 2-byte dummy from
		 * nullbuf with CRC generation suppressed */
		if (frame_get_length(frame) == 0) {
			frame_set_data(frame, udc->nullbuf);
			frame_set_length(frame, 2);
			frame->info |= (ZLP | NO_CRC);
			dev_vdbg(udc->dev, "the frame size = 0\n");
		}
		paddr = virt_to_phys((void *)frame->data);
		out_be32(&bd->buf, paddr);
		bdstatus = (bdstatus&T_W);	/* keep only the wrap bit */
		if (!(frame_get_info(frame) & NO_CRC))
			bdstatus |= T_R | T_I | T_L | T_TC
					| frame_get_length(frame);
		else
			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
		/* if the packet is a ZLP in status phase */
		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
			ep->data01 = 0x1;
		if (ep->data01) {
			pidmask = T_PID_DATA1;
			frame->info |= PID_DATA1;
		} else {
			pidmask = T_PID_DATA0;
			frame->info |= PID_DATA0;
		}
		bdstatus |= T_CNF;
		bdstatus |= pidmask;
		out_be32((u32 __iomem *)bd, bdstatus);
		qe_ep_filltxfifo(ep);
		/* enable the TX interrupt */
		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
		qe_ep_toggledata01(ep);
		/* advance n_txbd, wrapping at the ring end */
		if (bdstatus & T_W)
			ep->n_txbd = ep->txbase;
		else
			ep->n_txbd++;
		return 0;
	} else {
		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
		return -EBUSY;
	}
}
/* when a bd was transmitted, the function can
 * handle the tx_req, not include ep0 */
/* Account the chunk just sent for ep->tx_req; complete the request when
 * everything (plus any required ZLP) is out, then pull the next request
 * off the queue.  @restart means the last chunk must be re-sent. */
static int txcomplete(struct qe_ep *ep, unsigned char restart)
{
	struct qe_req *req = ep->tx_req;

	if (req) {
		unsigned last_len = min_t(unsigned,
					  req->req.length - ep->sent,
					  ep->ep.maxpacket);
		unsigned zlp = 0;

		if (restart) {
			ep->last = 0;
		} else {
			ep->sent += ep->last;
			ep->last = 0;
		}

		/* zlp needed when req->req.zero is set and the transfer
		 * length is an exact multiple of maxpacket */
		if (req->req.zero && last_len != 0 &&
		    (req->req.length % ep->ep.maxpacket) == 0)
			zlp = 1;

		/* a request already were transmitted completely */
		if (((req->req.length - ep->sent) <= 0) && !zlp) {
			done(ep, req, 0);
			ep->tx_req = NULL;
			ep->last = 0;
			ep->sent = 0;
		}
	}

	/* we should gain a new tx_req fot this endpoint */
	if (!ep->tx_req && !list_empty(&ep->queue)) {
		ep->tx_req = list_entry(ep->queue.next, struct qe_req, queue);
		ep->last = 0;
		ep->sent = 0;
	}
	return 0;
}
/* give a frame and a tx_req, send some data */
/* Fill @frame with the next maxpacket-bounded chunk of ep->tx_req and
 * transmit it.  Returns qe_ep_tx()'s result, or -EIO when there is no
 * buffer or nothing left to send. */
static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
{
	unsigned int chunk;
	u8 *src;

	qe_frame_clean(frame);
	chunk = min_t(u32, (ep->tx_req->req.length - ep->sent),
		      ep->ep.maxpacket);
	src = (u8 *)ep->tx_req->req.buf + ep->sent;
	if (!src || !chunk)
		return -EIO;

	ep->last = chunk;
	ep->tx_req->req.actual += chunk;
	frame_set_data(frame, src);
	frame_set_length(frame, chunk);
	frame_set_status(frame, FRAME_OK);
	frame_set_info(frame, 0);
	return qe_ep_tx(ep, frame);
}
/* give a frame struct,send a ZLP */
/* Transmit a zero-length packet using the udc's 2-byte null buffer;
 * @infor carries extra frame flags (e.g. SETUP_STATUS | NO_REQ). */
static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
{
	struct qe_udc *udc = ep->udc;

	if (!frame)
		return -ENODEV;

	qe_frame_clean(frame);
	frame_set_data(frame, (u8 *)udc->nullbuf);
	frame_set_length(frame, 2);
	frame_set_status(frame, FRAME_OK);
	frame_set_info(frame, ZLP | NO_CRC | infor);

	return qe_ep_tx(ep, frame);
}
/* Send the next piece of ep->tx_req: data while any remains, otherwise
 * a ZLP.  Returns -ENODEV when there is no transmit request at all. */
static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
{
	struct qe_req *req = ep->tx_req;

	if (!req)
		return -ENODEV;

	return (req->req.length - ep->sent) > 0 ?
		qe_usb_senddata(ep, frame) :
		sendnulldata(ep, frame, 0);
}
/* if direction is DIR_IN, the status is Device->Host
 * if direction is DIR_OUT, the status transaction is Device<-Host
 * in status phase, udc create a request and gain status */
static int ep0_prime_status(struct qe_udc *udc, int direction)
{
	struct qe_ep *ep = &udc->eps[0];

	if (direction == USB_DIR_IN) {
		/* device -> host: we transmit the status ZLP ourselves */
		udc->ep0_dir = USB_DIR_IN;
		udc->ep0_state = DATA_STATE_NEED_ZLP;
		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
	} else {
		/* host -> device: wait for the incoming status packet */
		udc->ep0_dir = USB_DIR_OUT;
		udc->ep0_state = WAIT_FOR_OUT_STATUS;
	}

	return 0;
}
/* a request complete in ep0, whether gadget request or udc request */
/* Advance the ep0 state machine after @req finished on ep0: after a
 * data phase, prime the opposite-direction status phase; after a
 * status phase, go back to waiting for SETUP; anything else stalls. */
static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
{
	struct qe_ep *ep = &udc->eps[0];

	/* because usb and ep's status already been set in ch9setaddress() */
	if (udc->ep0_state == WAIT_FOR_SETUP) {
		dev_vdbg(udc->dev, "Unexpected interrupt\n");
		return;
	}

	if (udc->ep0_state == DATA_STATE_XMIT) {
		done(ep, req, 0);
		/* receive status phase */
		if (ep0_prime_status(udc, USB_DIR_OUT))
			qe_ep0_stall(udc);
	} else if (udc->ep0_state == DATA_STATE_RECV) {
		done(ep, req, 0);
		/* send status phase */
		if (ep0_prime_status(udc, USB_DIR_IN))
			qe_ep0_stall(udc);
	} else if (udc->ep0_state == DATA_STATE_NEED_ZLP ||
		   udc->ep0_state == WAIT_FOR_OUT_STATUS) {
		/* status phase finished: ready for the next SETUP */
		done(ep, req, 0);
		udc->ep0_state = WAIT_FOR_SETUP;
	} else {
		qe_ep0_stall(udc);
	}
}
static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
{
struct qe_req *tx_req = NULL;
struct qe_frame *frame = ep->txframe;
if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
if (!restart)
ep->udc->ep0_state = WAIT_FOR_SETUP;
else
sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
return 0;
}
tx_req = ep->tx_req;
if (tx_req != NULL) {
if (!restart) {
int asent = ep->last;
ep->sent += asent;
ep->last -= asent;
} else {
ep->last = 0;
}
/* a request already were transmitted completely */
if ((ep->tx_req->req.length - ep->sent) <= 0) {
ep->tx_req->req.actual = (unsigned int)ep->sent;
ep0_req_complete(ep->udc, ep->tx_req);
ep->tx_req = NULL;
ep->last = 0;
ep->sent = 0;
}
} else {
dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n");
}
return 0;
}
/* Complete (or on error, retransmit) the ep0 tx frame, then queue the
 * next chunk of data. */
static int ep0_txframe_handle(struct qe_ep *ep)
{
	struct qe_frame *frame = ep->txframe;

	if (frame_get_status(frame) & FRAME_ERROR) {
		/* if have error, transmit again */
		qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
		ep->data01 = (frame_get_info(frame) & PID_DATA0) ? 0 : 1;
		ep0_txcomplete(ep, 1);
	} else {
		ep0_txcomplete(ep, 0);
	}

	frame_create_tx(ep, frame);
	return 0;
}
/*
 * Confirm transmitted BDs on endpoint 0.  BDs are cleared and recycled
 * one by one; frame-level completion (ep0_txframe_handle) only runs
 * once the confirm pointer catches up with the next-to-send pointer.
 */
static int qe_ep0_txconf(struct qe_ep *ep)
{
	struct qe_bd __iomem *bd;
	struct qe_frame *pframe;
	u32 bdstatus;
	bd = ep->c_txbd;
	bdstatus = in_be32((u32 __iomem *)bd);
	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
		pframe = ep->txframe;
		/* clear and recycle the BD */
		out_be32((u32 __iomem *)bd, bdstatus & T_W);
		out_be32(&bd->buf, 0);
		if (bdstatus & T_W)
			ep->c_txbd = ep->txbase;
		else
			ep->c_txbd++;
		if (ep->c_txbd == ep->n_txbd) {
			/* all outstanding BDs confirmed: record any error
			 * flags on the frame, then complete/retransmit */
			if (bdstatus & DEVICE_T_ERROR) {
				frame_set_status(pframe, FRAME_ERROR);
				if (bdstatus & T_TO)
					pframe->status |= TX_ER_TIMEOUT;
				if (bdstatus & T_UN)
					pframe->status |= TX_ER_UNDERUN;
			}
			ep0_txframe_handle(ep);
		}
		bd = ep->c_txbd;
		bdstatus = in_be32((u32 __iomem *)bd);
	}
	return 0;
}
static int ep_txframe_handle(struct qe_ep *ep)
{
if (frame_get_status(ep->txframe) & FRAME_ERROR) {
qe_ep_flushtxfifo(ep);
dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
if (frame_get_info(ep->txframe) & PID_DATA0)
ep->data01 = 0;
else
ep->data01 = 1;
txcomplete(ep, 1);
} else
txcomplete(ep, 0);
frame_create_tx(ep, ep->txframe); /* send the data */
return 0;
}
/* confirm the already transmitted bds */
/*
 * Retire every BD the controller has finished sending on a non-zero
 * endpoint: record error flags, clear and recycle the BD, and run
 * frame-level completion.  Always returns 0.
 *
 * Fix: the original carried a "breakonrxinterrupt" flag that was never
 * set anywhere, making its -EIO return path dead code; the flag and
 * the unreachable branch are removed.
 */
static int qe_ep_txconf(struct qe_ep *ep)
{
	struct qe_bd __iomem *bd = ep->c_txbd;
	u32 bdstatus = in_be32((u32 __iomem *)bd);

	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
		struct qe_frame *pframe = ep->txframe;

		/* record transmit errors before the BD is recycled */
		if (bdstatus & DEVICE_T_ERROR) {
			frame_set_status(pframe, FRAME_ERROR);
			if (bdstatus & T_TO)
				pframe->status |= TX_ER_TIMEOUT;
			if (bdstatus & T_UN)
				pframe->status |= TX_ER_UNDERUN;
		}

		/* clear and recycle the BD (keep only the wrap bit) */
		out_be32((u32 __iomem *)bd, bdstatus & T_W);
		out_be32(&bd->buf, 0);
		if (bdstatus & T_W)
			ep->c_txbd = ep->txbase;
		else
			ep->c_txbd++;

		/* handle the tx frame */
		ep_txframe_handle(ep);

		bd = ep->c_txbd;
		bdstatus = in_be32((u32 __iomem *)bd);
	}

	return 0;
}
/* Add a request in queue, and try to transmit a packet */
/* Kick transmission for a newly queued request, but only when nothing
 * is already in flight; @req itself was queued by the caller. */
static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
{
	int ret = 0;

	if (!ep->tx_req) {
		ep->sent = 0;
		ep->last = 0;
		txcomplete(ep, 0);	/* can gain a new tx_req */
		ret = frame_create_tx(ep, ep->txframe);
	}

	return ret;
}
/* Copy data that already sits in received BDs directly into @req; used
 * when a request is queued while the endpoint is NAKing with data
 * pending (see ep_req_receive()). */
static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
{
	struct qe_udc *udc = ep->udc;
	struct qe_frame *pframe = NULL;
	struct qe_bd __iomem *bd;
	u32 bdstatus, length;
	u32 vaddr, fsize;
	u8 *cp;
	u8 finish_req = 0;
	u8 framepid;
	if (list_empty(&ep->queue)) {
		dev_vdbg(udc->dev, "the req already finish!\n");
		return 0;
	}
	pframe = ep->rxframe;
	bd = ep->n_rxbd;
	bdstatus = in_be32((u32 __iomem *)bd);
	length = bdstatus & BD_LENGTH_MASK;
	/* a BD with R_E clear and non-zero length holds received data */
	while (!(bdstatus & R_E) && length) {
		if (finish_req)
			break;
		if ((bdstatus & R_F) && (bdstatus & R_L)
					&& !(bdstatus & R_ERROR)) {
			qe_frame_clean(pframe);
			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
			frame_set_data(pframe, (u8 *)vaddr);
			frame_set_length(pframe, (length - USB_CRC_SIZE));
			frame_set_status(pframe, FRAME_OK);
			switch (bdstatus & R_PID) {
			case R_PID_DATA1:
				frame_set_info(pframe, PID_DATA1); break;
			default:
				frame_set_info(pframe, PID_DATA0); break;
			}
			/* handle the rx frame */
			if (frame_get_info(pframe) & PID_DATA1)
				framepid = 0x1;
			else
				framepid = 0;
			if (framepid != ep->data01) {
				dev_vdbg(udc->dev, "the data01 error!\n");
			} else {
				fsize = frame_get_length(pframe);
				cp = (u8 *)(req->req.buf) + req->req.actual;
				if (cp) {
					memcpy(cp, pframe->data, fsize);
					req->req.actual += fsize;
					/* short packet or buffer full ends
					 * the request */
					if ((fsize < ep->ep.maxpacket)
						|| (req->req.actual >=
							req->req.length)) {
						finish_req = 1;
						done(ep, req, 0);
						if (list_empty(&ep->queue))
							qe_eprx_nack(ep);
					}
				}
				qe_ep_toggledata01(ep);
			}
		} else {
			dev_err(udc->dev, "The receive frame with error!\n");
		}
		/* note: don't clear the rxbd's buffer address *
		 * only Clear the length */
		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
		ep->has_data--;
		/* Get next BD */
		if (bdstatus & R_W)
			bd = ep->rxbase;
		else
			bd++;
		bdstatus = in_be32((u32 __iomem *)bd);
		length = bdstatus & BD_LENGTH_MASK;
	}
	ep->n_rxbd = bd;
	ep_recycle_rxbds(ep);
	return 0;
}
/* only add the request in queue */
/* For an endpoint that is NAKing: either re-enable reception (no data
 * buffered) or drain the already-received BDs into @req. */
static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
{
	if (ep->state != EP_STATE_NACK)
		return 0;

	if (ep->has_data <= 0)
		qe_eprx_normal(ep);	/* enable rx and unmask rx interrupt */
	else
		ep_req_rx(ep, req);	/* copy the existing BD data */

	return 0;
}
/********************************************************************
Internal Used Function End
********************************************************************/
/*-----------------------------------------------------------------------
Endpoint Management Functions For Gadget
-----------------------------------------------------------------------*/
/* Gadget-core callback: validate the descriptor, then configure the
 * endpoint via qe_ep_init().  On failure the muram backing the rx ring
 * is released. */
static int qe_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
	struct qe_udc *udc;
	unsigned char epnum;

	/* catch various bogus parameters */
	if (!_ep || !desc || _ep->name == ep_name[0] ||
	    desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	epnum = (u8)desc->bEndpointAddress & 0xF;

	if (qe_ep_init(udc, epnum, desc) != 0) {
		cpm_muram_free(cpm_muram_offset(ep->rxbase));
		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
		return -EINVAL;
	}

	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
	return 0;
}
/*
 * Gadget-core callback: quiesce the endpoint, release its muram BD
 * rings and free the rx/tx buffers allocated in qe_ep_init().
 *
 * Fix: the original read ep->udc (and udc->dev inside dev_dbg, plus
 * passed NULL to a %s) before it had checked _ep for NULL — a
 * NULL-pointer dereference on a bogus argument.  Validate first.
 */
static int qe_ep_disable(struct usb_ep *_ep)
{
	struct qe_ep *ep;
	struct qe_udc *udc;
	unsigned long flags;
	unsigned int size;

	if (!_ep)
		return -EINVAL;

	ep = container_of(_ep, struct qe_ep, ep);
	udc = ep->udc;
	if (!ep->ep.desc) {
		dev_dbg(udc->dev, "%s not enabled\n", ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->tx_req = NULL;
	qe_ep_reset(udc, ep->epnum);
	spin_unlock_irqrestore(&udc->lock, flags);

	cpm_muram_free(cpm_muram_offset(ep->rxbase));

	/* size must mirror the rx buffer allocation done at enable time */
	if (ep->dir == USB_DIR_OUT)
		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
			(USB_BDRING_LEN_RX + 1);
	else
		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
			(USB_BDRING_LEN + 1);

	if (ep->dir != USB_DIR_IN) {
		kfree(ep->rxframe);
		if (ep->rxbufmap) {
			dma_unmap_single(udc->gadget.dev.parent,
					 ep->rxbuf_d, size,
					 DMA_FROM_DEVICE);
			ep->rxbuf_d = DMA_ADDR_INVALID;
		} else {
			dma_sync_single_for_cpu(
					udc->gadget.dev.parent,
					ep->rxbuf_d, size,
					DMA_FROM_DEVICE);
		}
		kfree(ep->rxbuffer);
	}

	if (ep->dir != USB_DIR_OUT)
		kfree(ep->txframe);

	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
	return 0;
}
/* Gadget-core callback: allocate a request wrapper with an invalid DMA
 * address (mapped later in __qe_ep_queue()) and an empty queue link. */
static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct qe_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* Gadget-core callback: free a request previously returned by
 * qe_alloc_request(); a NULL request is ignored. */
static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	if (_req)
		kfree(container_of(_req, struct qe_req, req));
}
/*
 * Core of qe_ep_queue(): validate, DMA-map, enqueue and kick off the
 * request.  Caller must hold udc->lock.  Note that failures from
 * ep_req_send()/ep_req_receive() are not propagated — the request
 * stays queued and 0 is returned.
 */
static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
	struct qe_req *req = container_of(_req, struct qe_req, req);
	struct qe_udc *udc;
	int reval;
	udc = ep->udc;
	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_dbg(udc->dev, "bad params\n");
		return -EINVAL;
	}
	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
		dev_dbg(udc->dev, "bad ep\n");
		return -EINVAL;
	}
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	req->ep = ep;
	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length,
					ep_is_in(ep)
					? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		/* caller supplied a DMA address: just sync it */
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_is_in(ep)
					? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
		req->mapped = 0;
	}
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);
	dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
			ep->name, req->req.length);
	/* push the request to device */
	if (ep_is_in(ep))
		reval = ep_req_send(ep, req);
	/* EP0 */
	if (ep_index(ep) == 0 && req->req.length > 0) {
		if (ep_is_in(ep))
			udc->ep0_state = DATA_STATE_XMIT;
		else
			udc->ep0_state = DATA_STATE_RECV;
	}
	if (ep->dir == USB_DIR_OUT)
		reval = ep_req_receive(ep, req);
	return 0;
}
/* queues (submits) an I/O request to an endpoint */
/* Thin locking wrapper: the actual work is in __qe_ep_queue(). */
static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
		       gfp_t gfp_flags)
{
	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
	struct qe_udc *udc = ep->udc;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&udc->lock, flags);
	rc = __qe_ep_queue(_ep, _req);
	spin_unlock_irqrestore(&udc->lock, flags);

	return rc;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
/* Completes the request with -ECONNRESET if it is still queued here;
 * returns -EINVAL when the request is not on this endpoint's queue. */
static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
	struct qe_req *found = NULL;
	struct qe_req *cursor;
	unsigned long flags;
	int ret = -EINVAL;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(cursor, &ep->queue, queue) {
		if (&cursor->req == _req) {
			found = cursor;
			break;
		}
	}

	if (found) {
		done(ep, found, -ECONNRESET);
		ret = 0;
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/*-----------------------------------------------------------------
 * modify the endpoint halt feature
 * @ep: the non-isochronous endpoint being stalled
 * @value: 1--set halt 0--clear halt
 * Returns zero, or a negative error code.
 *
 * Fix: the original jumped to the logging label on the -EINVAL path
 * with 'udc' still uninitialized (UB when verbose debug is compiled
 * in) and derived ep from a possibly-NULL _ep; validate first and
 * return early instead.
 *----------------------------------------------------------------*/
static int qe_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct qe_ep *ep;
	struct qe_udc *udc;
	unsigned long flags;
	int status = -EOPNOTSUPP;

	if (!_ep)
		return -EINVAL;

	ep = container_of(_ep, struct qe_ep, ep);
	if (!ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Attempt to halt IN ep will fail if any transfer requests
	 * are still queue */
	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	status = 0;
	spin_lock_irqsave(&ep->udc->lock, flags);
	qe_eptx_stall_change(ep, value);
	qe_eprx_stall_change(ep, value);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->epnum == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = 0;
	}

	/* set data toggle to DATA0 on clear halt */
	if (value == 0)
		ep->data01 = 0;
out:
	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
		 value ? "set" : "clear", status);

	return status;
}
/* per-endpoint operations table handed to the gadget core */
static const struct usb_ep_ops qe_ep_ops = {
	.enable = qe_ep_enable,
	.disable = qe_ep_disable,
	.alloc_request = qe_alloc_request,
	.free_request = qe_free_request,
	.queue = qe_ep_queue,
	.dequeue = qe_ep_dequeue,
	.set_halt = qe_ep_set_halt,
};
/*------------------------------------------------------------------------
Gadget Driver Layer Operations
------------------------------------------------------------------------*/
/* Get the current frame number */
static int qe_get_frame(struct usb_gadget *gadget)
{
	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
	u16 frame_n = in_be16(&udc->usb_param->frame_n);

	/* bit 15 flags a valid frame number; the low 11 bits carry it */
	return (frame_n & 0x8000) ? (frame_n & 0x07ff) : -EINVAL;
}
static int fsl_qe_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int fsl_qe_stop(struct usb_gadget *gadget);
/* defined in usb_gadget.h */
/* gadget-level operations; fsl_qe_start/stop are declared above and
 * defined later in this file */
static const struct usb_gadget_ops qe_gadget_ops = {
	.get_frame = qe_get_frame,
	.udc_start = fsl_qe_start,
	.udc_stop = fsl_qe_stop,
};
/*-------------------------------------------------------------------------
USB ep0 Setup process in BUS Enumeration
-------------------------------------------------------------------------*/
/* Fail every pending request on @pipe with -ECONNRESET and drop the
 * request currently being transmitted. */
static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
{
	struct qe_ep *ep = &udc->eps[pipe];

	nuke(ep, -ECONNRESET);
	ep->tx_req = NULL;

	return 0;
}
/* Nuke every endpoint queue, then report the bus reset to the gadget
 * core.  udc->lock is dropped around usb_gadget_udc_reset() —
 * presumably because the callout may re-enter this driver; confirm
 * against the callers' locking. */
static int reset_queues(struct qe_udc *udc)
{
	u8 pipe;
	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
		udc_reset_ep_queue(udc, pipe);
	/* report disconnect; the driver is already quiesced */
	spin_unlock(&udc->lock);
	usb_gadget_udc_reset(&udc->gadget, udc->driver);
	spin_lock(&udc->lock);
	return 0;
}
/* Handle a SET_ADDRESS request: record the new address, move the
 * device state machine to ADDRESS, and answer with a status-phase ZLP
 * (stalling ep0 on failure). */
static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
		u16 length)
{
	/* Save the new address to device struct */
	udc->device_address = (u8)value;

	/* Update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	/* Status phase , send a ZLP */
	if (ep0_prime_status(udc, USB_DIR_IN) != 0)
		qe_ep0_stall(udc);
}
/* Completion callback for the udc-owned GET_STATUS request: the buffer
 * (udc->statusbuf, see ch9getstatus()) is not ours to free, so drop
 * the reference before freeing the request wrapper. */
static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
{
	struct qe_req *req = container_of(_req, struct qe_req, req);

	req->req.buf = NULL;
	kfree(req);
}
/*
 * Handle a GET_STATUS request for device, interface or endpoint
 * recipients by queueing a 2-byte IN data phase on ep0; any failure
 * stalls ep0.
 *
 * Fix: the result of qe_alloc_request() was fed to container_of() and
 * dereferenced without a NULL check — an OOM would crash here.
 */
static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
			u16 index, u16 length)
{
	u16 usb_status = 0;
	struct usb_request *_req;
	struct qe_req *req;
	struct qe_ep *ep;
	int status = 0;

	ep = &udc->eps[0];
	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* Get device status */
		usb_status = 1 << USB_DEVICE_SELF_POWERED;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
		/* Get interface status */
		/* We don't have interface information in udc driver */
		usb_status = 0;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
		/* Get endpoint status */
		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
		struct qe_ep *target_ep;
		u16 usep;

		if (pipe >= USB_MAX_ENDPOINTS)
			goto stall;
		target_ep = &udc->eps[pipe];

		/* stall if endpoint doesn't exist */
		if (!target_ep->ep.desc)
			goto stall;

		/* halted state is read back from the USEP register */
		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
		if (index & USB_DIR_IN) {
			if (target_ep->dir != USB_DIR_IN)
				goto stall;
			if ((usep & USB_THS_MASK) == USB_THS_STALL)
				usb_status = 1 << USB_ENDPOINT_HALT;
		} else {
			if (target_ep->dir != USB_DIR_OUT)
				goto stall;
			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
				usb_status = 1 << USB_ENDPOINT_HALT;
		}
	}

	_req = qe_alloc_request(&ep->ep, GFP_KERNEL);
	if (!_req)
		goto stall;
	req = container_of(_req, struct qe_req, req);

	req->req.length = 2;
	req->req.buf = udc->statusbuf;
	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = ownercomplete;

	udc->ep0_dir = USB_DIR_IN;

	/* data phase */
	status = __qe_ep_queue(&ep->ep, &req->req);
	if (status == 0)
		return;
stall:
	dev_err(udc->dev, "Can't respond to getstatus request \n");
	qe_ep0_stall(udc);
}
/* only handle the setup request, suppose the device in normal status */
/*
 * Dispatch a received SETUP packet.  Standard requests handled by the
 * udc itself (GET_STATUS, SET_ADDRESS, CLEAR/SET_FEATURE) are serviced
 * here; everything else is forwarded to the gadget driver's setup()
 * callback with udc->lock temporarily dropped.
 *
 * Called with udc->lock held.
 */
static void setup_received_handle(struct qe_udc *udc,
				struct usb_ctrlrequest *setup)
{
	/* Fix Endian (udc->local_setup_buff is cpu Endian now)*/
	u16 wValue = le16_to_cpu(setup->wValue);
	u16 wIndex = le16_to_cpu(setup->wIndex);
	u16 wLength = le16_to_cpu(setup->wLength);

	/* clear the previous request in the ep0 */
	udc_reset_ep_queue(udc, 0);

	if (setup->bRequestType & USB_DIR_IN)
		udc->ep0_dir = USB_DIR_IN;
	else
		udc->ep0_dir = USB_DIR_OUT;

	switch (setup->bRequest) {
	case USB_REQ_GET_STATUS:
		/* Data+Status phase form udc */
		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
					!= (USB_DIR_IN | USB_TYPE_STANDARD))
			break;
		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
					wLength);
		return;

	case USB_REQ_SET_ADDRESS:
		/* Status phase from udc */
		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
						USB_RECIP_DEVICE))
			break;
		ch9setaddress(udc, wValue, wIndex, wLength);
		return;

	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		/* Requests with no data phase, status phase from udc */
		if ((setup->bRequestType & USB_TYPE_MASK)
					!= USB_TYPE_STANDARD)
			break;

		if ((setup->bRequestType & USB_RECIP_MASK)
				== USB_RECIP_ENDPOINT) {
			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
			struct qe_ep *ep;

			if (wValue != 0 || wLength != 0
				|| pipe >= USB_MAX_ENDPOINTS)
				break;
			ep = &udc->eps[pipe];

			/* qe_ep_set_halt() may sleep/reacquire; drop the
			 * udc lock around the call. */
			spin_unlock(&udc->lock);
			qe_ep_set_halt(&ep->ep,
					(setup->bRequest == USB_REQ_SET_FEATURE)
						? 1 : 0);
			spin_lock(&udc->lock);
		}

		ep0_prime_status(udc, USB_DIR_IN);

		return;

	default:
		break;
	}

	if (wLength) {
		/* Data phase from gadget, status phase from udc */
		if (setup->bRequestType & USB_DIR_IN) {
			udc->ep0_state = DATA_STATE_XMIT;
			udc->ep0_dir = USB_DIR_IN;
		} else {
			udc->ep0_state = DATA_STATE_RECV;
			udc->ep0_dir = USB_DIR_OUT;
		}
		/* gadget setup() may sleep; call it unlocked and stall
		 * ep0 if the class driver rejects the request */
		spin_unlock(&udc->lock);
		if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
			qe_ep0_stall(udc);
		spin_lock(&udc->lock);
	} else {
		/* No data phase, IN status from gadget */
		udc->ep0_dir = USB_DIR_IN;
		spin_unlock(&udc->lock);
		if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
			qe_ep0_stall(udc);
		spin_lock(&udc->lock);
		udc->ep0_state = DATA_STATE_NEED_ZLP;
	}
}
/*-------------------------------------------------------------------------
USB Interrupt handlers
-------------------------------------------------------------------------*/
static void suspend_irq(struct qe_udc *udc)
{
udc->resume_state = udc->usb_state;
udc->usb_state = USB_STATE_SUSPENDED;
/* report suspend to the driver ,serial.c not support this*/
if (udc->driver->suspend)
udc->driver->suspend(&udc->gadget);
}
static void resume_irq(struct qe_udc *udc)
{
udc->usb_state = udc->resume_state;
udc->resume_state = 0;
/* report resume to the driver , serial.c not support this*/
if (udc->driver->resume)
udc->driver->resume(&udc->gadget);
}
/*
 * Idle-status change interrupt: translate the bus idle bit into
 * suspend/resume transitions, avoiding duplicate notifications.
 */
static void idle_irq(struct qe_udc *udc)
{
	u8 bus_state = in_8(&udc->usb_regs->usb_usbs);

	if (bus_state & USB_IDLE_STATUS_MASK) {
		/* Bus went idle: suspend unless already suspended. */
		if (udc->usb_state != USB_STATE_SUSPENDED)
			suspend_irq(udc);
		return;
	}

	/* Bus active again: resume only if we had suspended. */
	if (udc->usb_state == USB_STATE_SUSPENDED)
		resume_irq(udc);
}
/*
 * Bus-reset interrupt: quiesce the controller, clear the device
 * address, reset every initialized endpoint and all queues, then
 * re-enable in the default (unaddressed) state.  Always returns 0.
 */
static int reset_irq(struct qe_udc *udc)
{
	unsigned char ep_idx;

	/* Already in the default state: nothing to do. */
	if (udc->usb_state == USB_STATE_DEFAULT)
		return 0;

	qe_usb_disable(udc);
	out_8(&udc->usb_regs->usb_usadr, 0);

	for (ep_idx = 0; ep_idx < USB_MAX_ENDPOINTS; ep_idx++) {
		if (udc->eps[ep_idx].init)
			qe_ep_reset(udc, ep_idx);
	}

	reset_queues(udc);

	udc->usb_state = USB_STATE_DEFAULT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = USB_DIR_OUT;

	qe_usb_enable(udc);
	return 0;
}
/* Busy-condition interrupt: intentionally a no-op, kept so the IRQ
 * dispatcher can acknowledge the event. */
static int bsy_irq(struct qe_udc *udc)
{
	return 0;
}
/* Tx-error interrupt: intentionally a no-op, kept so the IRQ
 * dispatcher can acknowledge the event. */
static int txe_irq(struct qe_udc *udc)
{
	return 0;
}
/* ep0 tx interrupt also in here */
static int tx_irq(struct qe_udc *udc)
{
struct qe_ep *ep;
struct qe_bd __iomem *bd;
int i, res = 0;
if ((udc->usb_state == USB_STATE_ADDRESS)
&& (in_8(&udc->usb_regs->usb_usadr) == 0))
out_8(&udc->usb_regs->usb_usadr, udc->device_address);
for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
ep = &udc->eps[i];
if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
bd = ep->c_txbd;
if (!(in_be32((u32 __iomem *)bd) & T_R)
&& (in_be32(&bd->buf))) {
/* confirm the transmitted bd */
if (ep->epnum == 0)
res = qe_ep0_txconf(ep);
else
res = qe_ep_txconf(ep);
}
}
}
return res;
}
/* setup packect's rx is handle in the function too */
/*
 * Rx interrupt: scan every initialized OUT endpoint for buffer
 * descriptors holding newly received data and hand them off to the
 * ep0 or bulk receive paths.
 */
static void rx_irq(struct qe_udc *udc)
{
	int i;

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		struct qe_ep *ep = &udc->eps[i];
		struct qe_bd __iomem *bd;

		if (!ep || !ep->init || ep->dir == USB_DIR_IN)
			continue;

		/* A BD that is no longer empty (R_E clear) and still has
		 * a buffer attached contains received data. */
		bd = ep->n_rxbd;
		if ((in_be32((u32 __iomem *)bd) & R_E) ||
		    !in_be32(&bd->buf))
			continue;

		if (ep->epnum == 0)
			qe_ep0_rx(udc);		/* setup / ep0 traffic */
		else
			qe_ep_rx(ep);		/* non-setup package receive */
	}
}
/*
 * Top-level interrupt handler.  Reads the pending *and* enabled event
 * bits, acknowledges them in hardware, then dispatches each event in
 * a fixed order under udc->lock.  Returns IRQ_HANDLED if any known
 * event was serviced, IRQ_NONE otherwise (shared-IRQ friendly).
 */
static irqreturn_t qe_udc_irq(int irq, void *_udc)
{
	struct qe_udc *udc = (struct qe_udc *)_udc;
	u16 irq_src;
	irqreturn_t status = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* Only consider events that are both raised and unmasked. */
	irq_src = in_be16(&udc->usb_regs->usb_usber) &
		in_be16(&udc->usb_regs->usb_usbmr);
	/* Clear notification bits */
	out_be16(&udc->usb_regs->usb_usber, irq_src);
	/* USB Interrupt */
	if (irq_src & USB_E_IDLE_MASK) {
		idle_irq(udc);
		irq_src &= ~USB_E_IDLE_MASK;
		status = IRQ_HANDLED;
	}

	if (irq_src & USB_E_TXB_MASK) {
		tx_irq(udc);
		irq_src &= ~USB_E_TXB_MASK;
		status = IRQ_HANDLED;
	}

	if (irq_src & USB_E_RXB_MASK) {
		rx_irq(udc);
		irq_src &= ~USB_E_RXB_MASK;
		status = IRQ_HANDLED;
	}

	if (irq_src & USB_E_RESET_MASK) {
		reset_irq(udc);
		irq_src &= ~USB_E_RESET_MASK;
		status = IRQ_HANDLED;
	}

	if (irq_src & USB_E_BSY_MASK) {
		bsy_irq(udc);
		irq_src &= ~USB_E_BSY_MASK;
		status = IRQ_HANDLED;
	}

	if (irq_src & USB_E_TXE_MASK) {
		txe_irq(udc);
		irq_src &= ~USB_E_TXE_MASK;
		status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return status;
}
/*-------------------------------------------------------------------------
Gadget driver probe and unregister.
--------------------------------------------------------------------------*/
/*
 * udc_start() callback: bind the gadget driver and bring the
 * controller online in the attached, unaddressed state.
 */
static int fsl_qe_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
	unsigned long flags;

	/* lock is needed but whether should use this lock or another */
	spin_lock_irqsave(&udc->lock, flags);

	/* Bind the driver and adopt its advertised top speed. */
	udc->driver = driver;
	udc->gadget.speed = driver->max_speed;

	/* Enable the controller, clear all stale events, then unmask
	 * the default device-mode interrupt set. */
	qe_usb_enable(udc);
	out_be16(&udc->usb_regs->usb_usber, 0xffff);
	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = USB_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * udc_stop() callback: disable the controller, flush every endpoint
 * queue with -ESHUTDOWN and unbind the gadget driver.
 *
 * Note the controller is disabled *before* taking the lock; with
 * interrupts off no new requests can arrive while queues are nuked.
 */
static int fsl_qe_stop(struct usb_gadget *gadget)
{
	struct qe_udc *udc;
	struct qe_ep *loop_ep;
	unsigned long flags;

	udc = container_of(gadget, struct qe_udc, gadget);
	/* stop usb controller, disable intr */
	qe_usb_disable(udc);

	/* in fact, no needed */
	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = 0;

	/* stand operation */
	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	/* Fail ep0 and every other endpoint's pending requests. */
	nuke(&udc->eps[0], -ESHUTDOWN);
	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
		nuke(loop_ep, -ESHUTDOWN);
	spin_unlock_irqrestore(&udc->lock, flags);

	udc->driver = NULL;

	return 0;
}
/* udc structure's alloc and setup, include ep-param alloc */
static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
{
struct qe_udc *udc;
struct device_node *np = ofdev->dev.of_node;
unsigned long tmp_addr = 0;
struct usb_device_para __iomem *usbpram;
unsigned int i;
u64 size;
u32 offset;
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
goto cleanup;
udc->dev = &ofdev->dev;
/* get default address of usb parameter in MURAM from device tree */
offset = *of_get_address(np, 1, &size, NULL);
udc->usb_param = cpm_muram_addr(offset);
memset_io(udc->usb_param, 0, size);
usbpram = udc->usb_param;
out_be16(&usbpram->frame_n, 0);
out_be32(&usbpram->rstate, 0);
tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
sizeof(struct usb_ep_para)),
USB_EP_PARA_ALIGNMENT);
if (IS_ERR_VALUE(tmp_addr))
goto cleanup;
for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
out_be16(&usbpram->epptr[i], (u16)tmp_addr);
udc->ep_param[i] = cpm_muram_addr(tmp_addr);
tmp_addr += 32;
}
memset_io(udc->ep_param[0], 0,
USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
udc->ep0_dir = 0;
spin_lock_init(&udc->lock);
return udc;
cleanup:
kfree(udc);
return NULL;
}
/* USB Controller register init */
static int qe_udc_reg_init(struct qe_udc *udc)
{
struct usb_ctlr __iomem *qe_usbregs;
qe_usbregs = udc->usb_regs;
/* Spec says that we must enable the USB controller to change mode. */
out_8(&qe_usbregs->usb_usmod, 0x01);
/* Mode changed, now disable it, since muram isn't initialized yet. */
out_8(&qe_usbregs->usb_usmod, 0x00);
/* Initialize the rest. */
out_be16(&qe_usbregs->usb_usbmr, 0);
out_8(&qe_usbregs->usb_uscom, 0);
out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
return 0;
}
/*
 * Static per-endpoint setup done once at probe time.  The endpoint's
 * type and direction stay undetermined (dir = 0xff) until ep_enable()
 * calls qe_ep_init().  Always returns 0.
 */
static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
{
	struct qe_ep *ep = &udc->eps[pipe_num];

	ep->udc = udc;
	strcpy(ep->name, ep_name[pipe_num]);
	ep->ep.name = ep_name[pipe_num];

	/* ep0 is control-only; every other endpoint may carry iso,
	 * bulk or interrupt traffic, in either direction. */
	if (pipe_num == 0) {
		ep->ep.caps.type_control = true;
	} else {
		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	ep->ep.ops = &qe_ep_ops;
	ep->stopped = 1;
	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
	ep->ep.desc = NULL;

	/* Remaining state is "not yet enabled". */
	ep->dir = 0xff;
	ep->epnum = (u8)pipe_num;
	ep->sent = 0;
	ep->last = 0;
	ep->init = 0;
	ep->rxframe = NULL;
	ep->txframe = NULL;
	ep->tx_req = NULL;
	ep->state = EP_STATE_IDLE;
	ep->has_data = 0;

	/* the queue lists any req for this ep */
	INIT_LIST_HEAD(&ep->queue);

	/* ep0 must not appear in gadget.ep_list (used by ep_autoconfig) */
	if (pipe_num != 0)
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

	ep->gadget = &udc->gadget;

	return 0;
}
/*-----------------------------------------------------------------------
* UDC device Driver operation functions *
*----------------------------------------------------------------------*/
/*
 * Final release for gadget.dev, invoked by the driver core once the
 * last reference is dropped.  Signals qe_udc_remove() (which waits on
 * udc->done), returns the MURAM endpoint-parameter block and frees
 * the udc itself.
 */
static void qe_udc_release(struct device *dev)
{
	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
	int i;

	complete(udc->done);
	/* ep_param[0] is the base of the single MURAM allocation made
	 * in qe_udc_config(); freeing it releases all entries. */
	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
		udc->ep_param[i] = NULL;

	kfree(udc);
}
/* Driver probe functions */
static const struct of_device_id qe_udc_match[];
/*
 * Platform probe: allocate and configure the udc, map registers,
 * initialize endpoints and DMA buffers, hook the interrupt and
 * register the gadget.  Errors unwind in reverse order via the
 * err* labels.
 *
 * NOTE(review): ret is declared unsigned int but holds negative errno
 * values; this works via two's-complement conversion on return, but a
 * plain int would be safer — confirm before changing.
 */
static int qe_udc_probe(struct platform_device *ofdev)
{
	struct qe_udc *udc;
	const struct of_device_id *match;
	struct device_node *np = ofdev->dev.of_node;
	struct qe_ep *ep;
	unsigned int ret = 0;
	unsigned int i;
	const void *prop;

	match = of_match_device(qe_udc_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	/* Only bind when the DT node is configured as a peripheral. */
	prop = of_get_property(np, "mode", NULL);
	if (!prop || strcmp(prop, "peripheral"))
		return -ENODEV;

	/* Initialize the udc structure including QH member and other member */
	udc = qe_udc_config(ofdev);
	if (!udc) {
		dev_err(&ofdev->dev, "failed to initialize\n");
		return -ENOMEM;
	}

	udc->soc_type = (unsigned long)match->data;
	udc->usb_regs = of_iomap(np, 0);
	if (!udc->usb_regs) {
		ret = -ENOMEM;
		goto err1;
	}

	/* initialize usb hw reg except for regs for EP,
	 * leave usbintr reg untouched*/
	qe_udc_reg_init(udc);

	/* here comes the stand operations for probe
	 * set the qe_udc->gadget.xxx */
	udc->gadget.ops = &qe_gadget_ops;

	/* gadget.ep0 is a pointer */
	udc->gadget.ep0 = &udc->eps[0].ep;

	INIT_LIST_HEAD(&udc->gadget.ep_list);

	/* modify in register gadget process */
	udc->gadget.speed = USB_SPEED_UNKNOWN;

	/* name: Identifies the controller hardware type. */
	udc->gadget.name = driver_name;
	udc->gadget.dev.parent = &ofdev->dev;

	/* initialize qe_ep struct */
	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
		/* because the ep type isn't decide here so
		 * qe_ep_init() should be called in ep_enable() */

		/* setup the qe_ep struct and link ep.ep.list
		 * into gadget.ep_list */
		qe_ep_config(udc, (unsigned char)i);
	}

	/* ep0 initialization in here */
	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
	if (ret)
		goto err2;

	/* create a buf for ZLP send, need to remain zeroed */
	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
	if (udc->nullbuf == NULL) {
		ret = -ENOMEM;
		goto err3;
	}

	/* buffer for data of get_status request */
	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
	if (udc->statusbuf == NULL) {
		ret = -ENOMEM;
		goto err3;
	}

	/* Map (or just sync) the ZLP buffer for device-bound DMA. */
	udc->nullp = virt_to_phys((void *)udc->nullbuf);
	if (udc->nullp == DMA_ADDR_INVALID) {
		udc->nullp = dma_map_single(
					udc->gadget.dev.parent,
					udc->nullbuf,
					256,
					DMA_TO_DEVICE);
		udc->nullmap = 1;
	} else {
		dma_sync_single_for_device(udc->gadget.dev.parent,
					udc->nullp, 256,
					DMA_TO_DEVICE);
	}

	tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
	/* request irq and disable DR */
	udc->usb_irq = irq_of_parse_and_map(np, 0);
	if (!udc->usb_irq) {
		ret = -EINVAL;
		goto err_noirq;
	}

	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
				driver_name, udc);
	if (ret) {
		dev_err(udc->dev, "cannot request irq %d err %d\n",
				udc->usb_irq, ret);
		goto err4;
	}

	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
			qe_udc_release);
	if (ret)
		goto err5;

	platform_set_drvdata(ofdev, udc);
	dev_info(udc->dev,
			"%s USB controller initialized as device\n",
			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
	return 0;

	/* Error unwind: each label undoes the step above it. */
err5:
	free_irq(udc->usb_irq, udc);
err4:
	irq_dispose_mapping(udc->usb_irq);
err_noirq:
	if (udc->nullmap) {
		dma_unmap_single(udc->gadget.dev.parent,
			udc->nullp, 256,
				DMA_TO_DEVICE);
		udc->nullp = DMA_ADDR_INVALID;
	} else {
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
			udc->nullp, 256,
				DMA_TO_DEVICE);
	}
err3:
	ep = &udc->eps[0];
	cpm_muram_free(cpm_muram_offset(ep->rxbase));
	kfree(ep->rxframe);
	kfree(ep->rxbuffer);
	kfree(ep->txframe);
err2:
	iounmap(udc->usb_regs);
err1:
	kfree(udc);
	return ret;
}
#ifdef CONFIG_PM
/* Power management is not implemented for this controller. */
static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	return -ENOTSUPP;
}
/* Power management is not implemented for this controller. */
static int qe_udc_resume(struct platform_device *dev)
{
	return -ENOTSUPP;
}
#endif
/*
 * Platform remove: unregister the gadget, tear down DMA mappings and
 * ep0 resources, release the IRQ, and block until qe_udc_release()
 * (triggered by the last gadget.dev reference drop) completes and
 * frees the udc.  Teardown order mirrors probe in reverse.
 */
static void qe_udc_remove(struct platform_device *ofdev)
{
	struct qe_udc *udc = platform_get_drvdata(ofdev);
	struct qe_ep *ep;
	unsigned int size;
	DECLARE_COMPLETION_ONSTACK(done);

	usb_del_gadget_udc(&udc->gadget);

	/* qe_udc_release() will complete() this when the device core
	 * drops the final reference. */
	udc->done = &done;
	tasklet_disable(&udc->rx_tasklet);

	/* Release the ZLP buffer mapping (or sync it back). */
	if (udc->nullmap) {
		dma_unmap_single(udc->gadget.dev.parent,
			udc->nullp, 256,
				DMA_TO_DEVICE);
		udc->nullp = DMA_ADDR_INVALID;
	} else {
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
			udc->nullp, 256,
				DMA_TO_DEVICE);
	}

	/* Tear down ep0's receive BD ring and buffers. */
	ep = &udc->eps[0];
	cpm_muram_free(cpm_muram_offset(ep->rxbase));
	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);

	kfree(ep->rxframe);
	if (ep->rxbufmap) {
		dma_unmap_single(udc->gadget.dev.parent,
				ep->rxbuf_d, size,
				DMA_FROM_DEVICE);
		ep->rxbuf_d = DMA_ADDR_INVALID;
	} else {
		dma_sync_single_for_cpu(udc->gadget.dev.parent,
				ep->rxbuf_d, size,
				DMA_FROM_DEVICE);
	}

	kfree(ep->rxbuffer);
	kfree(ep->txframe);

	free_irq(udc->usb_irq, udc);
	irq_dispose_mapping(udc->usb_irq);

	tasklet_kill(&udc->rx_tasklet);

	iounmap(udc->usb_regs);

	/* wait for release() of gadget.dev to free udc */
	wait_for_completion(&done);
}
/*-------------------------------------------------------------------------*/
/* Device-tree match table; .data selects the QE vs CPM register
 * programming model at probe time. */
static const struct of_device_id qe_udc_match[] = {
	{
		.compatible = "fsl,mpc8323-qe-usb",
		.data = (void *)PORT_QE,
	},
	{
		.compatible = "fsl,mpc8360-qe-usb",
		.data = (void *)PORT_QE,
	},
	{
		.compatible = "fsl,mpc8272-cpm-usb",
		.data = (void *)PORT_CPM,
	},
	{},
};
MODULE_DEVICE_TABLE(of, qe_udc_match);
/* Platform driver glue; PM callbacks are stubs (see above) and only
 * compiled in when CONFIG_PM is set. */
static struct platform_driver udc_driver = {
	.driver = {
		.name = driver_name,
		.of_match_table = qe_udc_match,
	},
	.probe          = qe_udc_probe,
	.remove_new     = qe_udc_remove,
#ifdef CONFIG_PM
	.suspend        = qe_udc_suspend,
	.resume         = qe_udc_resume,
#endif
};
module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/fsl_qe_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Gadget driver for LPC32xx
*
* Authors:
* Kevin Wells <[email protected]>
* Mike James
* Roland Stigge <[email protected]>
*
* Copyright (C) 2006 Philips Semiconductors
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2012 Roland Stigge
*
* Note: This driver is based on original work done by Mike James for
* the LPC3180.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/isp1301.h>
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
/*
* USB device configuration structure
*/
typedef void (*usc_chg_event)(int);
struct lpc32xx_usbd_cfg {
int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
usc_chg_event conn_chgb; /* Connection change event (optional) */
usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
};
/*
* controller driver data structures
*/
/* 16 endpoints (not to be confused with 32 hardware endpoints) */
#define NUM_ENDPOINTS 16
/*
* IRQ indices make reading the code a little easier
*/
#define IRQ_USB_LP 0
#define IRQ_USB_HP 1
#define IRQ_USB_DEVDMA 2
#define IRQ_USB_ATX 3
#define EP_OUT 0 /* RX (from host) */
#define EP_IN 1 /* TX (to host) */
/* Returns the interrupt mask for the selected hardware endpoint */
#define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
#define EP_INT_TYPE 0
#define EP_ISO_TYPE 1
#define EP_BLK_TYPE 2
#define EP_CTL_TYPE 3
/* EP0 states */
#define WAIT_FOR_SETUP 0 /* Wait for setup packet */
#define DATA_IN 1 /* Expect dev->host transfer */
#define DATA_OUT 2 /* Expect host->dev transfer */
/* DD (DMA Descriptor) structure, requires word alignment, this is already
* defined in the LPC32XX USB device header file, but this version is slightly
* modified to tag some work data with each DMA descriptor. */
struct lpc32xx_usbd_dd_gad {
u32 dd_next_phy;
u32 dd_setup;
u32 dd_buffer_addr;
u32 dd_status;
u32 dd_iso_ps_mem_addr;
u32 this_dma;
u32 iso_status[6]; /* 5 spare */
u32 dd_next_v;
};
/*
* Logical endpoint structure
*/
struct lpc32xx_ep {
	struct usb_ep ep;		/* gadget-layer endpoint */
	struct list_head queue;		/* queued lpc32xx_request list */
	struct lpc32xx_udc *udc;	/* owning controller */

	u32 hwep_num_base;	/* Physical hardware EP */
	u32 hwep_num;		/* Maps to hardware endpoint */
	u32 maxpacket;		/* max packet size in bytes */
	u32 lep;		/* logical EP index — presumably; verify against callers */

	bool is_in;		/* true for IN (device->host) direction */
	bool req_pending;	/* a transfer is currently in flight */
	u32 eptype;		/* EP_{INT,ISO,BLK,CTL}_TYPE */

	u32 totalints;		/* interrupt count (reported via debugfs) */

	bool wedge;		/* halted via wedge (host may not clear) */
};
enum atx_type {
ISP1301,
STOTG04,
};
/*
* Common UDC structure
*/
struct lpc32xx_udc {
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct platform_device *pdev;
struct device *dev;
spinlock_t lock;
struct i2c_client *isp1301_i2c_client;
/* Board and device specific */
struct lpc32xx_usbd_cfg *board;
void __iomem *udp_baseaddr;
int udp_irq[4];
struct clk *usb_slv_clk;
/* DMA support */
u32 *udca_v_base;
u32 udca_p_base;
struct dma_pool *dd_cache;
/* Common EP and control data */
u32 enabled_devints;
u32 enabled_hwepints;
u32 dev_status;
u32 realized_eps;
/* VBUS detection, pullup, and power flags */
u8 vbus;
u8 last_vbus;
int pullup;
int poweron;
enum atx_type atx;
/* Work queues related to I2C support */
struct work_struct pullup_job;
struct work_struct power_job;
/* USB device peripheral - various */
struct lpc32xx_ep ep[NUM_ENDPOINTS];
bool enabled;
bool clocked;
bool suspended;
int ep0state;
atomic_t enabled_ep_cnt;
wait_queue_head_t ep_disable_wait_queue;
};
/*
* Endpoint request
*/
struct lpc32xx_request {
	struct usb_request req;		/* gadget-layer request (must be first) */
	struct list_head queue;		/* link in lpc32xx_ep->queue */
	struct lpc32xx_usbd_dd_gad *dd_desc_ptr;	/* DMA descriptor for this request */
	bool mapped;		/* buffer was DMA-mapped by this driver */
	bool send_zlp;		/* terminate transfer with a zero-length packet */
};
/* Convert a usb_gadget pointer back to its enclosing lpc32xx_udc. */
static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
{
	return container_of(g, struct lpc32xx_udc, gadget);
}
#define ep_dbg(epp, fmt, arg...) \
dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_err(epp, fmt, arg...) \
dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_info(epp, fmt, arg...) \
dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_warn(epp, fmt, arg...) \
dev_warn(epp->udc->dev, "%s:" fmt, __func__, ## arg)
#define UDCA_BUFF_SIZE (128)
/**********************************************************************
* USB device controller register offsets
**********************************************************************/
#define USBD_DEVINTST(x) ((x) + 0x200)
#define USBD_DEVINTEN(x) ((x) + 0x204)
#define USBD_DEVINTCLR(x) ((x) + 0x208)
#define USBD_DEVINTSET(x) ((x) + 0x20C)
#define USBD_CMDCODE(x) ((x) + 0x210)
#define USBD_CMDDATA(x) ((x) + 0x214)
#define USBD_RXDATA(x) ((x) + 0x218)
#define USBD_TXDATA(x) ((x) + 0x21C)
#define USBD_RXPLEN(x) ((x) + 0x220)
#define USBD_TXPLEN(x) ((x) + 0x224)
#define USBD_CTRL(x) ((x) + 0x228)
#define USBD_DEVINTPRI(x) ((x) + 0x22C)
#define USBD_EPINTST(x) ((x) + 0x230)
#define USBD_EPINTEN(x) ((x) + 0x234)
#define USBD_EPINTCLR(x) ((x) + 0x238)
#define USBD_EPINTSET(x) ((x) + 0x23C)
#define USBD_EPINTPRI(x) ((x) + 0x240)
#define USBD_REEP(x) ((x) + 0x244)
#define USBD_EPIND(x) ((x) + 0x248)
#define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
/* DMA support registers only below */
/* Set, clear, or get enabled state of the DMA request status. If
* enabled, an IN or OUT token will start a DMA transfer for the EP */
#define USBD_DMARST(x) ((x) + 0x250)
#define USBD_DMARCLR(x) ((x) + 0x254)
#define USBD_DMARSET(x) ((x) + 0x258)
/* DMA UDCA head pointer */
#define USBD_UDCAH(x) ((x) + 0x280)
/* EP DMA status, enable, and disable. This is used to specifically
* enabled or disable DMA for a specific EP */
#define USBD_EPDMAST(x) ((x) + 0x284)
#define USBD_EPDMAEN(x) ((x) + 0x288)
#define USBD_EPDMADIS(x) ((x) + 0x28C)
/* DMA master interrupts enable and pending interrupts */
#define USBD_DMAINTST(x) ((x) + 0x290)
#define USBD_DMAINTEN(x) ((x) + 0x294)
/* DMA end of transfer interrupt enable, disable, status */
#define USBD_EOTINTST(x) ((x) + 0x2A0)
#define USBD_EOTINTCLR(x) ((x) + 0x2A4)
#define USBD_EOTINTSET(x) ((x) + 0x2A8)
/* New DD request interrupt enable, disable, status */
#define USBD_NDDRTINTST(x) ((x) + 0x2AC)
#define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
#define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
/* DMA error interrupt enable, disable, status */
#define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
#define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
#define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
/**********************************************************************
* USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
* USBD_DEVINTPRI register definitions
**********************************************************************/
#define USBD_ERR_INT (1 << 9)
#define USBD_EP_RLZED (1 << 8)
#define USBD_TXENDPKT (1 << 7)
#define USBD_RXENDPKT (1 << 6)
#define USBD_CDFULL (1 << 5)
#define USBD_CCEMPTY (1 << 4)
#define USBD_DEV_STAT (1 << 3)
#define USBD_EP_SLOW (1 << 2)
#define USBD_EP_FAST (1 << 1)
#define USBD_FRAME (1 << 0)
/**********************************************************************
* USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
* USBD_EPINTPRI register definitions
**********************************************************************/
/* End point selection macro (RX) */
#define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
/* End point selection macro (TX) */
#define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
/**********************************************************************
* USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
* USBD_EPDMAEN/USBD_EPDMADIS/
* USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
* USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
* USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
* register definitions
**********************************************************************/
/* Endpoint selection macro */
#define USBD_EP_SEL(e) (1 << (e))
/**********************************************************************
* SBD_DMAINTST/USBD_DMAINTEN
**********************************************************************/
#define USBD_SYS_ERR_INT (1 << 2)
#define USBD_NEW_DD_INT (1 << 1)
#define USBD_EOT_INT (1 << 0)
/**********************************************************************
* USBD_RXPLEN register definitions
**********************************************************************/
#define USBD_PKT_RDY (1 << 11)
#define USBD_DV (1 << 10)
#define USBD_PK_LEN_MASK 0x3FF
/**********************************************************************
* USBD_CTRL register definitions
**********************************************************************/
#define USBD_LOG_ENDPOINT(e) ((e) << 2)
#define USBD_WR_EN (1 << 1)
#define USBD_RD_EN (1 << 0)
/**********************************************************************
* USBD_CMDCODE register definitions
**********************************************************************/
#define USBD_CMD_CODE(c) ((c) << 16)
#define USBD_CMD_PHASE(p) ((p) << 8)
/**********************************************************************
* USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
**********************************************************************/
#define USBD_DMAEP(e) (1 << (e))
/* DD (DMA Descriptor) structure, requires word alignment */
struct lpc32xx_usbd_dd {
u32 *dd_next;
u32 dd_setup;
u32 dd_buffer_addr;
u32 dd_status;
u32 dd_iso_ps_mem_addr;
};
/* dd_setup bit defines */
#define DD_SETUP_ATLE_DMA_MODE 0x01
#define DD_SETUP_NEXT_DD_VALID 0x04
#define DD_SETUP_ISO_EP 0x10
#define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
#define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
/* dd_status bit defines */
#define DD_STATUS_DD_RETIRED 0x01
#define DD_STATUS_STS_MASK 0x1E
#define DD_STATUS_STS_NS 0x00 /* Not serviced */
#define DD_STATUS_STS_BS 0x02 /* Being serviced */
#define DD_STATUS_STS_NC 0x04 /* Normal completion */
#define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
#define DD_STATUS_STS_DOR 0x08 /* Data overrun */
#define DD_STATUS_STS_SE 0x12 /* System error */
#define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
#define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
#define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
#define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
#define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
/*
*
* Protocol engine bits below
*
*/
/* Device Interrupt Bit Definitions */
#define FRAME_INT 0x00000001
#define EP_FAST_INT 0x00000002
#define EP_SLOW_INT 0x00000004
#define DEV_STAT_INT 0x00000008
#define CCEMTY_INT 0x00000010
#define CDFULL_INT 0x00000020
#define RxENDPKT_INT 0x00000040
#define TxENDPKT_INT 0x00000080
#define EP_RLZED_INT 0x00000100
#define ERR_INT 0x00000200
/* Rx & Tx Packet Length Definitions */
#define PKT_LNGTH_MASK 0x000003FF
#define PKT_DV 0x00000400
#define PKT_RDY 0x00000800
/* USB Control Definitions */
#define CTRL_RD_EN 0x00000001
#define CTRL_WR_EN 0x00000002
/* Command Codes */
#define CMD_SET_ADDR 0x00D00500
#define CMD_CFG_DEV 0x00D80500
#define CMD_SET_MODE 0x00F30500
#define CMD_RD_FRAME 0x00F50500
#define DAT_RD_FRAME 0x00F50200
#define CMD_RD_TEST 0x00FD0500
#define DAT_RD_TEST 0x00FD0200
#define CMD_SET_DEV_STAT 0x00FE0500
#define CMD_GET_DEV_STAT 0x00FE0500
#define DAT_GET_DEV_STAT 0x00FE0200
#define CMD_GET_ERR_CODE 0x00FF0500
#define DAT_GET_ERR_CODE 0x00FF0200
#define CMD_RD_ERR_STAT 0x00FB0500
#define DAT_RD_ERR_STAT 0x00FB0200
#define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
#define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
#define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
#define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
#define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
#define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
#define CMD_CLR_BUF 0x00F20500
#define DAT_CLR_BUF 0x00F20200
#define CMD_VALID_BUF 0x00FA0500
/* Device Address Register Definitions */
#define DEV_ADDR_MASK 0x7F
#define DEV_EN 0x80
/* Device Configure Register Definitions */
#define CONF_DVICE 0x01
/* Device Mode Register Definitions */
#define AP_CLK 0x01
#define INAK_CI 0x02
#define INAK_CO 0x04
#define INAK_II 0x08
#define INAK_IO 0x10
#define INAK_BI 0x20
#define INAK_BO 0x40
/* Device Status Register Definitions */
#define DEV_CON 0x01
#define DEV_CON_CH 0x02
#define DEV_SUS 0x04
#define DEV_SUS_CH 0x08
#define DEV_RST 0x10
/* Error Code Register Definitions */
#define ERR_EC_MASK 0x0F
#define ERR_EA 0x10
/* Error Status Register Definitions */
#define ERR_PID 0x01
#define ERR_UEPKT 0x02
#define ERR_DCRC 0x04
#define ERR_TIMOUT 0x08
#define ERR_EOP 0x10
#define ERR_B_OVRN 0x20
#define ERR_BTSTF 0x40
#define ERR_TGL 0x80
/* Endpoint Select Register Definitions */
#define EP_SEL_F 0x01
#define EP_SEL_ST 0x02
#define EP_SEL_STP 0x04
#define EP_SEL_PO 0x08
#define EP_SEL_EPN 0x10
#define EP_SEL_B_1_FULL 0x20
#define EP_SEL_B_2_FULL 0x40
/* Endpoint Status Register Definitions */
#define EP_STAT_ST 0x01
#define EP_STAT_DA 0x20
#define EP_STAT_RF_MO 0x40
#define EP_STAT_CND_ST 0x80
/* Clear Buffer Register Definitions */
#define CLR_BUF_PO 0x01
/* DMA Interrupt Bit Definitions */
#define EOT_INT 0x01
#define NDD_REQ_INT 0x02
#define SYS_ERR_INT 0x04
#define DRIVER_VERSION "1.03"
static const char driver_name[] = "lpc32xx_udc";
/*
*
* proc interface support
*
*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
static const char debug_filename[] = "driver/udc";
/*
 * Emit one endpoint's state (name, max packet, direction, type,
 * interrupt count and queued requests) into the debugfs seq_file.
 * Called from udc_show() with udc->lock held.
 */
static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;

	seq_printf(s, "\n");
	seq_printf(s, "%12s, maxpacket %4d %3s",
			ep->ep.name, ep->ep.maxpacket,
			ep->is_in ? "in" : "out");
	seq_printf(s, " type %4s", epnames[ep->eptype]);
	seq_printf(s, " ints: %12d", ep->totalints);

	if (list_empty(&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else {
		/* One line per queued request: progress and buffer. */
		list_for_each_entry(req, &ep->queue, queue) {
			u32 length = req->req.actual;

			seq_printf(s, "\treq %p len %d/%d buf %p\n",
				   &req->req, length,
				   req->req.length, req->req.buf);
		}
	}
}
/*
 * debugfs show handler: dump controller-wide state (vbus, pullup,
 * power source, bound driver) followed by a per-endpoint report.
 * Endpoint details are only meaningful while enabled with VBUS
 * present, so they are skipped otherwise.
 */
static int udc_show(struct seq_file *s, void *unused)
{
	struct lpc32xx_udc *udc = s->private;
	struct lpc32xx_ep *ep;
	unsigned long flags;

	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
		   udc->vbus ? "present" : "off",
		   udc->enabled ? (udc->vbus ? "active" : "enabled") :
		   "disabled",
		   udc->gadget.is_selfpowered ? "self" : "VBUS",
		   udc->suspended ? ", suspended" : "",
		   udc->driver ? udc->driver->driver.name : "(none)");

	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
			proc_ep_show(s, ep);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(udc);
/* Create the debugfs status file (see udc_show). */
static void create_debug_file(struct lpc32xx_udc *udc)
{
	debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops);
}
/* Remove the debugfs status file created by create_debug_file(). */
static void remove_debug_file(struct lpc32xx_udc *udc)
{
	debugfs_lookup_and_remove(debug_filename, NULL);
}
#else
static inline void create_debug_file(struct lpc32xx_udc *udc) {}
static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
#endif
/* Primary initialization sequence for the ISP1301 transceiver.
 * The write order below is significant - do not reorder the I2C accesses.
 * Registers are written via their normal address to set bits and via
 * (reg | ISP1301_I2C_REG_CLEAR_ADDR) to clear bits. */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
	u8 value;
	s32 vendor, product;
	vendor = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00);
	product = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02);
	/* Detect the ST STOTG04 part, which needs slightly different
	 * handling (no suspend speed control, no global power down) */
	if (vendor == 0x0483 && product == 0xa0c4)
		udc->atx = STOTG04;
	/* LPC32XX only supports DAT_SE0 USB mode */
	/* This sequence is important */
	/* Disable transparent UART mode first */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		MC1_UART_EN);
	/* Set full speed and SE0 mode */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
	/*
	 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
	 */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	value = MC2_BI_DI;
	if (udc->atx != STOTG04)
		value |= MC2_SPD_SUSP_CTRL;
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_2, value);
	/* Driver VBUS_DRV high or low depending on board setup */
	if (udc->board->vbus_drv_pol != 0)
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
	else
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DRV);
	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enable when VBUS
	 * is detected */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1,
		(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
	/* Discharge VBUS (just in case) */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
	msleep(1);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		OTG1_VBUS_DISCHRG);
	/* Clear all latched interrupts and edge-detect configuration */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	dev_info(udc->dev, "ISP1301 Vendor ID  : 0x%04x\n", vendor);
	dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", product);
	dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
}
/* Enables or disables the USB device pullup via the ISP1301 transceiver,
 * based on the cached udc->pullup state */
static void isp1301_pullup_set(struct lpc32xx_udc *udc)
{
	if (udc->pullup)
		/* Enable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
	else
		/* Disable pullup for bus signalling (write the pullup bit
		 * to the register's clear address) */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_DP_PULLUP);
}
/* Deferred-work wrapper: applies the cached pullup state via the slow
 * I2C transceiver outside of atomic context */
static void pullup_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
					       pullup_job);

	isp1301_pullup_set(udc);
}
/* Request a pullup state change. When 'block' is nonzero the slow I2C
 * write happens synchronously; otherwise it is deferred to pullup_job
 * (usable from atomic context). No-op if the state is unchanged. */
static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
				  int block)
{
	if (en_pullup == udc->pullup)
		return;
	udc->pullup = en_pullup;
	if (block)
		isp1301_pullup_set(udc);
	else
		/* defer slow i2c pull up setting */
		schedule_work(&udc->pullup_job);
}
#ifdef CONFIG_PM
/* Powers up or down the ISP1301 transceiver by toggling its
 * MC2_GLOBAL_PWR_DN bit */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
	/* There is no "global power down" register for stotg04 */
	if (udc->atx == STOTG04)
		return;
	if (enable != 0)
		/* Power up ISP1301 - this ISP1301 will automatically wakeup
		   when VBUS is detected */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
			MC2_GLOBAL_PWR_DN);
	else
		/* Power down ISP1301 */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
}
/* Deferred-work wrapper: applies the cached power state via slow I2C */
static void power_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, power_job);
	isp1301_set_powerstate(udc, udc->poweron);
}
#endif
/*
 *
 * USB protocol engine command/data read/write helper functions
 *
 */
/* Issues a single command to the USB device state machine.
 * Busy-waits (bounded spin) for CCEMPTY and retries the write until the
 * protocol engine accepts the command. */
static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 pass = 0;
	int to;
	/* EP may lock on CLRI if this read isn't done */
	u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	(void) tmp;
	while (pass == 0) {
		writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
		/* Write command code */
		writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
		/* Poll until the command register reports empty again */
		to = 10000;
		while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
			 USBD_CCEMPTY) == 0) && (to > 0)) {
			to--;
		}
		if (to > 0)
			pass = 1;
		cpu_relax();
	}
}
/* Issues 2 commands (or command and data) to the USB device state machine */
static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
					   u32 data)
{
	udc_protocol_cmd_w(udc, cmd);
	udc_protocol_cmd_w(udc, data);
}
/* Issues a single command to the USB device state machine and reads
 * response data. Spins (bounded) for CDFULL; on timeout only a debug
 * message is emitted and whatever is in CMDDATA is returned. */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
	int to = 1000;
	/* Write a command and read data from the protocol engine */
	writel((USBD_CDFULL | USBD_CCEMPTY),
	       USBD_DEVINTCLR(udc->udp_baseaddr));
	/* Write command code */
	udc_protocol_cmd_w(udc, cmd);
	/* Wait for the engine to signal that response data is available */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
	       && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev,
			"Protocol engine didn't receive response (CDFULL)\n");
	return readl(USBD_CMDDATA(udc->udp_baseaddr));
}
/*
 *
 * USB device interrupt mask support functions
 *
 */
/* Enable one or more USB device interrupts; the current enable set is
 * cached in enabled_devints to avoid read-modify-write of the register */
static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
{
	udc->enabled_devints |= devmask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}
/* Disable one or more USB device interrupts (updates the cached set) */
static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
{
	udc->enabled_devints &= ~mask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}
/* Clear one or more pending USB device interrupts via the clear register */
static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
{
	writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
}
/*
 *
 * Endpoint interrupt disable/enable functions
 *
 */
/* Enable the interrupt for one hardware endpoint; the enable set is
 * cached in enabled_hwepints */
static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints |= (1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}
/* Disable the interrupt for one hardware endpoint */
static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints &= ~(1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}
/* Clear a pending interrupt for one hardware endpoint */
static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
}
/* Enable DMA for the HW channel */
static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
}
/* Disable DMA for the HW channel */
static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
}
/*
 *
 * Endpoint realize/unrealize functions
 *
 */
/* Before an endpoint can be used, it needs to be realized
 * in the USB protocol engine - this realizes the endpoint.
 * The interrupt (FIFO or DMA) is not enabled with this function */
static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
			     u32 maxpacket)
{
	int to = 1000;
	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
	writel(hwep, USBD_EPIND(udc->udp_baseaddr));
	/* Keep a cached bitmap of realized EPs for unrealize updates */
	udc->realized_eps |= (1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
	writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
	/* Wait until endpoint is realized in hardware (bounded spin) */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
		  USBD_EP_RLZED)) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
}
/* Unrealize an EP - removes it from the cached realized-EP bitmap */
static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->realized_eps &= ~(1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
}
/*
 *
 * Endpoint support functions
 *
 */
/* Select an endpoint and clear its interrupt; returns the EP status byte
 * read back from the protocol engine */
static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
	return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
}
/* Disables the endpoint in the USB protocol engine */
static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_DA));
}
/* Stalls the endpoint - endpoint will return STALL */
static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_ST));
}
/* Clear stall or reset endpoint (writes a zero status byte) */
static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(0));
}
/* Select an endpoint for endpoint status, clear, validate */
static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
}
/*
 *
 * Endpoint buffer management functions
 *
 */
/* Clear the currently selected endpoint's buffer */
static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_CLR_BUF);
}
/* Validate the currently selected endpoint's buffer */
static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_VALID_BUF);
}
/* Clear the EP interrupt and return the EP status from the engine */
static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Clear EP interrupt */
	uda_clear_hwepint(udc, hwep);
	return udc_selep_clrint(udc, hwep);
}
/*
 *
 * USB EP DMA support
 *
 */
/* Allocate a DMA Descriptor from the dd_cache pool; records its own DMA
 * address in this_dma so it can later be freed and linked into the UDCA.
 * Returns NULL on allocation failure. */
static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
{
	dma_addr_t dma;
	struct lpc32xx_usbd_dd_gad *dd;
	dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
	if (dd)
		dd->this_dma = dma;
	return dd;
}
/* Free a DMA Descriptor back to the dd_cache pool */
static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
{
	dma_pool_free(udc->dd_cache, dd, dd->this_dma);
}
/*
*
* USB setup and shutdown functions
*
*/
/*
 * Gate the USB slave clock on or off for low-power operation.  Clocks are
 * typically started on a connection event and stopped on disconnect.  The
 * cached "clocked" flag makes the function idempotent, keeping the
 * clk_prepare_enable()/clk_disable_unprepare() calls balanced.
 */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable) {
		if (!udc->clocked) {
			udc->clocked = 1;
			clk_prepare_enable(udc->usb_slv_clk);
		}
	} else {
		if (udc->clocked) {
			udc->clocked = 0;
			clk_disable_unprepare(udc->usb_slv_clk);
		}
	}
}
/* Set/reset USB device address (also sets the DEV_EN enable bit) */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* Address will be latched at the end of the status phase, or
	   latched immediately if function is called twice */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}
/* Setup up a IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, installing the DD into the UDCA,
 * and then enabling the DMA for that EP. Assumes the EP queue is non-empty
 * and the request already has a DD (built in lpc32xx_ep_queue). */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;
	ep->req_pending = 1;
	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);
	/* Clear ZLP if last packet is not of MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;
	return 0;
}
/* Setup up a OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, installing the DD into the UDCA,
 * and then enabling the DMA for that EP. Assumes the EP queue is non-empty
 * and the request already has a DD (built in lpc32xx_ep_queue). */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;
	ep->req_pending = 1;
	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);
	return 0;
}
/* Fully disable the USB device controller: device off, all interrupts
 * masked and cleared, all 32 hardware endpoints torn down, DMA stopped */
static void udc_disable(struct lpc32xx_udc *udc)
{
	u32 i;
	/* Disable device */
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
	/* Disable all device interrupts (including EP0) */
	uda_disable_devint(udc, 0x3FF);
	/* Disable and reset all endpoint interrupts */
	for (i = 0; i < 32; i++) {
		uda_disable_hwepint(udc, i);
		uda_clear_hwepint(udc, i);
		udc_disable_hwep(udc, i);
		udc_unrealize_hwep(udc, i);
		udc->udca_v_base[i] = 0;
		/* Disable and clear all interrupts and DMA */
		udc_ep_dma_disable(udc, i);
		writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
	}
	/* Disable DMA interrupts */
	writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
	writel(0, USBD_UDCAH(udc->udp_baseaddr));
}
/* Bring the controller to a known enabled state: device on, EP0 realized
 * in FIFO mode, device and DMA interrupts armed, address 0 latched */
static void udc_enable(struct lpc32xx_udc *udc)
{
	u32 i;
	struct lpc32xx_ep *ep = &udc->ep[0];
	/* Start with known state */
	udc_disable(udc);
	/* Enable device */
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
	/* EP interrupts on high priority, FRAME interrupt on low priority */
	writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
	writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
	/* Clear any pending device interrupts */
	writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
	/* Setup UDCA - not yet used (DMA) */
	writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
	/* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
	for (i = 0; i <= 1; i++) {
		udc_realize_hwep(udc, i, ep->ep.maxpacket);
		uda_enable_hwepint(udc, i);
		udc_select_hwep(udc, i);
		udc_clrstall_hwep(udc, i);
		udc_clr_buffer_hwep(udc, i);
	}
	/* Device interrupt setup */
	uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
			       USBD_EP_FAST));
	uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
				USBD_EP_FAST));
	/* Set device address to 0 - called twice to force a latch in the USB
	   engine without the need of a setup packet status closure */
	udc_set_address(udc, 0);
	udc_set_address(udc, 0);
	/* Enable master DMA interrupts */
	writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
	       USBD_DMAINTEN(udc->udp_baseaddr));
	udc->dev_status = 0;
}
/*
 *
 * USB device board specific events handled via callbacks
 *
 */
/* Connection change event - notify board function of change */
static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a connection change event (optional) */
	if (udc->board->conn_chgb != NULL)
		udc->board->conn_chgb(conn);
}
/* Suspend/resume event - notify board function of change and track the
 * suspended flag (conn != 0 means resumed) */
static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a Suspend/resume change event (optional) */
	if (udc->board->susp_chgb != NULL)
		udc->board->susp_chgb(conn);
	if (conn)
		udc->suspended = 0;
	else
		udc->suspended = 1;
}
/* Remote wakeup enable/disable - notify board function of change */
static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
{
	if (udc->board->rmwk_chgb != NULL)
		udc->board->rmwk_chgb(udc->dev_status &
				      (1 << USB_DEVICE_REMOTE_WAKEUP));
}
/* Reads 'bytes' bytes from the RX FIFO into 'data', choosing a copy
 * strategy by the destination's alignment. Each 32-bit FIFO read pops
 * data from the hardware, so exactly ceil(bytes/4) reads are issued. */
static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;
	/* Use optimal data transfer method based on source address and size */
	switch (((uintptr_t) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);
		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			bl = bytes - n;
			if (bl > 4)
				bl = 4;
			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
		}
		break;
	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);
		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}
		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}
/* Read data from the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. This function should only be called if
 * a packet is known to be ready to read for the endpoint. Note that the
 * endpoint must be selected in the protocol engine prior to this call.
 * Returns the number of bytes copied (packet length capped at 'bytes'). */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
	/* Wait until packet is ready (bounded spin) */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
	/* Mask out count */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;
	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);
	/* Drop the read-enable bit, then release the hardware buffer */
	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);
	return tmp;
}
/* Writes 'bytes' bytes from 'data' into the TX FIFO, choosing a copy
 * strategy by the source's alignment. Each 32-bit FIFO write pushes data
 * to the hardware, so exactly ceil(bytes/4) writes are issued. */
static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;
	/* Use optimal data transfer method based on source address and size */
	switch (((uintptr_t) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);
		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;
	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			bl = bytes - n;
			if (bl > 4)
				bl = 4;
			tmp = 0;
			for (i = 0; i < bl; i++)
				tmp |= data[n + i] << (i * 8);
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;
	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);
		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4) {
			tmp = *p16++ & 0xFFFF;
			tmp |= (*p16++ & 0xFFFF) << 16;
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;
	}
}
/* Write data to the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. Note that the endpoint must be selected
 * in the protocol engine prior to this call. A zero-length write still
 * pushes one dummy word to trigger transmission (ZLP). */
static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			   u32 bytes)
{
	u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
	if ((bytes > 0) && (data == NULL))
		return;
	/* Setup write of endpoint */
	writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
	writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
	/* Need at least 1 byte to trigger TX */
	if (bytes == 0)
		writel(0, USBD_TXDATA(udc->udp_baseaddr));
	else
		udc_stuff_fifo(udc, (u8 *) data, bytes);
	/* Drop write-enable, then hand the buffer to the hardware */
	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
	udc_val_buffer_hwep(udc, hwep);
}
/* USB device reset - resets USB to a default state with just EP0
 * enabled; all data endpoints drop any pending-request marker */
static void uda_usb_reset(struct lpc32xx_udc *udc)
{
	int i;

	/* Re-init device controller and EP0 */
	udc_enable(udc);
	udc->gadget.speed = USB_SPEED_FULL;

	for (i = NUM_ENDPOINTS - 1; i >= 1; i--)
		udc->ep[i].req_pending = 0;
}
/* Send a ZLP on EP0 */
static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
{
	udc_write_hwep(udc, EP_IN, NULL, 0);
}
/* Get current frame number; the engine returns it as two byte reads
 * (low byte first) */
static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
{
	u16 flo, fhi;
	udc_protocol_cmd_w(udc, CMD_RD_FRAME);
	flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
	fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
	return (fhi << 8) | flo;
}
/* Set the device as configured - enables all endpoints */
static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
}
/* Set the device as unconfigured - disables all endpoints */
static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
}
/* reinit == restore initial software state: rebuild the gadget EP list
 * (EP0 excluded), reset per-EP queues and flags, return to WAIT_FOR_SETUP */
static void udc_reinit(struct lpc32xx_udc *udc)
{
	u32 i;
	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];
		/* EP0 is the gadget's ep0, not part of the general list */
		if (i != 0)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
		INIT_LIST_HEAD(&ep->queue);
		ep->req_pending = 0;
	}
	udc->ep0state = WAIT_FOR_SETUP;
}
/* Complete a request: unlink it, settle its final status, release its DMA
 * mapping/descriptor, and invoke the gadget completion callback.
 * Must be called with lock; the lock is dropped around the callback. */
static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
{
	struct lpc32xx_udc *udc = ep->udc;
	list_del_init(&req->queue);
	/* Keep a status that was already set (e.g. by a cancel) */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	if (ep->lep) {
		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
		/* Free DDs */
		udc_dd_free(udc, req->dd_desc_ptr);
	}
	if (status && status != -ESHUTDOWN)
		ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
	ep->req_pending = 0;
	/* Completion callbacks run without the controller lock held */
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
/* Complete every queued request on the endpoint with the given status;
 * on -ESHUTDOWN also disable the hardware endpoint and its interrupt.
 * Must be called with lock */
static void nuke(struct lpc32xx_ep *ep, int status)
{
	struct lpc32xx_request *req;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
		done(ep, req, status);
	}
	if (status == -ESHUTDOWN) {
		uda_disable_hwepint(ep->udc, ep->hwep_num);
		udc_disable_hwep(ep->udc, ep->hwep_num);
	}
}
/* IN endpoint 0 transfer: push up to one maxpacket of the head request
 * into the EP0 FIFO. Returns 1 when the request completed, 0 when more
 * data remains (or nothing was queued). */
static int udc_ep0_in_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tsend, ts = 0;
	if (list_empty(&ep0->queue))
		/* Nothing to send */
		return 0;
	else
		req = list_entry(ep0->queue.next, struct lpc32xx_request,
				 queue);
	tsend = ts = req->req.length - req->req.actual;
	if (ts == 0) {
		/* Send a ZLP */
		udc_ep0_send_zlp(udc);
		done(ep0, req, 0);
		return 1;
	} else if (ts > ep0->ep.maxpacket)
		ts = ep0->ep.maxpacket; /* Just send what we can */
	/* Write data to the EP0 FIFO and start transfer */
	udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
	/* Increment data pointer */
	req->req.actual += ts;
	/* A full-size (or larger) remainder means another packet follows */
	if (tsend >= ep0->ep.maxpacket)
		return 0; /* Stay in data transfer state */
	/* Transfer request is complete */
	udc->ep0state = WAIT_FOR_SETUP;
	done(ep0, req, 0);
	return 1;
}
/* OUT endpoint 0 transfer: pull up to one maxpacket from the EP0 FIFO into
 * the head request's buffer. Returns 1 when the request completed, 0 when
 * more data is expected (or nothing was queued). */
static int udc_ep0_out_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tr, bufferspace;
	if (list_empty(&ep0->queue))
		return 0;
	else
		req = list_entry(ep0->queue.next, struct lpc32xx_request,
				 queue);
	if (req) {
		if (req->req.length == 0) {
			/* Just dequeue request */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}
		/* Get data from FIFO */
		bufferspace = req->req.length - req->req.actual;
		if (bufferspace > ep0->ep.maxpacket)
			bufferspace = ep0->ep.maxpacket;
		/* Copy data to buffer */
		prefetchw(req->req.buf + req->req.actual);
		tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
				   bufferspace);
		/* NOTE(review): 'actual' is advanced by the requested size,
		 * not by 'tr' (bytes actually read); a short non-final packet
		 * would over-count - confirm this is intended */
		req->req.actual += bufferspace;
		if (tr < ep0->ep.maxpacket) {
			/* This is the last packet */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}
	}
	return 0;
}
/* Tear down all activity: fail all queued requests, notify the gadget
 * driver of disconnect (lock dropped for the callback), drop the pullup
 * and reset the controller to software defaults.
 * Must be called with lock */
static void stop_activity(struct lpc32xx_udc *udc)
{
	struct usb_gadget_driver *driver = udc->driver;
	int i;
	/* Skip the disconnect callback if we were never connected */
	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->suspended = 0;
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];
		nuke(ep, -ESHUTDOWN);
	}
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
	isp1301_pullup_enable(udc, 0, 0);
	udc_disable(udc);
	udc_reinit(udc);
}
/*
 * Activate or kill host pullup
 * Can be called with or without lock
 */
static void pullup(struct lpc32xx_udc *udc, int is_on)
{
	int want = is_on;

	/* Nothing can be signalled while the controller is unclocked */
	if (!udc->clocked)
		return;

	/* Force the pullup off unless the UDC is enabled with VBUS present */
	if (!udc->enabled || !udc->vbus)
		want = 0;

	/* Only touch the slow transceiver when the state actually changes */
	if (want != udc->pullup)
		isp1301_pullup_enable(udc, want, 0);
}
/* usb_ep_ops.disable: abort all requests on the endpoint, stop its DMA,
 * clear all pending DMA statuses and unrealize the hardware endpoint.
 * Must be called without lock */
static int lpc32xx_ep_disable(struct usb_ep *_ep)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	unsigned long flags;
	/* EP0 and not-yet-enabled endpoints can't be disabled */
	if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
		return -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);
	nuke(ep, -ESHUTDOWN);
	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
	/* Remove the DD pointer in the UDCA */
	udc->udca_v_base[ep->hwep_num] = 0;
	/* Disable and reset endpoint and interrupt */
	uda_clear_hwepint(udc, ep->hwep_num);
	udc_unrealize_hwep(udc, ep->hwep_num);
	ep->hwep_num = 0;
	spin_unlock_irqrestore(&udc->lock, flags);
	/* Wake anyone waiting for all endpoints to be disabled */
	atomic_dec(&udc->enabled_ep_cnt);
	wake_up(&udc->ep_disable_wait_queue);
	return 0;
}
/* usb_ep_ops.enable: validate the descriptor, map the logical endpoint to
 * its hardware EP number, realize it in the engine and clear its state.
 * Must be called without lock */
static int lpc32xx_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc;
	u16 maxpacket;
	u32 tmp;
	unsigned long flags;
	/* Verify EP data */
	if ((!_ep) || (!ep) || (!desc) ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;
	udc = ep->udc;
	maxpacket = usb_endpoint_maxp(desc);
	if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
		dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
		return -EINVAL;
	}
	/* Don't touch EP0 */
	if (ep->hwep_num_base == 0) {
		dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
		return -EINVAL;
	}
	/* Is driver ready? */
	if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_dbg(udc->dev, "bogus device state\n");
		return -ESHUTDOWN;
	}
	/* Per-transfer-type maxpacket validation */
	tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	switch (tmp) {
	case USB_ENDPOINT_XFER_CONTROL:
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		if (maxpacket > ep->maxpacket) {
			dev_dbg(udc->dev,
				"Bad INT endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;
	case USB_ENDPOINT_XFER_BULK:
		switch (maxpacket) {
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
			dev_dbg(udc->dev,
				"Bad BULK endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		break;
	}
	spin_lock_irqsave(&udc->lock, flags);
	/* Initialize endpoint to match the selected descriptor */
	ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ep->ep.maxpacket = maxpacket;
	/* Map hardware endpoint from base and direction */
	if (ep->is_in)
		/* IN endpoints are offset 1 from the OUT endpoint */
		ep->hwep_num = ep->hwep_num_base + EP_IN;
	else
		ep->hwep_num = ep->hwep_num_base;
	ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
	       ep->hwep_num, maxpacket, (ep->is_in == 1));
	/* Realize the endpoint, interrupt is enabled later when
	 * buffers are queued, IN EPs will NAK until buffers are ready */
	udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
	udc_clr_buffer_hwep(udc, ep->hwep_num);
	uda_disable_hwepint(udc, ep->hwep_num);
	udc_clrstall_hwep(udc, ep->hwep_num);
	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
	spin_unlock_irqrestore(&udc->lock, flags);
	atomic_inc(&udc->enabled_ep_cnt);
	return 0;
}
/*
 * Allocate a USB request list
 * Can be called with or without lock
 */
static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct lpc32xx_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/*
 * De-allocate a USB request list
 * Can be called with or without lock
 */
static void lpc32xx_ep_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	struct lpc32xx_request *req;
	req = container_of(_req, struct lpc32xx_request, req);
	/* Freeing a request that is still queued is a driver bug */
	BUG_ON(!list_empty(&req->queue));
	kfree(req);
}
/* usb_ep_ops.queue: queue a request on an endpoint and kick off the
 * transfer if the endpoint was idle (EP0 via FIFO, others via DMA).
 * Must be called without lock.
 * Returns 0 on success, -EINVAL on bad arguments, -EPIPE when the bus
 * speed is still unknown, or -ENOMEM if no DMA descriptor is available. */
static int lpc32xx_ep_queue(struct usb_ep *_ep,
			    struct usb_request *_req, gfp_t gfp_flags)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep;
	struct lpc32xx_udc *udc;
	unsigned long flags;
	int status = 0;
	req = container_of(_req, struct lpc32xx_request, req);
	ep = container_of(_ep, struct lpc32xx_ep, ep);
	if (!_ep || !_req || !_req->complete || !_req->buf ||
	    !list_empty(&req->queue))
		return -EINVAL;
	udc = ep->udc;
	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -EPIPE;
	if (ep->lep) {
		struct lpc32xx_usbd_dd_gad *dd;
		status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in);
		if (status)
			return status;
		/* For the request, build a list of DDs */
		dd = udc_dd_alloc(udc);
		if (!dd) {
			/* Error allocating DD: release the DMA mapping taken
			 * just above, otherwise the request buffer would stay
			 * mapped forever (mapping leak on this error path) */
			usb_gadget_unmap_request(&udc->gadget, _req, ep->is_in);
			return -ENOMEM;
		}
		req->dd_desc_ptr = dd;
		/* Setup the DMA descriptor */
		dd->dd_next_phy = dd->dd_next_v = 0;
		dd->dd_buffer_addr = req->req.dma;
		dd->dd_status = 0;
		/* Special handling for ISO EPs */
		if (ep->eptype == EP_ISO_TYPE) {
			dd->dd_setup = DD_SETUP_ISO_EP |
				DD_SETUP_PACKETLEN(0) |
				DD_SETUP_DMALENBYTES(1);
			dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
			if (ep->is_in)
				dd->iso_status[0] = req->req.length;
			else
				dd->iso_status[0] = 0;
		} else
			dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
				DD_SETUP_DMALENBYTES(req->req.length);
	}
	ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
	       _req, _req->length, _req->buf, ep->is_in, _req->zero);
	spin_lock_irqsave(&udc->lock, flags);
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	req->send_zlp = _req->zero;
	/* Kickstart empty queues */
	if (list_empty(&ep->queue)) {
		list_add_tail(&req->queue, &ep->queue);
		if (ep->hwep_num_base == 0) {
			/* Handle expected data direction */
			if (ep->is_in) {
				/* IN packet to host */
				udc->ep0state = DATA_IN;
				status = udc_ep0_in_req(udc);
			} else {
				/* OUT packet from host */
				udc->ep0state = DATA_OUT;
				status = udc_ep0_out_req(udc);
			}
		} else if (ep->is_in) {
			/* IN packet to host and kick off transfer */
			if (!ep->req_pending)
				udc_ep_in_req_dma(udc, ep);
		} else
			/* OUT packet from host and kick off list */
			if (!ep->req_pending)
				udc_ep_out_req_dma(udc, ep);
	} else
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);
	return (status < 0) ? status : 0;
}
/* usb_ep_ops.dequeue: cancel a queued request, completing it with
 * -ECONNRESET. Fails if the request is not on this endpoint's queue.
 * Must be called without lock */
static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct lpc32xx_ep *ep;
	struct lpc32xx_request *req = NULL, *iter;
	unsigned long flags;
	ep = container_of(_ep, struct lpc32xx_ep, ep);
	if (!_ep || ep->hwep_num_base == 0)
		return -EINVAL;
	spin_lock_irqsave(&ep->udc->lock, flags);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}
	done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}
/*
 * Stall or un-stall a non-control OUT endpoint.
 * Must be called without lock; the UDC lock is taken internally.
 * value == 1 stalls the endpoint; any other value ends the stall
 * and clears a pending wedge.
 */
static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc;
	unsigned long flags;
	/* hwep_num 0/1 is the control endpoint (see controller_template) */
	if ((!ep) || (ep->hwep_num <= 1))
		return -EINVAL;
	/* Don't halt an IN EP */
	if (ep->is_in)
		return -EAGAIN;
	udc = ep->udc;
	spin_lock_irqsave(&udc->lock, flags);
	if (value == 1) {
		/* stall */
		udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
					DAT_WR_BYTE(EP_STAT_ST));
	} else {
		/* End stall */
		ep->wedge = 0;
		udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
					DAT_WR_BYTE(0));
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* set the halt feature and ignores clear requests */
/*
 * Wedge the endpoint: mark it so that a later "end stall" from
 * lpc32xx_ep_set_halt() is ignored (ep->wedge is checked there and in
 * the CLEAR_FEATURE handler), then stall it via the normal halt path.
 */
static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	if (!_ep || !ep->udc)
		return -EINVAL;
	ep->wedge = 1;
	return usb_ep_set_halt(_ep);
}
/* Endpoint operations exported to the gadget core for all endpoints */
static const struct usb_ep_ops lpc32xx_ep_ops = {
	.enable		= lpc32xx_ep_enable,
	.disable	= lpc32xx_ep_disable,
	.alloc_request	= lpc32xx_ep_alloc_request,
	.free_request	= lpc32xx_ep_free_request,
	.queue		= lpc32xx_ep_queue,
	.dequeue	= lpc32xx_ep_dequeue,
	.set_halt	= lpc32xx_ep_set_halt,
	.set_wedge	= lpc32xx_ep_set_wedge,
};
/* Send a ZLP on a non-0 IN EP */
static void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	/* Clear EP status */
	udc_clearep_getsts(udc, ep->hwep_num);
	/* Send ZLP via FIFO mechanism: a zero-length write */
	udc_write_hwep(udc, ep->hwep_num, NULL, 0);
}
/*
 * Handle EP completion for ZLP
 * This function will only be called when a delayed ZLP needs to be sent out
 * after a DMA transfer has filled both buffers.
 */
static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 epstatus;
	struct lpc32xx_request *req;

	if (ep->hwep_num <= 0)
		return;

	uda_clear_hwepint(udc, ep->hwep_num);

	/* If this interrupt isn't enabled, return now */
	if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
		return;

	/* Get endpoint status */
	epstatus = udc_clearep_getsts(udc, ep->hwep_num);

	/*
	 * This should never happen, but protect against writing to the
	 * buffer when full.
	 */
	if (epstatus & EP_SEL_F)
		return;

	if (ep->is_in) {
		/* Send the deferred ZLP, then stop EP interrupts again */
		udc_send_in_zlp(udc, ep);
		uda_disable_hwepint(udc, ep->hwep_num);
	} else
		return;

	/*
	 * If there isn't a request waiting, something went wrong.
	 * list_entry() on an empty list never yields NULL, so use
	 * list_first_entry_or_null() to make the guard below effective.
	 */
	req = list_first_entry_or_null(&ep->queue, struct lpc32xx_request,
				       queue);
	if (req) {
		done(ep, req, 0);

		/* Start another request if ready */
		if (!list_empty(&ep->queue)) {
			if (ep->is_in)
				udc_ep_in_req_dma(udc, ep);
			else
				udc_ep_out_req_dma(udc, ep);
		} else
			ep->req_pending = 0;
	}
}
/* DMA end of transfer completion */
static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 status;
	struct lpc32xx_request *req;
	struct lpc32xx_usbd_dd_gad *dd;

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep->totalints++;
#endif

	/*
	 * list_entry() on an empty list never yields NULL, so the original
	 * "if (!req)" check could never fire; fetch the first request with
	 * list_first_entry_or_null() so a spurious DMA interrupt with no
	 * queued request is caught instead of dereferencing a bogus pointer.
	 */
	req = list_first_entry_or_null(&ep->queue, struct lpc32xx_request,
				       queue);
	if (!req) {
		ep_err(ep, "DMA interrupt on no req!\n");
		return;
	}
	dd = req->dd_desc_ptr;

	/* DMA descriptor should always be retired for this call */
	if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
		ep_warn(ep, "DMA descriptor did not retire\n");

	/* Disable DMA and clear the EP's EOT/new-DD interrupt bits */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
	writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));

	/* System error? */
	if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
	    (1 << ep->hwep_num)) {
		writel((1 << ep->hwep_num),
		       USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		ep_err(ep, "AHB critical error!\n");
		ep->req_pending = 0;

		/* The error could have occurred on a packet of a multipacket
		 * transfer, so recovering the transfer is not possible. Close
		 * the request with an error */
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* Handle the current DD's status */
	status = dd->dd_status;
	switch (status & DD_STATUS_STS_MASK) {
	case DD_STATUS_STS_NS:
		/* DD not serviced? This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_BS:
		/* Interrupt only fires on EOT - This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_NC:
	case DD_STATUS_STS_DUR:
		/* Really just a short packet, not an underrun */
		/* This is a good status and what we expect */
		break;

	default:
		/* Data overrun, system error, or unknown */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* ISO endpoints are handled differently */
	if (ep->eptype == EP_ISO_TYPE) {
		if (ep->is_in)
			req->req.actual = req->req.length;
		else
			req->req.actual = dd->iso_status[0] & 0xFFFF;
	} else
		req->req.actual += DD_STATUS_CURDMACNT(status);

	/* Send a ZLP if necessary. This will be done for non-int
	 * packets which have a size that is a divisor of MAXP */
	if (req->send_zlp) {
		/*
		 * If at least 1 buffer is available, send the ZLP now.
		 * Otherwise, the ZLP send needs to be deferred until a
		 * buffer is available.
		 */
		if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
			udc_clearep_getsts(udc, ep->hwep_num);
			uda_enable_hwepint(udc, ep->hwep_num);
			udc_clearep_getsts(udc, ep->hwep_num);

			/* Let the EP interrupt handle the ZLP */
			return;
		} else
			udc_send_in_zlp(udc, ep);
	}

	/* Transfer request is complete */
	done(ep, req, 0);

	/* Start another request if ready */
	udc_clearep_getsts(udc, ep->hwep_num);
	if (!list_empty(&ep->queue)) {
		if (ep->is_in)
			udc_ep_in_req_dma(udc, ep);
		else
			udc_ep_out_req_dma(udc, ep);
	} else
		ep->req_pending = 0;
}
/*
*
* Endpoint 0 functions
*
*/
/*
 * Handle a device status interrupt: USB reset, connect change, and
 * suspend/resume transitions as reported by GET_DEV_STAT.
 * Called with the UDC lock held from the LP interrupt handler.
 */
static void udc_handle_dev(struct lpc32xx_udc *udc)
{
	u32 tmp;
	udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
	tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
	if (tmp & DEV_RST)
		uda_usb_reset(udc);
	else if (tmp & DEV_CON_CH)
		/* Connect state changed; report current connect bit */
		uda_power_event(udc, (tmp & DEV_CON));
	else if (tmp & DEV_SUS_CH) {
		/* Suspend state changed */
		if (tmp & DEV_SUS) {
			if (udc->vbus == 0)
				stop_activity(udc);
			else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
				 udc->driver) {
				/* Power down transceiver */
				udc->poweron = 0;
				schedule_work(&udc->pullup_job);
				uda_resm_susp_event(udc, 1);
			}
		} else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
			   udc->driver && udc->vbus) {
			uda_resm_susp_event(udc, 0);
			/* Power up transceiver */
			udc->poweron = 1;
			schedule_work(&udc->pullup_job);
		}
	}
}
/*
 * Handle a USB GET_STATUS request for device, interface, or endpoint
 * recipients and write the 2-byte status reply to EP0 IN.
 * Returns 0 on success or -EOPNOTSUPP for unsupported/invalid targets.
 */
static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
{
	struct lpc32xx_ep *ep;
	u32 ep0buff = 0, tmp;

	switch (reqtype & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		break; /* Not supported */

	case USB_RECIP_DEVICE:
		ep0buff = udc->gadget.is_selfpowered;
		if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
			ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
		break;

	case USB_RECIP_ENDPOINT:
		tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
		/*
		 * Validate the index BEFORE using it to address ep[]:
		 * the original code computed &udc->ep[tmp] first, forming
		 * an out-of-bounds address for invalid indexes.
		 */
		if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
			return -EOPNOTSUPP;
		ep = &udc->ep[tmp];
		if (wIndex & USB_DIR_IN) {
			if (!ep->is_in)
				return -EOPNOTSUPP; /* Something's wrong */
		} else if (ep->is_in)
			return -EOPNOTSUPP; /* Not an IN endpoint */

		/* Get status of the endpoint */
		udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
		tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));

		if (tmp & EP_SEL_ST)
			ep0buff = (1 << USB_ENDPOINT_HALT);
		else
			ep0buff = 0;
		break;

	default:
		break;
	}

	/* Return data */
	udc_write_hwep(udc, EP_IN, &ep0buff, 2);

	return 0;
}
/*
 * Handle a SETUP packet on EP0: read the 8-byte setup packet, handle
 * the standard requests the hardware/driver must intercept (feature
 * set/clear, set-address, get-status), and pass everything else to the
 * gadget driver's setup() callback.
 * Called with the UDC lock held; temporarily drops it around the
 * gadget driver callback.
 */
static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
	struct usb_ctrlrequest ctrlpkt;
	int i, bytes;
	u16 wIndex, wValue, reqtype, req, tmp;
	/* Nuke previous transfers */
	nuke(ep0, -EPROTO);
	/* Get setup packet */
	bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
	if (bytes != 8) {
		ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
			bytes);
		return;
	}
	/* Native endianness */
	wIndex = le16_to_cpu(ctrlpkt.wIndex);
	wValue = le16_to_cpu(ctrlpkt.wValue);
	/* NOTE(review): bRequestType and bRequest are single-byte fields in
	 * struct usb_ctrlrequest; le16_to_cpu on them is effectively a no-op
	 * byte read — confirm this was intentional rather than a typo. */
	reqtype = le16_to_cpu(ctrlpkt.bRequestType);
	/* Set direction of EP0 */
	if (likely(reqtype & USB_DIR_IN))
		ep0->is_in = 1;
	else
		ep0->is_in = 0;
	/* Handle SETUP packet */
	req = le16_to_cpu(ctrlpkt.bRequest);
	switch (req) {
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		switch (reqtype) {
		case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (wValue != USB_DEVICE_REMOTE_WAKEUP)
				goto stall; /* Nothing else handled */
			/* Tell board about event */
			if (req == USB_REQ_CLEAR_FEATURE)
				udc->dev_status &=
					~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				udc->dev_status |=
					(1 << USB_DEVICE_REMOTE_WAKEUP);
			uda_remwkp_cgh(udc);
			goto zlp_send;
		case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
			if ((wValue != USB_ENDPOINT_HALT) ||
			    (tmp >= NUM_ENDPOINTS))
				break;
			/* Find hardware endpoint from logical endpoint */
			ep = &udc->ep[tmp];
			tmp = ep->hwep_num;
			if (tmp == 0)
				break;
			if (req == USB_REQ_SET_FEATURE)
				udc_stall_hwep(udc, tmp);
			else if (!ep->wedge)
				/* Only clear the stall if not wedged */
				udc_clrstall_hwep(udc, tmp);
			goto zlp_send;
		default:
			break;
		}
		break;
	case USB_REQ_SET_ADDRESS:
		if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
			udc_set_address(udc, wValue);
			goto zlp_send;
		}
		break;
	case USB_REQ_GET_STATUS:
		udc_get_status(udc, reqtype, wIndex);
		return;
	default:
		break; /* Let GadgetFS handle the descriptor instead */
	}
	if (likely(udc->driver)) {
		/* device-2-host (IN) or no data setup command, process
		 * immediately */
		spin_unlock(&udc->lock);
		i = udc->driver->setup(&udc->gadget, &ctrlpkt);
		spin_lock(&udc->lock);
		if (req == USB_REQ_SET_CONFIGURATION) {
			/* Configuration is set after endpoints are realized */
			if (wValue) {
				/* Set configuration */
				udc_set_device_configured(udc);
				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK |
							INAK_BI | INAK_II));
			} else {
				/* Clear configuration */
				udc_set_device_unconfigured(udc);
				/* Disable NAK interrupts */
				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK));
			}
		}
		if (i < 0) {
			/* setup processing failed, force stall */
			dev_dbg(udc->dev,
				"req %02x.%02x protocol STALL; stat %d\n",
				reqtype, req, i);
			udc->ep0state = WAIT_FOR_SETUP;
			goto stall;
		}
	}
	if (!ep0->is_in)
		udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
	return;
stall:
	udc_stall_hwep(udc, EP_IN);
	return;
zlp_send:
	udc_ep0_send_zlp(udc);
	return;
}
/* IN endpoint 0 transfer */
/*
 * EP0 IN interrupt handler: clears a stall if present, otherwise
 * continues the DATA_IN phase when a FIFO buffer is free.
 * Called with the UDC lock held from the HP interrupt handler.
 */
static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;
	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_IN);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep0->totalints++;
#endif
	/* Stalled? Clear stall and reset buffers */
	if (epstatus & EP_SEL_ST) {
		udc_clrstall_hwep(udc, EP_IN);
		nuke(ep0, -ECONNABORTED);
		udc->ep0state = WAIT_FOR_SETUP;
		return;
	}
	/* Is a buffer available? */
	if (!(epstatus & EP_SEL_F)) {
		/* Handle based on current state */
		if (udc->ep0state == DATA_IN)
			udc_ep0_in_req(udc);
		else {
			/* Unknown state for EP0 or end of DATA IN phase */
			nuke(ep0, -ECONNABORTED);
			udc->ep0state = WAIT_FOR_SETUP;
		}
	}
}
/* OUT endpoint 0 transfer */
/*
 * EP0 OUT interrupt handler: handles stall recovery, NAKs, incoming
 * SETUP packets, and OUT data according to the EP0 state machine.
 * Called with the UDC lock held from the HP interrupt handler.
 */
static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;
	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_OUT);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep0->totalints++;
#endif
	/* Stalled? */
	if (epstatus & EP_SEL_ST) {
		udc_clrstall_hwep(udc, EP_OUT);
		nuke(ep0, -ECONNABORTED);
		udc->ep0state = WAIT_FOR_SETUP;
		return;
	}
	/* A NAK may occur if a packet couldn't be received yet */
	if (epstatus & EP_SEL_EPN)
		return;
	/* Setup packet incoming? */
	if (epstatus & EP_SEL_STP) {
		/* Abandon any in-progress transfer and restart from SETUP */
		nuke(ep0, 0);
		udc->ep0state = WAIT_FOR_SETUP;
	}
	/* Data available? */
	if (epstatus & EP_SEL_F)
		/* Handle based on current state */
		switch (udc->ep0state) {
		case WAIT_FOR_SETUP:
			udc_handle_ep0_setup(udc);
			break;
		case DATA_OUT:
			udc_ep0_out_req(udc);
			break;
		default:
			/* Unknown state for EP0 */
			nuke(ep0, -ECONNABORTED);
			udc->ep0state = WAIT_FOR_SETUP;
		}
}
/*
 * Return the current USB frame number, or -EINVAL when the
 * controller clocks are off. Must be called without lock.
 */
static int lpc32xx_get_frame(struct usb_gadget *gadget)
{
	struct lpc32xx_udc *udc = to_udc(gadget);
	unsigned long flags;
	int ret = -EINVAL;

	if (udc->clocked) {
		spin_lock_irqsave(&udc->lock, flags);
		ret = (int) udc_get_current_frame(udc);
		spin_unlock_irqrestore(&udc->lock, flags);
	}

	return ret;
}
/* Remote wakeup is not implemented by this controller driver */
static int lpc32xx_wakeup(struct usb_gadget *gadget)
{
	return -ENOTSUPP;
}
/* Record whether the gadget reports itself as self-powered */
static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	gadget->is_selfpowered = !!is_on;

	return 0;
}
/*
 * vbus is here! turn everything on that's ready
 * Must be called without lock
 */
static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
{
	unsigned long flags;
	struct lpc32xx_udc *udc = to_udc(gadget);
	spin_lock_irqsave(&udc->lock, flags);
	/* Doesn't need lock */
	if (udc->driver) {
		/* Gadget bound: power up clocks and (dis)connect pullup */
		udc_clk_set(udc, 1);
		udc_enable(udc);
		pullup(udc, is_active);
	} else {
		/* No gadget bound: tear everything down */
		stop_activity(udc);
		pullup(udc, 0);
		/* Drop the lock so endpoint disable paths can run */
		spin_unlock_irqrestore(&udc->lock, flags);
		/*
		 * Wait for all the endpoints to disable,
		 * before disabling clocks. Don't wait if
		 * endpoints are not enabled.
		 */
		if (atomic_read(&udc->enabled_ep_cnt))
			wait_event_interruptible(udc->ep_disable_wait_queue,
				 (atomic_read(&udc->enabled_ep_cnt) == 0));
		spin_lock_irqsave(&udc->lock, flags);
		udc_clk_set(udc, 0);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/*
 * Enable or disable the D+ pullup.
 * Can be called with or without lock; pullup() handles its own locking.
 */
static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
{
	pullup(to_udc(gadget), is_on);

	return 0;
}
/* Forward declarations; bodies follow the ops table */
static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
static int lpc32xx_stop(struct usb_gadget *);
/* Gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops lpc32xx_udc_ops = {
	.get_frame		= lpc32xx_get_frame,
	.wakeup			= lpc32xx_wakeup,
	.set_selfpowered	= lpc32xx_set_selfpowered,
	.vbus_session		= lpc32xx_vbus_session,
	.pullup			= lpc32xx_pullup,
	.udc_start		= lpc32xx_start,
	.udc_stop		= lpc32xx_stop,
};
/* Release callback for the statically-templated gadget device */
static void nop_release(struct device *dev)
{
	/* nothing to free */
}
/*
 * Template for the UDC state, duplicated at probe time.
 * EP0 is the control endpoint; each following logical endpoint maps to
 * a pair of hardware endpoint numbers (hwep_num_base and base+1), with
 * the actual hwep_num filled in later when the endpoint is realized.
 */
static const struct lpc32xx_udc controller_template = {
	.gadget = {
		.ops	= &lpc32xx_udc_ops,
		.name	= driver_name,
		.dev	= {
			.init_name = "gadget",
			.release = nop_release,
		}
	},
	.ep[0] = {
		.ep = {
			.name	= "ep0",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 0,
		.hwep_num	= 0, /* Can be 0 or 1, has special handling */
		.lep		= 0,
		.eptype		= EP_CTL_TYPE,
	},
	.ep[1] = {
		.ep = {
			.name	= "ep1-int",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 2,
		.hwep_num	= 0, /* 2 or 3, will be set later */
		.lep		= 1,
		.eptype		= EP_INT_TYPE,
	},
	.ep[2] = {
		.ep = {
			.name	= "ep2-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 4,
		.hwep_num	= 0, /* 4 or 5, will be set later */
		.lep		= 2,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[3] = {
		.ep = {
			.name	= "ep3-iso",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 6,
		.hwep_num	= 0, /* 6 or 7, will be set later */
		.lep		= 3,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[4] = {
		.ep = {
			.name	= "ep4-int",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 8,
		.hwep_num	= 0, /* 8 or 9, will be set later */
		.lep		= 4,
		.eptype		= EP_INT_TYPE,
	},
	.ep[5] = {
		.ep = {
			.name	= "ep5-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 10,
		.hwep_num	= 0, /* 10 or 11, will be set later */
		.lep		= 5,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[6] = {
		.ep = {
			.name	= "ep6-iso",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 12,
		.hwep_num	= 0, /* 12 or 13, will be set later */
		.lep		= 6,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[7] = {
		.ep = {
			.name	= "ep7-int",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 14,
		.hwep_num	= 0,
		.lep		= 7,
		.eptype		= EP_INT_TYPE,
	},
	.ep[8] = {
		.ep = {
			.name	= "ep8-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 16,
		.hwep_num	= 0,
		.lep		= 8,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[9] = {
		.ep = {
			.name	= "ep9-iso",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 18,
		.hwep_num	= 0,
		.lep		= 9,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[10] = {
		.ep = {
			.name	= "ep10-int",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 20,
		.hwep_num	= 0,
		.lep		= 10,
		.eptype		= EP_INT_TYPE,
	},
	.ep[11] = {
		.ep = {
			.name	= "ep11-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 22,
		.hwep_num	= 0,
		.lep		= 11,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[12] = {
		.ep = {
			.name	= "ep12-iso",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 24,
		.hwep_num	= 0,
		.lep		= 12,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[13] = {
		.ep = {
			.name	= "ep13-int",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 26,
		.hwep_num	= 0,
		.lep		= 13,
		.eptype		= EP_INT_TYPE,
	},
	.ep[14] = {
		.ep = {
			.name	= "ep14-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 64,
		.hwep_num_base	= 28,
		.hwep_num	= 0,
		.lep		= 14,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[15] = {
		.ep = {
			.name	= "ep15-bulk",
			.ops	= &lpc32xx_ep_ops,
			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					      USB_EP_CAPS_DIR_ALL),
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 30,
		.hwep_num	= 0,
		.lep		= 15,
		.eptype		= EP_BLK_TYPE,
	},
};
/* ISO and status interrupts */
/*
 * Low-priority USB interrupt handler: device status changes and
 * protocol/bit errors. Fast (EP) interrupts are masked out here and
 * handled by the high-priority handler instead.
 */
static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
{
	u32 tmp, devstat;
	struct lpc32xx_udc *udc = _udc;
	spin_lock(&udc->lock);
	/* Read the device status register */
	devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
	devstat &= ~USBD_EP_FAST;
	writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
	devstat = devstat & udc->enabled_devints;
	/* Device specific handling needed? */
	if (devstat & USBD_DEV_STAT)
		udc_handle_dev(udc);
	/* Start of frame? (devstat & FRAME_INT):
	 * The frame interrupt isn't really needed for ISO support,
	 * as the driver will queue the necessary packets */
	/* Error? */
	if (devstat & ERR_INT) {
		/* All types of errors, from cable removal during transfer to
		 * misc protocol and bit errors. These are mostly for just info,
		 * as the USB hardware will work around these. If these errors
		 * happen a lot, something is wrong. */
		udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
		tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
		dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
	}
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
/* EP interrupts */
/*
 * High-priority USB interrupt handler: dispatches endpoint interrupts.
 * EP0 IN/OUT get dedicated handlers; all other endpoints are matched
 * against their hardware endpoint number.
 */
static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
{
	u32 tmp;
	struct lpc32xx_udc *udc = _udc;
	spin_lock(&udc->lock);
	/* Read the device status register */
	writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
	/* Endpoints */
	tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
	/* Special handling for EP0 */
	if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
		/* Handle EP0 IN */
		if (tmp & (EP_MASK_SEL(0, EP_IN)))
			udc_handle_ep0_in(udc);
		/* Handle EP0 OUT */
		if (tmp & (EP_MASK_SEL(0, EP_OUT)))
			udc_handle_ep0_out(udc);
	}
	/* All other EPs */
	if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
		int i;
		/* Handle other EP interrupts */
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (tmp & (1 << udc->ep[i].hwep_num))
				udc_handle_eps(udc, &udc->ep[i]);
		}
	}
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}
/*
 * DMA interrupt handler: gathers end-of-transfer, new-DD-request and
 * system-error events, then dispatches each affected endpoint.
 */
static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
{
	struct lpc32xx_udc *udc = _udc;
	u32 pending;
	int ep_idx;

	spin_lock(&udc->lock);

	/* Collect all pending DMA-related EP events (same read order as
	 * the hardware documentation expects: EOT, then active-DMA new-DD
	 * requests, then system errors) */
	pending = readl(USBD_EOTINTST(udc->udp_baseaddr));
	pending |= readl(USBD_EPDMAST(udc->udp_baseaddr)) &
		   readl(USBD_NDDRTINTST(udc->udp_baseaddr));
	pending |= readl(USBD_SYSERRTINTST(udc->udp_baseaddr));

	for (ep_idx = 1; ep_idx < NUM_ENDPOINTS; ep_idx++) {
		if (pending & (1 << udc->ep[ep_idx].hwep_num))
			udc_handle_dma_ep(udc, &udc->ep[ep_idx]);
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
/*
*
* VBUS detection, pullup handler, and Gadget cable state notification
*
*/
/*
 * Query the ISP1301 transceiver for the current VBUS state and notify
 * the gadget layer when it changed. Runs from the threaded VBUS IRQ
 * handler; only does work while the UDC is enabled.
 */
static void vbus_work(struct lpc32xx_udc *udc)
{
	u8 value;
	if (udc->enabled != 0) {
		/* Discharge VBUS real quick */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
		/* Give VBUS some time (100mS) to discharge */
		msleep(100);
		/* Disable VBUS discharge resistor */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DISCHRG);
		/* Clear interrupt */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_INTERRUPT_LATCH |
			ISP1301_I2C_REG_CLEAR_ADDR, ~0);
		/* Get the VBUS status from the transceiver */
		value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
						 ISP1301_I2C_INTERRUPT_SOURCE);
		/* VBUS on or off? */
		if (value & INT_SESS_VLD)
			udc->vbus = 1;
		else
			udc->vbus = 0;
		/* VBUS changed? */
		if (udc->last_vbus != udc->vbus) {
			udc->last_vbus = udc->vbus;
			lpc32xx_vbus_session(&udc->gadget, udc->vbus);
		}
	}
}
/* Threaded IRQ handler for the ISP1301 transceiver interrupt (VBUS) */
static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
{
	struct lpc32xx_udc *udc = _udc;
	vbus_work(udc);
	return IRQ_HANDLED;
}
/*
 * Bind a gadget driver to the UDC: record the driver, force an initial
 * VBUS poll to detect an already-inserted cable, and enable the
 * transceiver's session-valid/VBUS-valid interrupts.
 * Returns 0, -EINVAL for bad parameters, or -EBUSY if already bound.
 */
static int lpc32xx_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct lpc32xx_udc *udc = to_udc(gadget);
	if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
		dev_err(udc->dev, "bad parameter.\n");
		return -EINVAL;
	}
	if (udc->driver) {
		dev_err(udc->dev, "UDC already has a gadget driver\n");
		return -EBUSY;
	}
	udc->driver = driver;
	udc->gadget.dev.of_node = udc->dev->of_node;
	udc->enabled = 1;
	udc->gadget.is_selfpowered = 1;
	udc->vbus = 0;
	/* Force VBUS process once to check for cable insertion */
	udc->last_vbus = udc->vbus = 0;
	vbus_work(udc);
	/* enable interrupts */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING, INT_SESS_VLD | INT_VBUS_VLD);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING, INT_SESS_VLD | INT_VBUS_VLD);
	return 0;
}
/*
 * Unbind the gadget driver: mask transceiver interrupts, stop all
 * activity, wait for endpoints to disable, and gate the clocks.
 */
static int lpc32xx_stop(struct usb_gadget *gadget)
{
	struct lpc32xx_udc *udc = to_udc(gadget);
	/* Mask both edges of the transceiver interrupts */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	if (udc->clocked) {
		spin_lock(&udc->lock);
		stop_activity(udc);
		spin_unlock(&udc->lock);
		/*
		 * Wait for all the endpoints to disable,
		 * before disabling clocks. Don't wait if
		 * endpoints are not enabled.
		 */
		if (atomic_read(&udc->enabled_ep_cnt))
			wait_event_interruptible(udc->ep_disable_wait_queue,
				(atomic_read(&udc->enabled_ep_cnt) == 0));
		spin_lock(&udc->lock);
		udc_clk_set(udc, 0);
		spin_unlock(&udc->lock);
	}
	udc->enabled = 0;
	udc->driver = NULL;
	return 0;
}
/* Drop the D+ pullup so the host sees a disconnect on reboot */
static void lpc32xx_udc_shutdown(struct platform_device *dev)
{
	/* Force disconnect on reboot */
	struct lpc32xx_udc *udc = platform_get_drvdata(dev);
	pullup(udc, 0);
}
/*
* Callbacks to be overridden by options passed via OF (TODO)
*/
/* Board hook for connect-state changes; intentionally a no-op here */
static void lpc32xx_usbd_conn_chg(int conn)
{
	/* Do nothing, it might be nice to enable an LED
	 * based on conn state being !0 */
}
/* Board hook for suspend-state changes; intentionally a no-op here */
static void lpc32xx_usbd_susp_chg(int susp)
{
	/* Device suspend if susp != 0 */
}
/* Board hook for remote-wakeup changes; intentionally a no-op here */
static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
{
	/* Enable or disable USB remote wakeup */
}
/* Default board configuration; callbacks above are placeholder no-ops */
static struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
	.vbus_drv_pol = 0,
	.conn_chgb = &lpc32xx_usbd_conn_chg,
	.susp_chgb = &lpc32xx_usbd_susp_chg,
	.rmwk_chgb = &lpc32xx_rmwkup_chg,
};
static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
/*
 * Probe the LPC32xx UDC platform device: clone the controller template,
 * resolve the ISP1301 transceiver, map registers, enable clocks,
 * allocate the UDCA and DMA descriptor pool, hook up the four IRQs,
 * and register the gadget with the UDC core.
 */
static int lpc32xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct lpc32xx_udc *udc;
	int retval, i;
	dma_addr_t dma_handle;
	struct device_node *isp1301_node;
	udc = devm_kmemdup(dev, &controller_template, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;
	/* NOTE(review): hard-coded 15 — presumably NUM_ENDPOINTS - 1 to
	 * match the template above; confirm and consider using the macro */
	for (i = 0; i <= 15; i++)
		udc->ep[i].udc = udc;
	udc->gadget.ep0 = &udc->ep[0].ep;
	/* init software state */
	udc->gadget.dev.parent = dev;
	udc->pdev = pdev;
	udc->dev = &pdev->dev;
	udc->enabled = 0;
	/* Resolve the external ISP1301 transceiver via DT phandle */
	if (pdev->dev.of_node) {
		isp1301_node = of_parse_phandle(pdev->dev.of_node,
						"transceiver", 0);
	} else {
		isp1301_node = NULL;
	}
	udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
	of_node_put(isp1301_node);
	if (!udc->isp1301_i2c_client) {
		/* Transceiver not ready yet; retry probe later */
		return -EPROBE_DEFER;
	}
	dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
		 udc->isp1301_i2c_client->addr);
	pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
	retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (retval)
		return retval;
	udc->board = &lpc32xx_usbddata;
	/*
	 * Resources are mapped as follows:
	 *  IORESOURCE_MEM, base address and size of USB space
	 *  IORESOURCE_IRQ, USB device low priority interrupt number
	 *  IORESOURCE_IRQ, USB device high priority interrupt number
	 *  IORESOURCE_IRQ, USB device interrupt number
	 *  IORESOURCE_IRQ, USB transceiver interrupt number
	 */
	spin_lock_init(&udc->lock);
	/* Get IRQs */
	for (i = 0; i < 4; i++) {
		udc->udp_irq[i] = platform_get_irq(pdev, i);
		if (udc->udp_irq[i] < 0)
			return udc->udp_irq[i];
	}
	udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->udp_baseaddr)) {
		dev_err(udc->dev, "IO map failure\n");
		return PTR_ERR(udc->udp_baseaddr);
	}
	/* Get USB device clock */
	udc->usb_slv_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->usb_slv_clk)) {
		dev_err(udc->dev, "failed to acquire USB device clock\n");
		return PTR_ERR(udc->usb_slv_clk);
	}
	/* Enable USB device clock */
	retval = clk_prepare_enable(udc->usb_slv_clk);
	if (retval < 0) {
		dev_err(udc->dev, "failed to start USB device clock\n");
		return retval;
	}
	/* Setup deferred workqueue data */
	udc->poweron = udc->pullup = 0;
	INIT_WORK(&udc->pullup_job, pullup_work);
#ifdef CONFIG_PM
	INIT_WORK(&udc->power_job, power_work);
#endif
	/* All clocks are now on */
	udc->clocked = 1;
	isp1301_udc_configure(udc);
	/* Allocate memory for the UDCA */
	udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
					      &dma_handle,
					      (GFP_KERNEL | GFP_DMA));
	if (!udc->udca_v_base) {
		dev_err(udc->dev, "error getting UDCA region\n");
		retval = -ENOMEM;
		goto i2c_fail;
	}
	udc->udca_p_base = dma_handle;
	dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
		UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
	/* Setup the DD DMA memory pool */
	udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
					sizeof(struct lpc32xx_usbd_dd_gad),
					sizeof(u32), 0);
	if (!udc->dd_cache) {
		dev_err(udc->dev, "error getting DD DMA region\n");
		retval = -ENOMEM;
		goto dma_alloc_fail;
	}
	/* Clear USB peripheral and initialize gadget endpoints */
	udc_disable(udc);
	udc_reinit(udc);
	/* Request IRQs - low and high priority USB device IRQs are routed to
	 * the same handler, while the DMA interrupt is routed elsewhere */
	retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_LP],
				  lpc32xx_usb_lp_irq, 0, "udc_lp", udc);
	if (retval < 0) {
		dev_err(udc->dev, "LP request irq %d failed\n",
			udc->udp_irq[IRQ_USB_LP]);
		goto irq_req_fail;
	}
	retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_HP],
				  lpc32xx_usb_hp_irq, 0, "udc_hp", udc);
	if (retval < 0) {
		dev_err(udc->dev, "HP request irq %d failed\n",
			udc->udp_irq[IRQ_USB_HP]);
		goto irq_req_fail;
	}
	retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_DEVDMA],
				  lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
	if (retval < 0) {
		dev_err(udc->dev, "DEV request irq %d failed\n",
			udc->udp_irq[IRQ_USB_DEVDMA]);
		goto irq_req_fail;
	}
	/* The transceiver interrupt is used for VBUS detection and will
	   kick off the VBUS handler function */
	retval = devm_request_threaded_irq(dev, udc->udp_irq[IRQ_USB_ATX], NULL,
					   lpc32xx_usb_vbus_irq, IRQF_ONESHOT,
					   "udc_otg", udc);
	if (retval < 0) {
		dev_err(udc->dev, "VBUS request irq %d failed\n",
			udc->udp_irq[IRQ_USB_ATX]);
		goto irq_req_fail;
	}
	/* Initialize wait queue */
	init_waitqueue_head(&udc->ep_disable_wait_queue);
	atomic_set(&udc->enabled_ep_cnt, 0);
	retval = usb_add_gadget_udc(dev, &udc->gadget);
	if (retval < 0)
		goto add_gadget_fail;
	dev_set_drvdata(dev, udc);
	device_init_wakeup(dev, 1);
	create_debug_file(udc);
	/* Disable clocks for now */
	udc_clk_set(udc, 0);
	dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
	return 0;
/* Error paths unwind in reverse order of acquisition */
add_gadget_fail:
irq_req_fail:
	dma_pool_destroy(udc->dd_cache);
dma_alloc_fail:
	dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
			  udc->udca_v_base, udc->udca_p_base);
i2c_fail:
	clk_disable_unprepare(udc->usb_slv_clk);
	dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
	return retval;
}
/*
 * Remove the UDC: unregister the gadget, quiesce hardware, and release
 * DMA and clock resources acquired at probe time.
 * NOTE(review): the gadget is deleted before the udc->driver -EBUSY
 * check; if that check ever fires, the gadget is already gone — confirm
 * this ordering is intended.
 */
static int lpc32xx_udc_remove(struct platform_device *pdev)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
	usb_del_gadget_udc(&udc->gadget);
	if (udc->driver)
		return -EBUSY;
	udc_clk_set(udc, 1);
	udc_disable(udc);
	pullup(udc, 0);
	device_init_wakeup(&pdev->dev, 0);
	remove_debug_file(udc);
	dma_pool_destroy(udc->dd_cache);
	dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
			  udc->udca_v_base, udc->udca_p_base);
	clk_disable_unprepare(udc->usb_slv_clk);
	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: power down the transceiver and gate all USB clocks, but keep
 * udc->clocked set so resume knows the clocks must be re-enabled.
 */
static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
	if (udc->clocked) {
		/* Power down ISP */
		udc->poweron = 0;
		isp1301_set_powerstate(udc, 0);
		/* Disable clocking */
		udc_clk_set(udc, 0);
		/* Keep clock flag on, so we know to re-enable clocks
		   on resume */
		udc->clocked = 1;
		/* Kill global USB clock */
		clk_disable_unprepare(udc->usb_slv_clk);
	}
	return 0;
}
/* Resume: re-enable clocks and bring the ISP1301 back to normal power */
static int lpc32xx_udc_resume(struct platform_device *pdev)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
	if (udc->clocked) {
		/* Enable global USB clock */
		clk_prepare_enable(udc->usb_slv_clk);
		/* Enable clocking */
		udc_clk_set(udc, 1);
		/* ISP back to normal power mode */
		udc->poweron = 1;
		isp1301_set_powerstate(udc, 1);
	}
	return 0;
}
#else
#define	lpc32xx_udc_suspend	NULL
#define	lpc32xx_udc_resume	NULL
#endif
#ifdef CONFIG_OF
/* Device-tree match table for the LPC3220 UDC */
static const struct of_device_id lpc32xx_udc_of_match[] = {
	{ .compatible = "nxp,lpc3220-udc", },
	{ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
#endif
/* Platform driver; probe is supplied via module_platform_driver_probe() */
static struct platform_driver lpc32xx_udc_driver = {
	.remove		= lpc32xx_udc_remove,
	.shutdown	= lpc32xx_udc_shutdown,
	.suspend	= lpc32xx_udc_suspend,
	.resume		= lpc32xx_udc_resume,
	.driver		= {
		.name	= driver_name,
		.of_match_table = of_match_ptr(lpc32xx_udc_of_match),
	},
};
module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
MODULE_DESCRIPTION("LPC32XX udc driver");
MODULE_AUTHOR("Kevin Wells <[email protected]>");
MODULE_AUTHOR("Roland Stigge <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lpc32xx_udc");
| linux-master | drivers/usb/gadget/udc/lpc32xx_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 UDC (USB gadget)
*
* Copyright (C) 2006-2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <[email protected]>
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "r8a66597-udc.h"
#define DRIVER_VERSION "2011-09-26"
static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
"ep8", "ep9",
};
static void init_controller(struct r8a66597 *r8a66597);
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
gfp_t gfp_flags);
static void transfer_complete(struct r8a66597_ep *ep,
struct r8a66597_request *req, int status);
/*-------------------------------------------------------------------------*/
/* Return the negotiated bus speed bits (RHST field) from DVSTCTR0. */
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}
/*
 * Set the per-pipe interrupt enable bit in @reg (a BRDY/NRDY/BEMP enable
 * register).  The global BEMPE/NRDYE/BRDYE enables in INTENB0 are masked
 * off while the per-pipe bit is changed and then restored, so no
 * interrupt can fire mid-update.
 */
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;
	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}
/* Counterpart of enable_pipe_irq(): clear the per-pipe enable bit in @reg. */
static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;
	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}
/* Enable control/buffer interrupts and assert the D+ pull-up (DPRPU)
 * so the host sees us attach. */
static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}
/*
 * Detach from the host: mask interrupts, drop the D+ pull-up, notify the
 * gadget driver (the controller lock is released around the callback, as
 * the sparse annotations document), then fully reset and re-initialize
 * the controller ready for the next connection.
 */
static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);
	disable_controller(r8a66597);
	init_controller(r8a66597);
	/* re-arm VBUS change interrupt to detect the next attach */
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}
/*
 * Read the PID response field (NAK/BUF/STALL) of the pipe control
 * register: DCPCTR for pipe 0, PIPEnCTR otherwise.  Returns 0 and logs
 * an error for an out-of-range pipe number.
 */
static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;
	if (pipenum == 0) {
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
	return pid;
}
/* Write the PID response field of the pipe control register. */
static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;
	if (pipenum == 0) {
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
}
/* PID_BUF: pipe responds normally — transfers proceed. */
static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}
/* PID_NAK: pipe NAKs every token — transfers paused. */
static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}
/* PID_STALL: pipe returns STALL handshakes. */
static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}
/* Read the whole pipe control register (DCPCTR / PIPEnCTR). */
static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;
	if (pipenum == 0) {
		ret = r8a66597_read(r8a66597, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
	return ret;
}
/* Stop the pipe, then clear its data toggle (sequence bit) to DATA0. */
static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;
	pipe_stop(r8a66597, pipenum);
	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
}
/* Stop the pipe, then set its data toggle to DATA1. */
static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;
	pipe_stop(r8a66597, pipenum);
	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQSET, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQSET, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}
}
/* Read the current data toggle (SQMON bit); 0 for an invalid pipe. */
static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;
	if (pipenum == 0) {
		return r8a66597_read(r8a66597, DCPCTR) & SQMON;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		return r8a66597_read(r8a66597, offset) & SQMON;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}
	return 0;
}
/* Snapshot the data toggle so it can be restored after reconfiguration. */
static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
{
	return control_reg_sqmon(r8a66597, pipenum);
}
/* Restore a data toggle previously captured by save_usb_toggle(). */
static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
		u16 toggle)
{
	if (toggle)
		control_reg_sqset(r8a66597, pipenum);
	else
		control_reg_sqclr(r8a66597, pipenum);
}
/*
 * Return the usable FIFO buffer size in bytes for @pipenum.  When the
 * pipe is in continuous mode (CNTMD) the size comes from the buffer
 * allocation (fixed 256 for the control pipe, PIPEBUF units of 64 bytes
 * otherwise); in non-continuous mode it is the max packet size.
 * Note: for pipenum != 0 this writes PIPESEL as a side effect.
 */
static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;
	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			/* PIPEBUF[14:10] encodes (buffer size / 64) - 1 */
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}
	return size;
}
/*
 * FIFO access-width (MBW) bits for this controller: on-chip variants use
 * the 32-bit setting, external controllers the 16-bit one.
 */
static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	return r8a66597->pdata->on_chip ? MBW_32 : MBW_16;
}
/*
 * Select @pipenum (plus the ISEL direction bit for pipe 0) as the current
 * pipe of FIFO port @fifosel, then busy-wait until the hardware reflects
 * the new selection (bounded by a ~1M-iteration timeout).
 */
static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
		u16 isel, u16 fifosel)
{
	u16 tmp, mask, loop;
	int i = 0;
	if (!pipenum) {
		mask = ISEL | CURPIPE;
		loop = isel;
	} else {
		mask = CURPIPE;
		loop = pipenum;
	}
	r8a66597_mdfy(r8a66597, loop, mask, fifosel);
	do {
		tmp = r8a66597_read(r8a66597, fifosel);
		if (i++ > 1000000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"r8a66597: register%x, loop %x "
				"is timeout\n", fifosel, loop);
			break;
		}
		ndelay(1);
	} while ((tmp & mask) != loop);
}
/*
 * Point the endpoint's FIFO port at its pipe and program the bus width.
 * DREQE is dropped before the switch and re-set after it so a DMA request
 * cannot fire mid-change.  NOTE(review): for SUDMAC transfers the MBW
 * bits are cleared rather than set — presumably SUDMAC requires the
 * narrowest FIFO access width; confirm against the SUDMAC documentation.
 */
static void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
	if (ep->use_dma)
		r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
	/* hardware requires a settle delay after changing CURPIPE */
	ndelay(450);
	if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
		r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
	else
		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	if (ep->use_dma)
		r8a66597_bset(r8a66597, DREQE, ep->fifosel);
}
/*
 * Program PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI for the pipe described by
 * @info: direction, transfer type, endpoint number, FIFO buffer block
 * allocation, max packet size and interval.
 *
 * Returns 0 on success, -EINVAL for pipe 0 (the control pipe is fixed),
 * -ENOMEM when the requested buffer blocks exceed the FIFO memory.
 */
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;
	if (info->pipe == 0)
		return -EINVAL;
	r8a66597_write(r8a66597, info->pipe, PIPESEL);
	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		/* interrupt pipes use small fixed single buffers */
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		/* double buffering; OUT pipes NAK after a short packet */
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}
	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}
	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	/* hardware interval field is bInterval - 1 */
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);
	return 0;
}
/*
 * Return the pipe's slot to the per-type allocation counters kept in
 * struct r8a66597 (the inverse of the accounting in alloc_pipe_config()).
 * An isochronous-range pipe that was used as bulk decrements both.
 */
static void pipe_buffer_release(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;
	if (is_bulk_pipe(info->pipe)) {
		r8a66597->bulk--;
	} else if (is_interrupt_pipe(info->pipe)) {
		r8a66597->interrupt--;
	} else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"ep_release: unexpect pipenum (%d)\n", info->pipe);
	}
}
/*
 * Reset an endpoint's pipe: deselect it from the FIFO port, auto-clear
 * the buffer (ACLRM pulse) and zero the data toggle.  For DMA endpoints
 * the pipe is re-selected and the FIFO bus width re-programmed.
 */
static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
		ndelay(450);
		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}
/*
 * Bind endpoint @ep to hardware pipe @pipenum: record the register
 * offsets it will use (CFIFO port by default; transaction-counter
 * registers only exist for bulk/isoc pipes) and index it in the
 * pipenum->ep and epaddr->ep lookup tables.
 * NOTE(review): the @dma argument is currently unused — DMA is set up
 * later per-request in sudmac_alloc_channel().
 */
static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->pipectr = get_pipectr_addr(pipenum);
	if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
		ep->pipetre = get_pipetre_addr(pipenum);
		ep->pipetrn = get_pipetrn_addr(pipenum);
	} else {
		ep->pipetre = 0;
		ep->pipetrn = 0;
	}
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[usb_endpoint_num(desc)]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}
/* Unbind an endpoint from its pipe and release its DMA bookkeeping. */
static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;
	if (pipenum == 0)
		return;
	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}
/*
 * Allocate and configure a hardware pipe for endpoint @ep based on its
 * descriptor.  Pipes are drawn from fixed per-type pools (bulk, interrupt,
 * isochronous); when the bulk pool is exhausted a bulk endpoint may be
 * placed on a spare isochronous pipe.  On success the pool counter is
 * incremented, the endpoint is bound via r8a66597_ep_setting() and the
 * pipe is reset via pipe_initialize().
 *
 * Returns 0 on success, -ENODEV when the pool is exhausted, -EINVAL for
 * an unsupported transfer type, or pipe_buffer_setting()'s error.
 */
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;	/* pool counter to bump on success */
	int ret;
	ep->ep.desc = desc;
	if (ep->pipenum)	/* already allocated pipe */
		return 0;
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				dev_err(r8a66597_to_dev(r8a66597),
					"bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				/* fall back to a spare isochronous pipe */
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			dev_err(r8a66597_to_dev(r8a66597),
				"interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			dev_err(r8a66597_to_dev(r8a66597),
				"isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;
	info.epnum = usb_endpoint_num(desc);
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;
	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		dev_err(r8a66597_to_dev(r8a66597),
			"pipe_buffer_setting fail\n");
		return ret;
	}
	(*counter)++;
	/* a bulk endpoint on an isoc pipe consumes a bulk slot as well */
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;
	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);
	return 0;
}
/* Release the endpoint's pipe back to its pool and unbind the endpoint. */
static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Enable both buffer-ready and buffer-not-ready interrupts for a pipe. */
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}
/* Disable both buffer-ready and buffer-not-ready interrupts for a pipe. */
static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}
/*
 * Finish the status stage of a control transfer.  When @ccpl is set,
 * transfer_complete() later skips the gadget driver's completion
 * callback for pipe 0 (it short-circuits on ep->internal_ccpl).
 */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}
/*
 * Begin the IN data stage of a control transfer.  The CFIFO is selected
 * for writing (ISEL) and cleared; a zero-length request completes
 * immediately with BVAL set, otherwise the first chunk is pushed via
 * irq_ep0_write() after clearing any stale BEMP0 status.
 */
static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}
/* Deselect @pipenum from FIFO port @fifosel if it is currently selected. */
static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
			    u16 fifosel)
{
	u16 tmp;
	tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
	if (tmp == pipenum)
		r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
}
/*
 * Toggle the pipe's BFRE (BRDY-on-transfer-end) mode in PIPECFG.
 * Changing BFRE requires the pipe stopped and deselected from every
 * FIFO port; the data toggle is saved across the ACLRM buffer reset
 * and restored afterwards.  No-op if the bit already matches @enable.
 */
static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
				int enable)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
	u16 tmp, toggle;
	/* check current BFRE bit */
	r8a66597_write(r8a66597, pipenum, PIPESEL);
	tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
	if ((enable && tmp) || (!enable && !tmp))
		return;
	/* change BFRE bit */
	pipe_stop(r8a66597, pipenum);
	disable_fifosel(r8a66597, pipenum, CFIFOSEL);
	disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
	disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
	toggle = save_usb_toggle(r8a66597, pipenum);
	r8a66597_write(r8a66597, pipenum, PIPESEL);
	if (enable)
		r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
	else
		r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
	/* initialize for internal BFRE flag */
	r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
	r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
	restore_usb_toggle(r8a66597, pipenum, toggle);
}
/*
 * Claim the single SUDMAC channel for @req on @ep, switch the endpoint
 * to the D0 FIFO port and DMA-map the request buffer.
 *
 * Returns 0 on success; -ENODEV without SUDMAC, -EIO for non-bulk pipes,
 * -EBUSY when the channel is already in use, or the mapping error.
 * OUT transfers additionally enable BFRE so transfer end is signalled.
 */
static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597_dma *dma;
	if (!r8a66597_is_sudmac(r8a66597))
		return -ENODEV;
	/* Check transfer type */
	if (!is_bulk_pipe(ep->pipenum))
		return -EIO;
	if (r8a66597->dma.used)
		return -EBUSY;
	/* set SUDMAC parameters */
	dma = &r8a66597->dma;
	dma->used = 1;
	if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) {
		dma->dir = 1;
	} else {
		dma->dir = 0;
		change_bfre_mode(r8a66597, ep->pipenum, 1);
	}
	/* set r8a66597_ep parameters */
	ep->use_dma = 1;
	ep->dma = dma;
	ep->fifoaddr = D0FIFO;
	ep->fifosel = D0FIFOSEL;
	ep->fifoctr = D0FIFOCTR;
	/* dma mapping */
	return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
}
/*
 * Undo sudmac_alloc_channel(): unmap the request, detach the pipe from
 * the D0 FIFO port and fall back to the shared CFIFO port.
 */
static void sudmac_free_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	if (!r8a66597_is_sudmac(r8a66597))
		return;
	usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
	r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
	r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
	ep->dma->used = 0;
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
}
/*
 * Program SUDMAC channel 0 with the request's DMA address/length and
 * start it with the transfer-end interrupt enabled.  Zero-length
 * requests never reach here (completed earlier), hence the BUG_ON.
 */
static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
			 struct r8a66597_request *req)
{
	BUG_ON(req->req.length == 0);
	r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
	r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
	r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
}
/*
 * Start an IN (device-to-host) transfer on a non-control pipe.  A
 * zero-length request completes immediately.  Otherwise SUDMAC is tried
 * first; on failure the transfer proceeds in PIO mode, writing the first
 * chunk now if the FIFO is ready (FRDY) or arming the pipe interrupts to
 * do so later.
 */
static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;
	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);
	if (req->req.length == 0) {
		transfer_complete(ep, req, 0);
	} else {
		/* clear stale buffer-ready status for this pipe */
		r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_empty(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			tmp = r8a66597_read(r8a66597, ep->fifoctr);
			if (unlikely((tmp & FRDY) == 0))
				pipe_irq_enable(r8a66597, ep->pipenum);
			else
				irq_packet_write(ep, req);
		} else {
			/* DMA mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_nrdy(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			enable_irq_nrdy(r8a66597, ep->pipenum);
			sudmac_start(r8a66597, ep, req);
		}
	}
}
/*
 * Start an OUT (host-to-device) transfer.  Pipe 0 just enables reception
 * on the control FIFO.  Other pipes program the transaction counter
 * (when available) so the hardware NAKs after the expected number of
 * packets, then receive either via SUDMAC or PIO with pipe interrupts.
 */
static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;
	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		pipe_stop(r8a66597, pipenum);
		if (ep->pipetre) {
			enable_irq_nrdy(r8a66597, pipenum);
			r8a66597_write(r8a66597, TRCLR, ep->pipetre);
			r8a66597_write(r8a66597,
				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
				ep->pipetrn);
			r8a66597_bset(r8a66597, TRENB, ep->pipetre);
		}
		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			change_bfre_mode(r8a66597, ep->pipenum, 0);
			pipe_start(r8a66597, pipenum);	/* trigger once */
			pipe_irq_enable(r8a66597, pipenum);
		} else {
			pipe_change(r8a66597, pipenum);
			sudmac_start(r8a66597, ep, req);
			pipe_start(r8a66597, pipenum);	/* trigger once */
		}
	}
}
/* Dispatch a queued request in the direction given by the descriptor. */
static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	/* OUT endpoints read from the host, IN endpoints write to it */
	if (!(ep->ep.desc->bEndpointAddress & USB_DIR_IN))
		start_packet_read(ep, req);
	else
		start_packet_write(ep, req);
}
/*
 * Route a queued ep0 request according to the current control transfer
 * stage (CTSQ): reading-data stage -> write to host, writing-data stage
 * -> read from host, no-data stage -> just finish the status stage.
 */
static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;
	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;
	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(ep->r8a66597),
			"start_ep0: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
/*
 * Bring the controller out of reset.  On-chip variants only need the
 * internal clock enabled (SCKE); external controllers additionally
 * program the pin config, crystal selection and PLL, with the delays
 * the part requires between clock-enable steps.  D+ pull-up (DPRPU)
 * stays off — attachment happens in r8a66597_usb_connect().
 */
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
	if (r8a66597->pdata->on_chip) {
		if (r8a66597->pdata->buswait)
			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
					SYSCFG1);
		else
			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);
		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);
		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);
		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
		mdelay(3);
		r8a66597_bset(r8a66597, PLLC, SYSCFG0);
		mdelay(1);
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}
/*
 * Shut the controller down.  On-chip variants mask and clear every
 * interrupt source before stopping the clocks; external controllers
 * wind down the clock chain (SCKE, PLL, crystal) in reverse order of
 * init_controller().  NOTE(review): the consecutive udelay(1) calls
 * presumably accumulate a required PLL-stop settle time — confirm
 * against the datasheet before "simplifying" them.
 */
static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);
		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);
		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
	} else {
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}
/* Ensure the external crystal clock (XCKE) is running; no-op on-chip. */
static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;
	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}
/* Return the head request of the endpoint's queue (caller must ensure
 * the queue is non-empty). */
static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}
/*-------------------------------------------------------------------------*/
/*
 * Complete @req on @ep with @status: dequeue it, release any SUDMAC
 * channel and call the gadget completion callback with the controller
 * lock dropped (per the sparse annotations).  For ep0, internal status
 * stages flagged via control_end() are swallowed without a callback.
 * If more requests are queued, the next one is started afterwards.
 */
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;
	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}
	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;
	if (!list_empty(&ep->queue))
		restart = 1;
	if (ep->use_dma)
		sudmac_free_channel(ep->r8a66597, ep, req);
	spin_unlock(&ep->r8a66597->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);
	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->ep.desc)
			start_packet(ep, req);
	}
}
/*
 * Push the next chunk of an ep0 IN data stage into the control FIFO.
 * Waits (bounded) for FRDY, writes up to one buffer's worth, flags a
 * short/zero packet with BVAL, and arms the buffer-empty interrupt only
 * when more data (or a requested ZLP) remains.
 */
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);
	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);
	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);
	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep, buf, size);
		/* short or zero packet: tell hardware the buffer is valid */
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}
	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}
/*
 * Push the next chunk of a non-control IN transfer into the FIFO.
 * If the FIFO is unexpectedly not ready the pipe is stopped and an
 * error logged.  When this was the final chunk, the buffer-empty
 * interrupt is armed so completion fires once the host drains it.
 */
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597),
			"write fifo not ready. pipnum=%d\n", pipenum);
		return;
	}
	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);
	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep, buf, size);
		/* mark the buffer valid for short/partial fills */
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}
	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}
/*
 * Drain one received buffer of an OUT transfer from the FIFO into the
 * request.  A not-ready FIFO marks the request -EPIPE and stops the
 * pipe.  The transfer finishes on a short/zero packet or when the
 * requested length is reached; non-control pipes then complete the
 * request (ep0 completion is driven by the control-stage machinery).
 */
static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;
	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
		return;
	}
	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);
	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}
	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
	}
	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}
/*
 * BRDY (buffer ready) interrupt dispatcher: pipe 0 is always an OUT
 * read; other pipes are scanned and serviced in their transfer
 * direction.  Each handled status bit is acked by writing its
 * complement to BRDYSTS.
 */
static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}
/*
 * BEMP (buffer empty) interrupt dispatcher: pipe 0 continues the ep0 IN
 * data stage; other pipes complete their current request once the
 * hardware reports no data left in flight (INBUFM clear).
 */
static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}
/*
 * Handle a GET_STATUS control request: build the two-byte status word
 * for the device/interface/endpoint recipient (endpoint halt is derived
 * from the pipe's PID being STALL) and queue it on ep0 for the IN data
 * stage.  Unknown recipients stall pipe 0.
 */
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = r8a66597->device_status;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}
	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
	spin_lock(&r8a66597->lock);
}
/*
 * Handle a CLEAR_FEATURE control request.  Device/interface recipients
 * just complete the status stage.  For an endpoint, the halt is cleared
 * (unless the endpoint is wedged), the data toggle reset, and any
 * pending request on the endpoint is restarted.  Unknown recipients
 * stall pipe 0.
 */
static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			/* drop the lock: usb_ep_clear_halt re-acquires it */
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}
		control_end(r8a66597, 1);
		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
	}
	break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}
/*
 * Handle a SET_FEATURE control request.
 *
 * Device recipient: only TEST_MODE is acted on — the status stage is
 * completed first, we poll (bounded) until the control pipe returns to
 * idle, then write the test selector to TESTMODE.  Interface recipient:
 * just complete the status stage.  Endpoint recipient: ENDPOINT_HALT —
 * stall the endpoint's pipe and complete the status stage.  Anything
 * else stalls pipe 0.
 */
static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 tmp;
	int timeout = 3000;
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_DEVICE_TEST_MODE:
			control_end(r8a66597, 1);
			/* Wait for the completion of status stage */
			do {
				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
				udelay(1);
			} while (tmp != CS_IDST && timeout-- > 0);
			if (tmp == CS_IDST)
				/*
				 * The test selector is the high byte of
				 * wIndex.  Convert the little-endian value
				 * to CPU order *before* shifting; shifting
				 * the raw __le16 extracts the wrong byte on
				 * big-endian hosts.
				 */
				r8a66597_bset(r8a66597,
					le16_to_cpu(ctrl->wIndex) >> 8,
					TESTMODE);
			break;
		default:
			pipe_stall(r8a66597, 0);
			break;
		}
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);
		control_end(r8a66597, 1);
	}
	break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}
/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
u16 *p = (u16 *)ctrl;
unsigned long offset = USBREQ;
int i, ret = 0;
/* read fifo */
r8a66597_write(r8a66597, ~VALID, INTSTS0);
for (i = 0; i < 4; i++)
p[i] = r8a66597_read(r8a66597, offset + i*2);
/* check request */
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
get_status(r8a66597, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
clear_feature(r8a66597, ctrl);
break;
case USB_REQ_SET_FEATURE:
set_feature(r8a66597, ctrl);
break;
default:
ret = 1;
break;
}
} else
ret = 1;
return ret;
}
/* Translate the RHST speed bits into the gadget framework's speed enum. */
static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);
	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
	}
}
/*
 * DVST (device state transition) interrupt: on bus reset notify the
 * gadget core and re-read the speed; also refresh the speed when
 * leaving the configured state or when it is still unknown while
 * addressed/configured.
 */
static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;
	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);
	if (dvsq == DS_DFLT) {
		/* bus reset */
		spin_unlock(&r8a66597->lock);
		usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);
	r8a66597->old_dvsq = dvsq;
}
/*
 * CTRT (control transfer stage transition) interrupt.  Idle: complete
 * the pending ep0 request.  Data/no-data setup stages: read the setup
 * packet and, if setup_packet() didn't handle it, forward it to the
 * gadget driver's setup() with the lock dropped (stalling ep0 on
 * failure).  Status stages: finish via control_end().
 */
static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;
	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);
	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
	}
	break;
	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597),
			"ctrl_stage: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
/*
 * Finish a SUDMAC transfer on @ep: wait for the FIFO to become ready,
 * account the transferred byte count and complete the request when the
 * transfer is done (exact length reached or a short packet seen).
 */
static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
{
	u16 pipenum;
	struct r8a66597_request *req;
	u32 len;
	int i = 0;
	pipenum = ep->pipenum;
	pipe_change(r8a66597, pipenum);
	/* Busy-wait for FRDY; the DMA has finished but the FIFO may lag. */
	while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
		udelay(1);
		if (unlikely(i++ >= 10000)) { /* timeout = 10 msec */
			dev_err(r8a66597_to_dev(r8a66597),
				"%s: FRDY was not set (%d)\n",
				__func__, pipenum);
			return;
		}
	}
	r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
	req = get_request_from_ep(ep);
	/* prepare parameters */
	/* CH0CBC holds the byte count the DMA channel actually moved. */
	len = r8a66597_sudmac_read(r8a66597, CH0CBC);
	req->req.actual += len;
	/* clear */
	r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (len % ep->ep.maxpacket)) {
		if (ep->dma->dir) {
			/* IN: wait for the FIFO to drain before completing. */
			disable_irq_ready(r8a66597, pipenum);
			enable_irq_empty(r8a66597, pipenum);
		} else {
			/* Clear the interrupt flag for next transfer */
			r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
			transfer_complete(ep, req, 0);
		}
	}
}
/*
 * Service SUDMAC channel-0 completion: ack the end-of-transfer flag and
 * finish the transfer on the endpoint currently selected in D0FIFOSEL.
 */
static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
{
	u32 irqsts;
	struct r8a66597_ep *ep;
	u16 pipenum;
	irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
	if (irqsts & CH0ENDS) {
		r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
		pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
		ep = r8a66597->pipenum2ep[pipenum];
		sudmac_finish(r8a66597, ep);
	}
}
/*
 * Top-level interrupt handler: dispatches VBUS, device-state, pipe
 * ready/empty and control-stage events. The CFIFO pipe selection is
 * saved and restored so interrupt handling does not disturb an
 * in-progress FIFO access.
 */
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 savepipe;
	u16 mask0;
	spin_lock(&r8a66597->lock);
	if (r8a66597_is_sudmac(r8a66597))
		r8a66597_sudmac_irq(r8a66597);
	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);
	savepipe = r8a66597_read(r8a66597, CFIFOSEL);
	mask0 = intsts0 & intenb0;
	if (mask0) {
		u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
		u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
		u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
		u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
		if (mask0 & VBINT) {
			/* Ack VBINT only (write 1s to all other bits). */
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);
			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);
		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);
		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}
	/* Restore the pipe that was selected before the interrupt. */
	r8a66597_write(r8a66597, savepipe, CFIFOSEL);
	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}
/*
 * VBUS debounce timer: re-sample VBSTS every 50 ms until it is stable
 * for R8A66597_MAX_SAMPLING consecutive reads, then report connect or
 * disconnect accordingly.
 */
static void r8a66597_timer(struct timer_list *t)
{
	struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
	unsigned long flags;
	u16 tmp;
	spin_lock_irqsave(&r8a66597->lock, flags);
	/*
	 * NOTE(review): this SYSCFG0 read result is immediately
	 * overwritten below -- possibly a deliberate dummy read of the
	 * hardware, possibly dead code. Confirm before removing.
	 */
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				/* Stable: act on the debounced VBUS level. */
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
			}
		} else {
			/* Level changed: restart the sampling window. */
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.enable: allocate and configure a hardware pipe for @desc. */
static int r8a66597_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);

	return alloc_pipe_config(ep, desc);
}
/*
 * usb_ep_ops.disable: abort every queued request with -ECONNRESET,
 * mask the pipe's interrupts and release its hardware configuration.
 */
static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);
	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		/* transfer_complete() expects the driver lock to be held. */
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}
	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}
/* usb_ep_ops.alloc_request: allocate a driver request wrapper. */
static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops.free_request: release a request allocated above. */
static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	kfree(container_of(_req, struct r8a66597_request, req));
}
/*
 * usb_ep_ops.queue: enqueue @_req on @_ep and kick the hardware if the
 * endpoint was idle. Returns 0 or -ESHUTDOWN when the bus speed is
 * still unknown (not enumerated).
 */
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	/* Remember whether the queue was empty before we add ours. */
	if (list_empty(&ep->queue))
		request = 1;
	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;
	if (ep->ep.desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		/* Only start now if nothing else is in flight on the pipe. */
		if (request && !ep->busy)
			start_packet(ep, req);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return 0;
}
/*
 * usb_ep_ops.dequeue: complete @_req with -ECONNRESET.
 *
 * NOTE(review): the code only checks that the endpoint queue is
 * non-empty; it does not verify that @_req is actually on it -- confirm
 * callers never dequeue a request queued on a different endpoint.
 */
static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return 0;
}
/*
 * usb_ep_ops.set_halt: stall (@value != 0) or un-stall the endpoint.
 * Refuses with -EAGAIN while transfers are still queued, per the
 * gadget API contract. Clearing the halt also clears any wedge.
 */
static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else if (value) {
		/* busy blocks start_packet() until the halt is cleared. */
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}
/*
 * usb_ep_ops.set_wedge: halt the endpoint and mark it wedged so that a
 * host CLEAR_FEATURE(HALT) does not un-stall it.
 */
static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	if (!ep || !ep->ep.desc)
		return -EINVAL;
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	/* Delegate the actual stall to the regular halt path. */
	return usb_ep_set_halt(_ep);
}
/*
 * usb_ep_ops.fifo_flush: discard buffered FIFO data. Only acts when the
 * endpoint is idle (no queued requests, not busy) to avoid corrupting an
 * in-flight transfer.
 */
static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
		/* Pulse ACLRM to auto-clear the pipe buffer memory. */
		r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
		r8a66597_write(ep->r8a66597, 0, ep->pipectr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}
/* Endpoint operations exposed to the gadget framework. */
static const struct usb_ep_ops r8a66597_ep_ops = {
	.enable = r8a66597_enable,
	.disable = r8a66597_disable,
	.alloc_request = r8a66597_alloc_request,
	.free_request = r8a66597_free_request,
	.queue = r8a66597_queue,
	.dequeue = r8a66597_dequeue,
	.set_halt = r8a66597_set_halt,
	.set_wedge = r8a66597_set_wedge,
	.fifo_flush = r8a66597_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops.udc_start: bind @driver, bring up the controller and
 * enable VBUS interrupts. If VBUS is already present, start the debounce
 * timer immediately so an attached host is detected.
 */
static int r8a66597_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	if (!driver
			|| driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	/* hook up the driver */
	r8a66597->driver = driver;
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					 INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}
	return 0;
}
/*
 * usb_gadget_ops.udc_stop: mask VBUS interrupts, shut the controller
 * down and unbind the gadget driver.
 */
static int r8a66597_stop(struct usb_gadget *gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;
	spin_lock_irqsave(&r8a66597->lock, flags);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);
	r8a66597->driver = NULL;
	return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_gadget_ops.get_frame: return the 11-bit USB frame number. */
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	return r8a66597_read(gadget_to_r8a66597(_gadget), FRMNUM) & 0x03FF;
}
/*
 * usb_gadget_ops.pullup: connect to / disconnect from the bus by
 * toggling the D+ pull-up (DPRPU).
 */
static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;
	spin_lock_irqsave(&r8a66597->lock, flags);
	if (is_on)
		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
	else
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	spin_unlock_irqrestore(&r8a66597->lock, flags);
	return 0;
}
/*
 * usb_gadget_ops.set_selfpowered: mirror the self-powered flag into the
 * gadget core and into the GET_STATUS device status word.
 */
static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned int bit = 1 << USB_DEVICE_SELF_POWERED;

	gadget->is_selfpowered = (is_self != 0);
	if (is_self)
		r8a66597->device_status |= bit;
	else
		r8a66597->device_status &= ~bit;

	return 0;
}
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame = r8a66597_get_frame,
	.udc_start = r8a66597_start,
	.udc_stop = r8a66597_stop,
	.pullup = r8a66597_pullup,
	.set_selfpowered = r8a66597_set_selfpowered,
};
/*
 * Platform remove: unregister the gadget, stop the VBUS timer, release
 * the internal ep0 request and the on-chip clock. Memory and IRQ are
 * devm-managed and freed automatically.
 */
static void r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
	if (r8a66597->pdata->on_chip) {
		clk_disable_unprepare(r8a66597->clk);
	}
}
/* No-op completion for the driver's internal ep0 request. */
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
struct platform_device *pdev)
{
r8a66597->sudmac_reg =
devm_platform_ioremap_resource_byname(pdev, "sudmac");
return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
}
/*
 * Platform probe: map registers, fetch the IRQ resource, enable the
 * on-chip clock if present, initialise all endpoint structures and
 * register the gadget with the UDC core.
 *
 * Fix vs. previous version: on usb_add_gadget_udc() failure the old
 * err_add_udc label freed ep0_req and then fell through into clean_up2,
 * which freed it a second time because the pointer was never cleared
 * (double free). All failure paths now share the single guarded free in
 * clean_up2.
 *
 * Returns 0 on success or a negative errno.
 */
static int r8a66597_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char clk_name[8];
	struct resource *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;
	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);
	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires)
		return -EINVAL;
	irq = ires->start;
	/* Board code encodes the required IRQ trigger in the resource flags. */
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
	if (irq < 0) {
		dev_err(dev, "platform_get_irq error.\n");
		return -ENODEV;
	}
	/* initialize ucd */
	r8a66597 = devm_kzalloc(dev, sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL)
		return -ENOMEM;
	spin_lock_init(&r8a66597->lock);
	platform_set_drvdata(pdev, r8a66597);
	r8a66597->pdata = dev_get_platdata(dev);
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	r8a66597->gadget.max_speed = USB_SPEED_HIGH;
	r8a66597->gadget.name = udc_name;
	timer_setup(&r8a66597->timer, r8a66597_timer, 0);
	r8a66597->reg = reg;
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = devm_clk_get(dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(dev, "cannot get clock \"%s\"\n", clk_name);
			return PTR_ERR(r8a66597->clk);
		}
		clk_prepare_enable(r8a66597->clk);
	}
	if (r8a66597->pdata->sudmac) {
		ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
		if (ret < 0)
			goto clean_up2;
	}
	disable_controller(r8a66597); /* make sure controller is disabled */
	ret = devm_request_irq(dev, irq, r8a66597_irq, IRQF_SHARED,
			       udc_name, r8a66597);
	if (ret < 0) {
		dev_err(dev, "request_irq error (%d)\n", ret);
		goto clean_up2;
	}
	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];
		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep, 512);
		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	/* ep0 is a 64-byte control endpoint served by the CFIFO. */
	usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL) {
		ret = -ENOMEM;
		goto clean_up2;
	}
	r8a66597->ep0_req->complete = nop_completion;
	ret = usb_add_gadget_udc(dev, &r8a66597->gadget);
	if (ret)
		goto clean_up2;		/* guarded free below releases ep0_req */
	dev_info(dev, "version %s\n", DRIVER_VERSION);
	return 0;
clean_up2:
	if (r8a66597->pdata->on_chip)
		clk_disable_unprepare(r8a66597->clk);
	/* NULL before ep0_req is allocated; freed exactly once here. */
	if (r8a66597->ep0_req)
		r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
	return ret;
}
/*-------------------------------------------------------------------------*/
/*
 * Platform driver; .probe is supplied by module_platform_driver_probe()
 * below, which is why it is absent here.
 */
static struct platform_driver r8a66597_driver = {
	.remove_new = r8a66597_remove,
	.driver		= {
		.name =	udc_name,
	},
};
/* Register at module init with a one-shot (non-hotpluggable) probe. */
module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:r8a66597_udc");
| linux-master | drivers/usb/gadget/udc/r8a66597-udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
*/
#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ctype.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include "atmel_usba_udc.h"
#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
| IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
/*
 * debugfs "queue" open: take a consistent snapshot of the endpoint's
 * request queue (copies of each request on a private list) so reads can
 * proceed without holding the UDC lock.
 */
static int queue_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_ep *ep = inode->i_private;
	struct usba_request *req, *req_copy;
	struct list_head *queue_data;
	queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
	if (!queue_data)
		return -ENOMEM;
	INIT_LIST_HEAD(queue_data);
	spin_lock_irq(&ep->udc->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		/* GFP_ATOMIC: we are inside spin_lock_irq. */
		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
		if (!req_copy)
			goto fail;
		list_add_tail(&req_copy->queue, queue_data);
	}
	spin_unlock_irq(&ep->udc->lock);
	file->private_data = queue_data;
	return 0;
fail:
	spin_unlock_irq(&ep->udc->lock);
	/* Unwind: free the copies made so far, then the list head. */
	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(queue_data);
	return -ENOMEM;
}
/*
* bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
*
* b: buffer address
* l: buffer length
* I/i: interrupt/no interrupt
* Z/z: zero/no zero
* S/s: short ok/short not ok
* s: status
* n: nr_packets
* F/f: submitted/not submitted to FIFO
* D/d: using/not using DMA
* L/l: last transaction/not last transaction
*/
/*
 * debugfs "queue" read: format and emit one snapshot entry per line
 * (see the legend above), consuming entries as they are copied out.
 * Entries that do not fit in the remaining user buffer stay queued for
 * the next read.
 */
static ssize_t queue_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct list_head *queue = file->private_data;
	struct usba_request *req, *tmp_req;
	size_t len, remaining, actual = 0;
	char tmpbuf[38];
	if (!access_ok(buf, nbytes))
		return -EFAULT;
	/* Serialise concurrent reads against the shared snapshot list. */
	inode_lock(file_inode(file));
	list_for_each_entry_safe(req, tmp_req, queue, queue) {
		len = snprintf(tmpbuf, sizeof(tmpbuf),
				"%8p %08x %c%c%c %5d %c%c%c\n",
				req->req.buf, req->req.length,
				req->req.no_interrupt ? 'i' : 'I',
				req->req.zero ? 'Z' : 'z',
				req->req.short_not_ok ? 's' : 'S',
				req->req.status,
				req->submitted ? 'F' : 'f',
				req->using_dma ? 'D' : 'd',
				req->last_transaction ? 'L' : 'l');
		/* snprintf returns the would-be length; clamp to buffer. */
		len = min(len, sizeof(tmpbuf));
		if (len > nbytes)
			break;
		/* Entry fits: consume it from the snapshot. */
		list_del(&req->queue);
		kfree(req);
		remaining = __copy_to_user(buf, tmpbuf, len);
		actual += len - remaining;
		if (remaining)
			break;
		nbytes -= len;
		buf += len;
	}
	inode_unlock(file_inode(file));
	return actual;
}
static int queue_dbg_release(struct inode *inode, struct file *file)
{
struct list_head *queue_data = file->private_data;
struct usba_request *req, *tmp_req;
list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
list_del(&req->queue);
kfree(req);
}
kfree(queue_data);
return 0;
}
/*
 * debugfs "regs" open: snapshot the whole register window (the file size
 * set at creation time) into a kernel buffer under the UDC lock, so the
 * user sees a self-consistent register dump.
 */
static int regs_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_udc *udc;
	unsigned int i;
	u32 *data;
	int ret = -ENOMEM;
	inode_lock(inode);
	udc = inode->i_private;
	data = kmalloc(inode->i_size, GFP_KERNEL);
	if (!data)
		goto out;
	spin_lock_irq(&udc->lock);
	/* One 32-bit register per word; i_size is the window size in bytes. */
	for (i = 0; i < inode->i_size / 4; i++)
		data[i] = readl_relaxed(udc->regs + i * 4);
	spin_unlock_irq(&udc->lock);
	file->private_data = data;
	ret = 0;
out:
	inode_unlock(inode);
	return ret;
}
/* debugfs "regs" read: serve the snapshot taken at open time. */
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	int ret;
	inode_lock(inode);
	ret = simple_read_from_buffer(buf, nbytes, ppos,
			file->private_data,
			file_inode(file)->i_size);
	inode_unlock(inode);
	return ret;
}
/* debugfs "regs" release: free the register snapshot buffer. */
static int regs_dbg_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
/* File operations for the per-endpoint "queue" debugfs file. */
static const struct file_operations queue_dbg_fops = {
	.owner = THIS_MODULE,
	.open = queue_dbg_open,
	.llseek = no_llseek,
	.read = queue_dbg_read,
	.release = queue_dbg_release,
};
/* File operations for the controller-wide "regs" debugfs file. */
static const struct file_operations regs_dbg_fops = {
	.owner = THIS_MODULE,
	.open = regs_dbg_open,
	.llseek = generic_file_llseek,
	.read = regs_dbg_read,
	.release = regs_dbg_release,
};
/*
 * Create the per-endpoint debugfs directory with its "queue" file, plus
 * "dma_status" for DMA-capable endpoints and "state" for ep0.
 */
static void usba_ep_init_debugfs(struct usba_udc *udc,
		struct usba_ep *ep)
{
	struct dentry *ep_root;
	ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
	ep->debugfs_dir = ep_root;
	debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops);
	if (ep->can_dma)
		debugfs_create_u32("dma_status", 0400, ep_root,
				   &ep->last_dma_status);
	if (ep_is_control(ep))
		debugfs_create_u32("state", 0400, ep_root, &ep->state);
}
/* Remove an endpoint's debugfs directory and everything under it. */
static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
	debugfs_remove_recursive(ep->debugfs_dir);
}
/*
 * Create the controller's debugfs root, the "regs" dump file (sized to
 * the control register window) and the ep0 directory.
 */
static void usba_init_debugfs(struct usba_udc *udc)
{
	struct dentry *root;
	struct resource *regs_resource;
	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	udc->debugfs_root = root;
	regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
			CTRL_IOMEM_ID);
	if (regs_resource) {
		debugfs_create_file_size("regs", 0400, root, udc,
					 &regs_dbg_fops,
					 resource_size(regs_resource));
	}
	usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
}
/* Tear down ep0's debugfs entries and the controller's debugfs root. */
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
	usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
	debugfs_remove_recursive(udc->debugfs_root);
}
#else
/* No-op stubs when CONFIG_USB_GADGET_DEBUG_FS is disabled. */
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
					 struct usba_ep *ep)
{
}
static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
}
static inline void usba_init_debugfs(struct usba_udc *udc)
{
}
static inline void usba_cleanup_debugfs(struct usba_udc *udc)
{
}
#endif
/* FIFO layout selector; 0 = autoconfigure. Not writable via sysfs (perm 0). */
static ushort fifo_mode;
module_param(fifo_mode, ushort, 0x0);
MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode");
/* mode 0 - uses autoconfig */
/* mode 1 - fits in 8KB, generic max fifo configuration */
/* Each entry: hardware endpoint number, FIFO bytes, bank (ping-pong) count. */
static struct usba_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 0, .fifo_size = 64,	.nr_banks = 1, },
{ .hw_ep_num = 1, .fifo_size = 1024,	.nr_banks = 2, },
{ .hw_ep_num = 2, .fifo_size = 1024,	.nr_banks = 1, },
{ .hw_ep_num = 3, .fifo_size = 1024,	.nr_banks = 1, },
{ .hw_ep_num = 4, .fifo_size = 1024,	.nr_banks = 1, },
{ .hw_ep_num = 5, .fifo_size = 1024,	.nr_banks = 1, },
{ .hw_ep_num = 6, .fifo_size = 1024,	.nr_banks = 1, },
};
/* mode 2 - fits in 8KB, performance max fifo configuration */
static struct usba_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 0, .fifo_size = 64,	.nr_banks = 1, },
{ .hw_ep_num = 1, .fifo_size = 1024,	.nr_banks = 3, },
{ .hw_ep_num = 2, .fifo_size = 1024,	.nr_banks = 2, },
{ .hw_ep_num = 3, .fifo_size = 1024,	.nr_banks = 2, },
};
/* mode 3 - fits in 8KB, mixed fifo configuration */
static struct usba_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 0, .fifo_size = 64,	.nr_banks = 1, },
{ .hw_ep_num = 1, .fifo_size = 1024,	.nr_banks = 2, },
{ .hw_ep_num = 2, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 3, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 4, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 5, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 6, .fifo_size = 512,	.nr_banks = 2, },
};
/* mode 4 - fits in 8KB, custom fifo configuration */
static struct usba_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 0, .fifo_size = 64,	.nr_banks = 1, },
{ .hw_ep_num = 1, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 2, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 3, .fifo_size = 8,	.nr_banks = 2, },
{ .hw_ep_num = 4, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 5, .fifo_size = 512,	.nr_banks = 2, },
{ .hw_ep_num = 6, .fifo_size = 16,	.nr_banks = 2, },
{ .hw_ep_num = 7, .fifo_size = 8,	.nr_banks = 2, },
{ .hw_ep_num = 8, .fifo_size = 8,	.nr_banks = 2, },
};
/* Add additional configurations here */
/*
 * Select the FIFO configuration table matching the fifo_mode module
 * parameter. Unknown modes fall back to 0 (autoconfig). Returns the
 * number of entries in the selected table (0 for autoconfig).
 */
static int usba_config_fifo_table(struct usba_udc *udc)
{
	int n;
	switch (fifo_mode) {
	default:
		/* Invalid value: normalise to autoconfig. */
		fifo_mode = 0;
		fallthrough;
	case 0:
		udc->fifo_cfg = NULL;
		n = 0;
		break;
	case 1:
		udc->fifo_cfg = mode_1_cfg;
		n = ARRAY_SIZE(mode_1_cfg);
		break;
	case 2:
		udc->fifo_cfg = mode_2_cfg;
		n = ARRAY_SIZE(mode_2_cfg);
		break;
	case 3:
		udc->fifo_cfg = mode_3_cfg;
		n = ARRAY_SIZE(mode_3_cfg);
		break;
	case 4:
		udc->fifo_cfg = mode_4_cfg;
		n = ARRAY_SIZE(mode_4_cfg);
		break;
	}
	DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode);
	return n;
}
/* Return the cached copy of INT_ENB (the register itself is write-only
 * from this driver's point of view; all updates go through the cache). */
static inline u32 usba_int_enb_get(struct usba_udc *udc)
{
	return udc->int_enb_cache;
}
/* Enable the interrupt bits in @mask, keeping the INT_ENB cache in sync. */
static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask)
{
	u32 enb = usba_int_enb_get(udc) | mask;

	usba_writel(udc, INT_ENB, enb);
	udc->int_enb_cache = enb;
}
/* Disable the interrupt bits in @mask, keeping the INT_ENB cache in sync. */
static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask)
{
	u32 enb = usba_int_enb_get(udc) & ~mask;

	usba_writel(udc, INT_ENB, enb);
	udc->int_enb_cache = enb;
}
/* Sample VBUS via the detection GPIO; without one, assume always present. */
static int vbus_is_present(struct usba_udc *udc)
{
	return udc->vbus_pin ? gpiod_get_value(udc->vbus_pin) : 1;
}
/* Toggle the USB bias via the chip-specific errata hook, if any. */
static void toggle_bias(struct usba_udc *udc, int is_on)
{
	if (udc->errata && udc->errata->toggle_bias)
		udc->errata->toggle_bias(udc, is_on);
}
/* Emit one pending bias pulse via the errata hook, then clear the flag. */
static void generate_bias_pulse(struct usba_udc *udc)
{
	if (udc->bias_pulse_needed) {
		if (udc->errata && udc->errata->pulse_bias)
			udc->errata->pulse_bias(udc);
		udc->bias_pulse_needed = false;
	}
}
/*
 * Copy the next packet of @req into @ep's FIFO and mark it ready.
 * Sets req->last_transaction when this packet ends the transfer (not a
 * full packet, or a full final packet with no zero-length terminator
 * requested).
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;
	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
		/* A ZLP must still follow this full-sized packet. */
		req->last_transaction = 0;
	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
		ep->ep.name, req, transaction_len,
		req->last_transaction ? ", done" : "");
	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}
/*
 * Start transmitting @req on @ep, either by programming the DMA channel
 * or by pushing the first packet through the FIFO path. Called with the
 * UDC lock held.
 */
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
		ep->ep.name, req, req->req.length);
	req->req.actual = 0;
	req->submitted = 1;
	if (req->using_dma) {
		if (req->req.length == 0) {
			/* Zero-length: just arm the TX-ready interrupt. */
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
			return;
		}
		/* SHORT_PACKET forces a trailing ZLP when requested. */
		if (req->req.zero)
			usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
		else
			usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
		usba_dma_writel(ep, ADDRESS, req->req.dma);
		usba_dma_writel(ep, CONTROL, req->ctrl);
	} else {
		next_fifo_transaction(ep, req);
		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			if (ep_is_control(ep))
				usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		} else {
			if (ep_is_control(ep))
				usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		}
	}
}
/*
 * Kick the next queued request on @ep, or quiesce the endpoint's
 * TX/RX interrupts when the queue has drained.
 */
static void submit_next_request(struct usba_ep *ep)
{
	struct usba_request *req;
	if (list_empty(&ep->queue)) {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
		return;
	}
	req = list_entry(ep->queue.next, struct usba_request, queue);
	if (!req->submitted)
		submit_request(ep, req);
}
/* Start the zero-length IN status stage of a control transfer on @ep. */
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
/*
 * Drain every busy FIFO bank on @ep into the queued requests,
 * completing requests as they fill (or on a short packet) and starting
 * the status stage for control endpoints. Called with the UDC lock
 * held; the lock is dropped around the completion callback.
 *
 * Fix vs. previous version: the bank-status test used (1 << 31), which
 * left-shifts into the sign bit of an int -- undefined behaviour in C.
 * Replaced with the equivalent well-defined 1U << 31.
 */
static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;
	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);
	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			/* Nothing to fill: mask RX until a request arrives. */
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);
		bytecount = USBA_BFEXT(BYTE_COUNT, status);
		/*
		 * NOTE(review): STA bit 31 appears to flag a short/last
		 * packet here -- confirm against the USBA datasheet.
		 */
		if (status & (1U << 31))
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			bytecount = req->req.length - req->req.actual;
		}
		memcpy_fromio(req->req.buf + req->req.actual,
				ep->fifo, bytecount);
		req->req.actual += bytecount;
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			/* Drop the lock for the gadget completion callback. */
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);
		}
		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);
		if (is_complete && ep_is_control(ep)) {
			send_status(udc, ep);
			break;
		}
	}
}
/*
 * Complete @req with @status: record the status (unless one is already
 * set), unmap DMA if used, and invoke the gadget completion callback
 * with the UDC lock dropped. @req must already be off the endpoint
 * queue.
 */
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	struct usba_udc *udc = ep->udc;
	WARN_ON(!list_empty(&req->queue));
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	if (req->using_dma)
		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
	DBG(DBG_GADGET | DBG_REQ,
		"%s: req %p complete: status %d, actual %u\n",
		ep->ep.name, req, req->req.status, req->req.actual);
	/* The callback may queue new requests; it must run unlocked. */
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
struct usba_request *req, *tmp_req;
list_for_each_entry_safe(req, tmp_req, list, queue) {
list_del_init(&req->queue);
request_complete(ep, req, status);
}
}
/*
 * usb_ep_ops.enable: validate @desc against the endpoint's hardware
 * capabilities, program EPT_CFG (direction, transfer type, isoc
 * transaction count) and enable the endpoint and its interrupts.
 */
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags, maxpacket;
	unsigned int nr_trans;
	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
	maxpacket = usb_endpoint_maxp(desc);
	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
			|| ep->index == 0
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| maxpacket == 0
			|| maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}
	ep->is_isoc = 0;
	ep->is_in = 0;
	DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
			ep->ep.name, ep->ept_cfg, maxpacket);
	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ep->ept_cfg |= USBA_EPT_DIR_IN;
	}
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
					ep->ep.name);
			return -EINVAL;
		}
		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = usb_endpoint_maxp_mult(desc);
		if (nr_trans > 3)
			return -EINVAL;
		ep->is_isoc = 1;
		ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
		ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_INT:
		ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		break;
	}
	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->ep.desc = desc;
	ep->ep.maxpacket = maxpacket;
	usba_ep_writel(ep, CFG, ep->ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
	if (ep->can_dma) {
		u32 ctrl;
		/* Enable both the endpoint and DMA interrupt lines. */
		usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) |
				      USBA_BF(DMA_INT, 1 << ep->index));
		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
		usba_ep_writel(ep, CTL_ENB, ctrl);
	} else {
		usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index));
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
			(unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
			(unsigned long)usba_int_enb_get(udc));
	return 0;
}
/*
 * usb_ep_ops.disable: stop DMA, disable the endpoint and its interrupt,
 * and complete all pending requests with -ESHUTDOWN. The queue is
 * spliced to a local list so completions run after the hardware is off.
 */
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags;
	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
	spin_lock_irqsave(&udc->lock, flags);
	if (!ep->ep.desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
		return -EINVAL;
	}
	ep->ep.desc = NULL;
	list_splice_init(&ep->queue, &req_list);
	if (ep->can_dma) {
		usba_dma_writel(ep, CONTROL, 0);
		usba_dma_writel(ep, ADDRESS, 0);
		/* Read STATUS to clear any latched DMA interrupt. */
		usba_dma_readl(ep, STATUS);
	}
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index));
	request_complete_list(ep, &req_list, -ESHUTDOWN);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* usb_ep_ops.alloc_request: allocate a driver request wrapper. */
static struct usb_request *
usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct usba_request *req = kzalloc(sizeof(*req), gfp_flags);

	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops.free_request: release a request allocated above. */
static void
usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);

	kfree(to_usba_req(_req));
}
/*
 * Queue @req for DMA transfer on @ep: map the buffer, build the DMA
 * control word and start the transfer immediately if the endpoint was
 * idle. Returns 0, -EINVAL for over-long requests, or -ESHUTDOWN if the
 * endpoint was disabled meanwhile.
 */
static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
		struct usba_request *req, gfp_t gfp_flags)
{
	unsigned long flags;
	int ret;
	DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n",
		ep->ep.name, req->req.length, &req->req.dma,
		req->req.zero ? 'Z' : 'z',
		req->req.short_not_ok ? 'S' : 's',
		req->req.no_interrupt ? 'I' : 'i');
	if (req->req.length > 0x10000) {
		/* Lengths from 0 to 65536 (inclusive) are supported */
		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
		return -EINVAL;
	}
	ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
	if (ret)
		return ret;
	req->using_dma = 1;
	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
			| USBA_DMA_END_BUF_EN;
	/* OUT transfers also end on a short packet from the host. */
	if (!ep->is_in)
		req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
	/*
	 * Add this request to the queue and submit for DMA if
	 * possible. Check if we're still alive first -- we may have
	 * received a reset since last time we checked.
	 */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		if (list_empty(&ep->queue))
			submit_request(ep, req);
		list_add_tail(&req->queue, &ep->queue);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return ret;
}
/*
 * usb_ep_ops.queue: submit @_req on @_ep.
 *
 * DMA-capable endpoints are handed off to queue_dma().  PIO endpoints get
 * the request appended to the software queue and the appropriate FIFO
 * interrupt (TX_PK_RDY for IN directions, RX_BK_RDY otherwise) enabled so
 * the endpoint interrupt handlers can move the data.
 *
 * Returns -ESHUTDOWN if no gadget driver is bound, the bus speed is still
 * unknown, or the endpoint got disabled under us; 0 otherwise.
 */
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
			ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
			!ep->ep.desc)
		return -ESHUTDOWN;

	/* Reset per-request bookkeeping before (re)submission. */
	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	if (ep->can_dma)
		return queue_dma(udc, ep, req, gfp_flags);

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		list_add_tail(&req->queue, &ep->queue);

		/*
		 * IN data flows via TX_PK_RDY; for ep0 that also covers the
		 * IN data/status stages.  Everything else waits for RX data.
		 */
		if ((!ep_is_control(ep) && ep->is_in) ||
			(ep_is_control(ep)
				&& (ep->state == DATA_STAGE_IN
					|| ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/*
 * Refresh req->req.actual from the DMA STATUS word: the DMA_BUF_LEN field
 * appears to hold the byte count not yet transferred, so the completed
 * amount is the requested length minus that field.
 */
static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
	u32 not_done = USBA_BFEXT(DMA_BUF_LEN, status);

	req->req.actual = req->req.length - not_done;
}
/*
 * Disable @ep's DMA channel and busy-wait (up to ~40us) for the controller
 * to drop CH_EN.  The last STATUS value read is stored through @pstatus
 * when non-NULL, even on timeout.  Returns 0 on success, -ETIMEDOUT if
 * CH_EN never cleared.  Caller (usba_ep_dequeue) holds udc->lock.
 */
static int stop_dma(struct usba_ep *ep, u32 *pstatus)
{
	unsigned int timeout;
	u32 status;

	/*
	 * Stop the DMA controller. When writing both CH_EN
	 * and LINK to 0, the other bits are not affected.
	 */
	usba_dma_writel(ep, CONTROL, 0);

	/* Wait for the FIFO to empty */
	for (timeout = 40; timeout; --timeout) {
		status = usba_dma_readl(ep, STATUS);
		if (!(status & USBA_DMA_CH_EN))
			break;
		udelay(1);
	}

	if (pstatus)
		*pstatus = status;

	if (timeout == 0) {
		dev_err(&ep->udc->pdev->dev,
			"%s: timed out waiting for DMA FIFO to empty\n",
			ep->ep.name);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * usb_ep_ops.dequeue: cancel @_req on @_ep.
 *
 * If the request is the one currently being transferred by DMA, the DMA
 * channel is stopped and the endpoint FIFO reset before the request is
 * completed with -ECONNRESET.  Returns -EINVAL when the request is not
 * found on the endpoint's queue.
 */
static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	struct usba_request *req = NULL;
	struct usba_request *iter;
	unsigned long flags;
	u32 status;

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
			ep->ep.name, _req);

	spin_lock_irqsave(&udc->lock, flags);

	/* Find the wrapper that embeds @_req on this endpoint's queue. */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}

	if (!req) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	if (req->using_dma) {
		/*
		 * If this request is currently being transferred,
		 * stop the DMA controller and reset the FIFO.
		 */
		if (ep->queue.next == &req->queue) {
			status = usba_dma_readl(ep, STATUS);
			if (status & USBA_DMA_CH_EN)
				stop_dma(ep, &status);

#ifdef CONFIG_USB_GADGET_DEBUG_FS
			ep->last_dma_status = status;
#endif

			usba_writel(udc, EPT_RST, 1 << ep->index);

			/* Record how much was actually transferred. */
			usba_update_req(ep, req, status);
		}
	}

	/*
	 * Errors should stop the queue from advancing until the
	 * completion function returns.
	 */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	/* Process the next request if any */
	submit_next_request(ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * usb_ep_ops.set_halt: set or clear the STALL condition on @_ep.
 *
 * Returns -ENODEV for an unconfigured endpoint, -ENOTTY for isochronous
 * endpoints (they cannot halt), -EAGAIN while requests are queued or an
 * IN endpoint still has busy banks, 0 otherwise.  Clearing the halt also
 * resets the data toggle (USBA_TOGGLE_CLR).
 */
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
			value ? "set" : "clear");

	if (!ep->ep.desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
				ep->ep.name);
		return -ENODEV;
	}
	if (ep->is_isoc) {
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
				ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * We can't halt IN endpoints while there are still data to be
	 * transferred
	 */
	if (!list_empty(&ep->queue)
			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
					& USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			usba_ep_writel(ep, CLR_STA,
					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		/* Read back to make sure the write reached the device. */
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
static int usba_ep_fifo_status(struct usb_ep *_ep)
{
struct usba_ep *ep = to_usba_ep(_ep);
return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
}
/* usb_ep_ops.fifo_flush: discard FIFO contents by resetting the endpoint. */
static void usba_ep_fifo_flush(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);

	usba_writel(ep->udc, EPT_RST, 1 << ep->index);
}
/* Endpoint operations exposed to the gadget framework. */
static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};
static int usba_udc_get_frame(struct usb_gadget *gadget)
{
struct usba_udc *udc = to_usba_udc(gadget);
return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
}
/*
 * usb_gadget_ops.wakeup: request remote wakeup signalling.
 * Only allowed when the host has enabled the REMOTE_WAKEUP feature;
 * otherwise returns -EINVAL.
 */
static int usba_udc_wakeup(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
		usba_writel(udc, CTRL,
			    usba_readl(udc, CTRL) | USBA_REMOTE_WAKE_UP);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
static int
usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
struct usba_udc *udc = to_usba_udc(gadget);
unsigned long flags;
gadget->is_selfpowered = (is_selfpowered != 0);
spin_lock_irqsave(&udc->lock, flags);
if (is_selfpowered)
udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
else
udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on);
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int atmel_usba_stop(struct usb_gadget *gadget);
/*
 * usb_gadget_ops.match_ep: pick a hardware endpoint for @desc.
 *
 * In fifo_mode == 0 (auto-configuration) the FIFO size, bank count and the
 * cached EPT_CFG value are additionally tuned per transfer type before the
 * endpoint is returned.
 */
static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget,
					struct usb_endpoint_descriptor	*desc,
					struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct usb_ep	*_ep;
	struct usba_ep *ep;

	/* Look at endpoints until an unclaimed one looks usable */
	list_for_each_entry(_ep, &gadget->ep_list, ep_list) {
		if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp))
			goto found_ep;
	}
	/* Fail */
	return NULL;

found_ep:

	if (fifo_mode == 0) {
		/* Optimize hw fifo size based on ep type and other info */
		ep = to_usba_ep(_ep);

		switch (usb_endpoint_type(desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
			ep->nr_banks = 1;
			break;

		case USB_ENDPOINT_XFER_ISOC:
			ep->fifo_size = 1024;
			if (ep->udc->ep_prealloc)
				ep->nr_banks = 2;
			break;

		case USB_ENDPOINT_XFER_BULK:
			ep->fifo_size = 512;
			if (ep->udc->ep_prealloc)
				ep->nr_banks = 1;
			break;

		case USB_ENDPOINT_XFER_INT:
			/* wMaxPacketSize == 0: fall back to the ep limit. */
			if (desc->wMaxPacketSize == 0)
				ep->fifo_size =
				    roundup_pow_of_two(_ep->maxpacket_limit);
			else
				ep->fifo_size =
				    roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize));
			if (ep->udc->ep_prealloc)
				ep->nr_banks = 1;
			break;
		}

		/* It might be a little bit late to set this */
		usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);

		/* Generate ept_cfg based on FIFO size and number of banks */
		if (ep->fifo_size  <= 8)
			ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
		else
			/* LSB is bit 1, not 0 */
			ep->ept_cfg =
				USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);

		ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
	}

	return _ep;
}
/* Controller-level operations exposed to the gadget framework. */
static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
	.pullup			= atmel_usba_pullup,
	.udc_start		= atmel_usba_start,
	.udc_stop		= atmel_usba_stop,
	.match_ep		= atmel_usba_match_ep,
};
/* Descriptor installed on ep0 after a bus reset (64-byte control ep). */
static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* FIXME: I have no idea what to put here */
	.bInterval = 1,
};

/* Template copied into udc->gadget at probe time. */
static const struct usb_gadget usba_gadget_template = {
	.ops		= &usba_udc_ops,
	.max_speed	= USB_SPEED_HIGH,
	.name		= "atmel_usba_udc",
};
/*
 * Called with interrupts disabled and udc->lock held.
 *
 * Resets every endpoint FIFO in hardware; only ep0's software queue is
 * drained here (each pending request completes with -ECONNRESET).
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	usba_writel(udc, EPT_RST, ~0UL);

	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}
}
/*
 * Map a wIndex value from a control request to the matching enabled
 * endpoint: endpoint number 0 always resolves to ep0, otherwise both the
 * endpoint number and the direction bit must match an endpoint with a
 * descriptor installed.  Returns NULL when nothing matches.
 */
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
	struct usba_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return to_usba_ep(udc->gadget.ep0);

	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->ep.desc)
			continue;
		bEndpointAddress = ep->ep.desc->bEndpointAddress;
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
			return ep;
	}

	return NULL;
}
/* Called with interrupts disabled and udc->lock held */
/*
 * Stall the control endpoint to signal a protocol error and rearm the
 * ep0 state machine for the next SETUP packet.
 */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
/* Return 1 if @ep currently has FORCE_STALL asserted, 0 otherwise. */
static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
{
	return (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) ? 1 : 0;
}
/* Program the DEV_ADDR field of the CTRL register with @addr. */
static inline void set_address(struct usba_udc *udc, unsigned int addr)
{
	u32 ctrl;

	DBG(DBG_BUS, "setting address %u...\n", addr);

	ctrl = usba_readl(udc, CTRL);
	usba_writel(udc, CTRL, USBA_BFINS(DEV_ADDR, addr, ctrl));
}
/*
 * Enter the USB 2.0 electrical test mode previously latched in
 * udc->test_mode by a SET_FEATURE(TEST_MODE) request.  The wIndex high
 * byte selects the mode (0x01 Test_J .. 0x04 Test_Packet).  Returns
 * -EINVAL for unknown test selectors.
 */
static int do_test_mode(struct usba_udc *udc)
{
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJJJKKKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	struct device *dev = &udc->pdev->dev;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		dev_info(dev, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		dev_info(dev, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &udc->usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet */
		ep = &udc->usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			/* Preload the FIFO with the standard test pattern. */
			memcpy_toio(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			dev_info(dev, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
/* Avoid overly long expressions */
static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP);
}

static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE);
}

static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT);
}
/*
 * Handle the standard requests the controller driver must answer itself
 * (GET_STATUS, CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS); everything else
 * is delegated to the gadget driver's setup() with udc->lock dropped.
 *
 * Returns the delegate's return value, 0 on locally-handled success, or
 * -1 after stalling ep0 on a malformed request.  Called with udc->lock
 * held (from usba_control_irq).
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
		struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			status = 0;
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else
			goto delegate;

		/* Write directly to the FIFO. No queueing is done. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		writew_relaxed(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* Can't CLEAR_FEATURE TEST_MODE */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			/* Clearing a halt on a non-control ep resets toggle. */
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
						USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				/*
				 * Test mode is entered only after the status
				 * stage completes (see STATUS_STAGE_TEST).
				 */
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else
			goto delegate;

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		/*
		 * The address takes effect (FADDR_EN) only after the status
		 * stage completes -- see STATUS_STAGE_ADDR handling.
		 */
		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
		"halting endpoint...\n",
		ep->ep.name, crq->bRequestType, crq->bRequest,
		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
		le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);
	return -1;
}
/*
 * Interrupt handler for control endpoints: drives the ep0 state machine
 * through TX_PK_RDY / TX_COMPLETE / RX_BK_RDY / RX_SETUP events, looping
 * via the restart label until no more events are pending.  Called with
 * udc->lock held (from usba_udc_irq).
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		/*
		 * NOTE(review): req may be NULL here when the queue is empty
		 * while TX_PK_RDY is enabled -- this relies on the interrupt
		 * only being armed with a queued request; confirm.
		 */
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data done; expect a zero-length OUT status. */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* Status stage acknowledged: now enter test mode. */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive. Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT
					|| ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		if (pkt_len != sizeof(crq)) {
			pr_warn("udc: Invalid packet length %u (expected %zu)\n",
				pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy_fromio(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
			ep->state, crq.crq.bRequestType,
			crq.crq.bRequest); */

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		ret = -1;
		if (ep->index == 0)
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		else {
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
/*
 * Interrupt handler for non-control PIO endpoints: pushes IN data while
 * TX_PK_RDY is enabled-but-clear and pulls OUT data on RX_BK_RDY.
 * Called with udc->lock held (from usba_udc_irq).
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->using_dma) {
			/* Send a zero-length packet */
			usba_ep_writel(ep, SET_STA,
					USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_DIS,
					USBA_TX_PK_RDY);
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		} else {
			if (req->submitted)
				next_fifo_transaction(ep, req);
			else
				submit_request(ep, req);

			if (req->last_transaction) {
				list_del_init(&req->queue);
				submit_next_request(ep);
				request_complete(ep, req, 0);
			}
		}

		/* Re-sample state; the writes above may have changed it. */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
	}
}
/*
 * Interrupt handler for DMA-driven endpoints: on end-of-transfer or
 * end-of-buffer, record the transferred length, complete the head request
 * and kick the next one.  Called with udc->lock held (from usba_udc_irq).
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 status, control, pending;

	status = usba_dma_readl(ep, STATUS);
	control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
	ep->last_dma_status = status;
#endif
	pending = status & control;
	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

	if (status & USBA_DMA_CH_EN) {
		dev_err(&udc->pdev->dev,
			"DMA_CH_EN is set after transfer is finished!\n");
		dev_err(&udc->pdev->dev,
			"status=%#08x, pending=%#08x, control=%#08x\n",
			status, pending, control);

		/*
		 * try to pretend nothing happened. We might have to
		 * do something here...
		 */
	}

	if (list_empty(&ep->queue))
		/* Might happen if a reset comes along at the right moment */
		return;

	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
		req = list_entry(ep->queue.next, struct usba_request, queue);
		usba_update_req(ep, req, status);

		list_del_init(&req->queue);
		submit_next_request(ep);
		request_complete(ep, req, 0);
	}
}
static int start_clock(struct usba_udc *udc);
static void stop_clock(struct usba_udc *udc);
/*
 * Top-level controller interrupt handler: dispatches suspend/wakeup/resume
 * events, per-endpoint DMA and FIFO interrupts, and end-of-reset handling
 * (which re-initializes ep0 and any preallocated endpoints).  Gadget
 * driver callbacks are invoked with udc->lock temporarily dropped.
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	u32 status, int_enb;
	u32 dma_status;
	u32 ep_status;

	spin_lock(&udc->lock);

	int_enb = usba_int_enb_get(udc);
	status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		/* Swap to wake-up detection and gate the clocks. */
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP);
		usba_int_enb_set(udc, USBA_WAKE_UP);
		usba_int_enb_clear(udc, USBA_DET_SUSPEND);
		udc->suspended = true;
		toggle_bias(udc, 0);
		udc->bias_pulse_needed = true;
		stop_clock(udc);
		DBG(DBG_BUS, "Suspend detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		/* Clocks back on before touching anything else. */
		start_clock(udc);
		toggle_bias(udc, 1);
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		udc->suspended = false;
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		usba_int_enb_clear(udc, USBA_WAKE_UP);
		usba_int_enb_set(udc, USBA_DET_SUSPEND);
		generate_bias_pulse(udc);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	dma_status = USBA_BFEXT(DMA_INT, status);
	if (dma_status) {
		int i;

		usba_int_enb_set(udc, USBA_DET_SUSPEND);

		/* DMA channels are numbered from 1. */
		for (i = 1; i <= USBA_NR_DMAS; i++)
			if (dma_status & (1 << i))
				usba_dma_irq(udc, &udc->usba_ep[i]);
	}

	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		usba_int_enb_set(udc, USBA_DET_SUSPEND);

		for (i = 0; i < udc->num_ep; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&udc->usba_ep[i]))
					usba_control_irq(udc, &udc->usba_ep[i]);
				else
					usba_ep_irq(udc, &udc->usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0, *ep;
		int i;

		usba_writel(udc, INT_CLR,
			USBA_END_OF_RESET|USBA_END_OF_RESUME
			|USBA_DET_SUSPEND|USBA_WAKE_UP);
		generate_bias_pulse(udc);
		reset_all_endpoints(udc);

		if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			usb_gadget_udc_reset(&udc->gadget, udc->driver);
			spin_lock(&udc->lock);
		}

		if (status & USBA_HIGH_SPEED)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;
		DBG(DBG_BUS, "%s bus reset detected\n",
		    usb_speed_string(udc->gadget.speed));

		/* Re-arm ep0 for SETUP packets. */
		ep0 = &udc->usba_ep[0];
		ep0->ep.desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);

		/* If we get reset while suspended... */
		udc->suspended = false;
		usba_int_enb_clear(udc, USBA_WAKE_UP);

		usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) |
				      USBA_DET_SUSPEND | USBA_END_OF_RESUME);

		/*
		 * Unclear why we hit this irregularly, e.g. in usbtest,
		 * but it's clearly harmless...
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			dev_err(&udc->pdev->dev,
				"ODD: EP0 configuration is invalid!\n");

		/* Preallocate other endpoints */
		for (i = 1; i < udc->num_ep; i++) {
			ep = &udc->usba_ep[i];
			if (ep->ep.claimed) {
				usba_ep_writel(ep, CFG, ep->ept_cfg);
				if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
					dev_err(&udc->pdev->dev,
						"ODD: EP%d configuration is invalid!\n", i);
			}
		}
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
/*
 * Ungate the peripheral (pclk) and bus (hclk) clocks and take a wakeup
 * hold.  Idempotent: returns 0 immediately when already clocked.
 * Propagates clk_prepare_enable() errors, unwinding pclk if hclk fails.
 */
static int start_clock(struct usba_udc *udc)
{
	int err;

	if (udc->clocked)
		return 0;

	pm_stay_awake(&udc->pdev->dev);

	err = clk_prepare_enable(udc->pclk);
	if (err)
		goto out;

	err = clk_prepare_enable(udc->hclk);
	if (err)
		goto err_pclk;

	udc->clocked = true;
	return 0;

err_pclk:
	clk_disable_unprepare(udc->pclk);
out:
	return err;
}
/*
 * Undo start_clock(): gate both clocks (hclk first) and release the
 * wakeup hold.  No-op when the clocks are already off.
 */
static void stop_clock(struct usba_udc *udc)
{
	if (udc->clocked) {
		clk_disable_unprepare(udc->hclk);
		clk_disable_unprepare(udc->pclk);
		udc->clocked = false;
		pm_relax(&udc->pdev->dev);
	}
}
/*
 * Power up the controller: enable clocks, assert bias, enable the core,
 * and arm only the END_OF_RESET interrupt -- everything else is enabled
 * from the reset handler.  When resuming from suspend only the clocks are
 * re-enabled, since the register state is preserved.
 */
static int usba_start(struct usba_udc *udc)
{
	unsigned long flags;
	int ret;

	ret = start_clock(udc);
	if (ret)
		return ret;

	if (udc->suspended)
		return 0;

	spin_lock_irqsave(&udc->lock, flags);
	toggle_bias(udc, 1);
	usba_writel(udc, CTRL, USBA_ENABLE_MASK);

	/* Clear all requested and pending interrupts... */
	usba_writel(udc, INT_ENB, 0);
	udc->int_enb_cache = 0;
	usba_writel(udc, INT_CLR,
		USBA_END_OF_RESET|USBA_END_OF_RESUME
		|USBA_DET_SUSPEND|USBA_WAKE_UP);
	/* ...and enable just 'reset' IRQ to get us started */
	usba_int_enb_set(udc, USBA_END_OF_RESET);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * Power down the controller: reset all endpoints, drop the bias (which
 * also removes the D+ pullup), disable the core and gate the clocks.
 * Skipped entirely while suspended (the suspend path already did this).
 */
static void usba_stop(struct usba_udc *udc)
{
	unsigned long flags;

	if (udc->suspended)
		return;

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	reset_all_endpoints(udc);

	/* This will also disable the DP pullup */
	toggle_bias(udc, 0);
	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
	spin_unlock_irqrestore(&udc->lock, flags);

	stop_clock(udc);
}
/*
 * Threaded handler for the Vbus GPIO interrupt: debounce, then start or
 * stop the controller when the Vbus level actually changed.  On
 * disconnect the gadget driver's disconnect() callback is invoked before
 * powering down.
 */
static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	int vbus;

	/* debounce */
	udelay(10);

	mutex_lock(&udc->vbus_mutex);

	vbus = vbus_is_present(udc);
	if (vbus != udc->vbus_prev) {
		if (vbus) {
			usba_start(udc);
		} else {
			udc->suspended = false;
			if (udc->driver->disconnect)
				udc->driver->disconnect(&udc->gadget);

			usba_stop(udc);
		}
		udc->vbus_prev = vbus;
	}

	mutex_unlock(&udc->vbus_mutex);
	return IRQ_HANDLED;
}
/*
 * usb_gadget_ops.pullup: connect or disconnect from the bus by clearing
 * or setting the DETACH bit in CTRL.
 */
static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on)
{
	struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&udc->lock, flags);
	ctrl = usba_readl(udc, CTRL);
	ctrl = is_on ? (ctrl & ~USBA_DETACH) : (ctrl | USBA_DETACH);
	usba_writel(udc, CTRL, ctrl);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * usb_gadget_ops.udc_start: bind @driver, enable the Vbus IRQ if a Vbus
 * GPIO is present, and power up the controller immediately when Vbus is
 * already there.  On usba_start() failure every step is rolled back.
 */
static int atmel_usba_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	int ret;
	struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
	udc->driver = driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	mutex_lock(&udc->vbus_mutex);

	if (udc->vbus_pin)
		enable_irq(gpiod_to_irq(udc->vbus_pin));

	/* If Vbus is present, enable the controller and wait for reset */
	udc->vbus_prev = vbus_is_present(udc);
	if (udc->vbus_prev) {
		ret = usba_start(udc);
		if (ret)
			goto err;
	}

	mutex_unlock(&udc->vbus_mutex);
	return 0;

err:
	if (udc->vbus_pin)
		disable_irq(gpiod_to_irq(udc->vbus_pin));

	mutex_unlock(&udc->vbus_mutex);

	spin_lock_irqsave(&udc->lock, flags);
	udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);
	return ret;
}
/*
 * usb_gadget_ops.udc_stop: quiesce the Vbus IRQ first so no start/stop
 * can race, then power down the controller and unbind the driver.
 */
static int atmel_usba_stop(struct usb_gadget *gadget)
{
	struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);

	if (udc->vbus_pin)
		disable_irq(gpiod_to_irq(udc->vbus_pin));

	udc->suspended = false;
	usba_stop(udc);

	udc->driver = NULL;

	return 0;
}
/* at91sam9rl errata hook: drive the PMC BIASEN bit according to @is_on. */
static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
{
	unsigned int val = is_on ? AT91_PMC_BIASEN : 0;

	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, val);
}
/*
 * at91sam9g45 errata hook: pulse the PMC BIASEN bit (clear, then set).
 * The clear-before-set ordering is the whole point of this work-around.
 */
static void at91sam9g45_pulse_bias(struct usba_udc *udc)
{
	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
			   AT91_PMC_BIASEN);
}
/* Per-SoC bias-handling quirks. */
static const struct usba_udc_errata at91sam9rl_errata = {
	.toggle_bias = at91sam9rl_toggle_bias,
};

static const struct usba_udc_errata at91sam9g45_errata = {
	.pulse_bias = at91sam9g45_pulse_bias,
};

/* Default per-endpoint capabilities for at91sam9-class controllers. */
static const struct usba_ep_config ep_config_sam9[] = {
	{ .nr_banks = 1 },				/* ep 0 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 1 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 2 */
	{ .nr_banks = 3, .can_dma = 1 },		/* ep 3 */
	{ .nr_banks = 3, .can_dma = 1 },		/* ep 4 */
	{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 },	/* ep 5 */
	{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 },	/* ep 6 */
};

/* Default per-endpoint capabilities for sama5-class controllers. */
static const struct usba_ep_config ep_config_sama5[] = {
	{ .nr_banks = 1 },				/* ep 0 */
	{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 },	/* ep 1 */
	{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 },	/* ep 2 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 3 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 4 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 5 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 6 */
	{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 },	/* ep 7 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 8 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 9 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 10 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 11 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 12 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 13 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 14 */
	{ .nr_banks = 2, .can_isoc = 1 },		/* ep 15 */
};

/* Per-compatible controller configurations referenced by the OF table. */
static const struct usba_udc_config udc_at91sam9rl_cfg = {
	.errata = &at91sam9rl_errata,
	.config = ep_config_sam9,
	.num_ep = ARRAY_SIZE(ep_config_sam9),
	.ep_prealloc = true,
};

static const struct usba_udc_config udc_at91sam9g45_cfg = {
	.errata = &at91sam9g45_errata,
	.config = ep_config_sam9,
	.num_ep = ARRAY_SIZE(ep_config_sam9),
	.ep_prealloc = true,
};

static const struct usba_udc_config udc_sama5d3_cfg = {
	.config = ep_config_sama5,
	.num_ep = ARRAY_SIZE(ep_config_sama5),
	.ep_prealloc = true,
};

static const struct usba_udc_config udc_sam9x60_cfg = {
	.num_ep = ARRAY_SIZE(ep_config_sam9),
	.config = ep_config_sam9,
	.ep_prealloc = false,
};

/* Device-tree match table binding compatibles to configurations. */
static const struct of_device_id atmel_udc_dt_ids[] = {
	{ .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg },
	{ .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg },
	{ .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg },
	{ .compatible = "microchip,sam9x60-udc", .data = &udc_sam9x60_cfg },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);

/* PMC nodes searched for the bias-errata regmap (see atmel_udc_of_init). */
static const struct of_device_id atmel_pmc_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g45-pmc" },
	{ .compatible = "atmel,at91sam9rl-pmc" },
	{ .compatible = "atmel,at91sam9x5-pmc" },
	{ /* sentinel */ }
};
/*
 * atmel_udc_of_init - build the endpoint array from OF match data
 * @pdev: the platform device being probed
 * @udc: partially initialised controller state
 *
 * Resolves the per-SoC configuration from the OF match table, optionally
 * grabs the PMC syscon regmap for errata handling, reads the optional vbus
 * GPIO and fills in one struct usba_ep per endpoint (fifo size, bank count,
 * DMA/isoc capabilities, register windows and gadget ep capabilities).
 *
 * Returns the devm-allocated endpoint array, or an ERR_PTR on failure.
 */
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
					  struct usba_udc *udc)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *pp;
	int i, ret;
	struct usba_ep *eps, *ep;
	const struct usba_udc_config *udc_config;

	match = of_match_node(atmel_udc_dt_ids, np);
	if (!match)
		return ERR_PTR(-EINVAL);

	udc_config = match->data;
	udc->ep_prealloc = udc_config->ep_prealloc;
	udc->errata = udc_config->errata;
	if (udc->errata) {
		/* The errata workarounds poke the PMC through a syscon regmap */
		pp = of_find_matching_node_and_match(NULL, atmel_pmc_dt_ids,
						     NULL);
		if (!pp)
			return ERR_PTR(-ENODEV);

		udc->pmc = syscon_node_to_regmap(pp);
		of_node_put(pp);
		if (IS_ERR(udc->pmc))
			return ERR_CAST(udc->pmc);
	}

	udc->num_ep = 0;

	udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
						GPIOD_IN);
	if (IS_ERR(udc->vbus_pin))
		return ERR_CAST(udc->vbus_pin);

	if (fifo_mode == 0) {
		udc->num_ep = udc_config->num_ep;
	} else {
		/* FIFO layout overridden by the fifo_mode module parameter */
		udc->num_ep = usba_config_fifo_table(udc);
	}

	eps = devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep),
			   GFP_KERNEL);
	if (!eps)
		return ERR_PTR(-ENOMEM);

	udc->gadget.ep0 = &eps[0].ep;

	INIT_LIST_HEAD(&eps[0].ep.ep_list);

	i = 0;
	while (i < udc->num_ep) {
		const struct usba_ep_config *ep_cfg = &udc_config->config[i];

		ep = &eps[i];
		/* In fifo_mode the hardware endpoint number comes from the table */
		ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : i;

		/* Only the first EP is 64 bytes */
		if (ep->index == 0)
			ep->fifo_size = 64;
		else
			ep->fifo_size = 1024;

		if (fifo_mode) {
			/* Clamp the requested fifo size to the hardware maximum */
			if (ep->fifo_size < udc->fifo_cfg[i].fifo_size)
				dev_warn(&pdev->dev,
					 "Using default max fifo-size value\n");
			else
				ep->fifo_size = udc->fifo_cfg[i].fifo_size;
		}

		ep->nr_banks = ep_cfg->nr_banks;
		if (fifo_mode) {
			/* Same clamping for the number of banks */
			if (ep->nr_banks < udc->fifo_cfg[i].nr_banks)
				dev_warn(&pdev->dev,
					 "Using default max nb-banks value\n");
			else
				ep->nr_banks = udc->fifo_cfg[i].nr_banks;
		}

		ep->can_dma = ep_cfg->can_dma;
		ep->can_isoc = ep_cfg->can_isoc;

		sprintf(ep->name, "ep%d", ep->index);
		ep->ep.name = ep->name;

		/*
		 * NOTE(review): register windows are indexed by the loop
		 * counter while ep->index may differ in fifo_mode - confirm
		 * this is intentional.
		 */
		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
		ep->ep.ops = &usba_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
		ep->udc = udc;
		INIT_LIST_HEAD(&ep->queue);

		if (ep->index == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = ep->can_isoc;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;

		if (fifo_mode != 0) {
			/*
			 * Generate ept_cfg based on FIFO size and
			 * banks number
			 */
			if (ep->fifo_size <= 8)
				ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
			else
				/* LSB is bit 1, not 0 */
				ep->ept_cfg =
				    USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);

			ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
		}

		/* ep0 is gadget.ep0, the others go on the gadget's ep_list */
		if (i)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		i++;
	}

	if (i == 0) {
		dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
		ret = -EINVAL;
		goto err;
	}

	return eps;
err:
	return ERR_PTR(ret);
}
/*
 * usba_udc_probe - probe the USBA device controller
 *
 * Maps the control and FIFO register windows, acquires the clocks and IRQ,
 * briefly clocks the controller to force it into a known disabled state,
 * builds the endpoint array from DT, wires up the (optional) vbus IRQ and
 * finally registers the gadget and its debugfs entries.  All resources are
 * devm-managed, so the error paths simply return.
 */
static int usba_udc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct clk *pclk, *hclk;
	struct usba_udc *udc;
	int irq, ret, i;

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	udc->gadget = usba_gadget_template;
	INIT_LIST_HEAD(&udc->gadget.ep_list);

	udc->regs = devm_platform_get_and_ioremap_resource(pdev, CTRL_IOMEM_ID, &res);
	if (IS_ERR(udc->regs))
		return PTR_ERR(udc->regs);
	dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n",
		 res, udc->regs);

	udc->fifo = devm_platform_get_and_ioremap_resource(pdev, FIFO_IOMEM_ID, &res);
	if (IS_ERR(udc->fifo))
		return PTR_ERR(udc->fifo);
	dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);
	hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk))
		return PTR_ERR(hclk);

	spin_lock_init(&udc->lock);
	mutex_init(&udc->vbus_mutex);
	udc->pdev = pdev;
	udc->pclk = pclk;
	udc->hclk = hclk;

	platform_set_drvdata(pdev, udc);

	/* Make sure we start from a clean slate */
	ret = clk_prepare_enable(pclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
		return ret;
	}

	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
	clk_disable_unprepare(pclk);

	udc->usba_ep = atmel_udc_of_init(pdev, udc);

	/* toggle_bias() does not touch usba_ep, so run it before the check */
	toggle_bias(udc, 0);

	if (IS_ERR(udc->usba_ep))
		return PTR_ERR(udc->usba_ep);

	ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
			       "atmel_usba_udc", udc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
			irq, ret);
		return ret;
	}
	udc->irq = irq;

	if (udc->vbus_pin) {
		/* IRQ_NOAUTOEN: keep the vbus IRQ off until explicitly enabled */
		irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN);
		ret = devm_request_threaded_irq(&pdev->dev,
						gpiod_to_irq(udc->vbus_pin), NULL,
						usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
						"atmel_usba_udc", udc);
		if (ret) {
			/* Best effort: fall back to assuming vbus is always on */
			udc->vbus_pin = NULL;
			dev_warn(&udc->pdev->dev,
				 "failed to request vbus irq; "
				 "assuming always on\n");
		}
	}

	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret)
		return ret;
	device_init_wakeup(&pdev->dev, 1);

	usba_init_debugfs(udc);
	for (i = 1; i < udc->num_ep; i++)
		usba_ep_init_debugfs(udc, &udc->usba_ep[i]);

	return 0;
}
/* Tear down the gadget and the debugfs entries created at probe time. */
static void usba_udc_remove(struct platform_device *pdev)
{
	struct usba_udc *udc = platform_get_drvdata(pdev);
	int ep;

	device_init_wakeup(&pdev->dev, 0);
	usb_del_gadget_udc(&udc->gadget);

	/* debugfs entries exist for endpoints 1..num_ep-1 only */
	for (ep = udc->num_ep - 1; ep >= 1; ep--)
		usba_ep_cleanup_debugfs(&udc->usba_ep[ep]);
	usba_cleanup_debugfs(udc);
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: fully stop the controller unless it must stay armed as
 * a wakeup source, in which case only the wake IRQs are enabled.
 */
static int usba_udc_suspend(struct device *dev)
{
	struct usba_udc *udc = dev_get_drvdata(dev);

	/* Not started */
	if (!udc->driver)
		return 0;

	mutex_lock(&udc->vbus_mutex);

	if (!device_may_wakeup(dev)) {
		/* Not a wakeup source: stop the controller completely */
		udc->suspended = false;
		usba_stop(udc);
		goto out;
	}

	/*
	 * Device may wake up. We stay clocked if we failed
	 * to request vbus irq, assuming always on.
	 */
	if (udc->vbus_pin) {
		/* FIXME: right to stop here...??? */
		usba_stop(udc);
		enable_irq_wake(gpiod_to_irq(udc->vbus_pin));
	}

	enable_irq_wake(udc->irq);

out:
	mutex_unlock(&udc->vbus_mutex);
	return 0;
}
/* System resume: undo the wake-IRQ setup and restart if VBUS is present. */
static int usba_udc_resume(struct device *dev)
{
	struct usba_udc *udc = dev_get_drvdata(dev);

	/* Not started */
	if (!udc->driver)
		return 0;

	if (device_may_wakeup(dev)) {
		if (udc->vbus_pin)
			disable_irq_wake(gpiod_to_irq(udc->vbus_pin));
		disable_irq_wake(udc->irq);
	}

	/* If Vbus is present, enable the controller and wait for reset */
	mutex_lock(&udc->vbus_mutex);
	udc->vbus_prev = vbus_is_present(udc);
	if (udc->vbus_prev)
		usba_start(udc);
	mutex_unlock(&udc->vbus_mutex);

	return 0;
}
#endif
/* Driver glue: PM ops, platform driver registration and module metadata. */
static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);

static struct platform_driver udc_driver = {
	.probe		= usba_udc_probe,
	.remove_new	= usba_udc_remove,
	.driver		= {
		.name		= "atmel_usba_udc",
		.pm		= &usba_udc_pm_ops,
		.of_match_table	= atmel_udc_dt_ids,
	},
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usba_udc");
| linux-master | drivers/usb/gadget/udc/atmel_usba_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB3.0 Peripheral driver (USB gadget)
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/role.h>
#include <linux/usb/rzv2m_usb3drd.h>
/* register definitions */
#define USB3_AXI_INT_STA 0x008
#define USB3_AXI_INT_ENA 0x00c
#define USB3_DMA_INT_STA 0x010
#define USB3_DMA_INT_ENA 0x014
#define USB3_DMA_CH0_CON(n) (0x030 + ((n) - 1) * 0x10) /* n = 1 to 4 */
#define USB3_DMA_CH0_PRD_ADR(n) (0x034 + ((n) - 1) * 0x10) /* n = 1 to 4 */
#define USB3_USB_COM_CON 0x200
#define USB3_USB20_CON 0x204
#define USB3_USB30_CON 0x208
#define USB3_USB_STA 0x210
#define USB3_DRD_CON(p) ((p)->is_rzv2m ? 0x400 : 0x218)
#define USB3_USB_INT_STA_1 0x220
#define USB3_USB_INT_STA_2 0x224
#define USB3_USB_INT_ENA_1 0x228
#define USB3_USB_INT_ENA_2 0x22c
#define USB3_STUP_DAT_0 0x230
#define USB3_STUP_DAT_1 0x234
#define USB3_USB_OTG_STA(p) ((p)->is_rzv2m ? 0x410 : 0x268)
#define USB3_USB_OTG_INT_STA(p) ((p)->is_rzv2m ? 0x414 : 0x26c)
#define USB3_USB_OTG_INT_ENA(p) ((p)->is_rzv2m ? 0x418 : 0x270)
#define USB3_P0_MOD 0x280
#define USB3_P0_CON 0x288
#define USB3_P0_STA 0x28c
#define USB3_P0_INT_STA 0x290
#define USB3_P0_INT_ENA 0x294
#define USB3_P0_LNG 0x2a0
#define USB3_P0_READ 0x2a4
#define USB3_P0_WRITE 0x2a8
#define USB3_PIPE_COM 0x2b0
#define USB3_PN_MOD 0x2c0
#define USB3_PN_RAMMAP 0x2c4
#define USB3_PN_CON 0x2c8
#define USB3_PN_STA 0x2cc
#define USB3_PN_INT_STA 0x2d0
#define USB3_PN_INT_ENA 0x2d4
#define USB3_PN_LNG 0x2e0
#define USB3_PN_READ 0x2e4
#define USB3_PN_WRITE 0x2e8
#define USB3_SSIFCMD 0x340
/* AXI_INT_ENA and AXI_INT_STA */
#define AXI_INT_DMAINT BIT(31)
#define AXI_INT_EPCINT BIT(30)
/* PRD's n = from 1 to 4 */
#define AXI_INT_PRDEN_CLR_STA_SHIFT(n) (16 + (n) - 1)
#define AXI_INT_PRDERR_STA_SHIFT(n) (0 + (n) - 1)
#define AXI_INT_PRDEN_CLR_STA(n) (1 << AXI_INT_PRDEN_CLR_STA_SHIFT(n))
#define AXI_INT_PRDERR_STA(n) (1 << AXI_INT_PRDERR_STA_SHIFT(n))
/* DMA_INT_ENA and DMA_INT_STA */
#define DMA_INT(n) BIT(n)
/* DMA_CH0_CONn */
#define DMA_CON_PIPE_DIR BIT(15) /* 1: In Transfer */
#define DMA_CON_PIPE_NO_SHIFT 8
#define DMA_CON_PIPE_NO_MASK GENMASK(12, DMA_CON_PIPE_NO_SHIFT)
#define DMA_COM_PIPE_NO(n) (((n) << DMA_CON_PIPE_NO_SHIFT) & \
DMA_CON_PIPE_NO_MASK)
#define DMA_CON_PRD_EN BIT(0)
/* LCLKSEL */
#define LCLKSEL_LSEL BIT(18)
/* USB_COM_CON */
#define USB_COM_CON_CONF BIT(24)
#define USB_COM_CON_PN_WDATAIF_NL BIT(23)
#define USB_COM_CON_PN_RDATAIF_NL BIT(22)
#define USB_COM_CON_PN_LSTTR_PP BIT(21)
#define USB_COM_CON_SPD_MODE BIT(17)
#define USB_COM_CON_EP0_EN BIT(16)
#define USB_COM_CON_DEV_ADDR_SHIFT 8
#define USB_COM_CON_DEV_ADDR_MASK GENMASK(14, USB_COM_CON_DEV_ADDR_SHIFT)
#define USB_COM_CON_DEV_ADDR(n) (((n) << USB_COM_CON_DEV_ADDR_SHIFT) & \
USB_COM_CON_DEV_ADDR_MASK)
#define USB_COM_CON_RX_DETECTION BIT(1)
#define USB_COM_CON_PIPE_CLR BIT(0)
/* USB20_CON */
#define USB20_CON_B2_PUE BIT(31)
#define USB20_CON_B2_SUSPEND BIT(24)
#define USB20_CON_B2_CONNECT BIT(17)
#define USB20_CON_B2_TSTMOD_SHIFT 8
#define USB20_CON_B2_TSTMOD_MASK GENMASK(10, USB20_CON_B2_TSTMOD_SHIFT)
#define USB20_CON_B2_TSTMOD(n) (((n) << USB20_CON_B2_TSTMOD_SHIFT) & \
USB20_CON_B2_TSTMOD_MASK)
#define USB20_CON_B2_TSTMOD_EN BIT(0)
/* USB30_CON */
#define USB30_CON_POW_SEL_SHIFT 24
#define USB30_CON_POW_SEL_MASK GENMASK(26, USB30_CON_POW_SEL_SHIFT)
#define USB30_CON_POW_SEL_IN_U3 BIT(26)
#define USB30_CON_POW_SEL_IN_DISCON 0
#define USB30_CON_POW_SEL_P2_TO_P0 BIT(25)
#define USB30_CON_POW_SEL_P0_TO_P3 BIT(24)
#define USB30_CON_POW_SEL_P0_TO_P2 0
#define USB30_CON_B3_PLLWAKE BIT(23)
#define USB30_CON_B3_CONNECT BIT(17)
#define USB30_CON_B3_HOTRST_CMP BIT(1)
/* USB_STA */
#define USB_STA_SPEED_MASK (BIT(2) | BIT(1))
#define USB_STA_SPEED_HS BIT(2)
#define USB_STA_SPEED_FS BIT(1)
#define USB_STA_SPEED_SS 0
#define USB_STA_VBUS_STA BIT(0)
/* DRD_CON */
#define DRD_CON_PERI_RST BIT(31) /* rzv2m only */
#define DRD_CON_HOST_RST BIT(30) /* rzv2m only */
#define DRD_CON_PERI_CON BIT(24)
#define DRD_CON_VBOUT BIT(0)
/* USB_INT_ENA_1 and USB_INT_STA_1 */
#define USB_INT_1_B3_PLLWKUP BIT(31)
#define USB_INT_1_B3_LUPSUCS BIT(30)
#define USB_INT_1_B3_DISABLE BIT(27)
#define USB_INT_1_B3_WRMRST BIT(21)
#define USB_INT_1_B3_HOTRST BIT(20)
#define USB_INT_1_B2_USBRST BIT(12)
#define USB_INT_1_B2_L1SPND BIT(11)
#define USB_INT_1_B2_SPND BIT(9)
#define USB_INT_1_B2_RSUM BIT(8)
#define USB_INT_1_SPEED BIT(1)
#define USB_INT_1_VBUS_CNG BIT(0)
/* USB_INT_ENA_2 and USB_INT_STA_2 */
#define USB_INT_2_PIPE(n) BIT(n)
/* USB_OTG_STA, USB_OTG_INT_STA and USB_OTG_INT_ENA */
#define USB_OTG_IDMON(p) ((p)->is_rzv2m ? BIT(0) : BIT(4))
/* P0_MOD */
#define P0_MOD_DIR BIT(6)
/* P0_CON and PN_CON */
#define PX_CON_BYTE_EN_MASK (BIT(10) | BIT(9))
#define PX_CON_BYTE_EN_SHIFT 9
#define PX_CON_BYTE_EN_BYTES(n) (((n) << PX_CON_BYTE_EN_SHIFT) & \
PX_CON_BYTE_EN_MASK)
#define PX_CON_SEND BIT(8)
/* P0_CON */
#define P0_CON_ST_RES_MASK (BIT(27) | BIT(26))
#define P0_CON_ST_RES_FORCE_STALL BIT(27)
#define P0_CON_ST_RES_NORMAL BIT(26)
#define P0_CON_ST_RES_FORCE_NRDY 0
#define P0_CON_OT_RES_MASK (BIT(25) | BIT(24))
#define P0_CON_OT_RES_FORCE_STALL BIT(25)
#define P0_CON_OT_RES_NORMAL BIT(24)
#define P0_CON_OT_RES_FORCE_NRDY 0
#define P0_CON_IN_RES_MASK (BIT(17) | BIT(16))
#define P0_CON_IN_RES_FORCE_STALL BIT(17)
#define P0_CON_IN_RES_NORMAL BIT(16)
#define P0_CON_IN_RES_FORCE_NRDY 0
#define P0_CON_RES_WEN BIT(7)
#define P0_CON_BCLR BIT(1)
/* P0_STA and PN_STA */
#define PX_STA_BUFSTS BIT(0)
/* P0_INT_ENA and P0_INT_STA */
#define P0_INT_STSED BIT(18)
#define P0_INT_STSST BIT(17)
#define P0_INT_SETUP BIT(16)
#define P0_INT_RCVNL BIT(8)
#define P0_INT_ERDY BIT(7)
#define P0_INT_FLOW BIT(6)
#define P0_INT_STALL BIT(2)
#define P0_INT_NRDY BIT(1)
#define P0_INT_BFRDY BIT(0)
#define P0_INT_ALL_BITS (P0_INT_STSED | P0_INT_SETUP | P0_INT_BFRDY)
/* PN_MOD */
#define PN_MOD_DIR BIT(6)
#define PN_MOD_TYPE_SHIFT 4
#define PN_MOD_TYPE_MASK GENMASK(5, PN_MOD_TYPE_SHIFT)
#define PN_MOD_TYPE(n) (((n) << PN_MOD_TYPE_SHIFT) & \
PN_MOD_TYPE_MASK)
#define PN_MOD_EPNUM_MASK GENMASK(3, 0)
#define PN_MOD_EPNUM(n) ((n) & PN_MOD_EPNUM_MASK)
/* PN_RAMMAP */
#define PN_RAMMAP_RAMAREA_SHIFT 29
#define PN_RAMMAP_RAMAREA_MASK GENMASK(31, PN_RAMMAP_RAMAREA_SHIFT)
#define PN_RAMMAP_RAMAREA_16KB BIT(31)
#define PN_RAMMAP_RAMAREA_8KB (BIT(30) | BIT(29))
#define PN_RAMMAP_RAMAREA_4KB BIT(30)
#define PN_RAMMAP_RAMAREA_2KB BIT(29)
#define PN_RAMMAP_RAMAREA_1KB 0
#define PN_RAMMAP_MPKT_SHIFT 16
#define PN_RAMMAP_MPKT_MASK GENMASK(26, PN_RAMMAP_MPKT_SHIFT)
#define PN_RAMMAP_MPKT(n) (((n) << PN_RAMMAP_MPKT_SHIFT) & \
PN_RAMMAP_MPKT_MASK)
#define PN_RAMMAP_RAMIF_SHIFT 14
#define PN_RAMMAP_RAMIF_MASK GENMASK(15, PN_RAMMAP_RAMIF_SHIFT)
#define PN_RAMMAP_RAMIF(n) (((n) << PN_RAMMAP_RAMIF_SHIFT) & \
PN_RAMMAP_RAMIF_MASK)
#define PN_RAMMAP_BASEAD_MASK GENMASK(13, 0)
#define PN_RAMMAP_BASEAD(offs) (((offs) >> 3) & PN_RAMMAP_BASEAD_MASK)
#define PN_RAMMAP_DATA(area, ramif, basead) ((PN_RAMMAP_##area) | \
(PN_RAMMAP_RAMIF(ramif)) | \
(PN_RAMMAP_BASEAD(basead)))
/* PN_CON */
#define PN_CON_EN BIT(31)
#define PN_CON_DATAIF_EN BIT(30)
#define PN_CON_RES_MASK (BIT(17) | BIT(16))
#define PN_CON_RES_FORCE_STALL BIT(17)
#define PN_CON_RES_NORMAL BIT(16)
#define PN_CON_RES_FORCE_NRDY 0
#define PN_CON_LAST BIT(11)
#define PN_CON_RES_WEN BIT(7)
#define PN_CON_CLR BIT(0)
/* PN_INT_STA and PN_INT_ENA */
#define PN_INT_LSTTR BIT(4)
#define PN_INT_BFRDY BIT(0)
/* USB3_SSIFCMD */
#define SSIFCMD_URES_U2 BIT(9)
#define SSIFCMD_URES_U1 BIT(8)
#define SSIFCMD_UDIR_U2 BIT(7)
#define SSIFCMD_UDIR_U1 BIT(6)
#define SSIFCMD_UREQ_U2 BIT(5)
#define SSIFCMD_UREQ_U1 BIT(4)
#define USB3_EP0_SS_MAX_PACKET_SIZE 512
#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
#define USB3_EP0_BUF_SIZE 8
#define USB3_MAX_NUM_PIPES(p) ((p)->is_rzv2m ? 16 : 6) /* This includes PIPE 0 */
#define USB3_WAIT_US 3
#define USB3_DMA_NUM_SETTING_AREA 4
/*
* To avoid double-meaning of "0" (xferred 65536 bytes or received zlp if
* buffer size is 65536), this driver uses the maximum size per a entry is
* 32768 bytes.
*/
#define USB3_DMA_MAX_XFER_SIZE 32768
#define USB3_DMA_PRD_SIZE 4096
struct renesas_usb3;

/* Physical Region Descriptor Table (one entry per DMA transfer chunk) */
struct renesas_usb3_prd {
	u32 word1;	/* control/status flags and the transfer size */
#define USB3_PRD1_E		BIT(30)		/* the end of chain */
#define USB3_PRD1_U		BIT(29)		/* completion of transfer */
#define USB3_PRD1_D		BIT(28)		/* Error occurred */
#define USB3_PRD1_INT		BIT(27)		/* Interrupt occurred */
#define USB3_PRD1_LST		BIT(26)		/* Last Packet */
#define USB3_PRD1_B_INC		BIT(24)
#define USB3_PRD1_MPS_8		0
#define USB3_PRD1_MPS_16	BIT(21)
#define USB3_PRD1_MPS_32	BIT(22)
#define USB3_PRD1_MPS_64	(BIT(22) | BIT(21))
#define USB3_PRD1_MPS_512	BIT(23)
#define USB3_PRD1_MPS_1024	(BIT(23) | BIT(21))
#define USB3_PRD1_MPS_RESERVED	(BIT(23) | BIT(22) | BIT(21))
#define USB3_PRD1_SIZE_MASK	GENMASK(15, 0)

	u32 bap;
};

/* Number of PRD entries that fit into one PRD page */
#define USB3_DMA_NUM_PRD_ENTRIES	(USB3_DMA_PRD_SIZE / \
					 sizeof(struct renesas_usb3_prd))
/* Largest transfer a fully-populated PRD table can describe */
#define USB3_DMA_MAX_XFER_SIZE_ALL_PRDS	(USB3_DMA_PRD_SIZE / \
					 sizeof(struct renesas_usb3_prd) * \
					 USB3_DMA_MAX_XFER_SIZE)
/* State of one DMA setting area of the dedicated DMA controller */
struct renesas_usb3_dma {
	struct renesas_usb3_prd *prd;	/* PRD table, CPU view */
	dma_addr_t prd_dma;		/* PRD table, device view */
	int num;	/* Setting area number (from 1 to 4) */
	bool used;	/* area is currently in use */
};

/* A queued usb_request plus its link in the endpoint queue */
struct renesas_usb3_request {
	struct usb_request	req;
	struct list_head	queue;
};

#define USB3_EP_NAME_SIZE	8

/* Per-endpoint (pipe) state */
struct renesas_usb3_ep {
	struct usb_ep ep;
	struct renesas_usb3 *usb3;
	struct renesas_usb3_dma *dma;		/* assigned DMA area, if any */
	int num;
	char ep_name[USB3_EP_NAME_SIZE];
	struct list_head queue;			/* pending renesas_usb3_request */
	u32 rammap_val;
	bool dir_in;
	bool halt;
	bool wedge;
	bool started;
};

/* Per-SoC parameters selected from the OF match data */
struct renesas_usb3_priv {
	int ramsize_per_ramif;		/* unit = bytes */
	int num_ramif;
	int ramsize_per_pipe;		/* unit = bytes */
	bool workaround_for_vbus;	/* if true, don't check vbus signal */
	bool is_rzv2m;			/* if true, RZ/V2M SoC */
};

/* Driver instance state */
struct renesas_usb3 {
	void __iomem *reg;
	void __iomem *drd_reg;		/* separate DRD window (RZ/V2M only) */
	struct reset_control *usbp_rstc;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct extcon_dev *extcon;
	struct work_struct extcon_work;
	struct phy *phy;
	struct dentry *dentry;

	struct usb_role_switch *role_sw;
	struct device *host_dev;
	struct work_struct role_work;
	enum usb_role role;

	struct renesas_usb3_ep *usb3_ep;
	int num_usb3_eps;

	struct renesas_usb3_dma dma[USB3_DMA_NUM_SETTING_AREA];

	spinlock_t lock;
	int disabled_count;	/* consecutive SS failures, see bus_reset */

	struct usb_request *ep0_req;

	enum usb_role connection_state;
	u16 test_mode;
	u8 ep0_buf[USB3_EP0_BUF_SIZE];
	bool softconnect;
	bool workaround_for_vbus;
	bool extcon_host;	/* check id and set EXTCON_USB_HOST */
	bool extcon_usb;	/* check vbus and set EXTCON_USB */
	bool forced_b_device;
	bool start_to_connect;
	bool role_sw_by_connector;
	bool is_rzv2m;
};
#define gadget_to_renesas_usb3(_gadget) \
container_of(_gadget, struct renesas_usb3, gadget)
#define renesas_usb3_to_gadget(renesas_usb3) (&renesas_usb3->gadget)
#define usb3_to_dev(_usb3) (_usb3->gadget.dev.parent)
#define usb_ep_to_usb3_ep(_ep) container_of(_ep, struct renesas_usb3_ep, ep)
#define usb3_ep_to_usb3(_usb3_ep) (_usb3_ep->usb3)
#define usb_req_to_usb3_req(_req) container_of(_req, \
struct renesas_usb3_request, req)
#define usb3_get_ep(usb3, n) ((usb3)->usb3_ep + (n))
#define usb3_for_each_ep(usb3_ep, usb3, i) \
for ((i) = 0, usb3_ep = usb3_get_ep(usb3, (i)); \
(i) < (usb3)->num_usb3_eps; \
(i)++, usb3_ep = usb3_get_ep(usb3, (i)))
#define usb3_get_dma(usb3, i) (&(usb3)->dma[i])
#define usb3_for_each_dma(usb3, dma, i) \
for ((i) = 0, dma = usb3_get_dma((usb3), (i)); \
(i) < USB3_DMA_NUM_SETTING_AREA; \
(i)++, dma = usb3_get_dma((usb3), (i)))
static const char udc_name[] = "renesas_usb3";

/* Use the dedicated DMA controller by default; tunable module parameter. */
static bool use_dma = 1;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "use dedicated DMAC");
/* Basic MMIO accessors for the peripheral register block. */
static void usb3_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
{
	iowrite32(data, usb3->reg + offs);
}

static u32 usb3_read(struct renesas_usb3 *usb3, u32 offs)
{
	return ioread32(usb3->reg + offs);
}

/* Read-modify-write helpers to set or clear bits in a register. */
static void usb3_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
{
	usb3_write(usb3, usb3_read(usb3, offs) | bits, offs);
}

static void usb3_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
{
	usb3_write(usb3, usb3_read(usb3, offs) & ~bits, offs);
}
static void usb3_drd_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
{
void __iomem *reg;
if (usb3->is_rzv2m)
reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
else
reg = usb3->reg + offs;
iowrite32(data, reg);
}
static u32 usb3_drd_read(struct renesas_usb3 *usb3, u32 offs)
{
void __iomem *reg;
if (usb3->is_rzv2m)
reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
else
reg = usb3->reg + offs;
return ioread32(reg);
}
static void usb3_drd_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
{
u32 val = usb3_drd_read(usb3, offs);
val |= bits;
usb3_drd_write(usb3, val, offs);
}
static void usb3_drd_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
{
u32 val = usb3_drd_read(usb3, offs);
val &= ~bits;
usb3_drd_write(usb3, val, offs);
}
/*
 * usb3_wait - poll a register until the masked bits reach an expected value
 * @usb3: controller instance
 * @reg: register offset to poll
 * @mask: bits to test
 * @expected: value the masked bits must reach
 *
 * Polls for up to USB3_WAIT_US microseconds in 1 us steps.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask,
		     u32 expected)
{
	int i;

	for (i = 0; i < USB3_WAIT_US; i++) {
		if ((usb3_read(usb3, reg) & mask) == expected)
			return 0;
		udelay(1);
	}

	/* Fix: use a consistent zero-padded %08x for all three values */
	dev_dbg(usb3_to_dev(usb3), "%s: timed out (%08x, %08x, %08x)\n",
		__func__, reg, mask, expected);

	return -EBUSY;
}
/* Deferred work: push the cached ID/VBUS state out to the extcon device. */
static void renesas_usb3_extcon_work(struct work_struct *work)
{
	struct renesas_usb3 *priv =
		container_of(work, struct renesas_usb3, extcon_work);

	extcon_set_state_sync(priv->extcon, EXTCON_USB_HOST, priv->extcon_host);
	extcon_set_state_sync(priv->extcon, EXTCON_USB, priv->extcon_usb);
}
/* Unmask/mask helpers for USB_INT_ENA_1 bits. */
static void usb3_enable_irq_1(struct renesas_usb3 *usb3, u32 bits)
{
	usb3_set_bit(usb3, bits, USB3_USB_INT_ENA_1);
}

static void usb3_disable_irq_1(struct renesas_usb3 *usb3, u32 bits)
{
	usb3_clear_bit(usb3, bits, USB3_USB_INT_ENA_1);
}

/* Unmask/mask the interrupt of a single pipe in USB_INT_ENA_2. */
static void usb3_enable_pipe_irq(struct renesas_usb3 *usb3, int num)
{
	usb3_set_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2);
}

static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
{
	usb3_clear_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2);
}
static bool usb3_is_host(struct renesas_usb3 *usb3)
{
return !(usb3_drd_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
}
/* Clear stale DMA interrupt status and enable the AXI bridge interrupts. */
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
{
	/* Set AXI_INT */
	usb3_write(usb3, ~0, USB3_DMA_INT_STA);
	usb3_write(usb3, 0, USB3_DMA_INT_ENA);
	usb3_set_bit(usb3, AXI_INT_DMAINT | AXI_INT_EPCINT, USB3_AXI_INT_ENA);
}
/* Ack all pending EPC events and (re)arm VBUS change detection. */
static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
{
	usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
	if (!usb3->workaround_for_vbus)
		usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
}
/*
 * Wake the USB 2.0 PHY.  Returns true if it was already awake; otherwise
 * clears the suspend bit, arms the resume interrupt and returns false.
 */
static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
{
	bool suspended = usb3_read(usb3, USB3_USB20_CON) & USB20_CON_B2_SUSPEND;

	if (suspended) {
		usb3_clear_bit(usb3, USB20_CON_B2_SUSPEND, USB3_USB20_CON);
		usb3_enable_irq_1(usb3, USB_INT_1_B2_RSUM);
	}

	return !suspended;
}
static void usb3_usb2_pullup(struct renesas_usb3 *usb3, int pullup)
{
u32 bits = USB20_CON_B2_PUE | USB20_CON_B2_CONNECT;
if (usb3->softconnect && pullup)
usb3_set_bit(usb3, bits, USB3_USB20_CON);
else
usb3_clear_bit(usb3, bits, USB3_USB20_CON);
}
/*
 * Program the USB 2.0 test mode currently stored in usb3->test_mode.
 * A zero test_mode disables test mode again after the register update.
 */
static void usb3_set_test_mode(struct renesas_usb3 *usb3)
{
	u32 val = usb3_read(usb3, USB3_USB20_CON);

	val &= ~USB20_CON_B2_TSTMOD_MASK;
	val |= USB20_CON_B2_TSTMOD(usb3->test_mode);
	usb3_write(usb3, val | USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON);
	if (!usb3->test_mode)
		usb3_clear_bit(usb3, USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON);
}
/* Start a USB 2.0 connection: enable EP0, select HS/FS mode, pull up D+. */
static void usb3_start_usb2_connection(struct renesas_usb3 *usb3)
{
	usb3->disabled_count++;
	usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
	usb3_set_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
	usb3_usb2_pullup(usb3, 1);
}
/* Non-zero when the USB 3.0 PHY power state is U3. */
static int usb3_is_usb3_phy_in_u3(struct renesas_usb3 *usb3)
{
	return usb3_read(usb3, USB3_USB30_CON) & USB30_CON_POW_SEL_IN_U3;
}

/*
 * Wake the USB 3.0 PHY.  Returns true if it was already awake; otherwise
 * requests a PLL wakeup, arms the PLL-wakeup interrupt and returns false.
 */
static bool usb3_wakeup_usb3_phy(struct renesas_usb3 *usb3)
{
	bool in_u3 = usb3_is_usb3_phy_in_u3(usb3);

	if (in_u3) {
		usb3_set_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON);
		usb3_enable_irq_1(usb3, USB_INT_1_B3_PLLWKUP);
	}

	return !in_u3;
}
/*
 * Report the U1/U2 enable bits for GET_STATUS.  A link power state is
 * considered enabled when both its UDIR and UREQ bits are cleared.
 */
static u16 usb3_feature_get_un_enabled(struct renesas_usb3 *usb3)
{
	u32 ssifcmd = usb3_read(usb3, USB3_SSIFCMD);
	u16 status = 0;

	if (!(ssifcmd & (SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2)))
		status |= 1 << USB_DEV_STAT_U2_ENABLED;
	if (!(ssifcmd & (SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1)))
		status |= 1 << USB_DEV_STAT_U1_ENABLED;

	return status;
}
static void usb3_feature_u2_enable(struct renesas_usb3 *usb3, bool enable)
{
u32 bits = SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2;
/* Enables U2 if the bits of UDIR and UREQ are set to 0 */
if (enable)
usb3_clear_bit(usb3, bits, USB3_SSIFCMD);
else
usb3_set_bit(usb3, bits, USB3_SSIFCMD);
}
static void usb3_feature_u1_enable(struct renesas_usb3 *usb3, bool enable)
{
u32 bits = SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1;
/* Enables U1 if the bits of UDIR and UREQ are set to 0 */
if (enable)
usb3_clear_bit(usb3, bits, USB3_SSIFCMD);
else
usb3_set_bit(usb3, bits, USB3_SSIFCMD);
}
/* Common EP0/speed-mode setup for SuperSpeed operation. */
static void usb3_start_operation_for_usb3(struct renesas_usb3 *usb3)
{
	usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
	usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
	usb3_set_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
}

/* Start SuperSpeed link bring-up and arm the related interrupts. */
static void usb3_start_usb3_connection(struct renesas_usb3 *usb3)
{
	usb3_start_operation_for_usb3(usb3);
	usb3_set_bit(usb3, USB_COM_CON_RX_DETECTION, USB3_USB_COM_CON);

	usb3_enable_irq_1(usb3, USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
			  USB_INT_1_SPEED);
}

/* Drop the SuperSpeed link. */
static void usb3_stop_usb3_connection(struct renesas_usb3 *usb3)
{
	usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
}
/*
 * Enter the USB default state: enable EP0 (pipe 0) interrupts and the
 * bus-event interrupts matching the active speed (SS vs. USB 2.0).
 */
static void usb3_transition_to_default_state(struct renesas_usb3 *usb3,
					     bool is_usb3)
{
	usb3_set_bit(usb3, USB_INT_2_PIPE(0), USB3_USB_INT_ENA_2);
	usb3_write(usb3, P0_INT_ALL_BITS, USB3_P0_INT_STA);
	usb3_set_bit(usb3, P0_INT_ALL_BITS, USB3_P0_INT_ENA);

	if (is_usb3)
		usb3_enable_irq_1(usb3, USB_INT_1_B3_WRMRST |
				  USB_INT_1_B3_HOTRST);
	else
		usb3_enable_irq_1(usb3, USB_INT_1_B2_SPND |
				  USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
}
static void usb3_connect(struct renesas_usb3 *usb3)
{
if (usb3_wakeup_usb3_phy(usb3))
usb3_start_usb3_connection(usb3);
}
/* Reset the endpoint controller: unconfigure, disable EP0, clear pipes. */
static void usb3_reset_epc(struct renesas_usb3 *usb3)
{
	usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
	usb3_clear_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
	usb3_set_bit(usb3, USB_COM_CON_PIPE_CLR, USB3_USB_COM_CON);
	/* Leaving test mode is part of the reset */
	usb3->test_mode = 0;
	usb3_set_test_mode(usb3);
}
/*
 * Handle (or force) a disconnect: release the pull-up, reset the EPC,
 * mask every connection-related interrupt and notify the gadget driver.
 */
static void usb3_disconnect(struct renesas_usb3 *usb3)
{
	usb3->disabled_count = 0;
	usb3_usb2_pullup(usb3, 0);
	usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
	usb3_reset_epc(usb3);
	usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
			   USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
			   USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
			   USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
			   USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
	usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
	usb3_init_epc_registers(usb3);

	if (usb3->driver)
		usb3->driver->disconnect(&usb3->gadget);
}
/*
 * Re-evaluate the VBUS level.  With the vbus workaround active the signal
 * is not checked and we always connect; otherwise connect/disconnect based
 * on USB_STA and report the new state through extcon.
 */
static void usb3_check_vbus(struct renesas_usb3 *usb3)
{
	if (usb3->workaround_for_vbus) {
		usb3_connect(usb3);
	} else {
		usb3->extcon_usb = !!(usb3_read(usb3, USB3_USB_STA) &
				      USB_STA_VBUS_STA);
		if (usb3->extcon_usb)
			usb3_connect(usb3);
		else
			usb3_disconnect(usb3);

		schedule_work(&usb3->extcon_work);
	}
}
/* Deferred work: apply the cached role through the USB role switch. */
static void renesas_usb3_role_work(struct work_struct *work)
{
	struct renesas_usb3 *priv =
		container_of(work, struct renesas_usb3, role_work);

	usb_role_switch_set_role(priv->role_sw, priv->role);
}
/*
 * Switch the DRD block between host and peripheral mode.  On RZ/V2M the
 * unused side is additionally held in reset while the active side's reset
 * is released.
 */
static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
	if (usb3->is_rzv2m) {
		if (host) {
			usb3_drd_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
			usb3_drd_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
		} else {
			usb3_drd_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
			usb3_drd_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
		}
	}

	if (host)
		usb3_drd_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
	else
		usb3_drd_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
}
/* Apply host/peripheral mode, via the role switch when one is present. */
static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
{
	if (!usb3->role_sw) {
		usb3_set_mode(usb3, host);
		return;
	}

	usb3->role = host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
	schedule_work(&usb3->role_work);
}
/* Turn the VBUS output on or off via DRD_CON. */
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
	u32 drd_con = USB3_DRD_CON(usb3);

	if (enable)
		usb3_drd_set_bit(usb3, DRD_CON_VBOUT, drd_con);
	else
		usb3_drd_clear_bit(usb3, DRD_CON_VBOUT, drd_con);
}
/*
 * Apply a mode/VBUS combination under the driver lock.  With
 * role_sw_by_connector the hardware is only reconfigured once a connector
 * state is known; A-peripheral and forced B-device start connecting
 * immediately.
 */
static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&usb3->lock, flags);
	if (!usb3->role_sw_by_connector ||
	    usb3->connection_state != USB_ROLE_NONE) {
		usb3_set_mode_by_role_sw(usb3, host);
		usb3_vbus_out(usb3, a_dev);
	}
	/* for A-Peripheral or forced B-device mode */
	if ((!host && a_dev) || usb3->start_to_connect)
		usb3_connect(usb3);
	spin_unlock_irqrestore(&usb3->lock, flags);
}
static bool usb3_is_a_device(struct renesas_usb3 *usb3)
{
return !(usb3_drd_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
}
/*
 * Sample the ID state and reconfigure for host (A-device) or peripheral
 * mode accordingly, then report the result via extcon.
 */
static void usb3_check_id(struct renesas_usb3 *usb3)
{
	usb3->extcon_host = usb3_is_a_device(usb3);

	if ((!usb3->role_sw_by_connector && usb3->extcon_host &&
	     !usb3->forced_b_device) || usb3->connection_state == USB_ROLE_HOST)
		usb3_mode_config(usb3, true, true);
	else
		usb3_mode_config(usb3, false, false);

	schedule_work(&usb3->extcon_work);
}
/* Bring the controller to its initial state and evaluate ID/VBUS. */
static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
{
	usb3_init_axi_bridge(usb3);
	usb3_init_epc_registers(usb3);
	usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
		     USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
		     USB3_USB_COM_CON);
	/* Ack any stale ID-change event, then enable its interrupt */
	usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
	usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));

	usb3_check_id(usb3);
	usb3_check_vbus(usb3);
}
/* Disconnect and mask every interrupt source the driver enabled. */
static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
{
	usb3_disconnect(usb3);
	usb3_write(usb3, 0, USB3_P0_INT_ENA);
	usb3_drd_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
	usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
	usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
	usb3_write(usb3, 0, USB3_AXI_INT_ENA);
}
/* USB_INT_1 event handlers, one per status bit. */

/* PLL wakeup completed: start the SuperSpeed connection. */
static void usb3_irq_epc_int_1_pll_wakeup(struct renesas_usb3 *usb3)
{
	usb3_disable_irq_1(usb3, USB_INT_1_B3_PLLWKUP);
	usb3_clear_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON);
	usb3_start_usb3_connection(usb3);
}

/* SuperSpeed link-up succeeded: move to the default state. */
static void usb3_irq_epc_int_1_linkup_success(struct renesas_usb3 *usb3)
{
	usb3_transition_to_default_state(usb3, true);
}

/* USB 2.0 resume: restart the USB 2.0 connection. */
static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
{
	usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM);
	usb3_start_usb2_connection(usb3);
	usb3_transition_to_default_state(usb3, false);
}

/* USB 2.0 suspend: notify the gadget driver and update gadget state. */
static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
{
	usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);

	if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
	    usb3->gadget.state != USB_STATE_NOTATTACHED) {
		if (usb3->driver && usb3->driver->suspend)
			usb3->driver->suspend(&usb3->gadget);
		usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
	}
}

/* SS link disabled: fall back to USB 2.0 once its PHY is awake. */
static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
{
	usb3_stop_usb3_connection(usb3);
	if (usb3_wakeup_usb2_phy(usb3))
		usb3_irq_epc_int_1_resume(usb3);
}

/* USB 2.0 bus reset: retry SuperSpeed a few times before giving up. */
static void usb3_irq_epc_int_1_bus_reset(struct renesas_usb3 *usb3)
{
	usb3_reset_epc(usb3);
	if (usb3->disabled_count < 3)
		usb3_start_usb3_connection(usb3);
	else
		usb3_start_usb2_connection(usb3);
}

/* VBUS level changed: re-evaluate connect/disconnect. */
static void usb3_irq_epc_int_1_vbus_change(struct renesas_usb3 *usb3)
{
	usb3_check_vbus(usb3);
}

/* SuperSpeed Hot Reset. */
static void usb3_irq_epc_int_1_hot_reset(struct renesas_usb3 *usb3)
{
	usb3_reset_epc(usb3);
	usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);

	/* This bit shall be set within 12ms from the start of HotReset */
	usb3_set_bit(usb3, USB30_CON_B3_HOTRST_CMP, USB3_USB30_CON);
}

/* SuperSpeed Warm Reset: re-enable EP0 and re-arm speed detection. */
static void usb3_irq_epc_int_1_warm_reset(struct renesas_usb3 *usb3)
{
	usb3_reset_epc(usb3);
	usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);

	usb3_start_operation_for_usb3(usb3);
	usb3_enable_irq_1(usb3, USB_INT_1_SPEED);
}
/* Latch the negotiated speed and the matching ep0 max packet size. */
static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
{
	u32 speed = usb3_read(usb3, USB3_USB_STA) & USB_STA_SPEED_MASK;

	switch (speed) {
	case USB_STA_SPEED_SS:
		usb3->gadget.speed = USB_SPEED_SUPER;
		usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
		break;
	case USB_STA_SPEED_HS:
		usb3->gadget.speed = USB_SPEED_HIGH;
		usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
		break;
	case USB_STA_SPEED_FS:
		usb3->gadget.speed = USB_SPEED_FULL;
		usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
		break;
	default:
		usb3->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}
}
/*
 * Dispatch the already-acknowledged USB_INT_STA_1 bits to their handlers.
 * Several bits may be set at once; each is handled independently in the
 * order written here.
 */
static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
{
	if (int_sta_1 & USB_INT_1_B3_PLLWKUP)
		usb3_irq_epc_int_1_pll_wakeup(usb3);
	if (int_sta_1 & USB_INT_1_B3_LUPSUCS)
		usb3_irq_epc_int_1_linkup_success(usb3);
	if (int_sta_1 & USB_INT_1_B3_HOTRST)
		usb3_irq_epc_int_1_hot_reset(usb3);
	if (int_sta_1 & USB_INT_1_B3_WRMRST)
		usb3_irq_epc_int_1_warm_reset(usb3);
	if (int_sta_1 & USB_INT_1_B3_DISABLE)
		usb3_irq_epc_int_1_disable(usb3);
	if (int_sta_1 & USB_INT_1_B2_USBRST)
		usb3_irq_epc_int_1_bus_reset(usb3);
	if (int_sta_1 & USB_INT_1_B2_RSUM)
		usb3_irq_epc_int_1_resume(usb3);
	if (int_sta_1 & USB_INT_1_B2_SPND)
		usb3_irq_epc_int_1_suspend(usb3);
	if (int_sta_1 & USB_INT_1_SPEED)
		usb3_irq_epc_int_1_speed(usb3);
	if (int_sta_1 & USB_INT_1_VBUS_CNG)
		usb3_irq_epc_int_1_vbus_change(usb3);
}
/* Peek the head of an endpoint's request queue; caller must hold usb3->lock. */
static struct renesas_usb3_request *__usb3_get_request(struct renesas_usb3_ep
						       *usb3_ep)
{
	return list_first_entry_or_null(&usb3_ep->queue,
					struct renesas_usb3_request, queue);
}
/*
 * Locked wrapper around __usb3_get_request(): snapshot the first queued
 * request of @usb3_ep under the device lock. Returns NULL when the queue
 * is empty.
 */
static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
						     *usb3_ep)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	struct renesas_usb3_request *head;
	unsigned long flags;

	spin_lock_irqsave(&usb3->lock, flags);
	head = __usb3_get_request(usb3_ep);
	spin_unlock_irqrestore(&usb3->lock, flags);

	return head;
}
/*
 * Complete one request; caller must hold usb3->lock. The lock is dropped
 * around the gadget-driver giveback callback (which may requeue) and
 * re-taken afterwards, so callers must not rely on state across this call.
 */
static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
				struct renesas_usb3_request *usb3_req,
				int status)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
		usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
		status);
	usb3_req->req.status = status;
	usb3_ep->started = false;
	list_del_init(&usb3_req->queue);
	/* Drop the lock: the completion callback may call back into us. */
	spin_unlock(&usb3->lock);
	usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
	spin_lock(&usb3->lock);
}
/* Locked wrapper for __usb3_request_done(); safe from any context. */
static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
			      struct renesas_usb3_request *usb3_req, int status)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	spin_lock_irqsave(&usb3->lock, flags);
	__usb3_request_done(usb3_ep, usb3_req, status);
	spin_unlock_irqrestore(&usb3->lock, flags);
}
/*
 * STSED on PIPE0: the control-transfer status stage finished. Complete the
 * pending EP0 request, then enter test mode if SET_FEATURE requested it
 * (test mode must only start after the status stage per the USB spec).
 */
static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
	if (usb3_req)
		usb3_request_done(usb3_ep, usb3_req, 0);
	if (usb3->test_mode)
		usb3_set_test_mode(usb3);
}
/*
 * Read the 8-byte SETUP packet out of the STUP_DAT registers into @ctrl
 * (two 32-bit reads) and cache the data-stage direction on EP0.
 */
static void usb3_get_setup_data(struct renesas_usb3 *usb3,
				struct usb_ctrlrequest *ctrl)
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
	u32 *data = (u32 *)ctrl;
	*data++ = usb3_read(usb3, USB3_STUP_DAT_0);
	*data = usb3_read(usb3, USB3_STUP_DAT_1);
	/* update this driver's flag */
	usb3_ep->dir_in = !!(ctrl->bRequestType & USB_DIR_IN);
}
/*
 * Read-modify-write the three PIPE0 response fields (status/OUT/IN) in
 * P0_CON. RES_WEN acts as the write-enable for these fields.
 */
static void usb3_set_p0_con_update_res(struct renesas_usb3 *usb3, u32 res)
{
	u32 val = usb3_read(usb3, USB3_P0_CON);
	val &= ~(P0_CON_ST_RES_MASK | P0_CON_OT_RES_MASK | P0_CON_IN_RES_MASK);
	val |= res | P0_CON_RES_WEN;
	usb3_write(usb3, val, USB3_P0_CON);
}
/* Control-read data stage: NRDY status, stall OUT, allow IN. */
static void usb3_set_p0_con_for_ctrl_read_data(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
				   P0_CON_OT_RES_FORCE_STALL |
				   P0_CON_IN_RES_NORMAL);
}
/* Control-read status stage: normal status, stall OUT, allow IN. */
static void usb3_set_p0_con_for_ctrl_read_status(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
				   P0_CON_OT_RES_FORCE_STALL |
				   P0_CON_IN_RES_NORMAL);
}
/* Control-write data stage: NRDY status, allow OUT, stall IN. */
static void usb3_set_p0_con_for_ctrl_write_data(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
				   P0_CON_OT_RES_NORMAL |
				   P0_CON_IN_RES_FORCE_STALL);
}
/* Control-write status stage: normal status, allow OUT, stall IN. */
static void usb3_set_p0_con_for_ctrl_write_status(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
				   P0_CON_OT_RES_NORMAL |
				   P0_CON_IN_RES_FORCE_STALL);
}
/* No-data control transfer: go straight to a normal status stage. */
static void usb3_set_p0_con_for_no_data(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
				   P0_CON_OT_RES_FORCE_STALL |
				   P0_CON_IN_RES_FORCE_STALL);
}
/* Stall every PIPE0 stage (protocol stall for unsupported requests). */
static void usb3_set_p0_con_stall(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_STALL |
				   P0_CON_OT_RES_FORCE_STALL |
				   P0_CON_IN_RES_FORCE_STALL);
}
/* Hold every PIPE0 stage with NRDY (pause the control pipe). */
static void usb3_set_p0_con_stop(struct renesas_usb3 *usb3)
{
	usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
				   P0_CON_OT_RES_FORCE_NRDY |
				   P0_CON_IN_RES_FORCE_NRDY);
}
/*
 * Select pipe @num (1..num_usb3_eps) in PIPE_COM so that subsequent PN_*
 * register accesses target it. Returns -ENXIO for pipe 0 or out-of-range.
 * Caller must hold usb3->lock while the selection matters.
 */
static int usb3_pn_change(struct renesas_usb3 *usb3, int num)
{
	if (num == 0 || num > usb3->num_usb3_eps)
		return -ENXIO;
	usb3_write(usb3, num, USB3_PIPE_COM);
	return 0;
}
/* Update the response field of the currently selected PIPEn (RES_WEN gated). */
static void usb3_set_pn_con_update_res(struct renesas_usb3 *usb3, u32 res)
{
	u32 val = usb3_read(usb3, USB3_PN_CON);
	val &= ~PN_CON_RES_MASK;
	val |= res & PN_CON_RES_MASK;
	val |= PN_CON_RES_WEN;
	usb3_write(usb3, val, USB3_PN_CON);
}
/* Let the selected PIPEn respond normally (start transfers). */
static void usb3_pn_start(struct renesas_usb3 *usb3)
{
	usb3_set_pn_con_update_res(usb3, PN_CON_RES_NORMAL);
}
/* Force the selected PIPEn to answer NRDY (pause transfers). */
static void usb3_pn_stop(struct renesas_usb3 *usb3)
{
	usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_NRDY);
}
/* Force the selected PIPEn to answer STALL. */
static void usb3_pn_stall(struct renesas_usb3 *usb3)
{
	usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_STALL);
}
/* Clear the selected PIPEn and wait for the hardware to finish (self-clearing bit). */
static int usb3_pn_con_clear(struct renesas_usb3 *usb3)
{
	usb3_set_bit(usb3, PN_CON_CLR, USB3_PN_CON);
	return usb3_wait(usb3, USB3_PN_CON, PN_CON_CLR, 0);
}
/*
 * Decide whether a PIO transfer is finished: true for a zero-length
 * request, after a short packet (actual not a multiple of maxpacket), or
 * when all bytes were moved and no trailing ZLP was requested (req->zero).
 */
static bool usb3_is_transfer_complete(struct renesas_usb3_ep *usb3_ep,
				      struct renesas_usb3_request *usb3_req)
{
	struct usb_request *req = &usb3_req->req;

	if (req->length == 0)
		return true;
	if (req->actual % usb3_ep->ep.maxpacket)
		return true;

	return !req->zero && req->actual == req->length;
}
/* Poll the P0/PN status register until all bits in @mask are set (or timeout). */
static int usb3_wait_pipe_status(struct renesas_usb3_ep *usb3_ep, u32 mask)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	/* PIPE0 uses P0_STA; all other pipes share the selected PN_STA. */
	u32 sta_reg = usb3_ep->num ? USB3_PN_STA : USB3_P0_STA;
	return usb3_wait(usb3, sta_reg, mask, mask);
}
/*
 * Trigger transmission of the FIFO contents: SEND plus the count of valid
 * trailing bytes (@bytes < 4); PN_CON_LAST marks the final packet on
 * non-control pipes only.
 */
static void usb3_set_px_con_send(struct renesas_usb3_ep *usb3_ep, int bytes,
				 bool last)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	u32 con_reg = usb3_ep->num ? USB3_PN_CON : USB3_P0_CON;
	u32 val = usb3_read(usb3, con_reg);
	val |= PX_CON_SEND | PX_CON_BYTE_EN_BYTES(bytes);
	val |= (usb3_ep->num && last) ? PN_CON_LAST : 0;
	usb3_write(usb3, val, con_reg);
}
/*
 * PIO-write up to one max-packet of the request into the pipe FIFO and
 * trigger the send. Returns 0 when the transfer is complete, -EAGAIN when
 * more packets remain, -EBUSY if the FIFO never became ready.
 */
static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
			   struct renesas_usb3_request *usb3_req,
			   u32 fifo_reg)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	int i;
	int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
			usb3_ep->ep.maxpacket);
	u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
	u32 tmp = 0;
	/* A zero-length chunk means this packet (possibly a ZLP) is the last. */
	bool is_last = !len ? true : false;
	if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
		return -EBUSY;
	/* Update gadget driver parameter */
	usb3_req->req.actual += len;
	/* Write data to the register */
	if (len >= 4) {
		iowrite32_rep(usb3->reg + fifo_reg, buf, len / 4);
		buf += (len / 4) * 4;
		len %= 4;	/* update len to use usb3_set_pX_con_send() */
	}
	if (len) {
		/* Pack the 1-3 leftover bytes little-endian into one word. */
		for (i = 0; i < len; i++)
			tmp |= buf[i] << (8 * i);
		usb3_write(usb3, tmp, fifo_reg);
	}
	if (!is_last)
		is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
	/* Send the data */
	usb3_set_px_con_send(usb3_ep, len, is_last);
	return is_last ? 0 : -EAGAIN;
}
/* Read how many bytes the hardware received on this pipe (P0_LNG / PN_LNG). */
static u32 usb3_get_received_length(struct renesas_usb3_ep *usb3_ep)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	u32 lng_reg = usb3_ep->num ? USB3_PN_LNG : USB3_P0_LNG;
	return usb3_read(usb3, lng_reg);
}
/*
 * PIO-read the received packet from the pipe FIFO into the request buffer,
 * capped by the remaining request space. Returns 0 when the transfer is
 * complete, -EAGAIN when more packets are expected.
 */
static int usb3_read_pipe(struct renesas_usb3_ep *usb3_ep,
			  struct renesas_usb3_request *usb3_req, u32 fifo_reg)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	int i;
	int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
			usb3_get_received_length(usb3_ep));
	u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
	u32 tmp = 0;
	/* Zero-length packet: nothing to copy, transfer is done. */
	if (!len)
		return 0;
	/* Update gadget driver parameter */
	usb3_req->req.actual += len;
	/* Read data from the register */
	if (len >= 4) {
		ioread32_rep(usb3->reg + fifo_reg, buf, len / 4);
		buf += (len / 4) * 4;
		len %= 4;
	}
	if (len) {
		/* Unpack the 1-3 leftover bytes from one little-endian word. */
		tmp = usb3_read(usb3, fifo_reg);
		for (i = 0; i < len; i++)
			buf[i] = (tmp >> (8 * i)) & 0xff;
	}
	return usb3_is_transfer_complete(usb3_ep, usb3_req) ? 0 : -EAGAIN;
}
/*
 * Arm PIPE0 for the status stage that follows the data stage just
 * finished: control-read status for IN transfers, control-write status
 * for OUT transfers with data, and no-data status otherwise.
 */
static void usb3_set_status_stage(struct renesas_usb3_ep *usb3_ep,
				  struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);

	if (usb3_ep->dir_in)
		usb3_set_p0_con_for_ctrl_read_status(usb3);
	else if (usb3_req->req.length)
		usb3_set_p0_con_for_ctrl_write_status(usb3);
	else
		usb3_set_p0_con_for_no_data(usb3);
}
/*
 * Move one chunk of the EP0 data stage in the proper direction; once the
 * data stage completes (ret == 0), arm the status stage.
 */
static void usb3_p0_xfer(struct renesas_usb3_ep *usb3_ep,
			 struct renesas_usb3_request *usb3_req)
{
	int ret;
	if (usb3_ep->dir_in)
		ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_P0_WRITE);
	else
		ret = usb3_read_pipe(usb3_ep, usb3_req, USB3_P0_READ);
	if (!ret)
		usb3_set_status_stage(usb3_ep, usb3_req);
}
/*
 * Kick off an EP0 (control) transfer: program the PIPE0 direction, set the
 * stage-appropriate response bits and start the first data chunk. A
 * transfer already in flight (started) is left alone.
 */
static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
			     struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	if (usb3_ep->started)
		return;
	usb3_ep->started = true;
	if (usb3_ep->dir_in) {
		usb3_set_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
		usb3_set_p0_con_for_ctrl_read_data(usb3);
	} else {
		usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
		/* Zero-length OUT goes straight to the status stage in p0_xfer. */
		if (usb3_req->req.length)
			usb3_set_p0_con_for_ctrl_write_data(usb3);
	}
	usb3_p0_xfer(usb3_ep, usb3_req);
}
/* Route the selected PIPEn through the DMA data interface. */
static void usb3_enable_dma_pipen(struct renesas_usb3 *usb3)
{
	usb3_set_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
}
/* Detach the selected PIPEn from the DMA data interface (back to PIO). */
static void usb3_disable_dma_pipen(struct renesas_usb3 *usb3)
{
	usb3_clear_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
}
/* Unmask the DMA-completion interrupt for pipe @num. */
static void usb3_enable_dma_irq(struct renesas_usb3 *usb3, int num)
{
	usb3_set_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
}
/* Mask the DMA-completion interrupt for pipe @num. */
static void usb3_disable_dma_irq(struct renesas_usb3 *usb3, int num)
{
	usb3_clear_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
}
/*
 * Encode the endpoint's max packet size into the PRD word1 MPS field.
 * Sizes the hardware cannot express map to USB3_PRD1_MPS_RESERVED, which
 * callers use as a "DMA not possible" sentinel.
 */
static u32 usb3_dma_mps_to_prd_word1(struct renesas_usb3_ep *usb3_ep)
{
	switch (usb3_ep->ep.maxpacket) {
	case 8:
		return USB3_PRD1_MPS_8;
	case 16:
		return USB3_PRD1_MPS_16;
	case 32:
		return USB3_PRD1_MPS_32;
	case 64:
		return USB3_PRD1_MPS_64;
	case 512:
		return USB3_PRD1_MPS_512;
	case 1024:
		return USB3_PRD1_MPS_1024;
	default:
		return USB3_PRD1_MPS_RESERVED;
	}
}
/*
 * Try to claim a free DMA setting area for this request and DMA-map the
 * buffer. Returns false (fall back to PIO) for over-long requests,
 * zero-length packets, unsupported max packet sizes, mapping failure, or
 * when all DMA channels are busy.
 */
static bool usb3_dma_get_setting_area(struct renesas_usb3_ep *usb3_ep,
				      struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	struct renesas_usb3_dma *dma;
	int i;
	bool ret = false;
	if (usb3_req->req.length > USB3_DMA_MAX_XFER_SIZE_ALL_PRDS) {
		dev_dbg(usb3_to_dev(usb3), "%s: the length is too big (%d)\n",
			__func__, usb3_req->req.length);
		return false;
	}
	/* The driver doesn't handle zero-length packet via dmac */
	if (!usb3_req->req.length)
		return false;
	if (usb3_dma_mps_to_prd_word1(usb3_ep) == USB3_PRD1_MPS_RESERVED)
		return false;
	usb3_for_each_dma(usb3, dma, i) {
		if (dma->used)
			continue;
		if (usb_gadget_map_request(&usb3->gadget, &usb3_req->req,
					   usb3_ep->dir_in) < 0)
			break;
		dma->used = true;
		usb3_ep->dma = dma;
		ret = true;
		break;
	}
	return ret;
}
/* Release the DMA setting area claimed by this endpoint and unmap the buffer. */
static void usb3_dma_put_setting_area(struct renesas_usb3_ep *usb3_ep,
				      struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	int i;
	struct renesas_usb3_dma *dma;
	usb3_for_each_dma(usb3, dma, i) {
		if (usb3_ep->dma == dma) {
			usb_gadget_unmap_request(&usb3->gadget, &usb3_req->req,
						 usb3_ep->dir_in);
			dma->used = false;
			usb3_ep->dma = NULL;
			break;
		}
	}
}
/*
 * Fill the PRD (descriptor) table for a DMA transfer: each entry covers up
 * to USB3_DMA_MAX_XFER_SIZE bytes. The final entry gets the End and
 * Interrupt flags, plus Last for IN transfers.
 */
static void usb3_dma_fill_prd(struct renesas_usb3_ep *usb3_ep,
			      struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
	u32 remain = usb3_req->req.length;
	u32 dma = usb3_req->req.dma;
	u32 len;
	int i = 0;
	do {
		len = min_t(u32, remain, USB3_DMA_MAX_XFER_SIZE) &
			USB3_PRD1_SIZE_MASK;
		cur_prd->word1 = usb3_dma_mps_to_prd_word1(usb3_ep) |
				 USB3_PRD1_B_INC | len;
		cur_prd->bap = dma;
		remain -= len;
		dma += len;
		/*
		 * NOTE(review): "(i + 1) < USB3_DMA_NUM_PRD_ENTRIES" makes the
		 * loop exit after the first entry whenever the table holds more
		 * than one entry, even with remain > 0 — yet lengths up to
		 * USB3_DMA_MAX_XFER_SIZE_ALL_PRDS were accepted earlier.
		 * Verify against the PRD table spec whether ">=" was intended.
		 */
		if (!remain || (i + 1) < USB3_DMA_NUM_PRD_ENTRIES)
			break;
		cur_prd++;
		i++;
	} while (1);
	cur_prd->word1 |= USB3_PRD1_E | USB3_PRD1_INT;
	if (usb3_ep->dir_in)
		cur_prd->word1 |= USB3_PRD1_LST;
}
/*
 * Start the DMA engine on the endpoint's claimed channel: clear stale
 * status, program the PRD table address, then enable the channel and its
 * interrupt. The wmb() orders the PRD writes before the doorbell.
 */
static void usb3_dma_kick_prd(struct renesas_usb3_ep *usb3_ep)
{
	struct renesas_usb3_dma *dma = usb3_ep->dma;
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	u32 dma_con = DMA_COM_PIPE_NO(usb3_ep->num) | DMA_CON_PRD_EN;
	if (usb3_ep->dir_in)
		dma_con |= DMA_CON_PIPE_DIR;
	wmb();	/* prd entries should be in system memory here */
	/* Acknowledge any stale per-pipe and per-channel status first. */
	usb3_write(usb3, 1 << usb3_ep->num, USB3_DMA_INT_STA);
	usb3_write(usb3, AXI_INT_PRDEN_CLR_STA(dma->num) |
		   AXI_INT_PRDERR_STA(dma->num), USB3_AXI_INT_STA);
	usb3_write(usb3, dma->prd_dma, USB3_DMA_CH0_PRD_ADR(dma->num));
	usb3_write(usb3, dma_con, USB3_DMA_CH0_CON(dma->num));
	usb3_enable_dma_irq(usb3, usb3_ep->num);
}
/* Mask the pipe's DMA interrupt and disable the channel control register. */
static void usb3_dma_stop_prd(struct renesas_usb3_ep *usb3_ep)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	struct renesas_usb3_dma *dma = usb3_ep->dma;
	usb3_disable_dma_irq(usb3, usb3_ep->num);
	usb3_write(usb3, 0, USB3_DMA_CH0_CON(dma->num));
}
/*
 * Walk the PRD table after a DMA transfer, accumulating the transferred
 * byte count into req->actual (entry size minus hardware-reported
 * remainder). Returns -EIO if any descriptor reported an error.
 */
static int usb3_dma_update_status(struct renesas_usb3_ep *usb3_ep,
				  struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
	struct usb_request *req = &usb3_req->req;
	u32 remain, len;
	int i = 0;
	int status = 0;
	rmb();	/* The controller updated prd entries */
	do {
		if (cur_prd->word1 & USB3_PRD1_D)
			status = -EIO;
		/* The End entry carries the tail (length mod max-xfer) bytes. */
		if (cur_prd->word1 & USB3_PRD1_E)
			len = req->length % USB3_DMA_MAX_XFER_SIZE;
		else
			len = USB3_DMA_MAX_XFER_SIZE;
		remain = cur_prd->word1 & USB3_PRD1_SIZE_MASK;
		req->actual += len - remain;
		/*
		 * NOTE(review): as in usb3_dma_fill_prd(), the
		 * "(i + 1) < USB3_DMA_NUM_PRD_ENTRIES" clause stops the walk
		 * after the first entry whenever the table has more than one
		 * entry; confirm whether ">=" was intended so multi-entry
		 * tables are fully accounted.
		 */
		if (cur_prd->word1 & USB3_PRD1_E ||
		    (i + 1) < USB3_DMA_NUM_PRD_ENTRIES)
			break;
		cur_prd++;
		i++;
	} while (1);
	return status;
}
/*
 * Attempt a DMA transfer for this request. Returns true when DMA was
 * started (pipe paused, DMA interface enabled, PRDs filled and kicked,
 * pipe restarted); false means the caller must fall back to PIO.
 */
static bool usb3_dma_try_start(struct renesas_usb3_ep *usb3_ep,
			       struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	if (!use_dma)
		return false;
	if (usb3_dma_get_setting_area(usb3_ep, usb3_req)) {
		usb3_pn_stop(usb3);
		usb3_enable_dma_pipen(usb3);
		usb3_dma_fill_prd(usb3_ep, usb3_req);
		usb3_dma_kick_prd(usb3_ep);
		usb3_pn_start(usb3);
		return true;
	}
	return false;
}
/*
 * Tear down an in-flight DMA transfer (no-op when the endpoint holds no
 * DMA channel): detach the pipe from DMA, stop the engine, harvest the
 * transfer status into the request, and release the channel/mapping.
 * Returns the harvested status (0 or -EIO).
 */
static int usb3_dma_try_stop(struct renesas_usb3_ep *usb3_ep,
			     struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	int status = 0;
	spin_lock_irqsave(&usb3->lock, flags);
	if (!usb3_ep->dma)
		goto out;
	if (!usb3_pn_change(usb3, usb3_ep->num))
		usb3_disable_dma_pipen(usb3);
	usb3_dma_stop_prd(usb3_ep);
	status = usb3_dma_update_status(usb3_ep, usb3_req);
	usb3_dma_put_setting_area(usb3_ep, usb3_req);
out:
	spin_unlock_irqrestore(&usb3->lock, flags);
	return status;
}
/* Free every allocated PRD table; safe to call on partially-allocated state. */
static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
				     struct device *dev)
{
	int i;
	struct renesas_usb3_dma *dma;
	usb3_for_each_dma(usb3, dma, i) {
		if (dma->prd) {
			dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
					  dma->prd, dma->prd_dma);
			dma->prd = NULL;
		}
	}
	return 0;
}
/*
 * Allocate one coherent PRD table per DMA channel (skipped entirely when
 * DMA is disabled). On any failure, everything allocated so far is freed
 * and -ENOMEM returned. Channel numbers are 1-based for the hardware.
 */
static int renesas_usb3_dma_alloc_prd(struct renesas_usb3 *usb3,
				      struct device *dev)
{
	int i;
	struct renesas_usb3_dma *dma;
	if (!use_dma)
		return 0;
	usb3_for_each_dma(usb3, dma, i) {
		dma->prd = dma_alloc_coherent(dev, USB3_DMA_PRD_SIZE,
					      &dma->prd_dma, GFP_KERNEL);
		if (!dma->prd) {
			renesas_usb3_dma_free_prd(usb3, dev);
			return -ENOMEM;
		}
		dma->num = i + 1;
	}
	return 0;
}
/*
 * Start a transfer on a non-control pipe, but only if the endpoint is not
 * halted/busy and @usb3_req is the head of the queue. Prefers DMA; on PIO,
 * IN pipes get the first packet written immediately and LSTTR armed, and
 * BFRDY is enabled whenever more data movement is needed.
 */
static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
			     struct renesas_usb3_request *usb3_req)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	struct renesas_usb3_request *usb3_req_first;
	unsigned long flags;
	int ret = -EAGAIN;
	u32 enable_bits = 0;
	spin_lock_irqsave(&usb3->lock, flags);
	if (usb3_ep->halt || usb3_ep->started)
		goto out;
	usb3_req_first = __usb3_get_request(usb3_ep);
	/* Only the queue head may start; later requests wait their turn. */
	if (!usb3_req_first || usb3_req != usb3_req_first)
		goto out;
	if (usb3_pn_change(usb3, usb3_ep->num) < 0)
		goto out;
	usb3_ep->started = true;
	if (usb3_dma_try_start(usb3_ep, usb3_req))
		goto out;
	usb3_pn_start(usb3);
	if (usb3_ep->dir_in) {
		ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE);
		enable_bits |= PN_INT_LSTTR;
	}
	/* ret stays -EAGAIN for OUT pipes, so BFRDY is always armed there. */
	if (ret < 0)
		enable_bits |= PN_INT_BFRDY;
	if (enable_bits) {
		usb3_set_bit(usb3, enable_bits, USB3_PN_INT_ENA);
		usb3_enable_pipe_irq(usb3, usb3_ep->num);
	}
out:
	spin_unlock_irqrestore(&usb3->lock, flags);
}
/*
 * usb_ep_ops.queue: enqueue @_req on the endpoint and kick the pipe.
 * The request is appended under the lock; the start helpers re-check
 * queue position themselves, so the kick happens lock-free here.
 */
static int renesas_usb3_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
				 gfp_t gfp_flags)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
	struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req);
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	dev_dbg(usb3_to_dev(usb3), "ep_queue: ep%2d, %u\n", usb3_ep->num,
		_req->length);
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	spin_lock_irqsave(&usb3->lock, flags);
	list_add_tail(&usb3_req->queue, &usb3_ep->queue);
	spin_unlock_irqrestore(&usb3->lock, flags);
	if (!usb3_ep->num)
		usb3_start_pipe0(usb3_ep, usb3_req);
	else
		usb3_start_pipen(usb3_ep, usb3_req);
	return 0;
}
/* Program the USB device address into USB_COM_CON. */
static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr)
{
	/* DEV_ADDR bit field is cleared by WarmReset, HotReset and BusReset */
	usb3_set_bit(usb3, USB_COM_CON_DEV_ADDR(addr), USB3_USB_COM_CON);
}
/*
 * SET_ADDRESS handled in hardware-assisted software: addresses are 7-bit
 * (0-127), so wValue >= 128 is stalled. Returns true to request a stall.
 */
static bool usb3_std_req_set_address(struct renesas_usb3 *usb3,
				     struct usb_ctrlrequest *ctrl)
{
	if (le16_to_cpu(ctrl->wValue) >= 128)
		return true;	/* stall */
	usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue));
	/* SET_ADDRESS has no data stage; go straight to status. */
	usb3_set_p0_con_for_no_data(usb3);
	return false;
}
/*
 * Queue a driver-internal EP0 transfer using the preallocated ep0_req /
 * ep0_buf pair. @tx_data (if any) is copied into ep0_buf before queuing,
 * capped at USB3_EP0_BUF_SIZE, so callers may pass stack buffers.
 */
static void usb3_pipe0_internal_xfer(struct renesas_usb3 *usb3,
				     void *tx_data, size_t len,
				     void (*complete)(struct usb_ep *ep,
						      struct usb_request *req))
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
	if (tx_data)
		memcpy(usb3->ep0_buf, tx_data,
		       min_t(size_t, len, USB3_EP0_BUF_SIZE));
	/* NOTE(review): &ep0_buf equals ep0_buf only if ep0_buf is an array
	 * member — confirm its declaration matches this usage. */
	usb3->ep0_req->buf = &usb3->ep0_buf;
	usb3->ep0_req->length = len;
	usb3->ep0_req->complete = complete;
	renesas_usb3_ep_queue(&usb3_ep->ep, usb3->ep0_req, GFP_ATOMIC);
}
/* GET_STATUS completion: nothing to do, the reply was sent from ep0_buf. */
static void usb3_pipe0_get_status_completion(struct usb_ep *ep,
					     struct usb_request *req)
{
}
/*
 * GET_STATUS for device/interface/endpoint recipients. Builds the 16-bit
 * status word (self-powered and U1/U2 for the device; HALT for an
 * endpoint) and queues it on EP0. Returns true to request a stall for
 * unknown recipients. tx_data lives on the stack, which is safe because
 * usb3_pipe0_internal_xfer() copies it into ep0_buf before returning.
 */
static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
				    struct usb_ctrlrequest *ctrl)
{
	bool stall = false;
	struct renesas_usb3_ep *usb3_ep;
	int num;
	u16 status = 0;
	__le16 tx_data;
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		if (usb3->gadget.is_selfpowered)
			status |= 1 << USB_DEVICE_SELF_POWERED;
		if (usb3->gadget.speed == USB_SPEED_SUPER)
			status |= usb3_feature_get_un_enabled(usb3);
		break;
	case USB_RECIP_INTERFACE:
		/* Interfaces always report zero status. */
		break;
	case USB_RECIP_ENDPOINT:
		num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		usb3_ep = usb3_get_ep(usb3, num);
		if (usb3_ep->halt)
			status |= 1 << USB_ENDPOINT_HALT;
		break;
	default:
		stall = true;
		break;
	}
	if (!stall) {
		tx_data = cpu_to_le16(status);
		dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n",
			usb_req_to_usb3_req(usb3->ep0_req));
		usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data),
					 usb3_pipe0_get_status_completion);
	}
	return stall;
}
/*
 * SET/CLEAR_FEATURE with device recipient. Supports TEST_MODE (set only;
 * armed here, entered after the status stage) and U1/U2_ENABLE (SuperSpeed
 * only). Returns true to stall unsupported selectors.
 */
static bool usb3_std_req_feature_device(struct renesas_usb3 *usb3,
					struct usb_ctrlrequest *ctrl, bool set)
{
	bool stall = true;
	u16 w_value = le16_to_cpu(ctrl->wValue);
	switch (w_value) {
	case USB_DEVICE_TEST_MODE:
		if (!set)
			break;
		/* Test selector is the high byte of wIndex. */
		usb3->test_mode = le16_to_cpu(ctrl->wIndex) >> 8;
		stall = false;
		break;
	case USB_DEVICE_U1_ENABLE:
	case USB_DEVICE_U2_ENABLE:
		if (usb3->gadget.speed != USB_SPEED_SUPER)
			break;
		if (w_value == USB_DEVICE_U1_ENABLE)
			usb3_feature_u1_enable(usb3, set);
		if (w_value == USB_DEVICE_U2_ENABLE)
			usb3_feature_u2_enable(usb3, set);
		stall = false;
		break;
	default:
		break;
	}
	return stall;
}
/* Halt (stall) or un-halt (stop/NRDY) PIPE0; rejects non-zero endpoints. */
static int usb3_set_halt_p0(struct renesas_usb3_ep *usb3_ep, bool halt)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	if (unlikely(usb3_ep->num))
		return -EINVAL;
	usb3_ep->halt = halt;
	if (halt)
		usb3_set_p0_con_stall(usb3);
	else
		usb3_set_p0_con_stop(usb3);
	return 0;
}
/*
 * Halt or un-halt a non-control pipe. On un-halt the pipe is cleared and
 * re-enabled — unless this is a CLEAR_FEATURE on a wedged endpoint, which
 * must stay halted until the wedge is removed.
 */
static int usb3_set_halt_pn(struct renesas_usb3_ep *usb3_ep, bool halt,
			    bool is_clear_feature)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	spin_lock_irqsave(&usb3->lock, flags);
	if (!usb3_pn_change(usb3, usb3_ep->num)) {
		usb3_ep->halt = halt;
		if (halt) {
			usb3_pn_stall(usb3);
		} else if (!is_clear_feature || !usb3_ep->wedge) {
			usb3_pn_con_clear(usb3);
			usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
			usb3_pn_stop(usb3);
		}
	}
	spin_unlock_irqrestore(&usb3->lock, flags);
	return 0;
}
/*
 * Common halt entry point: refuses to halt an endpoint with a transfer in
 * flight (-EAGAIN), then dispatches to the PIPE0 or PIPEn variant.
 */
static int usb3_set_halt(struct renesas_usb3_ep *usb3_ep, bool halt,
			 bool is_clear_feature)
{
	int ret = 0;
	if (halt && usb3_ep->started)
		return -EAGAIN;
	if (usb3_ep->num)
		ret = usb3_set_halt_pn(usb3_ep, halt, is_clear_feature);
	else
		ret = usb3_set_halt_p0(usb3_ep, halt);
	return ret;
}
/*
 * SET/CLEAR_FEATURE(ENDPOINT_HALT). Only the HALT selector is supported;
 * anything else stalls. CLEAR_FEATURE additionally restarts the endpoint's
 * queued transfers.
 */
static bool usb3_std_req_feature_endpoint(struct renesas_usb3 *usb3,
					  struct usb_ctrlrequest *ctrl,
					  bool set)
{
	int num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
	struct renesas_usb3_ep *usb3_ep;
	struct renesas_usb3_request *usb3_req;
	if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
		return true;	/* stall */
	usb3_ep = usb3_get_ep(usb3, num);
	usb3_set_halt(usb3_ep, set, true);
	/* Restarts a queue if clear feature */
	if (!set) {
		usb3_ep->started = false;
		usb3_req = usb3_get_request(usb3_ep);
		if (usb3_req)
			usb3_start_pipen(usb3_ep, usb3_req);
	}
	return false;
}
/*
 * Dispatch SET/CLEAR_FEATURE by recipient; on success arm the no-data
 * status stage. Returns true to request a stall.
 */
static bool usb3_std_req_feature(struct renesas_usb3 *usb3,
				 struct usb_ctrlrequest *ctrl, bool set)
{
	bool stall = false;
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		stall = usb3_std_req_feature_device(usb3, ctrl, set);
		break;
	case USB_RECIP_INTERFACE:
		/* No interface features are defined; accept silently. */
		break;
	case USB_RECIP_ENDPOINT:
		stall = usb3_std_req_feature_endpoint(usb3, ctrl, set);
		break;
	default:
		stall = true;
		break;
	}
	if (!stall)
		usb3_set_p0_con_for_no_data(usb3);
	return stall;
}
/* SET_SEL completion: the received SEL values are currently ignored. */
static void usb3_pipe0_set_sel_completion(struct usb_ep *ep,
					  struct usb_request *req)
{
	/* TODO */
}
/*
 * SET_SEL (SuperSpeed system exit latencies): accept the spec-mandated
 * 6-byte OUT data stage into ep0_buf; any other wLength stalls.
 */
static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3,
				 struct usb_ctrlrequest *ctrl)
{
	u16 w_length = le16_to_cpu(ctrl->wLength);
	if (w_length != 6)
		return true;	/* stall */
	dev_dbg(usb3_to_dev(usb3), "set_sel: req = %p\n",
		usb_req_to_usb3_req(usb3->ep0_req));
	usb3_pipe0_internal_xfer(usb3, NULL, 6, usb3_pipe0_set_sel_completion);
	return false;
}
/*
 * Mirror SET_CONFIGURATION into the hardware CONF bit (set for any
 * non-zero configuration, cleared for "unconfigured"). Never stalls; the
 * request is still forwarded to the gadget driver by the caller.
 */
static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3,
					   struct usb_ctrlrequest *ctrl)
{
	if (le16_to_cpu(ctrl->wValue) > 0)
		usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
	else
		usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
	return false;
}
/**
* usb3_handle_standard_request - handle some standard requests
* @usb3: the renesas_usb3 pointer
* @ctrl: a pointer of setup data
*
* Returns true if this function handled a standard request
*/
static bool usb3_handle_standard_request(struct renesas_usb3 *usb3,
					 struct usb_ctrlrequest *ctrl)
{
	bool ret = false;	/* true => consumed, don't forward to gadget */
	bool stall = false;
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			stall = usb3_std_req_set_address(usb3, ctrl);
			ret = true;
			break;
		case USB_REQ_GET_STATUS:
			stall = usb3_std_req_get_status(usb3, ctrl);
			ret = true;
			break;
		case USB_REQ_CLEAR_FEATURE:
			stall = usb3_std_req_feature(usb3, ctrl, false);
			ret = true;
			break;
		case USB_REQ_SET_FEATURE:
			stall = usb3_std_req_feature(usb3, ctrl, true);
			ret = true;
			break;
		case USB_REQ_SET_SEL:
			stall = usb3_std_req_set_sel(usb3, ctrl);
			ret = true;
			break;
		case USB_REQ_SET_ISOCH_DELAY:
			/* This hardware doesn't support Isochronous xfer */
			stall = true;
			ret = true;
			break;
		case USB_REQ_SET_CONFIGURATION:
			/* Update hardware state, but still forward to gadget. */
			usb3_std_req_set_configuration(usb3, ctrl);
			break;
		default:
			break;
		}
	}
	if (stall)
		usb3_set_p0_con_stall(usb3);
	return ret;
}
/* Flush the PIPE0 buffer and wait for the self-clearing BCLR bit. */
static int usb3_p0_con_clear_buffer(struct renesas_usb3 *usb3)
{
	usb3_set_bit(usb3, P0_CON_BCLR, USB3_P0_CON);
	return usb3_wait(usb3, USB3_P0_CON, P0_CON_BCLR, 0);
}
/*
 * SETUP interrupt on PIPE0: abort any in-flight control transfer, flush
 * the buffer, fetch the SETUP packet, and let either the standard-request
 * handler or the gadget driver service it; stall on failure.
 */
static void usb3_irq_epc_pipe0_setup(struct renesas_usb3 *usb3)
{
	struct usb_ctrlrequest ctrl;
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
	/* Call giveback function if previous transfer is not completed */
	if (usb3_ep->started)
		usb3_request_done(usb3_ep, usb3_get_request(usb3_ep),
				  -ECONNRESET);
	usb3_p0_con_clear_buffer(usb3);
	usb3_get_setup_data(usb3, &ctrl);
	if (!usb3_handle_standard_request(usb3, &ctrl))
		if (usb3->driver->setup(&usb3->gadget, &ctrl) < 0)
			usb3_set_p0_con_stall(usb3);
}
/* BFRDY on PIPE0: the buffer is ready, move the next data-stage chunk. */
static void usb3_irq_epc_pipe0_bfrdy(struct renesas_usb3 *usb3)
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
	if (!usb3_req)
		return;
	usb3_p0_xfer(usb3_ep, usb3_req);
}
/* Acknowledge and dispatch the enabled PIPE0 interrupt sources. */
static void usb3_irq_epc_pipe0(struct renesas_usb3 *usb3)
{
	u32 p0_int_sta = usb3_read(usb3, USB3_P0_INT_STA);
	p0_int_sta &= usb3_read(usb3, USB3_P0_INT_ENA);
	/* Write-1-to-clear the bits we are about to handle. */
	usb3_write(usb3, p0_int_sta, USB3_P0_INT_STA);
	if (p0_int_sta & P0_INT_STSED)
		usb3_irq_epc_pipe0_status_end(usb3);
	if (p0_int_sta & P0_INT_SETUP)
		usb3_irq_epc_pipe0_setup(usb3);
	if (p0_int_sta & P0_INT_BFRDY)
		usb3_irq_epc_pipe0_bfrdy(usb3);
}
/*
 * Finish a non-control transfer: pause the pipe, mask its interrupts,
 * give the request back to the gadget driver, then start the next queued
 * request if one exists.
 */
static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
				    struct renesas_usb3_ep *usb3_ep,
				    struct renesas_usb3_request *usb3_req,
				    int status)
{
	unsigned long flags;
	spin_lock_irqsave(&usb3->lock, flags);
	if (usb3_pn_change(usb3, usb3_ep->num))
		usb3_pn_stop(usb3);
	spin_unlock_irqrestore(&usb3->lock, flags);
	usb3_disable_pipe_irq(usb3, usb3_ep->num);
	usb3_request_done(usb3_ep, usb3_req, status);
	/* get next usb3_req */
	usb3_req = usb3_get_request(usb3_ep);
	if (usb3_req)
		usb3_start_pipen(usb3_ep, usb3_req);
}
/* LSTTR: last packet of an IN transfer went out on the wire; complete it. */
static void usb3_irq_epc_pipen_lsttr(struct renesas_usb3 *usb3, int num)
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
	if (!usb3_req)
		return;
	/* LSTTR only has meaning for IN pipes; OUT completion uses BFRDY. */
	if (usb3_ep->dir_in) {
		dev_dbg(usb3_to_dev(usb3), "%s: len = %u, actual = %u\n",
			__func__, usb3_req->req.length, usb3_req->req.actual);
		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
	}
}
/*
 * BFRDY on a non-control pipe: for IN, push the next packet (and stop
 * BFRDY once everything is queued, leaving LSTTR to finish); for OUT,
 * pull the received packet and complete the request when done.
 */
static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
{
	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
	bool done = false;
	if (!usb3_req)
		return;
	spin_lock(&usb3->lock);
	if (usb3_pn_change(usb3, num))
		goto out;
	if (usb3_ep->dir_in) {
		/* Do not stop the IN pipe here to detect LSTTR interrupt */
		if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
			usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
	} else {
		if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
			done = true;
	}
out:
	/* need to unlock because usb3_request_done_pipen() locks it */
	spin_unlock(&usb3->lock);
	if (done)
		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
}
/*
 * Per-pipe interrupt: select the pipe under the lock, snapshot and ack
 * its enabled status bits, then dispatch LSTTR/BFRDY outside the lock.
 */
static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
{
	u32 pn_int_sta;
	spin_lock(&usb3->lock);
	if (usb3_pn_change(usb3, num) < 0) {
		spin_unlock(&usb3->lock);
		return;
	}
	pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
	pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
	usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
	spin_unlock(&usb3->lock);
	if (pn_int_sta & PN_INT_LSTTR)
		usb3_irq_epc_pipen_lsttr(usb3, num);
	if (pn_int_sta & PN_INT_BFRDY)
		usb3_irq_epc_pipen_bfrdy(usb3, num);
}
/* USB_INT_STA_2: fan out per-pipe interrupt bits; pipe 0 is the control pipe. */
static void usb3_irq_epc_int_2(struct renesas_usb3 *usb3, u32 int_sta_2)
{
	int i;
	for (i = 0; i < usb3->num_usb3_eps; i++) {
		if (int_sta_2 & USB_INT_2_PIPE(i)) {
			if (!i)
				usb3_irq_epc_pipe0(usb3);
			else
				usb3_irq_epc_pipen(usb3, i);
		}
	}
}
/* ID-pin monitor changed: re-evaluate host/peripheral role. */
static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
{
	usb3_check_id(usb3);
}
/* OTG block interrupt: ack enabled status bits and handle ID changes. */
static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
{
	u32 otg_int_sta = usb3_drd_read(usb3, USB3_USB_OTG_INT_STA(usb3));
	otg_int_sta &= usb3_drd_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
	if (otg_int_sta)
		usb3_drd_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
	if (otg_int_sta & USB_OTG_IDMON(usb3))
		usb3_irq_idmon_change(usb3);
}
/*
 * EPC (endpoint controller) interrupt: handle group-1 (link/bus events,
 * acked here) then group-2 (per-pipe, acked per pipe). On non-RZ/V2M
 * parts the OTG block shares this line, so poll it too.
 */
static void usb3_irq_epc(struct renesas_usb3 *usb3)
{
	u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1);
	u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2);
	int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1);
	if (int_sta_1) {
		usb3_write(usb3, int_sta_1, USB3_USB_INT_STA_1);
		usb3_irq_epc_int_1(usb3, int_sta_1);
	}
	int_sta_2 &= usb3_read(usb3, USB3_USB_INT_ENA_2);
	if (int_sta_2)
		usb3_irq_epc_int_2(usb3, int_sta_2);
	if (!usb3->is_rzv2m)
		usb3_irq_otg_int(usb3);
}
/*
 * Per-pipe DMA completion: for each signalled pipe whose channel reports
 * PRD-enable-clear (transfer finished), stop the DMA, harvest status and
 * complete the request.
 */
static void usb3_irq_dma_int(struct renesas_usb3 *usb3, u32 dma_sta)
{
	struct renesas_usb3_ep *usb3_ep;
	struct renesas_usb3_request *usb3_req;
	int i, status;
	for (i = 0; i < usb3->num_usb3_eps; i++) {
		if (!(dma_sta & DMA_INT(i)))
			continue;
		usb3_ep = usb3_get_ep(usb3, i);
		if (!(usb3_read(usb3, USB3_AXI_INT_STA) &
		    AXI_INT_PRDEN_CLR_STA(usb3_ep->dma->num)))
			continue;
		usb3_req = usb3_get_request(usb3_ep);
		status = usb3_dma_try_stop(usb3_ep, usb3_req);
		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, status);
	}
}
/* DMA interrupt entry: ack the enabled status bits, then handle them. */
static void usb3_irq_dma(struct renesas_usb3 *usb3)
{
	u32 dma_sta = usb3_read(usb3, USB3_DMA_INT_STA);
	dma_sta &= usb3_read(usb3, USB3_DMA_INT_ENA);
	if (dma_sta) {
		usb3_write(usb3, dma_sta, USB3_DMA_INT_STA);
		usb3_irq_dma_int(usb3, dma_sta);
	}
}
/*
 * Top-level IRQ handler: the AXI status register multiplexes the DMA and
 * endpoint-controller interrupt sources.
 */
static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
{
	struct renesas_usb3 *usb3 = _usb3;
	irqreturn_t ret = IRQ_NONE;
	u32 axi_int_sta = usb3_read(usb3, USB3_AXI_INT_STA);
	if (axi_int_sta & AXI_INT_DMAINT) {
		usb3_irq_dma(usb3);
		ret = IRQ_HANDLED;
	}
	if (axi_int_sta & AXI_INT_EPCINT) {
		usb3_irq_epc(usb3);
		ret = IRQ_HANDLED;
	}
	return ret;
}
/* Dedicated OTG IRQ line (parts where OTG does not share the main IRQ). */
static irqreturn_t renesas_usb3_otg_irq(int irq, void *_usb3)
{
	struct renesas_usb3 *usb3 = _usb3;
	usb3_irq_otg_int(usb3);
	return IRQ_HANDLED;
}
/* Program PN_MOD for the selected pipe: direction, transfer type, EP number. */
static void usb3_write_pn_mod(struct renesas_usb3_ep *usb3_ep,
			      const struct usb_endpoint_descriptor *desc)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	u32 val = 0;
	val |= usb3_ep->dir_in ? PN_MOD_DIR : 0;
	val |= PN_MOD_TYPE(usb_endpoint_type(desc));
	val |= PN_MOD_EPNUM(usb_endpoint_num(desc));
	usb3_write(usb3, val, USB3_PN_MOD);
}
/*
 * Map a per-pipe RAM size to the smallest RAMAREA encoding that holds it
 * (1K..16K); sizes above 16K are a programming error (WARN, clamp to 16K).
 */
static u32 usb3_calc_ramarea(int ram_size)
{
	WARN_ON(ram_size > SZ_16K);
	if (ram_size <= SZ_1K)
		return PN_RAMMAP_RAMAREA_1KB;
	else if (ram_size <= SZ_2K)
		return PN_RAMMAP_RAMAREA_2KB;
	else if (ram_size <= SZ_4K)
		return PN_RAMMAP_RAMAREA_4KB;
	else if (ram_size <= SZ_8K)
		return PN_RAMMAP_RAMAREA_8KB;
	else
		return PN_RAMMAP_RAMAREA_16KB;
}
/*
 * Build the PN_RAMMAP value: the pipe's precomputed RAM-area bits plus the
 * smallest standard max-packet encoding (8..512, default 1024) that covers
 * the descriptor's wMaxPacketSize.
 */
static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
				const struct usb_endpoint_descriptor *desc)
{
	int i;
	static const u32 max_packet_array[] = {8, 16, 32, 64, 512};
	u32 mpkt = PN_RAMMAP_MPKT(1024);
	/* Descending effect: the last (largest) match wins, which with an
	 * ascending array leaves the smallest sufficient size selected. */
	for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
		if (usb_endpoint_maxp(desc) <= max_packet_array[i])
			mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
	}
	return usb3_ep->rammap_val | mpkt;
}
/*
 * Enable a non-control pipe per the endpoint descriptor: program mode and
 * RAM mapping, clear the pipe, then set PN_CON_EN — all while the pipe is
 * selected under the lock.
 */
static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
			      const struct usb_endpoint_descriptor *desc)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	usb3_ep->dir_in = usb_endpoint_dir_in(desc);
	spin_lock_irqsave(&usb3->lock, flags);
	if (!usb3_pn_change(usb3, usb3_ep->num)) {
		usb3_write_pn_mod(usb3_ep, desc);
		usb3_write(usb3, usb3_calc_rammap_val(usb3_ep, desc),
			   USB3_PN_RAMMAP);
		usb3_pn_con_clear(usb3);
		usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
	}
	spin_unlock_irqrestore(&usb3->lock, flags);
	return 0;
}
/* Disable a non-control pipe: mask its interrupts, clear RAMMAP, drop EN. */
static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
{
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;
	usb3_ep->halt = false;
	spin_lock_irqsave(&usb3->lock, flags);
	if (!usb3_pn_change(usb3, usb3_ep->num)) {
		usb3_write(usb3, 0, USB3_PN_INT_ENA);
		usb3_write(usb3, 0, USB3_PN_RAMMAP);
		usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
	}
	spin_unlock_irqrestore(&usb3->lock, flags);
	return 0;
}
/*------- usb_ep_ops -----------------------------------------------------*/
/* usb_ep_ops.enable: configure the pipe from the endpoint descriptor. */
static int renesas_usb3_ep_enable(struct usb_ep *_ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
	return usb3_enable_pipe_n(usb3_ep, desc);
}
/*
 * usb_ep_ops.disable: drain the queue, completing every pending request
 * with -ESHUTDOWN (stopping any DMA first), then turn the pipe off.
 */
static int renesas_usb3_ep_disable(struct usb_ep *_ep)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
	struct renesas_usb3_request *usb3_req;
	do {
		usb3_req = usb3_get_request(usb3_ep);
		if (!usb3_req)
			break;
		usb3_dma_try_stop(usb3_ep, usb3_req);
		usb3_request_done(usb3_ep, usb3_req, -ESHUTDOWN);
	} while (1);
	return usb3_disable_pipe_n(usb3_ep);
}
/* Allocate a driver request wrapper; freed by __renesas_usb3_ep_free_request(). */
static struct usb_request *__renesas_usb3_ep_alloc_request(gfp_t gfp_flags)
{
	struct renesas_usb3_request *usb3_req;
	usb3_req = kzalloc(sizeof(struct renesas_usb3_request), gfp_flags);
	if (!usb3_req)
		return NULL;
	INIT_LIST_HEAD(&usb3_req->queue);
	return &usb3_req->req;
}
/* Free a request obtained from __renesas_usb3_ep_alloc_request(). */
static void __renesas_usb3_ep_free_request(struct usb_request *_req)
{
	kfree(usb_req_to_usb3_req(_req));
}
/* usb_ep_ops .alloc_request: the endpoint itself is not needed here. */
static struct usb_request *renesas_usb3_ep_alloc_request(struct usb_ep *_ep,
							 gfp_t gfp_flags)
{
	struct usb_request *req = __renesas_usb3_ep_alloc_request(gfp_flags);

	return req;
}
/* usb_ep_ops .free_request: delegate to the endpoint-agnostic helper. */
static void renesas_usb3_ep_free_request(struct usb_ep *_ep,
					 struct usb_request *_req)
{
	__renesas_usb3_ep_free_request(_req);
}
/*
 * usb_ep_ops .dequeue: abort one queued request.  Any in-flight DMA is
 * stopped before the request is completed with -ECONNRESET (the status
 * the gadget API mandates for dequeued requests).
 */
static int renesas_usb3_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
	struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req);
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);

	dev_dbg(usb3_to_dev(usb3), "ep_dequeue: ep%2d, %u\n", usb3_ep->num,
		_req->length);

	usb3_dma_try_stop(usb3_ep, usb3_req);
	usb3_request_done_pipen(usb3, usb3_ep, usb3_req, -ECONNRESET);

	return 0;
}
static int renesas_usb3_ep_set_halt(struct usb_ep *_ep, int value)
{
return usb3_set_halt(usb_ep_to_usb3_ep(_ep), !!value, false);
}
/*
 * usb_ep_ops .set_wedge: halt the endpoint and mark it wedged so the
 * halt cannot be cleared by the host via CLEAR_FEATURE.
 */
static int renesas_usb3_ep_set_wedge(struct usb_ep *_ep)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);

	usb3_ep->wedge = true;

	return usb3_set_halt(usb3_ep, true, false);
}
/*
 * usb_ep_ops .fifo_flush: drop buffered FIFO data.  For pipes >= 1 this
 * clears and re-enables the pipe under the device lock; for pipe 0 the
 * dedicated P0 buffer-clear path is used instead.
 */
static void renesas_usb3_ep_fifo_flush(struct usb_ep *_ep)
{
	struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
	unsigned long flags;

	if (usb3_ep->num) {
		spin_lock_irqsave(&usb3->lock, flags);
		if (!usb3_pn_change(usb3, usb3_ep->num)) {
			/* Clearing PN_CON also empties the pipe's FIFO. */
			usb3_pn_con_clear(usb3);
			usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
		}
		spin_unlock_irqrestore(&usb3->lock, flags);
	} else {
		usb3_p0_con_clear_buffer(usb3);
	}
}
/* Endpoint operations exposed to the gadget core for every pipe. */
static const struct usb_ep_ops renesas_usb3_ep_ops = {
	.enable		= renesas_usb3_ep_enable,
	.disable	= renesas_usb3_ep_disable,
	.alloc_request	= renesas_usb3_ep_alloc_request,
	.free_request	= renesas_usb3_ep_free_request,
	.queue		= renesas_usb3_ep_queue,
	.dequeue	= renesas_usb3_ep_dequeue,
	.set_halt	= renesas_usb3_ep_set_halt,
	.set_wedge	= renesas_usb3_ep_set_wedge,
	.fifo_flush	= renesas_usb3_ep_fifo_flush,
};
/*------- usb_gadget_ops -------------------------------------------------*/
/*
 * usb_gadget_ops .udc_start: bind a gadget driver.  Order matters here:
 * the driver pointer is set before PHY/PM bring-up so interrupts arriving
 * during initialization find a valid driver.  Returns -EINVAL for an
 * unusable driver, -EBUSY if an RZ/V2M controller is currently A-device.
 */
static int renesas_usb3_start(struct usb_gadget *gadget,
			      struct usb_gadget_driver *driver)
{
	struct renesas_usb3 *usb3;

	if (!driver || driver->max_speed < USB_SPEED_FULL ||
	    !driver->setup)
		return -EINVAL;

	usb3 = gadget_to_renesas_usb3(gadget);

	if (usb3->is_rzv2m && usb3_is_a_device(usb3))
		return -EBUSY;

	/* hook up the driver */
	usb3->driver = driver;

	if (usb3->phy)
		phy_init(usb3->phy);

	pm_runtime_get_sync(usb3_to_dev(usb3));

	/* Peripheral Reset */
	if (usb3->is_rzv2m)
		rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);

	renesas_usb3_init_controller(usb3);

	return 0;
}
/*
 * usb_gadget_ops .udc_stop: unbind the gadget driver and undo everything
 * renesas_usb3_start() did (controller stop, PHY exit, runtime-PM put).
 */
static int renesas_usb3_stop(struct usb_gadget *gadget)
{
	struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);

	usb3->softconnect = false;
	usb3->gadget.speed = USB_SPEED_UNKNOWN;
	usb3->driver = NULL;

	if (usb3->is_rzv2m)
		rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);

	renesas_usb3_stop_controller(usb3);

	if (usb3->phy)
		phy_exit(usb3->phy);

	pm_runtime_put(usb3_to_dev(usb3));

	return 0;
}
/* usb_gadget_ops .get_frame: frame numbers are not exposed by this driver. */
static int renesas_usb3_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}
static int renesas_usb3_pullup(struct usb_gadget *gadget, int is_on)
{
struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
usb3->softconnect = !!is_on;
return 0;
}
/* usb_gadget_ops .set_selfpowered: mirror the flag into the gadget struct. */
static int renesas_usb3_set_selfpowered(struct usb_gadget *gadget, int is_self)
{
	gadget->is_selfpowered = is_self ? 1 : 0;

	return 0;
}
/* Gadget-level operations registered with the UDC core. */
static const struct usb_gadget_ops renesas_usb3_gadget_ops = {
	.get_frame		= renesas_usb3_get_frame,
	.udc_start		= renesas_usb3_start,
	.udc_stop		= renesas_usb3_stop,
	.pullup			= renesas_usb3_pullup,
	.set_selfpowered	= renesas_usb3_set_selfpowered,
};
static enum usb_role renesas_usb3_role_switch_get(struct usb_role_switch *sw)
{
struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
enum usb_role cur_role;
pm_runtime_get_sync(usb3_to_dev(usb3));
cur_role = usb3_is_host(usb3) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
pm_runtime_put(usb3_to_dev(usb3));
return cur_role;
}
/*
 * Role handling when the switch is driven by an external connector
 * (role_sw_by_connector).  Tracks usb3->connection_state so transitions
 * from NONE are distinguished from direct host<->device switches; the
 * sequencing of mode change, VBUS and host attach/release is deliberate.
 * Called with a runtime-PM reference held by the caller.
 */
static void handle_ext_role_switch_states(struct device *dev,
					    enum usb_role role)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
	struct device *host = usb3->host_dev;
	enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);

	switch (role) {
	case USB_ROLE_NONE:
		usb3->connection_state = USB_ROLE_NONE;
		/* On RZ/V2M the host is released only after the reset below. */
		if (!usb3->is_rzv2m && cur_role == USB_ROLE_HOST)
			device_release_driver(host);
		if (usb3->driver) {
			if (usb3->is_rzv2m)
				rzv2m_usb3drd_reset(dev->parent, false);
			usb3_disconnect(usb3);
		}
		usb3_vbus_out(usb3, false);

		if (usb3->is_rzv2m) {
			rzv2m_usb3drd_reset(dev->parent, true);
			device_release_driver(host);
		}
		break;
	case USB_ROLE_DEVICE:
		if (usb3->connection_state == USB_ROLE_NONE) {
			/* Fresh attach: switch to peripheral and connect. */
			usb3->connection_state = USB_ROLE_DEVICE;
			usb3_set_mode(usb3, false);
			if (usb3->driver) {
				if (usb3->is_rzv2m)
					renesas_usb3_init_controller(usb3);
				usb3_connect(usb3);
			}
		} else if (cur_role == USB_ROLE_HOST)  {
			/* Host -> device: drop the host controller first. */
			device_release_driver(host);
			usb3_set_mode(usb3, false);
			if (usb3->driver)
				usb3_connect(usb3);
		}
		usb3_vbus_out(usb3, false);
		break;
	case USB_ROLE_HOST:
		if (usb3->connection_state == USB_ROLE_NONE) {
			if (usb3->driver) {
				if (usb3->is_rzv2m)
					rzv2m_usb3drd_reset(dev->parent, false);
				usb3_disconnect(usb3);
			}

			usb3->connection_state = USB_ROLE_HOST;
			usb3_set_mode(usb3, true);
			usb3_vbus_out(usb3, true);
			if (device_attach(host) < 0)
				dev_err(dev, "device_attach(host) failed\n");
		} else if (cur_role == USB_ROLE_DEVICE) {
			usb3_disconnect(usb3);
			/* Must set the mode before device_attach of the host */
			usb3_set_mode(usb3, true);
			/* This device_attach() might sleep */
			if (device_attach(host) < 0)
				dev_err(dev, "device_attach(host) failed\n");
		}
		break;
	default:
		break;
	}
}
/*
 * Role handling without an external connector: only host<->device flips
 * are acted on; requests matching the current role are a no-op.  Called
 * with a runtime-PM reference held by the caller.
 */
static void handle_role_switch_states(struct device *dev,
					    enum usb_role role)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
	struct device *host = usb3->host_dev;
	enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);

	if (cur_role == USB_ROLE_HOST && role == USB_ROLE_DEVICE) {
		device_release_driver(host);
		usb3_set_mode(usb3, false);
	} else if (cur_role == USB_ROLE_DEVICE && role == USB_ROLE_HOST) {
		/* Must set the mode before device_attach of the host */
		usb3_set_mode(usb3, true);
		/* This device_attach() might sleep */
		if (device_attach(host) < 0)
			dev_err(dev, "device_attach(host) failed\n");
	}
}
/*
 * usb_role_switch .set: apply the requested role while holding a
 * runtime-PM reference, choosing the connector-aware or plain handler.
 */
static int renesas_usb3_role_switch_set(struct usb_role_switch *sw,
					enum usb_role role)
{
	struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
	struct device *dev = usb3_to_dev(usb3);

	pm_runtime_get_sync(dev);
	if (usb3->role_sw_by_connector)
		handle_ext_role_switch_states(dev, role);
	else
		handle_role_switch_states(dev, role);
	pm_runtime_put(dev);

	return 0;
}
/*
 * sysfs "role" write: accepts "host" or "peripheral".  Refused while no
 * gadget driver is bound (-ENODEV), while B-device mode is forced via
 * debugfs (-EBUSY), or when the requested role is already active.
 */
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
	bool want_host;

	if (!usb3->driver)
		return -ENODEV;

	if (usb3->forced_b_device)
		return -EBUSY;

	if (sysfs_streq(buf, "host"))
		want_host = true;
	else if (sysfs_streq(buf, "peripheral"))
		want_host = false;
	else
		return -EINVAL;

	/* Reject no-op transitions. */
	if (want_host == usb3_is_host(usb3))
		return -EINVAL;

	usb3_mode_config(usb3, want_host, usb3_is_a_device(usb3));

	return count;
}
/* sysfs "role" read: "host" or "peripheral" while a gadget is bound. */
static ssize_t role_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
	const char *role;

	if (!usb3->driver)
		return -ENODEV;

	role = usb3_is_host(usb3) ? "host" : "peripheral";

	return sprintf(buf, "%s\n", role);
}
static DEVICE_ATTR_RW(role);
/* debugfs "b_device" read: print the forced_b_device flag as 0/1. */
static int renesas_usb3_b_device_show(struct seq_file *s, void *unused)
{
	struct renesas_usb3 *usb3 = s->private;

	seq_printf(s, "%d\n", usb3->forced_b_device);

	return 0;
}
/* debugfs open: standard single_open() wiring for the show callback. */
static int renesas_usb3_b_device_open(struct inode *inode, struct file *file)
{
	return single_open(file, renesas_usb3_b_device_show, inode->i_private);
}
/*
 * debugfs "b_device" write:
 *   "1" - force B-device (peripheral) mode
 *   "2" - with the VBUS workaround active and B-device already forced,
 *         arm start_to_connect
 *   anything else - clear forced B-device mode
 *
 * Fix: zero-initialize @buf.  Previously a zero-length write left buf[0]
 * uninitialized and strncmp() read a garbage stack byte; zero-filling
 * also guarantees NUL termination for the copied user data.
 */
static ssize_t renesas_usb3_b_device_write(struct file *file,
					   const char __user *ubuf,
					   size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct renesas_usb3 *usb3 = s->private;
	char buf[32] = { 0 };

	if (!usb3->driver)
		return -ENODEV;

	/* Copy at most 31 bytes so the terminator is preserved. */
	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	usb3->start_to_connect = false;
	if (usb3->workaround_for_vbus && usb3->forced_b_device &&
	    !strncmp(buf, "2", 1))
		usb3->start_to_connect = true;
	else if (!strncmp(buf, "1", 1))
		usb3->forced_b_device = true;
	else
		usb3->forced_b_device = false;

	if (usb3->workaround_for_vbus)
		usb3_disconnect(usb3);

	/* Let this driver call usb3_connect() if needed */
	usb3_check_id(usb3);

	return count;
}
/* File operations for the debugfs "b_device" node (seq_file based). */
static const struct file_operations renesas_usb3_b_device_fops = {
	.open =		renesas_usb3_b_device_open,
	.write =	renesas_usb3_b_device_write,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};
/* Create <debugfs>/usb/<dev>/b_device for forcing peripheral mode. */
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
				      struct device *dev)
{
	struct dentry *root;

	root = debugfs_create_dir(dev_name(dev), usb_debug_root);
	usb3->dentry = root;

	debugfs_create_file("b_device", 0644, root, usb3,
			    &renesas_usb3_b_device_fops);
}
/*------- platform_driver ------------------------------------------------*/
/*
 * Driver removal: tear down in reverse order of probe — debugfs, sysfs
 * attribute, role-switch worker and registration, UDC, reset line, PRD
 * DMA buffers, the pre-allocated ep0 request, and finally runtime PM.
 */
static void renesas_usb3_remove(struct platform_device *pdev)
{
	struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);

	debugfs_remove_recursive(usb3->dentry);
	device_remove_file(&pdev->dev, &dev_attr_role);

	cancel_work_sync(&usb3->role_work);
	usb_role_switch_unregister(usb3->role_sw);

	usb_del_gadget_udc(&usb3->gadget);
	reset_control_assert(usb3->usbp_rstc);
	renesas_usb3_dma_free_prd(usb3, &pdev->dev);

	__renesas_usb3_ep_free_request(usb3->ep0_req);
	pm_runtime_disable(&pdev->dev);
}
/*
 * Allocate and initialize the endpoint array.  The endpoint count is
 * derived from the available pipe RAM (both directions share the RAM,
 * hence the *2), capped at the hardware pipe limit, plus one for ep0.
 * Returns 0 or -ENOMEM.
 */
static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
				const struct renesas_usb3_priv *priv)
{
	struct renesas_usb3_ep *usb3_ep;
	int i;

	/* calculate num_usb3_eps from renesas_usb3_priv */
	usb3->num_usb3_eps = priv->ramsize_per_ramif * priv->num_ramif * 2 /
			     priv->ramsize_per_pipe + 1;

	if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES(usb3))
		usb3->num_usb3_eps = USB3_MAX_NUM_PIPES(usb3);

	usb3->usb3_ep = devm_kcalloc(dev,
				     usb3->num_usb3_eps, sizeof(*usb3_ep),
				     GFP_KERNEL);
	if (!usb3->usb3_ep)
		return -ENOMEM;

	dev_dbg(dev, "%s: num_usb3_eps = %d\n", __func__, usb3->num_usb3_eps);
	/*
	 * This driver prepares pipes as follows:
	 *  - odd pipes = IN pipe
	 *  - even pipes = OUT pipe (except pipe 0)
	 */
	usb3_for_each_ep(usb3_ep, usb3, i) {
		snprintf(usb3_ep->ep_name, sizeof(usb3_ep->ep_name), "ep%d", i);
		usb3_ep->usb3 = usb3;
		usb3_ep->num = i;
		usb3_ep->ep.name = usb3_ep->ep_name;
		usb3_ep->ep.ops = &renesas_usb3_ep_ops;
		INIT_LIST_HEAD(&usb3_ep->queue);
		INIT_LIST_HEAD(&usb3_ep->ep.ep_list);
		if (!i) {
			/* for control pipe */
			usb3->gadget.ep0 = &usb3_ep->ep;
			usb_ep_set_maxpacket_limit(&usb3_ep->ep,
						USB3_EP0_SS_MAX_PACKET_SIZE);
			usb3_ep->ep.caps.type_control = true;
			usb3_ep->ep.caps.dir_in = true;
			usb3_ep->ep.caps.dir_out = true;
			continue;
		}

		/* for bulk or interrupt pipe */
		usb_ep_set_maxpacket_limit(&usb3_ep->ep, ~0);
		list_add_tail(&usb3_ep->ep.ep_list, &usb3->gadget.ep_list);
		usb3_ep->ep.caps.type_bulk = true;
		usb3_ep->ep.caps.type_int = true;
		if (i & 1)
			usb3_ep->ep.caps.dir_in = true;
		else
			usb3_ep->ep.caps.dir_out = true;
	}

	return 0;
}
/*
 * Precompute each pipe's PN_RAMMAP value: pipes of a direction are spread
 * round-robin over the RAM interfaces, and the base address advances by
 * ramsize_per_pipe once every interface has been used.  Pipes that would
 * exceed the per-interface RAM are simply left unmapped (rammap_val = 0).
 */
static void renesas_usb3_init_ram(struct renesas_usb3 *usb3, struct device *dev,
				  const struct renesas_usb3_priv *priv)
{
	struct renesas_usb3_ep *usb3_ep;
	int i;
	u32 ramif[2], basead[2];	/* index 0 = for IN pipes */
	u32 *cur_ramif, *cur_basead;
	u32 val;

	memset(ramif, 0, sizeof(ramif));
	memset(basead, 0, sizeof(basead));

	/*
	 * This driver prepares pipes as follows:
	 *  - all pipes = the same size as "ramsize_per_pipe"
	 * Please refer to the "Method of Specifying RAM Mapping"
	 */
	usb3_for_each_ep(usb3_ep, usb3, i) {
		if (!i)
			continue;	/* out of scope if ep num = 0 */
		if (usb3_ep->ep.caps.dir_in) {
			cur_ramif = &ramif[0];
			cur_basead = &basead[0];
		} else {
			cur_ramif = &ramif[1];
			cur_basead = &basead[1];
		}

		if (*cur_basead > priv->ramsize_per_ramif)
			continue;	/* out of memory for IN or OUT pipe */

		/* calculate rammap_val */
		val = PN_RAMMAP_RAMIF(*cur_ramif);
		val |= usb3_calc_ramarea(priv->ramsize_per_pipe);
		val |= PN_RAMMAP_BASEAD(*cur_basead);
		usb3_ep->rammap_val = val;

		dev_dbg(dev, "ep%2d: val = %08x, ramif = %d, base = %x\n",
			i, val, *cur_ramif, *cur_basead);

		/* update current ramif */
		if (*cur_ramif + 1 == priv->num_ramif) {
			*cur_ramif = 0;
			*cur_basead += priv->ramsize_per_pipe;
		} else {
			(*cur_ramif)++;
		}
	}
}
/* Per-SoC pipe-RAM geometry; used by init_ep()/init_ram() at probe time. */
static const struct renesas_usb3_priv renesas_usb3_priv_gen3 = {
	.ramsize_per_ramif = SZ_16K,
	.num_ramif = 4,
	.ramsize_per_pipe = SZ_4K,
};

/* R-Car E3 class: same geometry, but needs the VBUS detection workaround. */
static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
	.ramsize_per_ramif = SZ_16K,
	.num_ramif = 4,
	.ramsize_per_pipe = SZ_4K,
	.workaround_for_vbus = true,
};

/* RZ/V2M: single RAM interface and DRD reset handling via the parent. */
static const struct renesas_usb3_priv renesas_usb3_priv_rzv2m = {
	.ramsize_per_ramif = SZ_16K,
	.num_ramif = 1,
	.ramsize_per_pipe = SZ_4K,
	.is_rzv2m = true,
};
/*
 * OF match table.  Specific SoC entries come before the generic
 * "renesas,rcar-gen3-usb3-peri" fallback so the most precise data wins.
 */
static const struct of_device_id usb3_of_match[] = {
	{
		.compatible = "renesas,r8a774c0-usb3-peri",
		.data = &renesas_usb3_priv_r8a77990,
	}, {
		.compatible = "renesas,r8a7795-usb3-peri",
		.data = &renesas_usb3_priv_gen3,
	}, {
		.compatible = "renesas,r8a77990-usb3-peri",
		.data = &renesas_usb3_priv_r8a77990,
	}, {
		.compatible = "renesas,rzv2m-usb3-peri",
		.data = &renesas_usb3_priv_rzv2m,
	}, {
		.compatible = "renesas,rcar-gen3-usb3-peri",
		.data = &renesas_usb3_priv_gen3,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, usb3_of_match);
/* Cable states reported through the extcon device. */
static const unsigned int renesas_usb3_cable[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_NONE,
};

/*
 * Role switch description; .fwnode and .driver_data are filled in at
 * probe time (hence not const-qualified despite being file-scope).
 */
static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
	.set = renesas_usb3_role_switch_set,
	.get = renesas_usb3_role_switch_get,
	.allow_userspace_control = true,
};
/*
 * Probe: map registers, build the endpoint/RAM layout, request IRQs
 * (plus the DRD IRQ on RZ/V2M), register extcon, allocate ep0 and PRD
 * buffers, take the optional PHY and reset, then register the UDC, the
 * sysfs "role" attribute, the role switch and debugfs.  The error labels
 * unwind strictly in reverse acquisition order.
 */
static int renesas_usb3_probe(struct platform_device *pdev)
{
	struct renesas_usb3 *usb3;
	int irq, ret;
	const struct renesas_usb3_priv *priv;

	priv = of_device_get_match_data(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
	if (!usb3)
		return -ENOMEM;

	usb3->is_rzv2m = priv->is_rzv2m;

	usb3->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(usb3->reg))
		return PTR_ERR(usb3->reg);

	platform_set_drvdata(pdev, usb3);
	spin_lock_init(&usb3->lock);

	usb3->gadget.ops = &renesas_usb3_gadget_ops;
	usb3->gadget.name = udc_name;
	usb3->gadget.max_speed = USB_SPEED_SUPER;
	INIT_LIST_HEAD(&usb3->gadget.ep_list);
	ret = renesas_usb3_init_ep(usb3, &pdev->dev, priv);
	if (ret < 0)
		return ret;
	renesas_usb3_init_ram(usb3, &pdev->dev, priv);

	ret = devm_request_irq(&pdev->dev, irq, renesas_usb3_irq, 0,
			       dev_name(&pdev->dev), usb3);
	if (ret < 0)
		return ret;

	if (usb3->is_rzv2m) {
		/* The OTG/DRD block lives in the parent device on RZ/V2M. */
		struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent);

		usb3->drd_reg = ddata->reg;
		ret = devm_request_irq(&pdev->dev, ddata->drd_irq,
				       renesas_usb3_otg_irq, 0,
				       dev_name(&pdev->dev), usb3);
		if (ret < 0)
			return ret;
	}

	INIT_WORK(&usb3->extcon_work, renesas_usb3_extcon_work);
	usb3->extcon = devm_extcon_dev_allocate(&pdev->dev, renesas_usb3_cable);
	if (IS_ERR(usb3->extcon))
		return PTR_ERR(usb3->extcon);

	ret = devm_extcon_dev_register(&pdev->dev, usb3->extcon);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register extcon\n");
		return ret;
	}

	/* for ep0 handling */
	usb3->ep0_req = __renesas_usb3_ep_alloc_request(GFP_KERNEL);
	if (!usb3->ep0_req)
		return -ENOMEM;

	ret = renesas_usb3_dma_alloc_prd(usb3, &pdev->dev);
	if (ret < 0)
		goto err_alloc_prd;

	/*
	 * This is optional. So, if this driver cannot get a phy,
	 * this driver will not handle a phy anymore.
	 */
	usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
	if (IS_ERR(usb3->phy)) {
		ret = PTR_ERR(usb3->phy);
		goto err_add_udc;
	}

	usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
								 NULL);
	if (IS_ERR(usb3->usbp_rstc)) {
		ret = PTR_ERR(usb3->usbp_rstc);
		goto err_add_udc;
	}

	reset_control_deassert(usb3->usbp_rstc);

	pm_runtime_enable(&pdev->dev);
	ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
	if (ret < 0)
		goto err_reset;

	ret = device_create_file(&pdev->dev, &dev_attr_role);
	if (ret < 0)
		goto err_dev_create;

	if (device_property_read_bool(&pdev->dev, "usb-role-switch")) {
		usb3->role_sw_by_connector = true;
		renesas_usb3_role_switch_desc.fwnode = dev_fwnode(&pdev->dev);
	}

	renesas_usb3_role_switch_desc.driver_data = usb3;

	INIT_WORK(&usb3->role_work, renesas_usb3_role_work);
	usb3->role_sw = usb_role_switch_register(&pdev->dev,
					&renesas_usb3_role_switch_desc);
	if (!IS_ERR(usb3->role_sw)) {
		usb3->host_dev = usb_of_get_companion_dev(&pdev->dev);
		if (!usb3->host_dev) {
			/* If not found, this driver will not use a role sw */
			usb_role_switch_unregister(usb3->role_sw);
			usb3->role_sw = NULL;
		}
	} else {
		usb3->role_sw = NULL;
	}

	usb3->workaround_for_vbus = priv->workaround_for_vbus;

	renesas_usb3_debugfs_init(usb3, &pdev->dev);

	dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");

	return 0;

err_dev_create:
	usb_del_gadget_udc(&usb3->gadget);

err_reset:
	reset_control_assert(usb3->usbp_rstc);

err_add_udc:
	renesas_usb3_dma_free_prd(usb3, &pdev->dev);

err_alloc_prd:
	__renesas_usb3_ep_free_request(usb3->ep0_req);

	return ret;
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: only act when a gadget driver is bound; stop the
 * controller, power down the optional PHY and drop the runtime-PM ref.
 */
static int renesas_usb3_suspend(struct device *dev)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);

	/* Not started */
	if (!usb3->driver)
		return 0;

	renesas_usb3_stop_controller(usb3);
	if (usb3->phy)
		phy_exit(usb3->phy);
	pm_runtime_put(dev);

	return 0;
}
/*
 * System resume: mirror of renesas_usb3_suspend() — bring the PHY back,
 * reacquire runtime PM and reinitialize the controller.
 */
static int renesas_usb3_resume(struct device *dev)
{
	struct renesas_usb3 *usb3 = dev_get_drvdata(dev);

	/* Not started */
	if (!usb3->driver)
		return 0;

	if (usb3->phy)
		phy_init(usb3->phy);
	pm_runtime_get_sync(dev);
	renesas_usb3_init_controller(usb3);

	return 0;
}
#endif
/* PM ops are compiled out when CONFIG_PM_SLEEP is not set. */
static SIMPLE_DEV_PM_OPS(renesas_usb3_pm_ops, renesas_usb3_suspend,
			renesas_usb3_resume);

static struct platform_driver renesas_usb3_driver = {
	.probe		= renesas_usb3_probe,
	.remove_new	= renesas_usb3_remove,
	.driver		= {
		.name =	udc_name,
		.pm		= &renesas_usb3_pm_ops,
		.of_match_table = usb3_of_match,
	},
};
module_platform_driver(renesas_usb3_driver);

MODULE_DESCRIPTION("Renesas USB3.0 Peripheral driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>");
MODULE_ALIAS("platform:renesas_usb3");
| linux-master | drivers/usb/gadget/udc/renesas_usb3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
* Copyright (C) 2000-2002 Lineo
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software ([email protected])
*/
/*
* This device has ep0 and three semi-configurable bulk/interrupt endpoints.
*
* - Endpoint numbering is fixed: ep{1,2,3}-bulk
* - Gadget drivers can choose ep maxpacket (8/16/32/64)
* - Gadget drivers can choose direction (IN, OUT)
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include "goku_udc.h"
#define DRIVER_DESC "TC86C001 USB Device Controller"
#define DRIVER_VERSION "30-Oct 2003"
static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;
MODULE_AUTHOR("[email protected]");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* IN dma behaves ok under testing, though the IN-dma abort paths don't
* seem to behave quite as expected. Used by default.
*
* OUT dma documents design problems handling the common "short packet"
* transfer termination policy; it couldn't be enabled by default, even
* if the OUT-dma abort problems had a resolution.
*/
static unsigned use_dma = 1;
#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
* 0 to disable dma
* 1 to use IN dma only (normal operation)
* 2 to use IN and OUT dma
*/
module_param(use_dma, uint, S_IRUGO);
#endif
/*-------------------------------------------------------------------------*/
static void nuke(struct goku_ep *, int status);
/*
 * Write an endpoint command to the controller's Command register.  The
 * delay gives the controller time to act on the command before the
 * caller issues further endpoint register accesses (timing per the
 * TC86C001 behavior this driver was written against).
 */
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}
/*
 * usb_ep_ops .enable for ep1..ep3: validate the descriptor (bulk or
 * interrupt only; maxpacket must be 8/16/32/64; endpoint number must
 * match), pick PIO vs DMA per direction and the use_dma policy, then
 * program the buffering, BCS and mode registers and reset the endpoint.
 */
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != usb_endpoint_num(desc))
		return -EINVAL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = get_unaligned_le16(&desc->wMaxPacketSize);
	/* encode maxpacket as a 2-bit field: 8->0, 16->1, 32->2, 64->3 */
	switch (max) {
	case 64:
		mode++;
		fallthrough;
	case 32:
		mode++;
		fallthrough;
	case 16:
		mode++;
		fallthrough;
	case 8:
		mode <<= 3;
		break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->ep.desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}
/*
 * Return an endpoint to its power-on state: invalidate it, mask its
 * interrupts, clear its buffering/BCS bits and reset any active DMA
 * direction.  @regs may be NULL (e.g. device already gone), in which
 * case only the software state is reset.  Called with the lock held.
 */
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		/* flush the posted write before touching per-ep registers */
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32				tmp;

			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}
/*
 * usb_ep_ops .disable: fail all queued requests with -ESHUTDOWN and
 * return the endpoint hardware to its reset state.  Refused while the
 * device is suspended (EP0_SUSPEND).
 */
static int goku_ep_disable(struct usb_ep *_ep)
{
	struct goku_ep	*ep;
	struct goku_udc	*dev;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;
	dev = ep->dev;
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "disable %s\n", _ep->name);

	spin_lock_irqsave(&dev->lock, flags);
	nuke(ep, -ESHUTDOWN);
	ep_reset(dev->regs, ep);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops .alloc_request: allocate a zeroed goku_request and return
 * the embedded usb_request, or NULL on bad endpoint / allocation failure.
 */
static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request *req;

	if (!_ep)
		return NULL;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/*
 * usb_ep_ops .free_request: release a request from goku_alloc_request().
 * Warns if the request is still linked on an endpoint queue.
 */
static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct goku_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/*-------------------------------------------------------------------------*/
/*
 * Complete one request: unlink it, fix up its status, unmap DMA if used,
 * and hand it back to the gadget driver.  The device lock is dropped
 * around the completion callback; ep->stopped is forced to 1 meanwhile
 * so the callback cannot restart queue processing, then restored.
 */
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
/*
 * Copy up to @max bytes of the request's remaining payload into the TX
 * FIFO (one byte per 32-bit register write) and advance req.actual.
 * Returns the number of bytes written.
 */
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned len, i;

	len = min(req->req.length - req->req.actual, max);
	req->req.actual += len;
	for (i = 0; i < len; i++)
		writel(buf[i], fifo);
	return len;
}
// return: 0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc *dev = ep->dev;
u32 tmp;
u8 *buf;
unsigned count;
int is_last;
tmp = readl(&dev->regs->DataSet);
buf = req->req.buf + req->req.actual;
prefetch(buf);
dev = ep->dev;
if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
return -EL2HLT;
/* NOTE: just single-buffered PIO-IN for now. */
if (unlikely((tmp & DATASET_A(ep->num)) != 0))
return 0;
/* clear our "packet available" irq */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
/* last packet often short (sometimes a zlp, especially on ep0) */
if (unlikely(count != ep->ep.maxpacket)) {
writel(~(1<<ep->num), &dev->regs->EOP);
if (ep->num == 0) {
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
}
is_last = 1;
} else {
if (likely(req->req.length != req->req.actual)
|| req->req.zero)
is_last = 0;
else
is_last = 1;
}
#if 0 /* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
ep->ep.name, count, is_last ? "/last" : "",
req->req.length - req->req.actual, req);
#endif
/* requests complete when all IN data is in the FIFO,
* or sometimes later, if a zlp was needed.
*/
if (is_last) {
done(ep, req, 0);
return 1;
}
return 0;
}
/*
 * PIO receive path.  Drains one (ep0/ep3) or up to two (ep1/ep2, which
 * are double-buffered) packets per call.  Overflow beyond the request
 * buffer is discarded with -EOVERFLOW.  A short packet or a full buffer
 * completes the request; with double buffering the next queued request
 * is started immediately ("goto top") to empty the second buffer.
 * Returns 1 when a request completed, 0 otherwise, negative on error.
 */
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}
/* Unmask the per-endpoint DATASET interrupt for PIO transfers. */
static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET(epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}
/* Mask the per-endpoint DATASET interrupt again. */
static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET(epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}
/*
 * Advance the PIO engine: feed the head-of-queue request to the FIFO
 * handler matching the endpoint direction.  No-op on an empty queue.
 */
static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request	*req;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	if (ep->is_in)
		write_fifo(ep, req);
	else
		read_fifo(ep, req);
}
/*-------------------------------------------------------------------------*/
/*
 * Program and arm a master DMA transfer for @req.  IN transfers choose
 * the EOPB (end-of-packet) policy based on whether the transfer ends on
 * a maxpacket boundary or needs a zlp; OUT transfers run with the
 * nonstandard timeout disabled (see the comment below).  The matching
 * completion interrupt is enabled before the engine is started.
 *
 * return: 0 = q running, 1 = q stopped, negative = errno
 */
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master |= MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master |= MST_RD_ENA | MST_EOPB_ENA;
		else
			master |= MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}
/*
 * DMA completion handling: if the engine is idle, read the current DMA
 * address to compute the byte count (current - start + 1), complete the
 * head request, and either start the next queued request or mask the
 * DMA completion interrupts when the queue is empty.
 */
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		/* engine still busy: wait for the next interrupt */
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	/* registers hold the last byte's address, inclusive */
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}
/*
 * Abort the in-flight DMA on @ep, recording the partial transfer length
 * into the head request.  Caller holds dev->lock and has checked the
 * queue is not empty.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem *regs = ep->dev->regs;
	struct goku_request *req;
	u32 curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		/* collapse the window to the current address, then reset */
		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}
/*-------------------------------------------------------------------------*/
/*
 * usb_ep_ops.queue: submit a request on an endpoint.  Validates the
 * request, maps the buffer for DMA when the endpoint uses DMA, and
 * kickstarts I/O if the queue was idle; otherwise the irq handlers
 * advance the queue later.  Returns 0 or a negative errno.
 */
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, &req->req,
				ep->is_in);
		if (status)
			return status;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		if (unlikely(status != 0)) {
			/* status > 0: don't queue it; report success */
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != NULL))
		list_add_tail(&req->queue, &ep->queue);

	/* enable this endpoint's DATASET irq so pio can be advanced */
	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}
/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
struct goku_request *req;
ep->stopped = 1;
if (list_empty(&ep->queue))
return;
if (ep->dma)
abort_dma(ep, status);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct goku_request, queue);
done(ep, req, status);
}
}
/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req = NULL, *iter;
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
return -EINVAL;
dev = ep->dev;
if (!dev->driver)
return -ESHUTDOWN;
/* we can't touch (dma) registers when suspended */
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
ep->is_in ? "IN" : "OUT",
ep->dma ? "dma" : "pio",
_req);
spin_lock_irqsave(&dev->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry(iter, &ep->queue, queue) {
if (&iter->req != _req)
continue;
req = iter;
break;
}
if (!req) {
spin_unlock_irqrestore (&dev->lock, flags);
return -EINVAL;
}
if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
abort_dma(ep, -ECONNRESET);
done(ep, req, -ECONNRESET);
dma_advance(dev, ep);
} else if (!list_empty(&req->queue))
done(ep, req, -ECONNRESET);
else
req = NULL;
spin_unlock_irqrestore(&dev->lock, flags);
return req ? 0 : -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/*
 * Clear a halt (stall) condition on a non-ep0 endpoint, reset its data
 * toggle to DATA0, and restart any queued i/o that the halt had stopped.
 * Caller holds dev->lock.
 */
static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request	*req;

			if (list_empty(&ep->queue))
				return;
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}
/*
 * usb_ep_ops.set_halt: stall (value != 0) or un-stall an endpoint.
 * ep0 may only be stalled, never explicitly un-stalled.  Refuses to
 * stall while requests are queued or an IN fifo still holds data.
 */
static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->ep.desc) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
				& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		/* flush the posted command write */
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}
/*
 * usb_ep_ops.fifo_status: report the byte count held in an OUT fifo
 * (sum of the A and B packet buffers).  IN fifo depth isn't reported
 * sanely by this hardware.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
	return size;
}
/*
 * usb_ep_ops.fifo_flush: discard any data held in the endpoint fifo.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->ep.desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
/* per-endpoint operations exported to the gadget/UDC core */
static const struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/* usb_gadget_ops.get_frame: frame number isn't available on this device */
static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}
/*
 * usb_gadget_ops.match_ep: steer interrupt endpoints to ep3 (single
 * buffering is enough) and bulk IN endpoints to ep2 (DMA may be
 * available); everything else falls back to the core's default search.
 */
static struct usb_ep *goku_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct goku_udc	*dev = to_goku_udc(g);
	struct usb_ep	*found = NULL;

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
		/* single buffering is enough */
		found = &dev->ep[3].ep;
	} else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_BULK &&
		   usb_endpoint_dir_in(desc)) {
		/* DMA may be available */
		found = &dev->ep[2].ep;
	}

	if (found && usb_gadget_ep_match_desc(g, found, desc, ep_comp))
		return found;
	return NULL;
}
static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int goku_udc_stop(struct usb_gadget *g);

/* gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	.udc_start	= goku_udc_start,
	.udc_stop	= goku_udc_stop,
	.match_ep	= goku_match_ep,
	// no remote wakeup
	// not selfpowered
};
/*-------------------------------------------------------------------------*/
/* describe the module's use_dma setting for log/proc output */
static inline const char *dmastr(void)
{
	switch (use_dma) {
	case 0:
		return "(dma disabled)";
	case 2:
		return "(dma IN and OUT)";
	default:
		return "(dma IN)";
	}
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
/* decode an irq mask (or status word — same layout) into the seq_file */
static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
{
	/* int_status is the same format ... */
	seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		label, mask,
		(mask & INT_PWRDETECT) ? " power" : "",
		(mask & INT_SYSERROR) ? " sys" : "",
		(mask & INT_MSTRDEND) ? " in-dma" : "",
		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		(mask & INT_MSTWREND) ? " out-dma" : "",
		(mask & INT_MSTWRSET) ? " wrset" : "",
		(mask & INT_ERR) ? " err" : "",
		(mask & INT_SOF) ? " sof" : "",

		(mask & INT_EP3NAK) ? " ep3nak" : "",
		(mask & INT_EP2NAK) ? " ep2nak" : "",
		(mask & INT_EP1NAK) ? " ep1nak" : "",
		(mask & INT_EP3DATASET) ? " ep3" : "",

		(mask & INT_EP2DATASET) ? " ep2" : "",
		(mask & INT_EP1DATASET) ? " ep1" : "",
		(mask & INT_STATUSNAK) ? " ep0snak" : "",
		(mask & INT_STATUS) ? " ep0status" : "",

		(mask & INT_SETUP) ? " setup" : "",
		(mask & INT_ENDPOINT0) ? " ep0" : "",
		(mask & INT_USBRESET) ? " reset" : "",
		(mask & INT_SUSPEND) ? " suspend" : "");
}
/* human-readable name for an ep0 state, for proc output */
static const char *udc_ep_state(enum ep0state state)
{
	const char *label = "ep0_?";

	switch (state) {
	case EP0_DISCONNECT:
		label = "ep0_disconnect";
		break;
	case EP0_IDLE:
		label = "ep0_idle";
		break;
	case EP0_IN:
		label = "ep0_in";
		break;
	case EP0_OUT:
		label = "ep0_out";
		break;
	case EP0_STATUS:
		label = "ep0_status";
		break;
	case EP0_STALL:
		label = "ep0_stall";
		break;
	case EP0_SUSPEND:
		label = "ep0_suspend";
		break;
	}
	return label;
}
/* human-readable name for an EPxSTATUS state field, for proc output */
static const char *udc_ep_status(u32 status)
{
	const char *label = "?";

	switch (status & EPxSTATUS_EP_MASK) {
	case EPxSTATUS_EP_READY:
		label = "ready";
		break;
	case EPxSTATUS_EP_DATAIN:
		label = "packet";
		break;
	case EPxSTATUS_EP_FULL:
		label = "full";
		break;
	case EPxSTATUS_EP_TX_ERR:	/* host will retry */
		label = "tx_err";
		break;
	case EPxSTATUS_EP_RX_ERR:
		label = "rx_err";
		break;
	case EPxSTATUS_EP_BUSY:		/* ep0 only */
		label = "busy";
		break;
	case EPxSTATUS_EP_STALL:
		label = "stall";
		break;
	case EPxSTATUS_EP_INVALID:	/* these "can't happen" */
		label = "invalid";
		break;
	}
	return label;
}
/*
 * seq_file show routine for the driver/udc proc node: dumps device
 * status, irq masks, dma state, and per-endpoint queues.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static int udc_proc_read(struct seq_file *m, void *v)
{
	struct goku_udc			*dev = m->private;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned long			flags;
	int				i, is_usb_connected;
	u32				tmp;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	seq_printf(m,
		   "%s - %s\n"
		   "%s version: %s %s\n"
		   "Gadget driver: %s\n"
		   "Host %s, %s\n"
		   "\n",
		   pci_name(dev->pdev), driver_desc,
		   driver_name, DRIVER_VERSION, dmastr(),
		   dev->driver ? dev->driver->driver.name : "(none)",
		   is_usb_connected
			   ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			   : "disconnected",
		   udc_ep_state(dev->ep0state));

	dump_intmask(m, "int_status", readl(&regs->int_status));
	dump_intmask(m, "int_enable", readl(&regs->int_enable));

	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
		   dev->irqs, readl(&regs->DataSet),
		   readl(&regs->EPxSingle), readl(&regs->EPxBCS),
		   readl(&regs->UsbState),
		   readl(&regs->address));
	if (seq_has_overflowed(m))
		goto done;

	tmp = readl(&regs->dma_master);
	seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
		   tmp,
		   (tmp & MST_EOPB_DIS) ? " eopb-" : "",
		   (tmp & MST_EOPB_ENA) ? " eopb+" : "",
		   (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		   (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		   (tmp & MST_RD_EOPB) ? " eopb" : "",
		   (tmp & MST_RD_RESET) ? " in_reset" : "",
		   (tmp & MST_WR_RESET) ? " out_reset" : "",
		   (tmp & MST_RD_ENA) ? " IN" : "",

		   (tmp & MST_WR_ENA) ? " OUT" : "",
		   (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
	if (seq_has_overflowed(m))
		goto done;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;

		if (i && !ep->ep.desc)
			continue;

		tmp = readl(ep->reg_status);
		seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
			   ep->ep.name,
			   ep->is_in ? "in" : "out",
			   ep->ep.maxpacket,
			   ep->dma ? "dma" : "pio",
			   ep->irqs,
			   tmp, udc_ep_status(tmp),
			   (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			   (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			   (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			   (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
		if (seq_has_overflowed(m))
			goto done;

		if (list_empty(&ep->queue)) {
			seq_puts(m, "\t(nothing queued)\n");
			if (seq_has_overflowed(m))
				goto done;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* head of an active dma queue: show live progress */
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			seq_printf(m, "\treq %p len %u/%u buf %p\n",
				   &req->req, tmp, req->req.length,
				   req->req.buf);
			if (seq_has_overflowed(m))
				goto done;
		}
	}

done:
	local_irq_restore(flags);
	return 0;
}
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/*
 * (Re)initialize the software view of the gadget and all four endpoints
 * to their power-on defaults; called at probe and after reset.
 */
static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep	*ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];

		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);

		ep_reset(NULL, ep);

		if (i == 0)
			ep->ep.caps.type_control = true;
		else
			ep->ep.caps.type_bulk = true;

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}

	/* ep0 has no mode register, a fixed max packet, and isn't in
	 * the generic ep_list (it's reachable via gadget.ep0 instead)
	 */
	dev->ep[0].reg_mode = NULL;
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
	list_del_init (&dev->ep[0].ep.ep_list);
}
/*
 * Quiesce the controller: power detect off, all irqs masked, then
 * deassert reset while leaving D+ at hi-Z (no pullup).
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}
/*
 * Bring the controller up for enumeration: reset hw and sw state,
 * program the request-handling mode, enable the pullup, and unmask
 * device-wide plus ep0 irqs.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;
	unsigned i;

	VDBG(dev, "%s\n", __func__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}
/*
 * Enable the device side: if VBUS is already present, begin enumeration
 * right away; otherwise wait for the power-detect irq to fire.
 */
static void udc_enable(struct goku_udc *dev)
{
	/* start enumeration now, or after power detect irq */
	if (readl(&dev->regs->power_detect) & PW_DETECT)
		ep0_start(dev);
	else {
		DBG(dev, "%s\n", __func__);
		dev->int_enable = INT_PWRDETECT;
		writel(dev->int_enable, &dev->regs->int_enable);
	}
}
/*-------------------------------------------------------------------------*/
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*/
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
/* usb_gadget_ops.udc_start: bind a function driver and power things up */
static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct goku_udc	*udc = to_goku_udc(g);

	/* remember the bound function driver */
	udc->driver = driver;

	/*
	 * Turn on host detection and ep0 handling; from here we're
	 * ready for set_configuration as well as eventual disconnect.
	 */
	udc_enable(udc);

	return 0;
}
/*
 * Quiesce the hardware and flush every endpoint queue; re-enable the
 * device afterwards when a function driver is still bound.
 */
static void stop_activity(struct goku_udc *dev)
{
	unsigned	i;

	DBG (dev, "%s\n", __func__);

	/* disconnect gadget driver after quiesceing hw and the driver */
	udc_reset (dev);
	for (i = 0; i < 4; i++)
		nuke(&dev->ep [i], -ESHUTDOWN);

	if (dev->driver)
		udc_enable(dev);
}
/* usb_gadget_ops.udc_stop: unbind the function driver and quiesce hw */
static int goku_udc_stop(struct usb_gadget *g)
{
	struct goku_udc	*udc = to_goku_udc(g);
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	stop_activity(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Decode a SETUP packet from the per-byte hardware registers, handle
 * CLEAR_FEATURE in software (to synchronize restarts after bulk IN
 * stalls), and delegate everything else to the gadget driver.
 * Caller (the irq handler) holds dev->lock.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct usb_ctrlrequest		ctrl;
	int				tmp;

	/* read SETUP packet and enter DATA stage */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	writel(0, &regs->SetupRecv);

	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE:  CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls.  data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 ||
				    (!dev->ep[tmp].ep.desc && tmp != 0))
					goto stall;
				/* direction bit must match the endpoint */
				if (ctrl.wIndex & cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
			&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
			ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}
/* drop one source from the pending set, clear its latched status bit,
 * and note the irq as handled
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
#define ACK(irqbit) { \
	stat &= ~irqbit; \
	writel(~irqbit, &regs->int_status); \
	handled = 1; \
	}
/*
 * Interrupt handler.  Dispatches device-wide events (system error,
 * connect/disconnect, suspend/resume, reset), the ep0 setup/data/status
 * stages, dma completions, and pio DATASET irqs; rescans while sources
 * stay pending (up to 5 extra passes).
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static irqreturn_t goku_irq(int irq, void *_dev)
{
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct goku_ep			*ep;
	u32				stat, handled = 0;
	unsigned			i, rescans = 5;

	spin_lock(&dev->lock);

rescan:
	stat = readl(&regs->int_status) & dev->int_enable;
	if (!stat)
		goto done;
	dev->irqs++;

	/* device-wide irqs */
	if (unlikely(stat & INT_DEVWIDE)) {
		if (stat & INT_SYSERROR) {
			ERROR(dev, "system error\n");
			stop_activity(dev);
			stat = 0;
			handled = 1;
			// FIXME have a neater way to prevent re-enumeration
			dev->driver = NULL;
			goto done;
		}
		if (stat & INT_PWRDETECT) {
			writel(~stat, &regs->int_status);
			if (readl(&dev->regs->power_detect) & PW_DETECT) {
				VDBG(dev, "connect\n");
				ep0_start(dev);
			} else {
				DBG(dev, "disconnect\n");
				if (dev->gadget.speed == USB_SPEED_FULL)
					stop_activity(dev);
				dev->ep0state = EP0_DISCONNECT;
				dev->int_enable = INT_DEVWIDE;
				writel(dev->int_enable, &dev->regs->int_enable);
			}
			stat = 0;
			handled = 1;
			goto done;
		}
		if (stat & INT_SUSPEND) {
			ACK(INT_SUSPEND);
			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
				switch (dev->ep0state) {
				case EP0_DISCONNECT:
				case EP0_SUSPEND:
					goto pm_next;
				default:
					break;
				}
				DBG(dev, "USB suspend\n");
				dev->ep0state = EP0_SUSPEND;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->suspend) {
					/* drop the lock around the callback */
					spin_unlock(&dev->lock);
					dev->driver->suspend(&dev->gadget);
					spin_lock(&dev->lock);
				}
			} else {
				if (dev->ep0state != EP0_SUSPEND) {
					DBG(dev, "bogus USB resume %d\n",
						dev->ep0state);
					goto pm_next;
				}
				DBG(dev, "USB resume\n");
				dev->ep0state = EP0_IDLE;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->resume) {
					spin_unlock(&dev->lock);
					dev->driver->resume(&dev->gadget);
					spin_lock(&dev->lock);
				}
			}
		}
pm_next:
		if (stat & INT_USBRESET) {		/* hub reset done */
			ACK(INT_USBRESET);
			INFO(dev, "USB reset done, gadget %s\n",
				dev->driver->driver.name);
		}
		// and INT_ERR on some endpoint's crc/bitstuff/... problem
	}

	/* progress ep0 setup, data, or status stages.
	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
	 */
	if (stat & INT_SETUP) {
		ACK(INT_SETUP);
		dev->ep[0].irqs++;
		ep0_setup(dev);
	}
	if (stat & INT_STATUSNAK) {
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
	if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
	}

	/* dma completion */
	if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
		// reset dma? then dma_advance()
	}

	/* pio */
	for (i = 1; i < 4; i++) {
		u32		tmp = INT_EPxDATASET(i);

		if (!(stat & tmp))
			continue;
		ep = &dev->ep[i];
		pio_advance(ep);
		if (list_empty (&ep->queue))
			pio_irq_disable(dev, regs, i);
		stat &= ~tmp;
		handled = 1;
		ep->irqs++;
	}

	if (rescans--)
		goto rescan;

done:
	(void)readl(&regs->int_enable);
	spin_unlock(&dev->lock);
	if (stat)
		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
				readl(&regs->int_status), dev->int_enable);
	return IRQ_RETVAL(handled);
}
#undef ACK
/*-------------------------------------------------------------------------*/
/* device-core release callback: free the goku_udc allocated at probe */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
/* tear down the binding between this driver and the pci device */
static void goku_remove(struct pci_dev *pdev)
{
struct goku_udc *dev = pci_get_drvdata(pdev);
DBG(dev, "%s\n", __func__);
usb_del_gadget_udc(&dev->gadget);
BUG_ON(dev->driver);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
remove_proc_entry(proc_node_name, NULL);
#endif
if (dev->regs)
udc_reset(dev);
if (dev->got_irq)
free_irq(pdev->irq, dev);
if (dev->regs)
iounmap(dev->regs);
if (dev->got_region)
release_mem_region(pci_resource_start (pdev, 0),
pci_resource_len (pdev, 0));
if (dev->enabled)
pci_disable_device(pdev);
dev->regs = NULL;
INFO(dev, "unbind\n");
}
/* wrap this driver around the specified pci device, but
* don't respond over USB until a gadget driver binds to us.
*/
/*
 * PCI probe: allocate the device state, claim and map BAR 0, reset the
 * controller, hook the (shared) irq, and register with the UDC core.
 * On error, goku_remove() unwinds whatever had been set up.
 */
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct goku_udc		*dev = NULL;
	unsigned long		resource, len;
	void __iomem		*base = NULL;
	int			retval;

	if (!pdev->irq) {
		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
		retval = -ENODEV;
		goto err;
	}

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto err;
	}

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &goku_ops;
	dev->gadget.max_speed = USB_SPEED_FULL;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	retval = pci_enable_device(pdev);
	if (retval < 0) {
		DBG(dev, "can't enable, %d\n", retval);
		goto err;
	}
	dev->enabled = 1;

	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		DBG(dev, "controller already in use\n");
		retval = -EBUSY;
		goto err;
	}
	dev->got_region = 1;

	base = ioremap(resource, len);
	if (base == NULL) {
		DBG(dev, "can't map memory\n");
		retval = -EFAULT;
		goto err;
	}
	dev->regs = (struct goku_udc_regs __iomem *) base;

	INFO(dev, "%s\n", driver_desc);
	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

	/* init to known state, then setup irqs */
	udc_reset(dev);
	udc_reinit (dev);
	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
			driver_name, dev) != 0) {
		DBG(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto err;
	}
	dev->got_irq = 1;
	if (use_dma)
		pci_set_master(pdev);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	proc_create_single_data(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto err;

	return 0;

err:
	if (dev)
		goku_remove (pdev);
	/* gadget_release is not registered yet, kfree explicitly */
	kfree(dev);
	return retval;
}
/*-------------------------------------------------------------------------*/
/* match the Toshiba TC86C001 ("Goku-S") UDC by class + vendor/device id */
static const struct pci_device_id pci_ids[] = { {
	.class		= PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask	= ~0,
	.vendor		= 0x102f,		/* Toshiba */
	.device		= 0x0107,		/* this UDC */
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
/* PCI driver glue; module init/exit come from module_pci_driver() */
static struct pci_driver goku_pci_driver = {
	.name		= driver_name,
	.id_table	= pci_ids,

	.probe		= goku_probe,
	.remove		= goku_remove,

	/* FIXME add power management support */
};

module_pci_driver(goku_pci_driver);
| linux-master | drivers/usb/gadget/udc/goku_udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USBHS device controller driver Trace Support
*
* Copyright (C) 2023 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "cdns2-trace.h"
| linux-master | drivers/usb/gadget/udc/cdns2/cdns2-trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBHS-DEV driver.
*
* Copyright (C) 2023 Cadence Design Systems.
*
* Authors: Pawel Laszczak <[email protected]>
*/
#include <linux/usb/composite.h>
#include <asm/unaligned.h>
#include "cdns2-gadget.h"
#include "cdns2-trace.h"
/* software-side descriptor for ep0: control endpoint, 64-byte packets */
static struct usb_endpoint_descriptor cdns2_gadget_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes =	USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64)
};
/* map a control request's wIndex (ep number + direction) onto the
 * driver's flat eps[] index; ep0 always maps to slot 0
 */
static int cdns2_w_index_to_ep_index(u16 wIndex)
{
	int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;

	if (!ep_num)
		return 0;

	return ep_num * 2 + ((wIndex & USB_ENDPOINT_DIR_MASK) ? 1 : 0) - 1;
}
/* true when the hardware flags that a new SETUP packet has arrived */
static bool cdns2_check_new_setup(struct cdns2_device *pdev)
{
	u8 reg;

	reg = readb(&pdev->ep0_regs->cs);

	return !!(reg & EP0CS_CHGSET);
}
/*
 * Arm the ep0 ADMA ring with one transfer TRB — plus a trailing
 * zero-length-packet TRB when @zlp is set — then ring the doorbell.
 * (Fixes mojibake: "&regs" had been corrupted to "(R)s" by entity decoding.)
 */
static void cdns2_ep0_enqueue(struct cdns2_device *pdev, dma_addr_t dma_addr,
			      unsigned int length, int zlp)
{
	struct cdns2_adma_regs __iomem *regs = pdev->adma_regs;
	struct cdns2_endpoint *pep = &pdev->eps[0];
	struct cdns2_ring *ring = &pep->ring;

	ring->trbs[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
	ring->trbs[0].length = cpu_to_le32(TRB_LEN(length));

	if (zlp) {
		/* interrupt-on-completion moves to the zlp TRB */
		ring->trbs[0].control = cpu_to_le32(TRB_CYCLE |
						    TRB_TYPE(TRB_NORMAL));
		ring->trbs[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
		ring->trbs[1].length = cpu_to_le32(TRB_LEN(0));
		ring->trbs[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
						    TRB_TYPE(TRB_NORMAL));
	} else {
		ring->trbs[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
						    TRB_TYPE(TRB_NORMAL));
		ring->trbs[1].control = 0;
	}

	trace_cdns2_queue_trb(pep, ring->trbs);

	if (!pep->dir)
		writel(0, &pdev->ep0_regs->rxbc);

	cdns2_select_ep(pdev, pep->dir);

	writel(DMA_EP_STS_TRBERR, &regs->ep_sts);
	writel(pep->ring.dma, &regs->ep_traddr);

	trace_cdns2_doorbell_ep0(pep, readl(&regs->ep_traddr));

	writel(DMA_EP_CMD_DRDY, &regs->ep_cmd);
}
/*
 * Hand the current SETUP packet to the gadget driver's setup() callback,
 * dropping pdev->lock across the call since the driver may sleep/requeue.
 */
static int cdns2_ep0_delegate_req(struct cdns2_device *pdev)
{
	int ret;

	spin_unlock(&pdev->lock);
	ret = pdev->gadget_driver->setup(&pdev->gadget, &pdev->setup);
	spin_lock(&pdev->lock);

	return ret;
}
/*
 * Protocol-stall ep0 and abandon the current control transfer; any
 * data-stage request in flight is completed with -ECONNRESET.
 */
static void cdns2_ep0_stall(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep = &pdev->eps[0];
	struct cdns2_request *preq;

	preq = cdns2_next_preq(&pep->pending_list);
	set_reg_bit_8(&pdev->ep0_regs->cs, EP0CS_DSTALL);

	if (pdev->ep0_stage == CDNS2_DATA_STAGE && preq)
		cdns2_gadget_giveback(pep, preq, -ECONNRESET);
	else if (preq)
		list_del_init(&preq->list);

	pdev->ep0_stage = CDNS2_SETUP_STAGE;
	pep->ep_state |= EP_STALLED;
}
/*
 * Finish the control transfer: drop any pending request from the list,
 * return to the SETUP stage, and let the hardware complete the
 * handshake (clear the NAK on the status stage).
 */
static void cdns2_status_stage(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep = &pdev->eps[0];
	struct cdns2_request *preq;

	preq = cdns2_next_preq(&pep->pending_list);
	if (preq)
		list_del_init(&preq->list);

	pdev->ep0_stage = CDNS2_SETUP_STAGE;
	writeb(EP0CS_HSNAK, &pdev->ep0_regs->cs);
}
/*
 * Handle USB_REQ_SET_CONFIGURATION.
 * Valid only at or above the ADDRESS state; the request is delegated to
 * the gadget driver, and configuration 0 drops the device back to the
 * ADDRESS state.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int cdns2_req_ep0_set_configuration(struct cdns2_device *pdev,
					   struct usb_ctrlrequest *ctrl_req)
{
	enum usb_device_state state = pdev->gadget.state;
	u32 config = le16_to_cpu(ctrl_req->wValue);
	int ret;

	if (state < USB_STATE_ADDRESS) {
		dev_err(pdev->dev, "Set Configuration - bad device state\n");
		return -EINVAL;
	}

	ret = cdns2_ep0_delegate_req(pdev);
	if (ret)
		return ret;

	trace_cdns2_device_state(config ? "configured" : "addressed");

	if (!config)
		usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);

	return 0;
}
/*
 * Handle USB_REQ_SET_ADDRESS.
 * The controller acknowledges SET_ADDRESS itself, so the driver only
 * validates the request, caches the address latched in the fnaddr
 * register and updates the gadget state machine.
 *
 * Return: 0 on success, -EINVAL for an out-of-range address or when
 * called from the CONFIGURED state.
 */
static int cdns2_req_ep0_set_address(struct cdns2_device *pdev, u32 addr)
{
	enum usb_device_state device_state = pdev->gadget.state;
	u8 reg;

	if (addr > USB_DEVICE_MAX_ADDRESS) {
		dev_err(pdev->dev,
			"Device address (%d) cannot be greater than %d\n",
			addr, USB_DEVICE_MAX_ADDRESS);
		return -EINVAL;
	}

	if (device_state == USB_STATE_CONFIGURED) {
		dev_err(pdev->dev,
			"can't set_address from configured state\n");
		return -EINVAL;
	}

	/* Read back the address the hardware actually latched. */
	reg = readb(&pdev->usb_regs->fnaddr);
	pdev->dev_address = reg;

	usb_gadget_set_state(&pdev->gadget,
			     (addr ? USB_STATE_ADDRESS : USB_STATE_DEFAULT));

	trace_cdns2_device_state(addr ? "addressed" : "default");

	return 0;
}
/*
 * Handle USB_REQ_GET_STATUS.
 * Builds the 2-byte status response in the internal ep0 request buffer
 * and queues it on EP0. Interface-recipient requests are delegated to
 * the gadget driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int cdns2_req_ep0_handle_status(struct cdns2_device *pdev,
				       struct usb_ctrlrequest *ctrl)
{
	struct cdns2_endpoint *pep;
	__le16 *response_pkt;
	u16 status = 0;
	int ep_sts;
	u32 recip;

	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		status = pdev->gadget.is_selfpowered;
		status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
		break;
	case USB_RECIP_INTERFACE:
		return cdns2_ep0_delegate_req(pdev);
	case USB_RECIP_ENDPOINT:
		/* ep_sts holds the internal endpoint array index here. */
		ep_sts = cdns2_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
		pep = &pdev->eps[ep_sts];

		if (pep->ep_state & EP_STALLED)
			status = BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	put_unaligned_le16(status, (__le16 *)pdev->ep0_preq.request.buf);
	/* sizeof(*response_pkt) == 2: the size of a GET_STATUS reply. */
	cdns2_ep0_enqueue(pdev, pdev->ep0_preq.request.dma,
			  sizeof(*response_pkt), 0);

	return 0;
}
/*
 * Handle {Set,Clear}_Feature with a device recipient.
 * Supports REMOTE_WAKEUP (records the wakeup policy) and TEST_MODE
 * (validated only — the controller enters the test mode by itself).
 *
 * @set: 1 for SET_FEATURE, 0 for CLEAR_FEATURE.
 * Return: 0 on success, -EINVAL for unsupported features/selectors.
 */
static int cdns2_ep0_handle_feature_device(struct cdns2_device *pdev,
					   struct usb_ctrlrequest *ctrl,
					   int set)
{
	enum usb_device_state state;
	enum usb_device_speed speed;
	int ret = 0;
	u32 wValue;
	u16 tmode;

	wValue = le16_to_cpu(ctrl->wValue);
	state = pdev->gadget.state;
	speed = pdev->gadget.speed;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		pdev->may_wakeup = !!set;
		break;
	case USB_DEVICE_TEST_MODE:
		if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
			return -EINVAL;

		tmode = le16_to_cpu(ctrl->wIndex);

		/* Test mode cannot be cleared and its low byte must be 0. */
		if (!set || (tmode & 0xff) != 0)
			return -EINVAL;

		tmode >>= 8;

		switch (tmode) {
		case USB_TEST_J:
		case USB_TEST_K:
		case USB_TEST_SE0_NAK:
		case USB_TEST_PACKET:
			/*
			 * The USBHS controller automatically handles the
			 * Set_Feature(testmode) request. Standard test modes
			 * that use values of test mode selector from
			 * 01h to 04h (Test_J, Test_K, Test_SE0_NAK,
			 * Test_Packet) are supported by the
			 * controller(HS - ack, FS - stall).
			 */
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Handle {Set,Clear}_Feature with an interface recipient.
 * Only FUNCTION_SUSPEND is recognized, and it is accepted as a no-op;
 * every other feature selector is rejected.
 */
static int cdns2_ep0_handle_feature_intf(struct cdns2_device *pdev,
					 struct usb_ctrlrequest *ctrl,
					 int set)
{
	u32 feature = le16_to_cpu(ctrl->wValue);

	if (feature == USB_INTRF_FUNC_SUSPEND)
		return 0;

	return -EINVAL;
}
static int cdns2_ep0_handle_feature_endpoint(struct cdns2_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
struct cdns2_endpoint *pep;
u8 wValue;
wValue = le16_to_cpu(ctrl->wValue);
pep = &pdev->eps[cdns2_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
if (wValue != USB_ENDPOINT_HALT)
return -EINVAL;
if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
return 0;
switch (wValue) {
case USB_ENDPOINT_HALT:
if (set || !(pep->ep_state & EP_WEDGE))
return cdns2_halt_endpoint(pdev, pep, set);
break;
default:
dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
return -EINVAL;
}
return 0;
}
static int cdns2_req_ep0_handle_feature(struct cdns2_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
return cdns2_ep0_handle_feature_device(pdev, ctrl, set);
case USB_RECIP_INTERFACE:
return cdns2_ep0_handle_feature_intf(pdev, ctrl, set);
case USB_RECIP_ENDPOINT:
return cdns2_ep0_handle_feature_endpoint(pdev, ctrl, set);
default:
return -EINVAL;
}
}
/*
 * Dispatch a standard (USB_TYPE_STANDARD) control request.
 * Requests the driver does not handle directly are delegated to the
 * gadget driver.
 *
 * Return: handler result; negative error causes EP0 to be stalled.
 */
static int cdns2_ep0_std_request(struct cdns2_device *pdev)
{
	struct usb_ctrlrequest *ctrl = &pdev->setup;
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_SET_ADDRESS:
		ret = cdns2_req_ep0_set_address(pdev,
						le16_to_cpu(ctrl->wValue));
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = cdns2_req_ep0_set_configuration(pdev, ctrl);
		break;
	case USB_REQ_GET_STATUS:
		ret = cdns2_req_ep0_handle_status(pdev, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = cdns2_req_ep0_handle_feature(pdev, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		ret = cdns2_req_ep0_handle_feature(pdev, ctrl, 1);
		break;
	default:
		ret = cdns2_ep0_delegate_req(pdev);
		break;
	}

	return ret;
}
/*
 * Invoke the deferred ->complete() for a status-stage request that had
 * no completion interrupt (see cdns2_gadget_ep0_queue). Must be called
 * with pdev->lock held.
 */
static void __pending_setup_status_handler(struct cdns2_device *pdev)
{
	struct usb_request *request = pdev->pending_status_request;

	if (pdev->status_completion_no_call && request && request->complete) {
		request->complete(&pdev->eps[0].endpoint, request);
		pdev->status_completion_no_call = 0;
	}
}
/*
 * Workqueue entry point that completes a pending status-stage request
 * outside interrupt context, taking the device lock around the call.
 */
void cdns2_pending_setup_status_handler(struct work_struct *work)
{
	struct cdns2_device *pdev = container_of(work, struct cdns2_device,
						 pending_status_wq);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	__pending_setup_status_handler(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);
}
/*
 * Read and process a SETUP packet from the controller.
 * Copies the 8 setup bytes from hardware, discards the packet if it was
 * superseded while being read, aborts any pending EP0 request, selects
 * the next EP0 stage and dispatches the request (standard requests are
 * handled locally, the rest delegated to the gadget driver). On error
 * EP0 is stalled; if no data stage is expected the status stage is
 * started immediately.
 */
void cdns2_handle_setup_packet(struct cdns2_device *pdev)
{
	struct usb_ctrlrequest *ctrl = &pdev->setup;
	struct cdns2_endpoint *pep = &pdev->eps[0];
	struct cdns2_request *preq;
	int ret = 0;
	u16 len;
	u8 reg;
	int i;

	/* Acknowledge the SETUP-changed flag before reading the bytes. */
	writeb(EP0CS_CHGSET, &pdev->ep0_regs->cs);

	for (i = 0; i < 8; i++)
		((u8 *)&pdev->setup)[i] = readb(&pdev->ep0_regs->setupdat[i]);

	/*
	 * If SETUP packet was modified while reading just simple ignore it.
	 * The new one will be handled latter.
	 */
	if (cdns2_check_new_setup(pdev)) {
		trace_cdns2_ep0_setup("overridden");
		return;
	}

	trace_cdns2_ctrl_req(ctrl);

	if (!pdev->gadget_driver)
		goto out;

	if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
		dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
		ret = -EINVAL;
		goto out;
	}

	pep = &pdev->eps[0];

	/* Halt for Ep0 is cleared automatically when SETUP packet arrives. */
	pep->ep_state &= ~EP_STALLED;

	/* A new SETUP cancels whatever was still pending on EP0. */
	if (!list_empty(&pep->pending_list)) {
		preq = cdns2_next_preq(&pep->pending_list);
		cdns2_gadget_giveback(pep, preq, -ECONNRESET);
	}

	len = le16_to_cpu(ctrl->wLength);
	if (len)
		pdev->ep0_stage = CDNS2_DATA_STAGE;
	else
		pdev->ep0_stage = CDNS2_STATUS_STAGE;

	pep->dir = ctrl->bRequestType & USB_DIR_IN;

	/*
	 * SET_ADDRESS request is acknowledged automatically by controller and
	 * in the worse case driver may not notice this request. To check
	 * whether this request has been processed driver can use
	 * fnaddr register.
	 */
	reg = readb(&pdev->usb_regs->fnaddr);
	if (pdev->setup.bRequest != USB_REQ_SET_ADDRESS &&
	    pdev->dev_address != reg)
		cdns2_req_ep0_set_address(pdev, reg);

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = cdns2_ep0_std_request(pdev);
	else
		ret = cdns2_ep0_delegate_req(pdev);

	/* Gadget driver will finish the status stage later. */
	if (ret == USB_GADGET_DELAYED_STATUS) {
		trace_cdns2_ep0_status_stage("delayed");
		return;
	}

out:
	if (ret < 0)
		cdns2_ep0_stall(pdev);
	else if (pdev->ep0_stage == CDNS2_STATUS_STAGE)
		cdns2_status_stage(pdev);
}
/*
 * Complete the EP0 data stage: give back the pending request with the
 * actual length taken from the first TRB, then start the status stage.
 */
static void cdns2_transfer_completed(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep = &pdev->eps[0];

	if (!list_empty(&pep->pending_list)) {
		struct cdns2_request *preq;

		trace_cdns2_complete_trb(pep, pep->ring.trbs);
		preq = cdns2_next_preq(&pep->pending_list);

		preq->request.actual =
			TRB_LEN(le32_to_cpu(pep->ring.trbs->length));
		cdns2_gadget_giveback(pep, preq, 0);
	}

	cdns2_status_stage(pdev);
}
/*
 * EP0 DMA interrupt handler for the given direction.
 * Reads and acknowledges the endpoint status (write-1-to-clear), then
 * completes the current transfer on IOC/ISP.
 */
void cdns2_handle_ep0_interrupt(struct cdns2_device *pdev, int dir)
{
	u32 ep_sts_reg;

	cdns2_select_ep(pdev, dir);

	trace_cdns2_ep0_irq(pdev);

	ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
	writel(ep_sts_reg, &pdev->adma_regs->ep_sts);

	__pending_setup_status_handler(pdev);

	if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP)) {
		pdev->eps[0].dir = dir;
		cdns2_transfer_completed(pdev);
	}
}
/*
 * Function shouldn't be called by gadget driver,
 * endpoint 0 is always active.
 */
static int cdns2_gadget_ep0_enable(struct usb_ep *ep,
				   const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
/*
 * Function shouldn't be called by gadget driver,
 * endpoint 0 is always active.
 */
static int cdns2_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
/*
 * gadget ->set_halt for EP0. Setting the halt stalls EP0 under the
 * device lock; clearing it is a no-op (the controller clears the EP0
 * stall automatically on the next SETUP).
 */
static int cdns2_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_device *pdev = pep->pdev;
	unsigned long flags;

	if (!value)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	cdns2_ep0_stall(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/* gadget ->set_wedge for EP0: identical to forcing a halt. */
static int cdns2_gadget_ep0_set_wedge(struct usb_ep *ep)
{
	return cdns2_gadget_ep0_set_halt(ep, 1);
}
/*
 * gadget ->queue for EP0.
 * Rejects the request if a newer SETUP packet has arrived; during the
 * status stage it acks the stage and defers the request's completion to
 * a workqueue (no completion interrupt exists for the status stage);
 * otherwise it maps the request, arms the TRBs and starts the DMA,
 * adding a ZLP when the request asks for one on a maxpacket-multiple
 * length.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int cdns2_gadget_ep0_queue(struct usb_ep *ep,
				  struct usb_request *request,
				  gfp_t gfp_flags)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_request *preq;
	unsigned long flags;
	u8 zlp = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = to_cdns2_request(request);

	trace_cdns2_request_enqueue(preq);

	/* Cancel the request if controller receive new SETUP packet. */
	if (cdns2_check_new_setup(pdev)) {
		trace_cdns2_ep0_setup("overridden");
		spin_unlock_irqrestore(&pdev->lock, flags);
		return -ECONNRESET;
	}

	/* Send STATUS stage. Should be called only for SET_CONFIGURATION. */
	if (pdev->ep0_stage == CDNS2_STATUS_STAGE) {
		cdns2_status_stage(pdev);

		request->actual = 0;
		pdev->status_completion_no_call = true;
		pdev->pending_status_request = request;
		usb_gadget_set_state(&pdev->gadget, USB_STATE_CONFIGURED);
		spin_unlock_irqrestore(&pdev->lock, flags);

		/*
		 * Since there is no completion interrupt for status stage,
		 * it needs to call ->completion in software after
		 * cdns2_gadget_ep0_queue is back.
		 */
		queue_work(system_freezable_wq, &pdev->pending_status_wq);
		return 0;
	}

	if (!list_empty(&pep->pending_list)) {
		trace_cdns2_ep0_setup("pending");
		dev_err(pdev->dev,
			"can't handle multiple requests for ep0\n");
		spin_unlock_irqrestore(&pdev->lock, flags);
		return -EBUSY;
	}

	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
	if (ret) {
		spin_unlock_irqrestore(&pdev->lock, flags);
		dev_err(pdev->dev, "failed to map request\n");
		return -EINVAL;
	}

	request->status = -EINPROGRESS;
	list_add_tail(&preq->list, &pep->pending_list);

	if (request->zero && request->length &&
	    (request->length % ep->maxpacket == 0))
		zlp = 1;

	cdns2_ep0_enqueue(pdev, request->dma, request->length, zlp);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/* usb_ep operations for endpoint 0 (enable/disable always fail). */
static const struct usb_ep_ops cdns2_gadget_ep0_ops = {
	.enable = cdns2_gadget_ep0_enable,
	.disable = cdns2_gadget_ep0_disable,
	.alloc_request = cdns2_gadget_ep_alloc_request,
	.free_request = cdns2_gadget_ep_free_request,
	.queue = cdns2_gadget_ep0_queue,
	.dequeue = cdns2_gadget_ep_dequeue,
	.set_halt = cdns2_gadget_ep0_set_halt,
	.set_wedge = cdns2_gadget_ep0_set_wedge,
};
/*
 * Program the EP0 hardware: drop any stale pending request, configure
 * the FIFO and DMA for both directions, set max packet size and enable
 * EP0 IN/OUT DMA interrupts.
 */
void cdns2_ep0_config(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep;

	pep = &pdev->eps[0];

	if (!list_empty(&pep->pending_list)) {
		struct cdns2_request *preq;

		preq = cdns2_next_preq(&pep->pending_list);
		list_del_init(&preq->list);
	}

	/* OUT direction first, then IN. */
	writeb(EP0_FIFO_AUTO, &pdev->ep0_regs->fifo);
	cdns2_select_ep(pdev, USB_DIR_OUT);
	writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);

	writeb(EP0_FIFO_IO_TX | EP0_FIFO_AUTO, &pdev->ep0_regs->fifo);
	cdns2_select_ep(pdev, USB_DIR_IN);
	writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);

	writeb(pdev->gadget.ep0->maxpacket, &pdev->ep0_regs->maxpack);
	writel(DMA_EP_IEN_EP_OUT0 | DMA_EP_IEN_EP_IN0,
	       &pdev->adma_regs->ep_ien);
}
/*
 * Initialize the software side of EP0: limits, ops, descriptor and
 * capability flags, and register it as the gadget's ep0.
 */
void cdns2_init_ep0(struct cdns2_device *pdev,
		    struct cdns2_endpoint *pep)
{
	u16 maxpacket = le16_to_cpu(cdns2_gadget_ep0_desc.wMaxPacketSize);

	usb_ep_set_maxpacket_limit(&pep->endpoint, maxpacket);

	pep->endpoint.ops = &cdns2_gadget_ep0_ops;
	pep->endpoint.desc = &cdns2_gadget_ep0_desc;
	pep->endpoint.caps.type_control = true;
	pep->endpoint.caps.dir_in = true;
	pep->endpoint.caps.dir_out = true;

	pdev->gadget.ep0 = &pep->endpoint;
}
| linux-master | drivers/usb/gadget/udc/cdns2/cdns2-ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBHS-DEV controller - PCI Glue driver.
*
* Copyright (C) 2023 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
*/
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include "cdns2-gadget.h"
#define PCI_DRIVER_NAME "cdns-pci-usbhs"
#define CDNS_VENDOR_ID 0x17cd
#define CDNS_DEVICE_ID 0x0120
#define PCI_BAR_DEV 0
#define PCI_DEV_FN_DEVICE 0
/*
 * PCI probe: enable the function, map BAR 0, fill in the cdns2_device
 * (fixed endpoint mask and on-chip buffer sizes), initialize the gadget
 * and enable runtime/system wakeup.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int cdns2_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *id)
{
	resource_size_t rsrc_start, rsrc_len;
	struct device *dev = &pdev->dev;
	struct cdns2_device *priv_dev;
	struct resource *res;
	int ret;

	/* For GADGET PCI (devfn) function number is 0. */
	if (!id || pdev->devfn != PCI_DEV_FN_DEVICE ||
	    pdev->class != PCI_CLASS_SERIAL_USB_DEVICE)
		return -EINVAL;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	priv_dev = devm_kzalloc(&pdev->dev, sizeof(*priv_dev), GFP_KERNEL);
	if (!priv_dev)
		return -ENOMEM;

	dev_dbg(dev, "Initialize resources\n");

	rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
	rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);

	res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
	if (!res) {
		dev_dbg(dev, "controller already in use\n");
		return -EBUSY;
	}

	priv_dev->regs = devm_ioremap(dev, rsrc_start, rsrc_len);
	if (!priv_dev->regs) {
		dev_dbg(dev, "error mapping memory\n");
		return -EFAULT;
	}

	priv_dev->irq = pdev->irq;
	dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
		&rsrc_start);

	priv_dev->dev = dev;

	/* Endpoints 1-4 IN and OUT are available on this hardware. */
	priv_dev->eps_supported = 0x000f000f;
	priv_dev->onchip_tx_buf = 16;
	priv_dev->onchip_rx_buf = 16;

	ret = cdns2_gadget_init(priv_dev);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, priv_dev);

	device_wakeup_enable(&pdev->dev);
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;
}
/*
 * PCI remove: rebalance the runtime-PM reference dropped in probe and
 * tear down the gadget (managed resources are released automatically).
 */
static void cdns2_pci_remove(struct pci_dev *pdev)
{
	struct cdns2_device *priv_dev = pci_get_drvdata(pdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	cdns2_gadget_remove(priv_dev);
}
/* System-sleep suspend callback: delegate to the gadget core. */
static int cdns2_pci_suspend(struct device *dev)
{
	struct cdns2_device *priv_dev = dev_get_drvdata(dev);

	return cdns2_gadget_suspend(priv_dev);
}
/* System-sleep resume callback: resume the gadget with re-init (1). */
static int cdns2_pci_resume(struct device *dev)
{
	struct cdns2_device *priv_dev = dev_get_drvdata(dev);

	return cdns2_gadget_resume(priv_dev, 1);
}
/* System sleep PM ops only; no runtime-PM callbacks are provided. */
static const struct dev_pm_ops cdns2_pci_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(cdns2_pci_suspend, cdns2_pci_resume)
};
/* Match the Cadence device, restricted to the USB-device PCI class. */
static const struct pci_device_id cdns2_pci_ids[] = {
	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
	{ 0, }
};
/* PCI driver glue for the cdns2 USBHS device controller. */
static struct pci_driver cdns2_pci_driver = {
	.name = "cdns2-pci",
	.id_table = &cdns2_pci_ids[0],
	.probe = cdns2_pci_probe,
	.remove = cdns2_pci_remove,
	.driver = {
		.pm = pm_ptr(&cdns2_pci_pm_ops),
	}
};
module_pci_driver(cdns2_pci_driver);
MODULE_DEVICE_TABLE(pci, cdns2_pci_ids);
MODULE_ALIAS("pci:cdns2");
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence CDNS2 PCI driver");
| linux-master | drivers/usb/gadget/udc/cdns2/cdns2-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBHS-DEV Driver - gadget side.
*
* Copyright (C) 2023 Cadence Design Systems.
*
* Authors: Pawel Laszczak <[email protected]>
*/
/*
* Work around 1:
* At some situations, the controller may get stale data address in TRB
* at below sequences:
* 1. Controller read TRB includes data address
* 2. Software updates TRBs includes data address and Cycle bit
* 3. Controller read TRB which includes Cycle bit
* 4. DMA run with stale data address
*
* To fix this problem, driver needs to make the first TRB in TD as invalid.
* After preparing all TRBs driver needs to check the position of DMA and
* if the DMA point to the first just added TRB and doorbell is 1,
* then driver must defer making this TRB as valid. This TRB will be make
* as valid during adding next TRB only if DMA is stopped or at TRBERR
* interrupt.
*
*/
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include "cdns2-gadget.h"
#include "cdns2-trace.h"
/**
 * set_reg_bit_32 - set bits in given 32 bits register.
 * @ptr: register address.
 * @mask: bits to set.
 *
 * Read-modify-write; not atomic with respect to concurrent writers.
 */
static void set_reg_bit_32(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}
/*
 * clear_reg_bit_32 - clear bits in given 32 bits register.
 * @ptr: register address.
 * @mask: bits to clear.
 */
static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}
/* Clear bits in given 8 bits register. */
static void clear_reg_bit_8(void __iomem *ptr, u8 mask)
{
	mask = readb(ptr) & ~mask;
	writeb(mask, ptr);
}
/* Set bits in given 8 bits register. */
void set_reg_bit_8(void __iomem *ptr, u8 mask)
{
	mask = readb(ptr) | mask;
	writeb(mask, ptr);
}
/*
 * Return the TRB index the DMA is currently processing for the selected
 * endpoint, derived from ep_traddr relative to the ring base.
 */
static int cdns2_get_dma_pos(struct cdns2_device *pdev,
			     struct cdns2_endpoint *pep)
{
	int dma_index;

	dma_index = readl(&pdev->adma_regs->ep_traddr) - pep->ring.dma;

	return dma_index / TRB_SIZE;
}
/* Get next private request from list, or NULL when the list is empty. */
struct cdns2_request *cdns2_next_preq(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns2_request, list);
}
/*
 * Select the endpoint the per-endpoint DMA registers refer to, caching
 * the selection to avoid redundant register writes.
 */
void cdns2_select_ep(struct cdns2_device *pdev, u32 ep)
{
	if (pdev->selected_ep == ep)
		return;

	pdev->selected_ep = ep;
	writel(ep, &pdev->adma_regs->ep_sel);
}
/* Translate a TRB's virtual address into its DMA address in the ring. */
dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
				 struct cdns2_trb *trb)
{
	u32 offset = (char *)trb - (char *)pep->ring.trbs;

	return pep->ring.dma + offset;
}
/* Free the endpoint's transfer ring segment (if any) and reset state. */
static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring = &pep->ring;

	if (pep->ring.trbs) {
		dma_pool_free(pdev->eps_dma_pool, ring->trbs, ring->dma);
		memset(ring, 0, sizeof(*ring));
	}
}
/*
 * Allocates (or reuses) the Transfer Ring segment for an endpoint,
 * zeroes it and installs the trailing Link TRB that wraps the ring
 * back to its start. EP0 (num == 0) uses no Link TRB.
 *
 * Return: 0 on success, -ENOMEM if the pool allocation fails.
 */
static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_trb *link_trb;
	struct cdns2_ring *ring;

	ring = &pep->ring;

	if (!ring->trbs) {
		ring->trbs = dma_pool_alloc(pdev->eps_dma_pool,
					    GFP_DMA32 | GFP_ATOMIC,
					    &ring->dma);
		if (!ring->trbs)
			return -ENOMEM;
	}

	memset(ring->trbs, 0, TR_SEG_SIZE);

	if (!pep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
	link_trb->buffer = cpu_to_le32(TRB_BUFFER(ring->dma));
	link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
					TRB_TOGGLE);

	return 0;
}
/*
 * Stalls and flushes selected endpoint.
 * Endpoint must be selected before invoking this function.
 * Issues a DMA flush, waits (bounded, atomic poll) for it to finish,
 * then marks the endpoint stalled and clears any pending-stall flag.
 */
static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	int val;

	trace_cdns2_ep_halt(pep, 1, 1);

	writel(DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);

	/* Wait for DFLUSH cleared. */
	readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
				  !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
	pep->ep_state |= EP_STALLED;
	pep->ep_state &= ~EP_STALL_PENDING;
}
/*
 * Advance a TRB index within a ring segment.
 *
 * The index must never land on the Link TRB, which occupies the final
 * slot (trb_in_seg - 1). When the increment would reach it, wrap the
 * index to 0 and toggle the cycle state bit instead.
 */
static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	int next = *index + 1;

	if (next == trb_in_seg - 1) {
		next = 0;
		*cs ^= 1;
	}

	*index = next;
}
/* Advance the producer (enqueue) side of the ring; consumes one TRB. */
static void cdns2_ep_inc_enq(struct cdns2_ring *ring)
{
	ring->free_trbs--;
	cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);
}
/* Advance the consumer (dequeue) side of the ring; releases one TRB. */
static void cdns2_ep_inc_deq(struct cdns2_ring *ring)
{
	ring->free_trbs++;
	cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);
}
/*
 * Enable/disable LPM.
 *
 * If bit USBCS_LPMNYET is not set and device receive Extended Token packet,
 * then controller answer with ACK handshake.
 * If bit USBCS_LPMNYET is set and device receive Extended Token packet,
 * then controller answer with NYET handshake.
 */
static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
{
	if (enable) {
		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
		writeb(LPMCLOCK_SLEEP_ENTRY, &pdev->usb_regs->lpmclock);
	} else {
		set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
	}
}
/* Read the negotiated bus speed (HS/FS) from the speed-control register. */
static enum usb_device_speed cdns2_get_speed(struct cdns2_device *pdev)
{
	u8 speed = readb(&pdev->usb_regs->speedctrl);

	if (speed & SPEEDCTRL_HS)
		return USB_SPEED_HIGH;
	else if (speed & SPEEDCTRL_FS)
		return USB_SPEED_FULL;

	return USB_SPEED_UNKNOWN;
}
/*
 * Return the TRB following @trb, wrapping from the last slot (the Link
 * TRB position) back to the start of the ring.
 */
static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
					struct cdns2_trb *trb)
{
	if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
		return pep->ring.trbs;
	else
		return ++trb;
}
/*
 * Return a finished (or cancelled) request to the gadget driver.
 * Unlinks and unmaps the request, preserves an already-set status, and
 * drops the device lock across the ->complete() callback. Internal ZLP
 * requests (buf == pdev->zlp_buf) are freed here as well.
 */
void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
			   struct cdns2_request *preq,
			   int status)
{
	struct usb_request *request = &preq->request;
	struct cdns2_device *pdev = pep->pdev;

	list_del_init(&preq->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, request, pep->dir);

	/* All TRBs have finished, clear the counter. */
	preq->finished_trb = 0;

	trace_cdns2_request_giveback(preq);

	if (request->complete) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, request);
		spin_lock(&pdev->lock);
	}

	if (request->buf == pdev->zlp_buf)
		cdns2_gadget_ep_free_request(&pep->endpoint, request);
}
/*
 * Work around 1 (see top-of-file comment): hand the guarded first TRB
 * of a TD to the hardware by restoring its saved cycle bit, once it is
 * safe to do so.
 */
static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
{
	/* Work around for stale data address in TRB. */
	if (pep->wa1_set) {
		trace_cdns2_wa1(pep, "restore cycle bit");

		pep->wa1_set = 0;
		pep->wa1_trb_index = 0xFFFF;
		if (pep->wa1_cycle_bit)
			pep->wa1_trb->control |= cpu_to_le32(0x1);
		else
			pep->wa1_trb->control &= cpu_to_le32(~0x1);
	}
}
/*
 * Work around 1: if DMA is running (doorbell set), record @trb as the
 * guarded first TRB of the TD and defer making it valid.
 *
 * Return: 0 when the guard was armed (caller must not set the cycle
 * bit yet), 1 when the TRB may be handed to hardware immediately.
 */
static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
				  struct cdns2_trb *trb)
{
	struct cdns2_device *pdev = pep->pdev;

	if (!pep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);

		if (doorbell) {
			pep->wa1_cycle_bit = pep->ring.pcs ? TRB_CYCLE : 0;
			pep->wa1_set = 1;
			pep->wa1_trb = trb;
			pep->wa1_trb_index = pep->ring.enqueue;
			trace_cdns2_wa1(pep, "set guard");
			return 0;
		}
	}
	return 1;
}
/*
 * Work around 1: release the guard when DMA has stopped or has moved
 * past the guarded TRB, making that TRB valid for the hardware.
 */
static void cdns2_wa1_tray_restore_cycle_bit(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
	dma_index = cdns2_get_dma_pos(pdev, pep);

	if (!doorbell || dma_index != pep->wa1_trb_index)
		cdns2_wa1_restore_cycle_bit(pep);
}
/*
 * Check that the ring can accept @num_trbs and, when the TD will wrap,
 * refresh the Link TRB's cycle/chain bits. Updating the Link TRB while
 * DMA is processing it must be deferred (EP_DEFERRED_DRDY).
 *
 * Return: 0 on success, -ENOBUFS when the ring is full or the Link TRB
 * update has to be postponed.
 */
static int cdns2_prepare_ring(struct cdns2_device *pdev,
			      struct cdns2_endpoint *pep,
			      int num_trbs)
{
	struct cdns2_trb *link_trb = NULL;
	int doorbell, dma_index;
	struct cdns2_ring *ring;
	u32 ch_bit = 0;

	ring = &pep->ring;

	if (num_trbs > ring->free_trbs) {
		pep->ep_state |= EP_RING_FULL;
		trace_cdns2_no_room_on_ring("Ring full\n");
		return -ENOBUFS;
	}

	if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {
		doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
		dma_index = cdns2_get_dma_pos(pdev, pep);

		/* Driver can't update LINK TRB if it is current processed. */
		if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
			pep->ep_state |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* Update C bt in Link TRB before starting DMA. */
		link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);

		/*
		 * For TRs size equal 2 enabling TRB_CHAIN for epXin causes
		 * that DMA stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout cause that DMA stuck after handling LINK TRB.
		 * To eliminate this strange behavioral driver set TRB_CHAIN
		 * bit only for TR size > 2.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((ring->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	return 0;
}
/*
 * Trace every TRB belonging to a request, following the ring's wrap
 * through the Link TRB back to the start of the segment.
 */
static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
				   struct cdns2_request *preq)
{
	struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
	struct cdns2_trb *trb = preq->trb;
	int num_trbs = preq->num_of_trb;
	int i = 0;

	while (i < num_trbs) {
		trace_cdns2_queue_trb(pep, trb + i);
		if (trb + i == link_trb) {
			/* Wrapped: continue from the ring start. */
			trb = pep->ring.trbs;
			num_trbs = num_trbs - i;
			i = 0;
		} else {
			i++;
		}
	}
}
/*
 * Return how many TRBs one contiguous buffer of @len bytes at @addr
 * needs on this endpoint. Isochronous buffers are split on 4KB
 * boundaries (and scaled per-interval for IN); bulk/interrupt IN gets
 * one extra Link TRB.
 */
static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
				     u64 addr, u64 len)
{
	unsigned int num_trbs = 1;

	if (pep->type == USB_ENDPOINT_XFER_ISOC) {
		/*
		 * To speed up DMA performance address should not exceed 4KB.
		 * for high bandwidth transfer and driver will split
		 * such buffer into two TRBs.
		 */
		num_trbs = DIV_ROUND_UP(len +
					(addr & (TRB_MAX_ISO_BUFF_SIZE - 1)),
					TRB_MAX_ISO_BUFF_SIZE);

		if (pep->interval > 1)
			num_trbs = pep->dir ? num_trbs * pep->interval : 1;
	} else if (pep->dir) {
		/*
		 * One extra link trb for IN direction.
		 * Sometimes DMA doesn't want advance to next TD and transfer
		 * hangs. This extra Link TRB force DMA to advance to next TD.
		 */
		num_trbs++;
	}

	return num_trbs;
}
/*
 * Return the number of TRBs needed for a scatter-gather request,
 * summing cdns2_count_trbs() over the sg entries (capped at the
 * request length) and adding extra TRBs so ISO TRBs do not cross
 * 1024-byte packet boundaries.
 */
static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
					struct usb_request *req)
{
	unsigned int i, len, full_len, num_trbs = 0;
	struct scatterlist *sg;
	int trb_len = 0;

	full_len = req->length;

	for_each_sg(req->sg, sg, req->num_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
		len = min(len, full_len);

		/*
		 * For HS ISO transfer TRBs should not exceed max packet size.
		 * When DMA is working, and data exceed max packet size then
		 * some data will be read in single mode instead burst mode.
		 * This behavior will drastically reduce the copying speed.
		 * To avoid this we need one or two extra TRBs.
		 * This issue occurs for UVC class with sg_supported = 1
		 * because buffers addresses are not aligned to 1024.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC) {
			u8 temp;

			trb_len += len;
			temp = trb_len >> 10;

			if (temp) {
				if (trb_len % 1024)
					num_trbs = num_trbs + temp;
				else
					num_trbs = num_trbs + temp - 1;

				trb_len = trb_len - (temp << 10);
			}
		}

		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}
/*
 * Function prepares the array with optimized AXI burst value for different
 * transfer lengths. Controller handles the final data which are less
 * then AXI burst size as single byte transactions.
 * e.g.:
 * Let's assume that driver prepares trb with trb->length 700 and burst size
 * will be set to 128. In this case the controller will handle a first 512 as
 * single AXI transaction but the next 188 bytes will be handled
 * as 47 separate AXI transaction.
 * The better solution is to use the burst size equal 16 and then we will
 * have only 25 AXI transaction (10 * 64 + 15 *4).
 *
 * NOTE(review): "cdsn2" in the name looks like a typo of "cdns2"; kept
 * as-is to preserve the existing symbol.
 */
static void cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
{
	int axi_burst_option[] = {1, 2, 4, 8, 16, 32, 64, 128};
	int best_burst;
	int array_size;
	int opt_burst;
	int trb_size;
	int i, j;

	array_size = ARRAY_SIZE(axi_burst_option);

	/* For every possible TRB length pick the burst with fewest beats. */
	for (i = 0; i <= MAX_ISO_SIZE; i++) {
		trb_size = i / 4;
		best_burst = trb_size ? trb_size : 1;

		for (j = 0; j < array_size; j++) {
			opt_burst = trb_size / axi_burst_option[j];
			opt_burst += trb_size % axi_burst_option[j];

			if (opt_burst < best_burst) {
				best_burst = opt_burst;
				pdev->burst_opt[i] = axi_burst_option[j];
			}
		}
	}
}
/*
 * Build the TRBs for an isochronous request (linear or scatter-gather).
 * The data is split so that no TRB crosses a 4KB/packet boundary, the
 * first TRB's cycle bit is withheld until all TRBs are written, and for
 * IN endpoints with interval > 1 the same TD is replicated once per
 * interval.
 */
static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int num_trbs)
{
	struct scatterlist *sg = NULL;
	u32 remaining_packet_size = 0;
	struct cdns2_trb *trb;
	bool first_trb = true;
	dma_addr_t trb_dma;
	u32 trb_buff_len;
	u32 block_length;
	int td_idx = 0;
	int split_size;
	u32 full_len;
	int enqd_len;
	int sent_len;
	int sg_iter;
	u32 control;
	int num_tds;
	u32 length;

	/*
	 * For OUT direction 1 TD per interval is enough
	 * because TRBs are not dumped by controller.
	 */
	num_tds = pep->dir ? pep->interval : 1;
	split_size = preq->request.num_sgs ? 1024 : 3072;

	for (td_idx = 0; td_idx < num_tds; td_idx++) {
		if (preq->request.num_sgs) {
			sg = preq->request.sg;
			trb_dma = sg_dma_address(sg);
			block_length = sg_dma_len(sg);
		} else {
			trb_dma = preq->request.dma;
			block_length = preq->request.length;
		}

		full_len = preq->request.length;
		sg_iter = preq->request.num_sgs ? preq->request.num_sgs : 1;
		remaining_packet_size = split_size;

		for (enqd_len = 0; enqd_len < full_len;
		     enqd_len += trb_buff_len) {
			if (remaining_packet_size == 0)
				remaining_packet_size = split_size;

			/*
			 * Calculate TRB length.- buffer can't across 4KB
			 * and max packet size.
			 */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(trb_dma);
			trb_buff_len = min(trb_buff_len, remaining_packet_size);
			trb_buff_len = min(trb_buff_len, block_length);

			if (trb_buff_len > full_len - enqd_len)
				trb_buff_len = full_len - enqd_len;

			control = TRB_TYPE(TRB_NORMAL);

			/*
			 * For IN direction driver has to set the IOC for
			 * last TRB in last TD.
			 * For OUT direction driver must set IOC and ISP
			 * only for last TRB in each TDs.
			 */
			if (enqd_len + trb_buff_len >= full_len || !pep->dir)
				control |= TRB_IOC | TRB_ISP;

			/*
			 * Don't give the first TRB to the hardware (by toggling
			 * the cycle bit) until we've finished creating all the
			 * other TRBs.
			 */
			if (first_trb) {
				first_trb = false;
				if (pep->ring.pcs == 0)
					control |= TRB_CYCLE;
			} else {
				control |= pep->ring.pcs;
			}

			if (enqd_len + trb_buff_len < full_len)
				control |= TRB_CHAIN;

			length = TRB_LEN(trb_buff_len) |
				TRB_BURST(pep->pdev->burst_opt[trb_buff_len]);

			trb = pep->ring.trbs + pep->ring.enqueue;
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			trb->length = cpu_to_le32(length);
			trb->control = cpu_to_le32(control);

			trb_dma += trb_buff_len;
			sent_len = trb_buff_len;

			if (sg && sent_len >= block_length) {
				/* New sg entry */
				--sg_iter;
				sent_len -= block_length;
				if (sg_iter != 0) {
					sg = sg_next(sg);
					trb_dma = sg_dma_address(sg);
					block_length = sg_dma_len(sg);
				}
			}

			remaining_packet_size -= trb_buff_len;
			block_length -= sent_len;
			preq->end_trb = pep->ring.enqueue;

			cdns2_ep_inc_enq(&pep->ring);
		}
	}
}
/*
 * Build the TRBs for a bulk/interrupt request (linear or sg). The first
 * TRB's cycle bit is withheld until the TD is complete; for IN
 * endpoints a trailing Link TRB with IOC is appended (see the comment
 * in cdns2_count_trbs) to force DMA to advance to the next TD.
 */
static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int trbs_per_td)
{
	struct scatterlist *sg = NULL;
	struct cdns2_ring *ring;
	struct cdns2_trb *trb;
	dma_addr_t trb_dma;
	int sg_iter = 0;
	u32 control;
	u32 length;

	if (preq->request.num_sgs) {
		sg = preq->request.sg;
		trb_dma = sg_dma_address(sg);
		length = sg_dma_len(sg);
	} else {
		trb_dma = preq->request.dma;
		length = preq->request.length;
	}

	ring = &pep->ring;

	for (sg_iter = 0; sg_iter < trbs_per_td; sg_iter++) {
		control = TRB_TYPE(TRB_NORMAL) | ring->pcs | TRB_ISP;
		trb = pep->ring.trbs + ring->enqueue;

		if (pep->dir && sg_iter == trbs_per_td - 1) {
			/* Final extra Link TRB for the IN direction. */
			preq->end_trb = ring->enqueue;
			control = ring->pcs | TRB_TYPE(TRB_LINK) | TRB_CHAIN
				  | TRB_IOC;
			cdns2_ep_inc_enq(&pep->ring);

			if (ring->enqueue == 0)
				control |= TRB_TOGGLE;

			/* Point to next bad TRB. */
			trb->buffer = cpu_to_le32(pep->ring.dma +
						  (ring->enqueue * TRB_SIZE));
			trb->length = 0;
			trb->control = cpu_to_le32(control);
			break;
		}

		/*
		 * Don't give the first TRB to the hardware (by toggling
		 * the cycle bit) until we've finished creating all the
		 * other TRBs.
		 */
		if (sg_iter == 0)
			control = control ^ TRB_CYCLE;

		/* For last TRB in TD. */
		if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
			control |= TRB_IOC;
		else
			control |= TRB_CHAIN;

		trb->buffer = cpu_to_le32(trb_dma);
		trb->length = cpu_to_le32(TRB_BURST(pep->trb_burst_size) |
					   TRB_LEN(length));
		trb->control = cpu_to_le32(control);

		if (sg && sg_iter < (trbs_per_td - 1)) {
			sg = sg_next(sg);
			trb_dma = sg_dma_address(sg);
			length = sg_dma_len(sg);
		}

		preq->end_trb = ring->enqueue;
		cdns2_ep_inc_enq(&pep->ring);
	}
}
/*
 * Ring the doorbell for the selected endpoint: clear stale TRBERR and
 * DESCMIS status, set DRDY, and retry once if TRBERR races the first
 * attempt. A write barrier orders the TRB cycle-bit writes before DRDY.
 */
static void cdns2_set_drdy(struct cdns2_device *pdev,
			   struct cdns2_endpoint *pep)
{
	trace_cdns2_ring(pep);

	/*
	 * Memory barrier - Cycle Bit must be set before doorbell.
	 */
	dma_wmb();

	/* Clearing TRBERR and DESCMIS before setting DRDY. */
	writel(DMA_EP_STS_TRBERR | DMA_EP_STS_DESCMIS,
	       &pdev->adma_regs->ep_sts);
	writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);

	if (readl(&pdev->adma_regs->ep_sts) & DMA_EP_STS_TRBERR) {
		writel(DMA_EP_STS_TRBERR, &pdev->adma_regs->ep_sts);
		writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
	}

	trace_cdns2_doorbell_epx(pep, readl(&pdev->adma_regs->ep_traddr));
}
/*
 * Prepare the hardware for the first isochronous transfer on @pep.
 *
 * Returns -EBUSY if the doorbell is already armed.  For OUT endpoints the
 * DMA is simply enabled and pointed at the current dequeue position.  For
 * IN endpoints a zero-length packet plus a LINK TRB are staged in the two
 * extra slots past the standard ring (see the comments below).
 */
static int cdns2_prepare_first_isoc_transfer(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{
	struct cdns2_trb *trb;
	u32 buffer;
	u8 hw_ccs;
	if ((readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY))
		return -EBUSY;
	if (!pep->dir) {
		set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
		writel(pep->ring.dma + pep->ring.dequeue,
		       &pdev->adma_regs->ep_traddr);
		return 0;
	}
	/*
	 * The first packet after doorbell can be corrupted so,
	 * driver prepares 0 length packet as first packet.
	 */
	buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
	hw_ccs = !!DMA_EP_STS_CCS(readl(&pdev->adma_regs->ep_sts));
	/* Slot TRBS_PER_SEGMENT holds the zero-length packet TRB. */
	trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
	trb->length = 0;
	trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
	trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) | TRB_TYPE(TRB_NORMAL));
	/*
	 * LINK TRB is used to force updating cycle bit in controller and
	 * move to correct place in transfer ring.
	 */
	trb++;
	trb->length = 0;
	trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
	trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) |
				   TRB_TYPE(TRB_LINK) | TRB_CHAIN);
	/* Toggle if the controller's cycle state disagrees with the ring's. */
	if (hw_ccs != pep->ring.ccs)
		trb->control |= cpu_to_le32(TRB_TOGGLE);
	set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
	writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
	       &pdev->adma_regs->ep_traddr);
	return 0;
}
/*
 * Prepare and start a transfer on a non-default (non-EP0) endpoint.
 *
 * Counts the TRBs needed, reserves ring space, builds the TD (isochronous
 * or bulk/interrupt), then hands the TD to hardware by toggling the first
 * TRB's cycle bit and ringing the doorbell if allowed.
 *
 * Returns 0 on success or a negative error code from ring preparation.
 */
static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
				 struct cdns2_request *preq)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring;
	u32 togle_pcs = 1;
	int num_trbs;
	int ret;
	cdns2_select_ep(pdev, pep->endpoint.address);
	if (preq->request.sg)
		num_trbs = cdns2_count_sg_trbs(pep, &preq->request);
	else
		num_trbs = cdns2_count_trbs(pep, preq->request.dma,
					    preq->request.length);
	ret = cdns2_prepare_ring(pdev, pep, num_trbs);
	if (ret)
		return ret;
	ring = &pep->ring;
	preq->start_trb = ring->enqueue;
	preq->trb = ring->trbs + ring->enqueue;
	if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
		cdns2_ep_tx_isoc(pep, preq, num_trbs);
	} else {
		togle_pcs = cdns2_wa1_update_guard(pep, ring->trbs + ring->enqueue);
		cdns2_ep_tx_bulk(pep, preq, num_trbs);
	}
	preq->num_of_trb = num_trbs;
	/*
	 * Memory barrier - cycle bit must be set as the last operation.
	 */
	dma_wmb();
	/* Give the TD to the consumer. */
	if (togle_pcs)
		preq->trb->control = preq->trb->control ^ cpu_to_le32(1);
	cdns2_wa1_tray_restore_cycle_bit(pdev, pep);
	cdns2_dbg_request_trbs(pep, preq);
	/* Only ring the doorbell when no workaround/stall/skip is pending. */
	if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) {
		if (pep->type == USB_ENDPOINT_XFER_ISOC) {
			ret = cdns2_prepare_first_isoc_transfer(pdev, pep);
			if (ret)
				return 0;
		}
		cdns2_set_drdy(pdev, pep);
	}
	return 0;
}
/*
 * Start every request still sitting on the deferred list, moving each
 * successfully started one onto the pending list.  The first failure
 * aborts the loop and its error code is returned; when everything
 * started, the ring-full flag is cleared.
 */
static int cdns2_start_all_request(struct cdns2_device *pdev,
				   struct cdns2_endpoint *pep)
{
	struct cdns2_request *preq;
	int ret;
	for (;;) {
		if (list_empty(&pep->deferred_list))
			break;
		preq = cdns2_next_preq(&pep->deferred_list);
		ret = cdns2_ep_run_transfer(pep, preq);
		if (ret)
			return ret;
		list_move_tail(&preq->list, &pep->pending_list);
	}
	pep->ep_state &= ~EP_RING_FULL;
	return 0;
}
/*
 * Check whether trb has been handled by DMA.
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = ring->dequeue - dequeue position
 * EQ = ring->enqueue - enqueue position
 * ST = preq->start_trb - index of first TRB in transfer ring
 * ET = preq->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of TRB currently processed by DMA
 *
 * As a first step, we check whether the TRB lies between ST and ET.
 * Then we check whether the cycle bit for index ring->dequeue
 * is correct.
 *
 * Some rules:
 * 1. ring->dequeue never equals current_index.
 * 2. ring->enqueue never exceeds ring->dequeue.
 * 3. Exception: ring->enqueue == ring->dequeue
 *    and ring->free_trbs is zero.
 *    This case indicates that the transfer ring is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - ring->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - ring->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
			      struct cdns2_request *preq)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring;
	struct cdns2_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;
	ring = &pep->ring;
	current_index = cdns2_get_dma_pos(pdev, pep);
	doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
	/*
	 * Only ISO transfer can use 2 entries outside the standard
	 * Transfer Ring. First of them is used as zero length packet and the
	 * second as LINK TRB.
	 */
	if (current_index >= TRBS_PER_SEGMENT)
		goto finish;
	/* Current trb doesn't belong to this request. */
	if (preq->start_trb < preq->end_trb) {
		if (ring->dequeue > preq->end_trb)
			goto finish;
		if (ring->dequeue < preq->start_trb)
			goto finish;
	}
	/* Wrapped TD: dequeue must be inside [start_trb, end_trb] modulo ring. */
	if (preq->start_trb > preq->end_trb && ring->dequeue > preq->end_trb &&
	    ring->dequeue < preq->start_trb)
		goto finish;
	if (preq->start_trb == preq->end_trb && ring->dequeue != preq->end_trb)
		goto finish;
	/* Cycle bit at dequeue must match the ring's consumer cycle state. */
	trb = &ring->trbs[ring->dequeue];
	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != ring->ccs)
		goto finish;
	if (doorbell == 1 && current_index == ring->dequeue)
		goto finish;
	/* The corner case for TRBS_PER_SEGMENT equal 2). */
	if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}
	if (ring->enqueue == ring->dequeue &&
	    ring->free_trbs == 0) {
		handled = 1;
	} else if (ring->dequeue < current_index) {
		if ((current_index == (TRBS_PER_SEGMENT - 1)) &&
		    !ring->dequeue)
			goto finish;
		handled = 1;
	} else if (ring->dequeue > current_index) {
		handled = 1;
	}
finish:
	trace_cdns2_request_handled(preq, current_index, handled);
	return handled;
}
/*
 * Skip the remainder of a broken isochronous TD.
 *
 * Advances the dequeue pointer past every TRB of @preq that has not yet
 * been accounted for, gives the request back with status 0, then restages
 * the first-isoc-transfer workaround and re-arms the doorbell.
 */
static void cdns2_skip_isoc_td(struct cdns2_device *pdev,
			       struct cdns2_endpoint *pep,
			       struct cdns2_request *preq)
{
	struct cdns2_trb *trb;
	int i;
	trb = pep->ring.trbs + pep->ring.dequeue;
	for (i = preq->finished_trb ; i < preq->num_of_trb; i++) {
		preq->finished_trb++;
		trace_cdns2_complete_trb(pep, trb);
		cdns2_ep_inc_deq(&pep->ring);
		trb = cdns2_next_trb(pep, trb);
	}
	cdns2_gadget_giveback(pep, preq, 0);
	cdns2_prepare_first_isoc_transfer(pdev, pep);
	pep->skip = false;
	cdns2_set_drdy(pdev, pep);
}
/*
 * Complete finished requests on @pep's pending list.
 *
 * Walks the pending list, advancing the ring dequeue pointer over each TRB
 * the DMA has consumed, accumulates transferred lengths into the request,
 * and gives fully-handled requests back to the gadget layer.  Finally it
 * handles a pending isoc skip and restarts deferred requests.
 */
static void cdns2_transfer_completed(struct cdns2_device *pdev,
				     struct cdns2_endpoint *pep)
{
	struct cdns2_request *preq = NULL;
	bool request_handled = false;
	struct cdns2_trb *trb;
	while (!list_empty(&pep->pending_list)) {
		preq = cdns2_next_preq(&pep->pending_list);
		trb = pep->ring.trbs + pep->ring.dequeue;
		/*
		 * The TRB was changed as link TRB, and the request
		 * was handled at ep_dequeue.
		 */
		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK &&
		       le32_to_cpu(trb->length)) {
			trace_cdns2_complete_trb(pep, trb);
			cdns2_ep_inc_deq(&pep->ring);
			trb = pep->ring.trbs + pep->ring.dequeue;
		}
		/*
		 * Re-select endpoint. It could be changed by other CPU
		 * during handling usb_gadget_giveback_request.
		 */
		cdns2_select_ep(pdev, pep->endpoint.address);
		while (cdns2_trb_handled(pep, preq)) {
			preq->finished_trb++;
			if (preq->finished_trb >= preq->num_of_trb)
				request_handled = true;
			trb = pep->ring.trbs + pep->ring.dequeue;
			trace_cdns2_complete_trb(pep, trb);
			if (pep->dir && pep->type == USB_ENDPOINT_XFER_ISOC)
				/*
				 * For ISOC IN the controller doesn't update
				 * trb->length.
				 */
				preq->request.actual = preq->request.length;
			else
				preq->request.actual +=
					TRB_LEN(le32_to_cpu(trb->length));
			cdns2_ep_inc_deq(&pep->ring);
		}
		if (request_handled) {
			cdns2_gadget_giveback(pep, preq, 0);
			request_handled = false;
		} else {
			goto prepare_next_td;
		}
		/* With a 2-TRB ring only one TD can be in flight at a time. */
		if (pep->type != USB_ENDPOINT_XFER_ISOC &&
		    TRBS_PER_SEGMENT == 2)
			break;
	}
prepare_next_td:
	if (pep->skip && preq)
		cdns2_skip_isoc_td(pdev, pep, preq);
	if (!(pep->ep_state & EP_STALLED) &&
	    !(pep->ep_state & EP_STALL_PENDING))
		cdns2_start_all_request(pdev, pep);
}
/* Drive resume signaling for remote wakeup, if wakeup is permitted. */
static void cdns2_wakeup(struct cdns2_device *pdev)
{
	if (pdev->may_wakeup)
		set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_SIGRSUME);
}
/*
 * Restore the cycle bit clobbered by workaround 1 and, when @rearm is set,
 * ring the doorbell again and request remote wakeup.
 */
static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
{
	struct cdns2_device *pdev = pep->pdev;
	cdns2_wa1_restore_cycle_bit(pep);
	if (rearm) {
		trace_cdns2_ring(pep);
		/* Cycle Bit must be updated before arming DMA. */
		dma_wmb();
		writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
		cdns2_wakeup(pdev);
		trace_cdns2_doorbell_epx(pep,
					 readl(&pdev->adma_regs->ep_traddr));
	}
}
/*
 * Handle a DMA interrupt for a non-default endpoint.
 *
 * Reads and acknowledges the endpoint status, detects isochronous errors
 * (including ones visible only in txcs/rxcs), completes finished transfers
 * and re-arms or restarts the endpoint as required.
 */
static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	u8 isoerror = 0;
	u32 ep_sts_reg;
	u32 val;
	cdns2_select_ep(pdev, pep->endpoint.address);
	trace_cdns2_epx_irq(pdev, pep);
	/* Read then write-1-to-clear the endpoint status bits. */
	ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
	writel(ep_sts_reg, &pdev->adma_regs->ep_sts);
	if (pep->type == USB_ENDPOINT_XFER_ISOC) {
		u8 mult;
		u8 cs;
		mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
		cs = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcs) :
				readb(&pdev->epx_regs->ep[pep->num - 1].rxcs);
		if (mult > 0)
			isoerror = EPX_CS_ERR(cs);
	}
	/*
	 * Sometimes ISO Error for mult=1 or mult=2 is not propagated on time
	 * from USB module to DMA module. To protect against this driver
	 * checks also the txcs/rxcs registers.
	 */
	if ((ep_sts_reg & DMA_EP_STS_ISOERR) || isoerror) {
		clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
		/* Wait for DBUSY cleared. */
		readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
					  !(val & DMA_EP_STS_DBUSY), 1, 125);
		writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
		/* Wait for DFLUSH cleared. */
		readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_DFLUSH), 1, 10);
		pep->skip = true;
	}
	if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) {
		if (pep->ep_state & EP_STALL_PENDING &&
		    !(ep_sts_reg & DMA_EP_STS_DESCMIS))
			cdns2_ep_stall_flush(pep);
		/*
		 * For isochronous transfer driver completes request on
		 * IOC or on TRBERR. IOC appears only when device receive
		 * OUT data packet. If host disable stream or lost some packet
		 * then the only way to finish all queued transfer is to do it
		 * on TRBERR event.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) {
			if (!pep->dir)
				clear_reg_bit_32(&pdev->adma_regs->ep_cfg,
						 DMA_EP_CFG_ENABLE);
			cdns2_transfer_completed(pdev, pep);
			if (pep->ep_state & EP_DEFERRED_DRDY) {
				pep->ep_state &= ~EP_DEFERRED_DRDY;
				cdns2_set_drdy(pdev, pep);
			}
			return;
		}
		cdns2_transfer_completed(pdev, pep);
		if (!(pep->ep_state & EP_STALLED) &&
		    !(pep->ep_state & EP_STALL_PENDING)) {
			if (pep->ep_state & EP_DEFERRED_DRDY) {
				pep->ep_state &= ~EP_DEFERRED_DRDY;
				cdns2_start_all_request(pdev, pep);
			} else {
				cdns2_rearm_transfer(pep, pep->wa1_set);
			}
		}
		return;
	}
	if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP))
		cdns2_transfer_completed(pdev, pep);
}
static void cdns2_disconnect_gadget(struct cdns2_device *pdev)
{
if (pdev->gadget_driver && pdev->gadget_driver->disconnect)
pdev->gadget_driver->disconnect(&pdev->gadget);
}
/*
 * Hard IRQ handler.
 *
 * Masks all interrupt sources, clears the raw status, then checks whether
 * any enabled source actually fired.  If so, interrupts stay masked and the
 * threaded handler is woken (it re-enables them when done); otherwise the
 * masks are restored here and IRQ_NONE is returned.
 */
static irqreturn_t cdns2_usb_irq_handler(int irq, void *data)
{
	struct cdns2_device *pdev = data;
	unsigned long reg_ep_ists;
	u8 reg_usb_irq_m;
	u8 reg_ext_irq_m;
	u8 reg_usb_irq;
	u8 reg_ext_irq;
	if (pdev->in_lpm)
		return IRQ_NONE;
	/* Save the current interrupt masks before masking everything. */
	reg_usb_irq_m = readb(&pdev->interrupt_regs->usbien);
	reg_ext_irq_m = readb(&pdev->interrupt_regs->extien);
	/* Mask all sources of interrupt. */
	writeb(0, &pdev->interrupt_regs->usbien);
	writeb(0, &pdev->interrupt_regs->extien);
	writel(0, &pdev->adma_regs->ep_ien);
	/* Clear interrupt sources. */
	writel(0, &pdev->adma_regs->ep_sts);
	writeb(0, &pdev->interrupt_regs->usbirq);
	writeb(0, &pdev->interrupt_regs->extirq);
	reg_ep_ists = readl(&pdev->adma_regs->ep_ists);
	reg_usb_irq = readb(&pdev->interrupt_regs->usbirq);
	reg_ext_irq = readb(&pdev->interrupt_regs->extirq);
	/* Defer to the threaded handler if any enabled source is pending. */
	if (reg_ep_ists || (reg_usb_irq & reg_usb_irq_m) ||
	    (reg_ext_irq & reg_ext_irq_m))
		return IRQ_WAKE_THREAD;
	/* Spurious interrupt: restore masks and report not-ours. */
	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
	writel(~0, &pdev->adma_regs->ep_ien);
	return IRQ_NONE;
}
/*
 * Threaded handler for device-level USB events: wakeup, LPM, suspend,
 * bus reset and SETUP arrival.  Called with pdev->lock held; the lock is
 * dropped around gadget-driver callbacks.
 */
static irqreturn_t cdns2_thread_usb_irq_handler(struct cdns2_device *pdev)
{
	u8 usb_irq, ext_irq;
	int speed;
	int i;
	/* Read and acknowledge the pending event bits. */
	ext_irq = readb(&pdev->interrupt_regs->extirq) & EXTIRQ_WAKEUP;
	writeb(ext_irq, &pdev->interrupt_regs->extirq);
	usb_irq = readb(&pdev->interrupt_regs->usbirq) & USB_IEN_INIT;
	writeb(usb_irq, &pdev->interrupt_regs->usbirq);
	if (!ext_irq && !usb_irq)
		return IRQ_NONE;
	trace_cdns2_usb_irq(usb_irq, ext_irq);
	if (ext_irq & EXTIRQ_WAKEUP) {
		if (pdev->gadget_driver && pdev->gadget_driver->resume) {
			spin_unlock(&pdev->lock);
			pdev->gadget_driver->resume(&pdev->gadget);
			spin_lock(&pdev->lock);
		}
	}
	if (usb_irq & USBIRQ_LPM) {
		u8 reg = readb(&pdev->usb_regs->lpmctrl);
		/* LPM1 enter */
		if (!(reg & LPMCTRLLH_LPMNYET))
			writeb(0, &pdev->usb_regs->sleep_clkgate);
	}
	if (usb_irq & USBIRQ_SUSPEND) {
		if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
			spin_unlock(&pdev->lock);
			pdev->gadget_driver->suspend(&pdev->gadget);
			spin_lock(&pdev->lock);
		}
	}
	if (usb_irq & USBIRQ_URESET) {
		if (pdev->gadget_driver) {
			pdev->dev_address = 0;
			spin_unlock(&pdev->lock);
			usb_gadget_udc_reset(&pdev->gadget,
					     pdev->gadget_driver);
			spin_lock(&pdev->lock);
			/*
			 * The USBIRQ_URESET is reported at the beginning of
			 * reset signal. 100ms is enough time to finish reset
			 * process. For high-speed reset procedure is completed
			 * when controller detect HS mode.
			 */
			for (i = 0; i < 100; i++) {
				mdelay(1);
				speed = cdns2_get_speed(pdev);
				if (speed == USB_SPEED_HIGH)
					break;
			}
			pdev->gadget.speed = speed;
			cdns2_enable_l1(pdev, 0);
			cdns2_ep0_config(pdev);
			pdev->may_wakeup = 0;
		}
	}
	if (usb_irq & USBIRQ_SUDAV) {
		/* A SETUP packet has arrived on EP0. */
		pdev->ep0_stage = CDNS2_SETUP_STAGE;
		cdns2_handle_setup_packet(pdev);
	}
	return IRQ_HANDLED;
}
/*
 * Deferred (threaded) USB interrupt handler.
 *
 * Handles device-level events first, then dispatches per-endpoint DMA
 * interrupts, and finally re-enables the interrupt masks that the hard
 * IRQ handler left masked.
 */
static irqreturn_t cdns2_thread_irq_handler(int irq, void *data)
{
	struct cdns2_device *pdev = data;
	unsigned long dma_ep_ists;
	unsigned long flags;
	unsigned int bit;
	local_bh_disable();
	spin_lock_irqsave(&pdev->lock, flags);
	cdns2_thread_usb_irq_handler(pdev);
	dma_ep_ists = readl(&pdev->adma_regs->ep_ists);
	if (!dma_ep_ists)
		goto unlock;
	trace_cdns2_dma_ep_ists(dma_ep_ists);
	/* Handle default endpoint OUT. */
	if (dma_ep_ists & DMA_EP_ISTS_EP_OUT0)
		cdns2_handle_ep0_interrupt(pdev, USB_DIR_OUT);
	/* Handle default endpoint IN. */
	if (dma_ep_ists & DMA_EP_ISTS_EP_IN0)
		cdns2_handle_ep0_interrupt(pdev, USB_DIR_IN);
	dma_ep_ists &= ~(DMA_EP_ISTS_EP_OUT0 | DMA_EP_ISTS_EP_IN0);
	for_each_set_bit(bit, &dma_ep_ists, sizeof(u32) * BITS_PER_BYTE) {
		u8 ep_idx = bit > 16 ? (bit - 16) * 2 : (bit * 2) - 1;
		/*
		 * Endpoints in pdev->eps[] are held in order:
		 * ep0, ep1out, ep1in, ep2out, ep2in... ep15out, ep15in.
		 * but in dma_ep_ists in order:
		 * ep0 ep1out ep2out ... ep15out ep0in ep1in .. ep15in
		 */
		cdns2_handle_epx_interrupt(&pdev->eps[ep_idx]);
	}
unlock:
	/* Unmask everything the hard IRQ handler masked. */
	writel(~0, &pdev->adma_regs->ep_ien);
	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
	spin_unlock_irqrestore(&pdev->lock, flags);
	local_bh_enable();
	return IRQ_HANDLED;
}
/*
 * Calculates and assigns on-chip memory for endpoints.
 *
 * First pass sums the minimum buffering requested by every claimed
 * endpoint per direction.  Second pass greedily grows each endpoint's
 * buffering (capped at 4 units) while spare on-chip memory remains, and
 * programs the endpoint's on-chip start address register.
 *
 * Fix: the TX branch previously followed the writel() with a direct
 * structure store to the same __iomem register (txstaddr), which bypasses
 * the MMIO accessors and duplicated the write; the RX branch correctly
 * uses only writel().  The stray direct store has been removed.
 */
static void cdns2_eps_onchip_buffer_init(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep;
	int min_buf_tx = 0;
	int min_buf_rx = 0;
	u16 tx_offset = 0;
	u16 rx_offset = 0;
	int free;
	int i;

	/* Pass 1: total minimum buffering per direction. */
	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];

		if (!(pep->ep_state & EP_CLAIMED))
			continue;

		if (pep->dir)
			min_buf_tx += pep->buffering;
		else
			min_buf_rx += pep->buffering;
	}

	/* Pass 2: distribute spare memory and program start addresses. */
	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];

		if (!(pep->ep_state & EP_CLAIMED))
			continue;

		if (pep->dir) {
			free = pdev->onchip_tx_buf - min_buf_tx;

			if (free + pep->buffering >= 4)
				free = 4;
			else
				free = free + pep->buffering;

			min_buf_tx = min_buf_tx - pep->buffering + free;

			pep->buffering = free;

			writel(tx_offset,
			       &pdev->epx_regs->txstaddr[pep->num - 1]);

			dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
				pep->name, tx_offset, pep->buffering);

			tx_offset += pep->buffering * 1024;
		} else {
			free = pdev->onchip_rx_buf - min_buf_rx;

			if (free + pep->buffering >= 4)
				free = 4;
			else
				free = free + pep->buffering;

			min_buf_rx = min_buf_rx - pep->buffering + free;

			pep->buffering = free;

			writel(rx_offset,
			       &pdev->epx_regs->rxstaddr[pep->num - 1]);

			dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
				pep->name, rx_offset, pep->buffering);

			rx_offset += pep->buffering * 1024;
		}
	}
}
/*
 * Configure hardware endpoint.
 *
 * Programs the endpoint type/buffering, max packet size, FIFO and toggle
 * reset, TRB burst size and the DMA side of the endpoint; optionally
 * enables the DMA channel.  Returns 0 on success or -EINVAL for an
 * unsupported bus speed / a negative errno on DMA reset timeout.
 */
static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
{
	bool is_iso_ep = (pep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns2_device *pdev = pep->pdev;
	u32 max_packet_size;
	u8 dir = 0;
	u8 ep_cfg;
	u8 mult;
	u32 val;
	int ret;
	switch (pep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EPX_CON_TYPE_INT;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EPX_CON_TYPE_BULK;
		break;
	default:
		/* Isochronous: encode the high-bandwidth mult and type. */
		mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
		ep_cfg = mult << EPX_CON_ISOD_SHIFT;
		ep_cfg |= EPX_CON_TYPE_ISOC;
		if (pep->dir) {
			set_reg_bit_8(&pdev->epx_regs->isoautoarm, BIT(pep->num));
			set_reg_bit_8(&pdev->epx_regs->isoautodump, BIT(pep->num));
			set_reg_bit_8(&pdev->epx_regs->isodctrl, BIT(pep->num));
		}
	}
	switch (pdev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	default:
		/* All other speed are not supported. */
		return -EINVAL;
	}
	ep_cfg |= (EPX_CON_VAL | (pep->buffering - 1));
	if (pep->dir) {
		dir = FIFOCTRL_IO_TX;
		writew(max_packet_size, &pdev->epx_regs->txmaxpack[pep->num - 1]);
		writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].txcon);
	} else {
		writew(max_packet_size, &pdev->epx_regs->rxmaxpack[pep->num - 1]);
		writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].rxcon);
	}
	writeb(pep->num | dir | FIFOCTRL_FIFOAUTO,
	       &pdev->usb_regs->fifoctrl);
	/* Reset FIFO and data toggle for this endpoint. */
	writeb(pep->num | dir, &pdev->epx_regs->endprst);
	writeb(pep->num | ENDPRST_FIFORST | ENDPRST_TOGRST | dir,
	       &pdev->epx_regs->endprst);
	if (max_packet_size == 1024)
		pep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		pep->trb_burst_size = 64;
	else
		pep->trb_burst_size = 16;
	cdns2_select_ep(pdev, pep->num | pep->dir);
	writel(DMA_EP_CMD_EPRST | DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);
	ret = readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
					!(val & (DMA_EP_CMD_DFLUSH |
					DMA_EP_CMD_EPRST)),
					1, 1000);
	if (ret)
		return ret;
	writel(DMA_EP_STS_TRBERR | DMA_EP_STS_ISOERR, &pdev->adma_regs->ep_sts_en);
	if (enable)
		writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);
	trace_cdns2_epx_hw_cfg(pdev, pep);
	dev_dbg(pdev->dev, "Configure %s: with MPS: %08x, ep con: %02x\n",
		pep->name, max_packet_size, ep_cfg);
	return 0;
}
/*
 * Allocate a driver request object wrapping a struct usb_request.
 * Returns NULL when allocation fails.
 */
struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (preq) {
		preq->pep = pep;
		trace_cdns2_alloc_request(preq);
		return &preq->request;
	}

	return NULL;
}
/* Free a request previously returned by cdns2_gadget_ep_alloc_request(). */
void cdns2_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns2_request *preq;

	preq = to_cdns2_request(request);
	trace_cdns2_free_request(preq);

	kfree(preq);
}
/*
 * usb_ep_ops.enable callback: validate the descriptor, allocate the
 * transfer ring segment, configure the hardware endpoint and initialize
 * the software ring state.
 */
static int cdns2_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	u32 reg = DMA_EP_STS_EN_TRBERREN;
	struct cdns2_endpoint *pep;
	struct cdns2_device *pdev;
	unsigned long flags;
	int enable = 1;
	int ret = 0;
	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize) {
		return -EINVAL;
	}
	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;
	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;
	spin_lock_irqsave(&pdev->lock, flags);
	pep->type = usb_endpoint_type(desc);
	/* bInterval is 2^(bInterval-1) frames, except FS interrupt below. */
	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
	if (pdev->gadget.speed == USB_SPEED_FULL)
		if (pep->type == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval;
	if (pep->interval > ISO_MAX_INTERVAL &&
	    pep->type == USB_ENDPOINT_XFER_ISOC) {
		dev_err(pdev->dev, "ISO period is limited to %d (current: %d)\n",
			ISO_MAX_INTERVAL, pep->interval);
		ret = -EINVAL;
		goto exit;
	}
	/*
	 * During ISO OUT traffic DMA reads Transfer Ring for the EP which has
	 * never got doorbell.
	 * This issue was detected only on simulation, but to avoid this issue
	 * driver add protection against it. To fix it driver enable ISO OUT
	 * endpoint before setting DRBL. This special treatment of ISO OUT
	 * endpoints are recommended by controller specification.
	 */
	if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->dir)
		enable = 0;
	ret = cdns2_alloc_tr_segment(pep);
	if (ret)
		goto exit;
	ret = cdns2_ep_config(pep, enable);
	if (ret) {
		cdns2_free_tr_segment(pep);
		ret = -EINVAL;
		goto exit;
	}
	trace_cdns2_gadget_ep_enable(pep);
	/* Reset software ring state to match the freshly reset hardware. */
	pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
	pep->ep_state |= EP_ENABLED;
	pep->wa1_set = 0;
	pep->ring.enqueue = 0;
	pep->ring.dequeue = 0;
	reg = readl(&pdev->adma_regs->ep_sts);
	pep->ring.pcs = !!DMA_EP_STS_CCS(reg);
	pep->ring.ccs = !!DMA_EP_STS_CCS(reg);
	writel(pep->ring.dma, &pdev->adma_regs->ep_traddr);
	/* one TRB is reserved for link TRB used in DMULT mode*/
	pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;
exit:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}
/*
 * usb_ep_ops.disable callback: stop the DMA channel, reset the endpoint
 * and give back every pending and deferred request with -ESHUTDOWN.
 */
static int cdns2_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdns2_endpoint *pep;
	struct cdns2_request *preq;
	struct cdns2_device *pdev;
	unsigned long flags;
	int val;
	if (!ep)
		return -EINVAL;
	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;
	if (dev_WARN_ONCE(pdev->dev, !(pep->ep_state & EP_ENABLED),
			  "%s is already disabled\n", pep->name))
		return 0;
	spin_lock_irqsave(&pdev->lock, flags);
	trace_cdns2_gadget_ep_disable(pep);
	cdns2_select_ep(pdev, ep->desc->bEndpointAddress);
	clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
	/*
	 * Driver needs some time before resetting endpoint.
	 * It need waits for clearing DBUSY bit or for timeout expired.
	 * 10us is enough time for controller to stop transfer.
	 */
	readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
				  !(val & DMA_EP_STS_DBUSY), 1, 10);
	writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
	readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
				  !(val & (DMA_EP_CMD_DFLUSH | DMA_EP_CMD_EPRST)),
				  1, 1000);
	/* Fail every request still queued on this endpoint. */
	while (!list_empty(&pep->pending_list)) {
		preq = cdns2_next_preq(&pep->pending_list);
		cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
	}
	while (!list_empty(&pep->deferred_list)) {
		preq = cdns2_next_preq(&pep->deferred_list);
		cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
	}
	ep->desc = NULL;
	pep->ep_state &= ~EP_ENABLED;
	spin_unlock_irqrestore(&pdev->lock, flags);
	return 0;
}
/*
 * Map @preq for DMA, append it to the deferred list and, unless the
 * endpoint is (about to be) stalled, immediately try to start it.
 * Must be called with pdev->lock held.
 */
static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
			    struct cdns2_request *preq,
			    gfp_t gfp_flags)
{
	struct cdns2_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;
	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;
	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
	if (ret) {
		trace_cdns2_request_enqueue_error(preq);
		return ret;
	}
	list_add_tail(&preq->list, &pep->deferred_list);
	trace_cdns2_request_enqueue(preq);
	if (!(pep->ep_state & EP_STALLED) && !(pep->ep_state & EP_STALL_PENDING))
		cdns2_start_all_request(pdev, pep);
	return 0;
}
/*
 * usb_ep_ops.queue callback.
 *
 * Queues @request on @ep.  When the caller asked for a trailing ZLP
 * (request->zero) and the length is an exact multiple of maxpacket, an
 * extra zero-length request is allocated and queued behind it.
 *
 * Fixes over the previous version: the zero-length request allocation is
 * now checked (kzalloc can fail; it was dereferenced unconditionally) and
 * the inner variable no longer shadows the outer 'preq'.
 */
static int cdns2_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct usb_request *zlp_request;
	struct cdns2_request *preq;
	struct cdns2_endpoint *pep;
	struct cdns2_device *pdev;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	spin_lock_irqsave(&pdev->lock, flags);

	preq = to_cdns2_request(request);
	ret = cdns2_ep_enqueue(pep, preq, gfp_flags);

	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0)) {
		struct cdns2_request *zlp_preq;

		zlp_request = cdns2_gadget_ep_alloc_request(ep, GFP_ATOMIC);
		if (!zlp_request) {
			/* Original request is already queued; report OOM. */
			ret = -ENOMEM;
		} else {
			zlp_request->buf = pdev->zlp_buf;
			zlp_request->length = 0;

			zlp_preq = to_cdns2_request(zlp_request);
			ret = cdns2_ep_enqueue(pep, zlp_preq, gfp_flags);
		}
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}
/*
 * usb_ep_ops.dequeue callback: cancel @request on @ep.
 *
 * If the request is already on the hardware ring, DMA is flushed and its
 * TRBs are rewritten as LINK TRBs pointing past the TD so the controller
 * skips them; the request is then given back with -ECONNRESET and the
 * next pending transfer (if any) is re-armed.
 */
int cdns2_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns2_request *preq, *preq_temp, *cur_preq;
	struct cdns2_endpoint *pep;
	struct cdns2_trb *link_trb;
	u8 req_on_hw_ring = 0;
	unsigned long flags;
	u32 buffer;
	int val, i;
	if (!ep || !request || !ep->desc)
		return -EINVAL;
	pep = ep_to_cdns2_ep(ep);
	if (!pep->endpoint.desc) {
		dev_err(pep->pdev->dev, "%s: can't dequeue to disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}
	/* Requests has been dequeued during disabling endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;
	spin_lock_irqsave(&pep->pdev->lock, flags);
	cur_preq = to_cdns2_request(request);
	trace_cdns2_request_dequeue(cur_preq);
	/* Search the pending (on-ring) list first, then the deferred list. */
	list_for_each_entry_safe(preq, preq_temp, &pep->pending_list, list) {
		if (cur_preq == preq) {
			req_on_hw_ring = 1;
			goto found;
		}
	}
	list_for_each_entry_safe(preq, preq_temp, &pep->deferred_list, list) {
		if (cur_preq == preq)
			goto found;
	}
	goto not_found;
found:
	link_trb = preq->trb;
	/* Update ring only if removed request is on pending_req_list list. */
	if (req_on_hw_ring && link_trb) {
		/* Stop DMA */
		writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
		/* Wait for DFLUSH cleared. */
		readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
		/* Target of the LINK TRBs: the slot right after this TD. */
		buffer = cpu_to_le32(TRB_BUFFER(pep->ring.dma +
				    ((preq->end_trb + 1) * TRB_SIZE)));
		for (i = 0; i < preq->num_of_trb; i++) {
			link_trb->buffer = buffer;
			link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control)
					    & TRB_CYCLE) | TRB_CHAIN |
					    TRB_TYPE(TRB_LINK));
			trace_cdns2_queue_trb(pep, link_trb);
			link_trb = cdns2_next_trb(pep, link_trb);
		}
		if (pep->wa1_trb == preq->trb)
			cdns2_wa1_restore_cycle_bit(pep);
	}
	cdns2_gadget_giveback(pep, cur_preq, -ECONNRESET);
	preq = cdns2_next_preq(&pep->pending_list);
	if (preq)
		cdns2_rearm_transfer(pep, 1);
not_found:
	spin_unlock_irqrestore(&pep->pdev->lock, flags);
	return 0;
}
/*
 * Set (@value != 0) or clear (@value == 0) the hardware stall on @pep.
 *
 * Clearing: the first pending TRB's cycle bit is temporarily flipped so
 * the controller can't consume it while the data toggle is being reset,
 * then restored before re-arming the transfer.  Returns -EPERM when the
 * endpoint is not enabled.
 */
int cdns2_halt_endpoint(struct cdns2_device *pdev,
			struct cdns2_endpoint *pep,
			int value)
{
	u8 __iomem *conf;
	int dir = 0;
	if (!(pep->ep_state & EP_ENABLED))
		return -EPERM;
	if (pep->dir) {
		dir = ENDPRST_IO_TX;
		conf = &pdev->epx_regs->ep[pep->num - 1].txcon;
	} else {
		conf = &pdev->epx_regs->ep[pep->num - 1].rxcon;
	}
	if (!value) {
		struct cdns2_trb *trb = NULL;
		struct cdns2_request *preq;
		struct cdns2_trb trb_tmp;
		preq = cdns2_next_preq(&pep->pending_list);
		if (preq) {
			trb = preq->trb;
			if (trb) {
				/* Hide the TRB from hardware while un-stalling. */
				trb_tmp = *trb;
				trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
			}
		}
		trace_cdns2_ep_halt(pep, 0, 0);
		/* Resets Sequence Number */
		writeb(dir | pep->num, &pdev->epx_regs->endprst);
		writeb(dir | ENDPRST_TOGRST | pep->num,
		       &pdev->epx_regs->endprst);
		clear_reg_bit_8(conf, EPX_CON_STALL);
		pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
		if (preq) {
			if (trb)
				*trb = trb_tmp;
			cdns2_rearm_transfer(pep, 1);
		}
		cdns2_start_all_request(pdev, pep);
	} else {
		trace_cdns2_ep_halt(pep, 1, 0);
		set_reg_bit_8(conf, EPX_CON_STALL);
		writeb(dir | pep->num, &pdev->epx_regs->endprst);
		writeb(dir | ENDPRST_FIFORST | pep->num,
		       &pdev->epx_regs->endprst);
		pep->ep_state |= EP_STALLED;
	}
	return 0;
}
/*
 * Sets/clears stall on selected endpoint.
 *
 * usb_ep_ops.set_halt callback.  Refuses to stall (-EAGAIN) while a
 * request is still pending; clearing the halt also clears the wedge flag.
 */
static int cdns2_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_request *preq;
	unsigned long flags = 0;
	int ret;
	spin_lock_irqsave(&pdev->lock, flags);
	preq = cdns2_next_preq(&pep->pending_list);
	if (value && preq) {
		trace_cdns2_ep_busy_try_halt_again(pep);
		ret = -EAGAIN;
		goto done;
	}
	if (!value)
		pep->ep_state &= ~EP_WEDGE;
	ret = cdns2_halt_endpoint(pdev, pep, value);
done:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}
/* usb_ep_ops.set_wedge callback: halt the endpoint and mark it wedged. */
static int cdns2_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdns2_endpoint *pep;

	pep = ep_to_cdns2_ep(ep);
	cdns2_gadget_ep_set_halt(ep, 1);
	pep->ep_state |= EP_WEDGE;

	return 0;
}
/*
 * Find an unclaimed hardware endpoint matching the direction of @desc.
 *
 * Endpoint names follow the pattern "epXin"/"epXout" where X is the
 * endpoint number.  The previous version parsed only the single character
 * at name[2], so two-digit endpoints (e.g. "ep15in") were decoded as 1
 * and could never be matched; all leading digits are now parsed.
 *
 * Returns the endpoint or an ERR_PTR (-EINVAL on a malformed name,
 * -ENOENT when nothing suitable is free).
 */
static struct
cdns2_endpoint *cdns2_find_available_ep(struct cdns2_device *pdev,
					struct usb_endpoint_descriptor *desc)
{
	struct cdns2_endpoint *pep;
	struct usb_ep *ep;
	int ep_correct;

	list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
		unsigned long num = 0;
		const char *p = ep->name + 2;

		/* ep name pattern likes epXin or epXout; X may be 1-2 digits. */
		if (*p < '0' || *p > '9')
			return ERR_PTR(-EINVAL);

		while (*p >= '0' && *p <= '9')
			num = num * 10 + (*p++ - '0');

		pep = ep_to_cdns2_ep(ep);

		if (pep->num != num)
			continue;

		ep_correct = (pep->endpoint.caps.dir_in &&
			      usb_endpoint_dir_in(desc)) ||
			     (pep->endpoint.caps.dir_out &&
			      usb_endpoint_dir_out(desc));

		if (ep_correct && !(pep->ep_state & EP_CLAIMED))
			return pep;
	}

	return ERR_PTR(-ENOENT);
}
/*
 * Function used to recognize which endpoints will be used to optimize
 * on-chip memory usage.
 */
static struct
usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	struct cdns2_endpoint *pep;
	unsigned long flags;

	pep = cdns2_find_available_ep(pdev, desc);
	if (IS_ERR(pep)) {
		dev_err(pdev->dev, "no available ep\n");
		return NULL;
	}

	spin_lock_irqsave(&pdev->lock, flags);

	/* Isochronous endpoints get the deepest on-chip buffering. */
	pep->buffering = (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) ?
			 4 : 1;

	pep->ep_state |= EP_CLAIMED;

	spin_unlock_irqrestore(&pdev->lock, flags);

	return &pep->endpoint;
}
/* Endpoint operations exposed to the gadget framework for non-EP0 eps. */
static const struct usb_ep_ops cdns2_gadget_ep_ops = {
	.enable = cdns2_gadget_ep_enable,
	.disable = cdns2_gadget_ep_disable,
	.alloc_request = cdns2_gadget_ep_alloc_request,
	.free_request = cdns2_gadget_ep_free_request,
	.queue = cdns2_gadget_ep_queue,
	.dequeue = cdns2_gadget_ep_dequeue,
	.set_halt = cdns2_gadget_ep_set_halt,
	.set_wedge = cdns2_gadget_ep_set_wedge,
};
/* usb_gadget_ops.get_frame: read the current frame number register. */
static int cdns2_gadget_get_frame(struct usb_gadget *gadget)
{
	struct cdns2_device *pdev;

	pdev = gadget_to_cdns2_device(gadget);

	return readw(&pdev->usb_regs->frmnr);
}
/* usb_gadget_ops.wakeup: request remote wakeup under the device lock. */
static int cdns2_gadget_wakeup(struct usb_gadget *gadget)
{
	unsigned long flags;
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);

	spin_lock_irqsave(&pdev->lock, flags);
	cdns2_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/* usb_gadget_ops.set_selfpowered: record the self-powered state. */
static int cdns2_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	unsigned long flags;
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/*
 * Disable interrupts and begin the controller halting process.
 *
 * Fix: the interrupt-disable writes were previously chained with the
 * comma operator into a single statement; each write is now a statement
 * of its own.
 */
static void cdns2_quiesce(struct cdns2_device *pdev)
{
	set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);

	/* Disable interrupt. */
	writeb(0, &pdev->interrupt_regs->extien);
	writeb(0, &pdev->interrupt_regs->usbien);
	/*
	 * NOTE(review): ep_ien is accessed 32-bit (writel) elsewhere; writew
	 * here clears only the low 16 bits - confirm this is intentional.
	 */
	writew(0, &pdev->adma_regs->ep_ien);

	/* Clear interrupt line. */
	writeb(0x0, &pdev->interrupt_regs->usbirq);
}
/*
 * Program the baseline controller configuration: EP0 setup, interrupt
 * enables for all endpoints and device events, L1 disabled, DMULT mode.
 */
static void cdns2_gadget_config(struct cdns2_device *pdev)
{
	cdns2_ep0_config(pdev);
	/* Enable DMA interrupts for all endpoints. */
	writel(~0x0, &pdev->adma_regs->ep_ien);
	cdns2_enable_l1(pdev, 0);
	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
	writel(DMA_CONF_DMULT, &pdev->adma_regs->conf);
}
/*
 * usb_gadget_ops.pullup: connect or disconnect the device from the bus.
 * The IRQ line is disabled around the state change so events are not
 * handled mid-transition.
 */
static int cdns2_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	unsigned long flags;
	trace_cdns2_pullup(is_on);
	/*
	 * Disable events handling while controller is being
	 * enabled/disabled.
	 */
	disable_irq(pdev->irq);
	spin_lock_irqsave(&pdev->lock, flags);
	if (is_on) {
		cdns2_gadget_config(pdev);
		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
	} else {
		cdns2_quiesce(pdev);
	}
	spin_unlock_irqrestore(&pdev->lock, flags);
	enable_irq(pdev->irq);
	return 0;
}
/*
 * Gadget-ops .udc_start callback: bind a function driver, clamp the bus
 * speed to what both the driver and the controller support, reset all
 * hardware endpoints and program the operational configuration.
 */
static int cdns2_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	enum usb_device_speed max_speed = driver->max_speed;
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* Limit speed if necessary. */
	max_speed = min(driver->max_speed, gadget->max_speed);

	switch (max_speed) {
	case USB_SPEED_FULL:
		/* Force full-speed operation by disabling high speed. */
		writeb(SPEEDCTRL_HSDISABLE, &pdev->usb_regs->speedctrl);
		break;
	case USB_SPEED_HIGH:
		writeb(0, &pdev->usb_regs->speedctrl);
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to highspeed. */
		max_speed = USB_SPEED_HIGH;
		break;
	}

	/* Reset all USB endpoints. */
	writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
	       &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->usb_regs->endprst);

	cdns2_eps_onchip_buffer_init(pdev);

	cdns2_gadget_config(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/*
 * Gadget-ops .udc_stop callback: unbind the function driver, abort the
 * DMA engine of every endpoint (polling until the per-endpoint reset bit
 * clears), then quiesce the controller and reset the endpoint FIFOs.
 */
static int cdns2_gadget_udc_stop(struct usb_gadget *gadget)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	struct cdns2_endpoint *pep;
	u32 bEndpointAddress;
	struct usb_ep *ep;
	int val;

	pdev->gadget_driver = NULL;
	pdev->gadget.speed = USB_SPEED_UNKNOWN;

	list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
		pep = ep_to_cdns2_ep(ep);
		bEndpointAddress = pep->num | pep->dir;
		cdns2_select_ep(pdev, bEndpointAddress);
		/* Abort any in-flight DMA and wait for the reset to finish. */
		writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
		readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_EPRST), 1, 100);
	}

	cdns2_quiesce(pdev);

	writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
	       &pdev->epx_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->epx_regs->endprst);

	return 0;
}
/* Gadget operations table handed to the UDC core via usb_add_gadget(). */
static const struct usb_gadget_ops cdns2_gadget_ops = {
	.get_frame = cdns2_gadget_get_frame,
	.wakeup = cdns2_gadget_wakeup,
	.set_selfpowered = cdns2_gadget_set_selfpowered,
	.pullup = cdns2_gadget_pullup,
	.udc_start = cdns2_gadget_udc_start,
	.udc_stop = cdns2_gadget_udc_stop,
	.match_ep = cdns2_gadget_match_ep,
};
/* Release the transfer-ring segment of every software endpoint. */
static void cdns2_free_all_eps(struct cdns2_device *pdev)
{
	unsigned int idx;

	for (idx = 0; idx < CDNS2_ENDPOINTS_NUM; idx++)
		cdns2_free_tr_segment(&pdev->eps[idx]);
}
/*
 * Initializes software endpoints of gadget.
 *
 * Walks pdev->eps[] (laid out as ep0, ep1out, ep1in, ..., ep15out,
 * ep15in), skips endpoints the hardware does not implement and registers
 * the remaining ones with the gadget framework.  Returns 0 on success or
 * a negative errno if ep0's transfer-ring segment cannot be allocated.
 */
static int cdns2_init_eps(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep;
	int i;

	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		/*
		 * Endpoints are being held in pdev->eps[] in form:
		 * ep0, ep1out, ep1in ... ep15out, ep15in.
		 */
		if (!CDNS2_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->num = epnum;
		/* 0 for OUT, 1 for IN. */
		pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
		pep->idx = i;

		/* Ep0in and ep0out are represented by pdev->eps[0]. */
		if (!epnum) {
			int ret;

			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			cdns2_init_ep0(pdev, pep);

			ret = cdns2_alloc_tr_segment(pep);
			if (ret) {
				dev_err(pdev->dev, "Failed to init ep0\n");
				return ret;
			}
		} else {
			/* "direction" is already a bool; no "!!" needed. */
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, direction ? "in" : "out");
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.ops = &cdns2_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list);

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;

			pep->endpoint.caps.type_iso = 1;
			pep->endpoint.caps.type_bulk = 1;
			pep->endpoint.caps.type_int = 1;
		}

		/* Common to both branches (was duplicated in the else path). */
		pep->endpoint.name = pep->name;
		pep->ep_state = 0;

		dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
			"BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
			pep->name,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
		INIT_LIST_HEAD(&pep->deferred_list);
	}

	return 0;
}
/*
 * One-time gadget bring-up: map the register sub-blocks, reset the
 * controller, read the device-tree/ACPI properties describing on-chip
 * buffers and available endpoints, create the software endpoints and
 * register the gadget with the UDC core.  On failure every resource
 * acquired so far is unwound via the goto ladder at the bottom.
 */
static int cdns2_gadget_start(struct cdns2_device *pdev)
{
	u32 max_speed;
	void *buf;
	int val;
	int ret;

	/* All register blocks live in the same mapping at fixed offsets. */
	pdev->usb_regs = pdev->regs;
	pdev->ep0_regs = pdev->regs;
	pdev->epx_regs = pdev->regs;
	pdev->interrupt_regs = pdev->regs;
	pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;

	/* Reset controller. */
	set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);

	ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
					!(val & CPUCTRL_SW_RST), 1, 10000);
	if (ret) {
		dev_err(pdev->dev, "Error: reset controller timeout\n");
		return -EINVAL;
	}

	usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);

	device_property_read_u16(pdev->dev, "cdns,on-chip-tx-buff-size",
				 &pdev->onchip_tx_buf);
	device_property_read_u16(pdev->dev, "cdns,on-chip-rx-buff-size",
				 &pdev->onchip_rx_buf);
	device_property_read_u32(pdev->dev, "cdns,avail-endpoints",
				 &pdev->eps_supported);

	/*
	 * Driver assumes that each USBHS controller has at least
	 * one IN and one OUT non control endpoint.
	 */
	if (!pdev->onchip_tx_buf && !pdev->onchip_rx_buf) {
		ret = -EINVAL;
		dev_err(pdev->dev, "Invalid on-chip memory configuration\n");
		goto put_gadget;
	}

	/* Bits 0 and 16 are ep0 in/out; at least one other must be set. */
	if (!(pdev->eps_supported & ~0x00010001)) {
		ret = -EINVAL;
		dev_err(pdev->dev, "No hardware endpoints available\n");
		goto put_gadget;
	}

	max_speed = usb_get_maximum_speed(pdev->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to high-speed when the property is absent/bogus. */
		max_speed = USB_SPEED_HIGH;
		break;
	}

	pdev->gadget.max_speed = max_speed;
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.ops = &cdns2_gadget_ops;
	pdev->gadget.name = "usbhs-gadget";
	pdev->gadget.quirk_avoids_skb_reserve = 1;
	pdev->gadget.irq = pdev->irq;

	spin_lock_init(&pdev->lock);
	INIT_WORK(&pdev->pending_status_wq, cdns2_pending_setup_status_handler);

	/* Initialize endpoint container. */
	INIT_LIST_HEAD(&pdev->gadget.ep_list);
	pdev->eps_dma_pool = dma_pool_create("cdns2_eps_dma_pool", pdev->dev,
					     TR_SEG_SIZE, 8, 0);
	if (!pdev->eps_dma_pool) {
		dev_err(pdev->dev, "Failed to create TRB dma pool\n");
		ret = -ENOMEM;
		goto put_gadget;
	}

	ret = cdns2_init_eps(pdev);
	if (ret) {
		dev_err(pdev->dev, "Failed to create endpoints\n");
		goto destroy_dma_pool;
	}

	pdev->gadget.sg_supported = 1;

	/* Scratch buffer used to send zero-length packets. */
	pdev->zlp_buf = kzalloc(CDNS2_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!pdev->zlp_buf) {
		ret = -ENOMEM;
		goto destroy_dma_pool;
	}

	/* Allocate memory for setup packet buffer. */
	buf = dma_alloc_coherent(pdev->dev, 8, &pdev->ep0_preq.request.dma,
				 GFP_DMA);
	pdev->ep0_preq.request.buf = buf;

	if (!pdev->ep0_preq.request.buf) {
		ret = -ENOMEM;
		goto free_zlp_buf;
	}

	/* Add USB gadget device. */
	ret = usb_add_gadget(&pdev->gadget);
	if (ret < 0) {
		dev_err(pdev->dev, "Failed to add gadget\n");
		goto free_ep0_buf;
	}

	return 0;

free_ep0_buf:
	dma_free_coherent(pdev->dev, 8, pdev->ep0_preq.request.buf,
			  pdev->ep0_preq.request.dma);
free_zlp_buf:
	kfree(pdev->zlp_buf);
destroy_dma_pool:
	dma_pool_destroy(pdev->eps_dma_pool);
put_gadget:
	usb_put_gadget(&pdev->gadget);

	return ret;
}
/*
 * PM suspend hook: notify the function driver of the disconnect, mark
 * the gadget not-attached and mask device/DMA interrupts.  The gadget
 * is re-armed in cdns2_gadget_resume().
 */
int cdns2_gadget_suspend(struct cdns2_device *pdev)
{
	unsigned long flags;

	cdns2_disconnect_gadget(pdev);

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget.speed = USB_SPEED_UNKNOWN;

	trace_cdns2_device_state("notattached");
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
	cdns2_enable_l1(pdev, 0);

	/* Disable interrupt for device. */
	writeb(0, &pdev->interrupt_regs->usbien);
	writel(0, &pdev->adma_regs->ep_ien);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/*
 * PM resume hook: if a function driver is still bound, reprogram the
 * controller configuration and, after hibernation, reconnect to the bus
 * by clearing the DISCON bit.
 */
int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
{
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	if (pdev->gadget_driver) {
		cdns2_gadget_config(pdev);
		if (hibernated)
			clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
	}
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
/*
 * Tear down the gadget: drop the runtime-PM reference, deregister from
 * the UDC core and free endpoints, DMA pool and the ZLP scratch buffer.
 * Mirrors the allocations made in cdns2_gadget_start().
 */
void cdns2_gadget_remove(struct cdns2_device *pdev)
{
	pm_runtime_mark_last_busy(pdev->dev);
	pm_runtime_put_autosuspend(pdev->dev);

	usb_del_gadget(&pdev->gadget);
	cdns2_free_all_eps(pdev);

	dma_pool_destroy(pdev->eps_dma_pool);
	kfree(pdev->zlp_buf);
	usb_put_gadget(&pdev->gadget);
}
/*
 * Top-level gadget initialization: constrain DMA addressing to 32 bits,
 * take a runtime-PM reference, start the gadget and install the shared
 * threaded IRQ handler.  On IRQ-request failure the gadget is fully
 * removed again.
 */
int cdns2_gadget_init(struct cdns2_device *pdev)
{
	int ret;

	/* Ensure 32-bit DMA Mask. */
	ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(pdev->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	pm_runtime_get_sync(pdev->dev);

	cdsn2_isoc_burst_opt(pdev);

	ret = cdns2_gadget_start(pdev);
	if (ret) {
		pm_runtime_put_sync(pdev->dev);
		return ret;
	}

	/*
	 * Because interrupt line can be shared with other components in
	 * driver it can't use IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(pdev->dev, pdev->irq,
					cdns2_usb_irq_handler,
					cdns2_thread_irq_handler,
					IRQF_SHARED,
					dev_name(pdev->dev),
					pdev);
	if (ret) {
		cdns2_gadget_remove(pdev);
		return ret;
	}

	return 0;
}
| linux-master | drivers/usb/gadget/udc/cdns2/cdns2-gadget.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* hub.c - virtual hub handling
*
* Copyright 2017 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <linux/bcd.h>
#include <linux/version.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "vhub.h"
/* usb 2.0 hub device descriptor
*
* A few things we may want to improve here:
*
* - We may need to indicate TT support
* - We may need a device qualifier descriptor
* as devices can pretend to be usb1 or 2
* - Make vid/did overridable
* - make it look like usb1 if usb1 mode forced
*/
#define KERNEL_REL bin2bcd(LINUX_VERSION_MAJOR)
#define KERNEL_VER bin2bcd(LINUX_VERSION_PATCHLEVEL)
/* String-descriptor indices used by the virtual hub device descriptor. */
enum {
	AST_VHUB_STR_INDEX_MAX = 4,	/* Size of per-language string arrays */
	AST_VHUB_STR_MANUF = 3,
	AST_VHUB_STR_PRODUCT = 2,
	AST_VHUB_STR_SERIAL = 1,
};
/*
 * Template device descriptor for the virtual hub.  A copy is placed in
 * each ast_vhub instance so vid/pid/bcdDevice can be overridden from the
 * device tree (see ast_vhub_of_parse_dev_desc()).
 */
static const struct usb_device_descriptor ast_vhub_dev_desc = {
	.bLength = USB_DT_DEVICE_SIZE,
	.bDescriptorType = USB_DT_DEVICE,
	.bcdUSB = cpu_to_le16(0x0200),
	.bDeviceClass = USB_CLASS_HUB,
	.bDeviceSubClass = 0,
	.bDeviceProtocol = 1,		/* Single-TT hub protocol */
	.bMaxPacketSize0 = 64,
	.idVendor = cpu_to_le16(0x1d6b),	/* Linux Foundation */
	.idProduct = cpu_to_le16(0x0107),
	.bcdDevice = cpu_to_le16(0x0100),
	.iManufacturer = AST_VHUB_STR_MANUF,
	.iProduct = AST_VHUB_STR_PRODUCT,
	.iSerialNumber = AST_VHUB_STR_SERIAL,
	.bNumConfigurations = 1,
};
/*
 * Device-qualifier descriptor returned for GET_DESCRIPTOR requests of
 * type USB_DT_DEVICE_QUALIFIER (describes the "other" speed).
 */
static const struct usb_qualifier_descriptor ast_vhub_qual_desc = {
	.bLength = 0xA,
	.bDescriptorType = USB_DT_DEVICE_QUALIFIER,
	.bcdUSB = cpu_to_le16(0x0200),
	.bDeviceClass = USB_CLASS_HUB,
	.bDeviceSubClass = 0,
	.bDeviceProtocol = 0,
	.bMaxPacketSize0 = 64,
	.bNumConfigurations = 1,
	.bRESERVED = 0,
};
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
*/
/*
* We don't use sizeof() as Linux definition of
* struct usb_endpoint_descriptor contains 2
* extra bytes
*/
#define AST_VHUB_CONF_DESC_SIZE (USB_DT_CONFIG_SIZE + \
USB_DT_INTERFACE_SIZE + \
USB_DT_ENDPOINT_SIZE)
/*
 * Full configuration descriptor (config + one interface + one interrupt
 * IN endpoint for port-status changes).  Copied per-instance; byte 1 is
 * patched in ast_vhub_rep_desc() to serve OTHER_SPEED_CONFIG too.
 */
static const struct ast_vhub_full_cdesc ast_vhub_conf_desc = {
	.cfg = {
		.bLength = USB_DT_CONFIG_SIZE,
		.bDescriptorType = USB_DT_CONFIG,
		.wTotalLength = cpu_to_le16(AST_VHUB_CONF_DESC_SIZE),
		.bNumInterfaces = 1,
		.bConfigurationValue = 1,
		.iConfiguration = 0,
		.bmAttributes = USB_CONFIG_ATT_ONE |
				USB_CONFIG_ATT_SELFPOWER |
				USB_CONFIG_ATT_WAKEUP,
		.bMaxPower = 0,
	},
	.intf = {
		.bLength = USB_DT_INTERFACE_SIZE,
		.bDescriptorType = USB_DT_INTERFACE,
		.bInterfaceNumber = 0,
		.bAlternateSetting = 0,
		.bNumEndpoints = 1,
		.bInterfaceClass = USB_CLASS_HUB,
		.bInterfaceSubClass = 0,
		.bInterfaceProtocol = 0,
		.iInterface = 0,
	},
	.ep = {
		.bLength = USB_DT_ENDPOINT_SIZE,
		.bDescriptorType = USB_DT_ENDPOINT,
		.bEndpointAddress = 0x81,	/* EP 1, IN */
		.bmAttributes = USB_ENDPOINT_XFER_INT,
		.wMaxPacketSize = cpu_to_le16(1),
		.bInterval = 0x0c,
	},
};
#define AST_VHUB_HUB_DESC_SIZE (USB_DT_HUB_NONVAR_SIZE + 2)
/*
 * Hub class descriptor.  DeviceRemovable bitmap marks all downstream
 * ports as non-removable (they are virtual devices).
 */
static const struct usb_hub_descriptor ast_vhub_hub_desc = {
	.bDescLength = AST_VHUB_HUB_DESC_SIZE,
	.bDescriptorType = USB_DT_HUB,
	.bNbrPorts = AST_VHUB_NUM_PORTS,
	.wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM),
	.bPwrOn2PwrGood = 10,
	.bHubContrCurrent = 0,
	.u.hs.DeviceRemovable[0] = 0,
	.u.hs.DeviceRemovable[1] = 0xff,
};
/*
* These strings converted to UTF-16 must be smaller than
* our EP0 buffer.
*/
/* Default English (0x0409) string table for the virtual hub. */
static const struct usb_string ast_vhub_str_array[] = {
	{
		.id = AST_VHUB_STR_SERIAL,
		.s = "00000000"
	},
	{
		.id = AST_VHUB_STR_PRODUCT,
		.s = "USB Virtual Hub"
	},
	{
		.id = AST_VHUB_STR_MANUF,
		.s = "Aspeed"
	},
	{ }	/* Terminator */
};

/*
 * NOTE(review): the cast drops const because usb_gadget_strings.strings
 * is non-const; the table is never written through this pointer here.
 */
static const struct usb_gadget_strings ast_vhub_strings = {
	.language = 0x0409,
	.strings = (struct usb_string *)ast_vhub_str_array
};
/*
 * Reply to a device-level GET_STATUS aimed at the hub itself.  Always
 * reports self-powered; reports remote wakeup when the host enabled it.
 */
static int ast_vhub_hub_dev_status(struct ast_vhub_ep *ep,
				   u16 wIndex, u16 wValue)
{
	u8 status = 1 << USB_DEVICE_SELF_POWERED;

	EPDBG(ep, "GET_STATUS(dev)\n");

	/*
	 * Need to double check how remote wakeup actually works
	 * on that chip and what triggers it.
	 */
	if (ep->vhub->wakeup_en)
		status |= 1 << USB_DEVICE_REMOTE_WAKEUP;

	return ast_vhub_simple_reply(ep, status, 0);
}
/*
 * Reply to an endpoint-level GET_STATUS on the hub.  The hub only has
 * EP 0 and EP 1; EP 1 may report the halt feature, anything else stalls.
 */
static int ast_vhub_hub_ep_status(struct ast_vhub_ep *ep,
				  u16 wIndex, u16 wValue)
{
	int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
	u8 status = 0;

	EPDBG(ep, "GET_STATUS(ep%d)\n", ep_num);

	switch (ep_num) {
	case 0:
		break;
	case 1:
		if (ep->vhub->ep1_stalled)
			status |= 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return std_req_stall;
	}

	return ast_vhub_simple_reply(ep, status, 0);
}
/*
 * Handle device-level SET_FEATURE / CLEAR_FEATURE on the hub: remote
 * wakeup enable and USB test modes; everything else stalls.
 */
static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
				    u16 wIndex, u16 wValue,
				    bool is_set)
{
	u32 val;

	EPDBG(ep, "%s_FEATURE(dev val=%02x)\n",
	      is_set ? "SET" : "CLEAR", wValue);

	if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
		ep->vhub->wakeup_en = is_set;
		EPDBG(ep, "Hub remote wakeup %s\n",
		      is_set ? "enabled" : "disabled");
		return std_req_complete;
	}

	if (wValue == USB_DEVICE_TEST_MODE) {
		/* Test selector lives in the high byte of wIndex. */
		val = readl(ep->vhub->regs + AST_VHUB_CTRL);
		val &= ~GENMASK(10, 8);
		val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
		writel(val, ep->vhub->regs + AST_VHUB_CTRL);

		return std_req_complete;
	}

	return std_req_stall;
}
/*
 * Handle endpoint-level SET_FEATURE / CLEAR_FEATURE (ENDPOINT_HALT) on
 * the hub's own endpoints.  Only EP 1 has controllable stall state;
 * clearing the stall also resets the data toggle.
 */
static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep,
				   u16 wIndex, u16 wValue,
				   bool is_set)
{
	int ep_num;
	u32 reg;

	ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
	EPDBG(ep, "%s_FEATURE(ep%d val=%02x)\n",
	      is_set ? "SET" : "CLEAR", ep_num, wValue);

	if (ep_num > 1)
		return std_req_stall;
	if (wValue != USB_ENDPOINT_HALT)
		return std_req_stall;
	if (ep_num == 0)
		return std_req_complete;

	EPDBG(ep, "%s stall on EP 1\n",
	      is_set ? "setting" : "clearing");

	ep->vhub->ep1_stalled = is_set;
	reg = readl(ep->vhub->regs + AST_VHUB_EP1_CTRL);
	if (is_set) {
		reg |= VHUB_EP1_CTRL_STALL;
	} else {
		reg &= ~VHUB_EP1_CTRL_STALL;
		reg |= VHUB_EP1_CTRL_RESET_TOGGLE;
	}
	writel(reg, ep->vhub->regs + AST_VHUB_EP1_CTRL);

	return std_req_complete;
}
/*
 * Answer a GET_DESCRIPTOR request for the hub: copy the requested
 * descriptor (from the per-instance copies in *vhub) into the EP0
 * buffer, crop to the requested length and send it.
 */
static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
			     u8 desc_type, u16 len)
{
	size_t dsize;
	struct ast_vhub *vhub = ep->vhub;

	EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type);

	/*
	 * Copy first to EP buffer and send from there, so
	 * we can do some in-place patching if needed. We know
	 * the EP buffer is big enough but ensure that doesn't
	 * change. We do that now rather than later after we
	 * have checked sizes etc... to avoid a gcc bug where
	 * it thinks len is constant and barfs about read
	 * overflows in memcpy.
	 */
	switch(desc_type) {
	case USB_DT_DEVICE:
		dsize = USB_DT_DEVICE_SIZE;
		memcpy(ep->buf, &vhub->vhub_dev_desc, dsize);
		BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc));
		BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
		break;
	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_CONFIG:
		dsize = AST_VHUB_CONF_DESC_SIZE;
		memcpy(ep->buf, &vhub->vhub_conf_desc, dsize);
		/* Patch bDescriptorType so one template serves both types. */
		((u8 *)ep->buf)[1] = desc_type;
		BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc));
		BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
		break;
	case USB_DT_HUB:
		dsize = AST_VHUB_HUB_DESC_SIZE;
		memcpy(ep->buf, &vhub->vhub_hub_desc, dsize);
		BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc));
		BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
		break;
	case USB_DT_DEVICE_QUALIFIER:
		dsize = sizeof(vhub->vhub_qual_desc);
		memcpy(ep->buf, &vhub->vhub_qual_desc, dsize);
		break;
	default:
		return std_req_stall;
	}

	/* Crop requested length */
	if (len > dsize)
		len = dsize;

	/* Shoot it from the EP buffer */
	return ast_vhub_reply(ep, NULL, len);
}
/* The per-language string table lives in the container's stash area. */
static struct usb_gadget_strings*
ast_vhub_str_of_container(struct usb_gadget_string_container *container)
{
	void *stash = container->stash;

	return stash;
}
/*
 * Build string descriptor 0 (the LANGID list) in *buf from all string
 * tables registered on the vhub, capped by the buffer size.  Returns
 * the descriptor length written.
 */
static int ast_vhub_collect_languages(struct ast_vhub *vhub, void *buf,
				      size_t size)
{
	int rc, hdr_len, nlangs, max_langs;
	struct usb_gadget_strings *lang_str;
	struct usb_gadget_string_container *container;
	struct usb_string_descriptor *sdesc = buf;

	nlangs = 0;
	hdr_len = sizeof(struct usb_descriptor_header);

	/* How many 16-bit LANGIDs fit after the 2-byte header. */
	max_langs = (size - hdr_len) / sizeof(sdesc->wData[0]);
	list_for_each_entry(container, &vhub->vhub_str_desc, list) {
		if (nlangs >= max_langs)
			break;

		lang_str = ast_vhub_str_of_container(container);
		sdesc->wData[nlangs++] = cpu_to_le16(lang_str->language);
	}

	rc = hdr_len + nlangs * sizeof(sdesc->wData[0]);
	sdesc->bLength = rc;
	sdesc->bDescriptorType = USB_DT_STRING;

	return rc;
}
/* Find the registered string table matching a USB LANGID, or NULL. */
static struct usb_gadget_strings *ast_vhub_lookup_string(struct ast_vhub *vhub,
							 u16 lang_id)
{
	struct usb_gadget_string_container *c;

	list_for_each_entry(c, &vhub->vhub_str_desc, list) {
		struct usb_gadget_strings *strs = ast_vhub_str_of_container(c);

		if (strs->language == lang_id)
			return strs;
	}

	return NULL;
}
/*
 * Answer GET_DESCRIPTOR(STRING): index 0 yields the LANGID list, any
 * other index is encoded from the matching language table.  The result
 * is staged on the stack, then copied to the EP0 buffer and sent,
 * cropped to the host-requested length.
 */
static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
			       u8 string_id, u16 lang_id,
			       u16 len)
{
	int rc;
	u8 buf[256];
	struct ast_vhub *vhub = ep->vhub;
	struct usb_gadget_strings *lang_str;

	if (string_id == 0) {
		rc = ast_vhub_collect_languages(vhub, buf, sizeof(buf));
	} else {
		lang_str = ast_vhub_lookup_string(vhub, lang_id);
		if (!lang_str)
			return std_req_stall;

		rc = usb_gadget_get_string(lang_str, string_id, buf);
	}

	/* Stall on encode error or anything too big for one EP0 buffer. */
	if (rc < 0 || rc >= AST_VHUB_EP0_MAX_PACKET)
		return std_req_stall;

	/* Shoot it from the EP buffer */
	memcpy(ep->buf, buf, rc);

	return ast_vhub_reply(ep, NULL, min_t(u16, rc, len));
}
/*
 * Dispatch a standard (chapter 9) control request addressed to the
 * virtual hub itself.  Returns std_req_complete / std_req_stall or the
 * result of the reply helpers.
 */
enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
					 struct usb_ctrlrequest *crq)
{
	struct ast_vhub *vhub = ep->vhub;
	u16 wValue, wIndex, wLength;

	wValue = le16_to_cpu(crq->wValue);
	wIndex = le16_to_cpu(crq->wIndex);
	wLength = le16_to_cpu(crq->wLength);

	/* First packet, grab speed */
	if (vhub->speed == USB_SPEED_UNKNOWN) {
		u32 ustat = readl(vhub->regs + AST_VHUB_USBSTS);
		if (ustat & VHUB_USBSTS_HISPEED)
			vhub->speed = USB_SPEED_HIGH;
		else
			vhub->speed = USB_SPEED_FULL;
		UDCDBG(vhub, "USB status=%08x speed=%s\n", ustat,
		       vhub->speed == USB_SPEED_HIGH ? "high" : "full");
	}

	/* Key on (bRequestType, bRequest) as one 16-bit value. */
	switch ((crq->bRequestType << 8) | crq->bRequest) {
		/* SET_ADDRESS */
	case DeviceOutRequest | USB_REQ_SET_ADDRESS:
		EPDBG(ep, "SET_ADDRESS: Got address %x\n", wValue);
		writel(wValue, vhub->regs + AST_VHUB_CONF);
		return std_req_complete;

		/* GET_STATUS */
	case DeviceRequest | USB_REQ_GET_STATUS:
		return ast_vhub_hub_dev_status(ep, wIndex, wValue);
	case InterfaceRequest | USB_REQ_GET_STATUS:
		return ast_vhub_simple_reply(ep, 0, 0);
	case EndpointRequest | USB_REQ_GET_STATUS:
		return ast_vhub_hub_ep_status(ep, wIndex, wValue);

		/* SET/CLEAR_FEATURE */
	case DeviceOutRequest | USB_REQ_SET_FEATURE:
		return ast_vhub_hub_dev_feature(ep, wIndex, wValue, true);
	case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
		return ast_vhub_hub_dev_feature(ep, wIndex, wValue, false);
	case EndpointOutRequest | USB_REQ_SET_FEATURE:
		return ast_vhub_hub_ep_feature(ep, wIndex, wValue, true);
	case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
		return ast_vhub_hub_ep_feature(ep, wIndex, wValue, false);

		/* GET/SET_CONFIGURATION */
	case DeviceRequest | USB_REQ_GET_CONFIGURATION:
		return ast_vhub_simple_reply(ep, 1);
	case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
		/* Only configuration 1 exists. */
		if (wValue != 1)
			return std_req_stall;
		return std_req_complete;

		/* GET_DESCRIPTOR */
	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
		switch (wValue >> 8) {
		case USB_DT_DEVICE:
		case USB_DT_CONFIG:
		case USB_DT_DEVICE_QUALIFIER:
		case USB_DT_OTHER_SPEED_CONFIG:
			return ast_vhub_rep_desc(ep, wValue >> 8,
						 wLength);
		case USB_DT_STRING:
			return ast_vhub_rep_string(ep, wValue & 0xff,
						   wIndex, wLength);
		}
		return std_req_stall;

		/* GET/SET_INTERFACE */
	case DeviceRequest | USB_REQ_GET_INTERFACE:
		return ast_vhub_simple_reply(ep, 0);
	case DeviceOutRequest | USB_REQ_SET_INTERFACE:
		if (wValue != 0 || wIndex != 0)
			return std_req_stall;
		return std_req_complete;
	}
	return std_req_stall;
}
/*
 * Mirror one port's change flag into the EP1 status-change register so
 * the hardware interrupt endpoint reports it (bit N+1 is port N).
 */
static void ast_vhub_update_hub_ep1(struct ast_vhub *vhub,
				    unsigned int port)
{
	u32 mask = 1 << (port + 1);
	u32 reg = readl(vhub->regs + AST_VHUB_EP1_STS_CHG);

	if (vhub->ports[port].change)
		reg |= mask;
	else
		reg &= ~mask;

	writel(reg, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
/*
 * Apply clear/set masks to a port's status word and, when set_c is
 * true, derive the corresponding change bits and push them to EP1.
 */
static void ast_vhub_change_port_stat(struct ast_vhub *vhub,
				      unsigned int port,
				      u16 clr_flags,
				      u16 set_flags,
				      bool set_c)
{
	struct ast_vhub_port *p = &vhub->ports[port];
	u16 prev;

	/* Update port status */
	prev = p->status;
	p->status = (prev & ~clr_flags) | set_flags;
	DDBG(&p->dev, "port %d status %04x -> %04x (C=%d)\n",
	     port + 1, prev, p->status, set_c);

	/* Update change bits if needed */
	if (set_c) {
		u16 chg = p->status ^ prev;

		/* Only these are relevant for change */
		chg &= USB_PORT_STAT_C_CONNECTION |
		       USB_PORT_STAT_C_ENABLE |
		       USB_PORT_STAT_C_SUSPEND |
		       USB_PORT_STAT_C_OVERCURRENT |
		       USB_PORT_STAT_C_RESET |
		       USB_PORT_STAT_C_L1;

		/*
		 * We only set USB_PORT_STAT_C_ENABLE if we are disabling
		 * the port as per USB spec, otherwise MacOS gets upset
		 */
		if (p->status & USB_PORT_STAT_ENABLE)
			chg &= ~USB_PORT_STAT_C_ENABLE;

		p->change = chg;
		ast_vhub_update_hub_ep1(vhub, port);
	}
}
/* Trigger a manual remote-wakeup signal towards the host. */
static void ast_vhub_send_host_wakeup(struct ast_vhub *vhub)
{
	u32 ctrl = readl(vhub->regs + AST_VHUB_CTRL);

	UDCDBG(vhub, "Waking up host !\n");
	writel(ctrl | VHUB_CTRL_MANUAL_REMOTE_WAKEUP,
	       vhub->regs + AST_VHUB_CTRL);
}
/*
 * Report a (dis)connection on a downstream port: update the port status
 * with change bits set, and optionally wake the host.
 */
void ast_vhub_device_connect(struct ast_vhub *vhub,
			     unsigned int port, bool on)
{
	if (on)
		ast_vhub_change_port_stat(vhub, port, 0,
					  USB_PORT_STAT_CONNECTION, true);
	else
		/* Disconnect also drops the enable flag. */
		ast_vhub_change_port_stat(vhub, port,
					  USB_PORT_STAT_CONNECTION |
					  USB_PORT_STAT_ENABLE,
					  0, true);

	/*
	 * If the hub is set to wakeup the host on connection events
	 * then send a wakeup.
	 */
	if (vhub->wakeup_en)
		ast_vhub_send_host_wakeup(vhub);
}
/*
 * Deferred worker for ast_vhub_hub_wake_all(): resume every explicitly
 * suspended port (updating its status with change bits), then signal
 * remote wakeup to the host.
 */
static void ast_vhub_wake_work(struct work_struct *work)
{
	struct ast_vhub *vhub = container_of(work,
					     struct ast_vhub,
					     wake_work);
	unsigned long flags;
	unsigned int i;

	/*
	 * Wake all sleeping ports. If a port is suspended by
	 * the host suspend (without explicit state suspend),
	 * we let the normal host wake path deal with it later.
	 */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_ports; i++) {
		struct ast_vhub_port *p = &vhub->ports[i];

		if (!(p->status & USB_PORT_STAT_SUSPEND))
			continue;
		ast_vhub_change_port_stat(vhub, i,
					  USB_PORT_STAT_SUSPEND,
					  0, true);
		ast_vhub_dev_resume(&p->dev);
	}
	ast_vhub_send_host_wakeup(vhub);
	spin_unlock_irqrestore(&vhub->lock, flags);
}
/*
 * Request that all suspended ports be woken.  Deferred to a workqueue
 * (see comment below) rather than done inline.
 */
void ast_vhub_hub_wake_all(struct ast_vhub *vhub)
{
	/*
	 * A device is trying to wake the world, because this
	 * can recurse into the device, we break the call chain
	 * using a work queue
	 */
	schedule_work(&vhub->wake_work);
}
/*
 * Perform a port reset: disable the port, reset the attached gadget (if
 * any), then re-enable the port advertising the negotiated speed.
 */
static void ast_vhub_port_reset(struct ast_vhub *vhub, u8 port)
{
	struct ast_vhub_port *p = &vhub->ports[port];
	u16 set, clr, speed;

	/* First mark disabled */
	ast_vhub_change_port_stat(vhub, port,
				  USB_PORT_STAT_ENABLE |
				  USB_PORT_STAT_SUSPEND,
				  USB_PORT_STAT_RESET,
				  false);

	/* Nothing attached to this port: leave it in reset state. */
	if (!p->dev.driver)
		return;

	/*
	 * This will either "start" the port or reset the
	 * device if already started...
	 */
	ast_vhub_dev_reset(&p->dev);

	/* Grab the right speed */
	speed = p->dev.driver->max_speed;
	if (speed == USB_SPEED_UNKNOWN || speed > vhub->speed)
		speed = vhub->speed;

	switch (speed) {
	case USB_SPEED_LOW:
		set = USB_PORT_STAT_LOW_SPEED;
		clr = USB_PORT_STAT_HIGH_SPEED;
		break;
	case USB_SPEED_FULL:
		set = 0;
		clr = USB_PORT_STAT_LOW_SPEED |
		      USB_PORT_STAT_HIGH_SPEED;
		break;
	case USB_SPEED_HIGH:
		set = USB_PORT_STAT_HIGH_SPEED;
		clr = USB_PORT_STAT_LOW_SPEED;
		break;
	default:
		UDCDBG(vhub, "Unsupported speed %d when"
		       " connecting device\n",
		       speed);
		return;
	}
	clr |= USB_PORT_STAT_RESET;
	set |= USB_PORT_STAT_ENABLE;

	/* This should ideally be delayed ... */
	ast_vhub_change_port_stat(vhub, port, clr, set, true);
}
/*
 * Handle the hub-class SetPortFeature request.  "port" is 1-based as in
 * the USB spec; 0 or out-of-range stalls.
 */
static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep,
						 u8 port, u16 feat)
{
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_port *p;

	if (port == 0 || port > vhub->max_ports)
		return std_req_stall;
	port--;
	p = &vhub->ports[port];

	switch(feat) {
	case USB_PORT_FEAT_SUSPEND:
		/* Suspending a disabled port is a silent no-op. */
		if (!(p->status & USB_PORT_STAT_ENABLE))
			return std_req_complete;
		ast_vhub_change_port_stat(vhub, port,
					  0, USB_PORT_STAT_SUSPEND,
					  false);
		ast_vhub_dev_suspend(&p->dev);
		return std_req_complete;
	case USB_PORT_FEAT_RESET:
		EPDBG(ep, "Port reset !\n");
		ast_vhub_port_reset(vhub, port);
		return std_req_complete;
	case USB_PORT_FEAT_POWER:
		/*
		 * On Power-on, we mark the connected flag changed,
		 * if there's a connected device, some hosts will
		 * otherwise fail to detect it.
		 */
		if (p->status & USB_PORT_STAT_CONNECTION) {
			p->change |= USB_PORT_STAT_C_CONNECTION;
			ast_vhub_update_hub_ep1(vhub, port);
		}
		return std_req_complete;
	case USB_PORT_FEAT_TEST:
	case USB_PORT_FEAT_INDICATOR:
		/* We don't do anything with these */
		return std_req_complete;
	}
	return std_req_stall;
}
/*
 * Handle the hub-class ClearPortFeature request.  "port" is 1-based as
 * in the USB spec; 0 or out-of-range stalls.
 */
static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep,
						 u8 port, u16 feat)
{
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_port *p;

	if (port == 0 || port > vhub->max_ports)
		return std_req_stall;
	port--;
	p = &vhub->ports[port];

	switch(feat) {
	case USB_PORT_FEAT_ENABLE:
		/* Disabling a port also suspends the attached gadget. */
		ast_vhub_change_port_stat(vhub, port,
					  USB_PORT_STAT_ENABLE |
					  USB_PORT_STAT_SUSPEND, 0,
					  false);
		ast_vhub_dev_suspend(&p->dev);
		return std_req_complete;
	case USB_PORT_FEAT_SUSPEND:
		if (!(p->status & USB_PORT_STAT_SUSPEND))
			return std_req_complete;
		ast_vhub_change_port_stat(vhub, port,
					  USB_PORT_STAT_SUSPEND, 0,
					  false);
		ast_vhub_dev_resume(&p->dev);
		return std_req_complete;
	case USB_PORT_FEAT_POWER:
		/* We don't do power control */
		return std_req_complete;
	case USB_PORT_FEAT_INDICATOR:
		/* We don't have indicators */
		return std_req_complete;
	case USB_PORT_FEAT_C_CONNECTION:
	case USB_PORT_FEAT_C_ENABLE:
	case USB_PORT_FEAT_C_SUSPEND:
	case USB_PORT_FEAT_C_OVER_CURRENT:
	case USB_PORT_FEAT_C_RESET:
		/* Clear state-change feature */
		/* C_* feature selectors map to change bits 0..4 plus 16. */
		p->change &= ~(1u << (feat - 16));
		ast_vhub_update_hub_ep1(vhub, port);
		return std_req_complete;
	}
	return std_req_stall;
}
/*
 * Answer GetPortStatus: return the 4-byte wPortStatus/wPortChange pair
 * for a 1-based port number; out-of-range stalls.
 */
static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep,
					      u8 port)
{
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_port *p;
	u16 stat, chg;

	if (port == 0 || port > vhub->max_ports)
		return std_req_stall;

	p = &vhub->ports[port - 1];

	/* We always have power */
	stat = p->status | USB_PORT_STAT_POWER;
	chg = p->change;

	EPDBG(ep, " port status=%04x change=%04x\n", stat, chg);

	return ast_vhub_simple_reply(ep,
				     stat & 0xff,
				     stat >> 8,
				     chg & 0xff,
				     chg >> 8);
}
/*
 * Dispatch a hub-class control request (chapter 11) addressed to the
 * virtual hub.  Unknown requests stall.
 */
enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
					   struct usb_ctrlrequest *crq)
{
	u16 wValue, wIndex, wLength;

	wValue = le16_to_cpu(crq->wValue);
	wIndex = le16_to_cpu(crq->wIndex);
	wLength = le16_to_cpu(crq->wLength);

	switch ((crq->bRequestType << 8) | crq->bRequest) {
	case GetHubStatus:
		EPDBG(ep, "GetHubStatus\n");
		/* No local power loss, no over-current. */
		return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
	case GetPortStatus:
		EPDBG(ep, "GetPortStatus(%d)\n", wIndex & 0xff);
		return ast_vhub_get_port_stat(ep, wIndex & 0xf);
	case GetHubDescriptor:
		if (wValue != (USB_DT_HUB << 8))
			return std_req_stall;
		EPDBG(ep, "GetHubDescriptor(%d)\n", wIndex & 0xff);
		return ast_vhub_rep_desc(ep, USB_DT_HUB, wLength);
	case SetHubFeature:
	case ClearHubFeature:
		EPDBG(ep, "Get/SetHubFeature(%d)\n", wValue);
		/* No feature, just complete the requests */
		if (wValue == C_HUB_LOCAL_POWER ||
		    wValue == C_HUB_OVER_CURRENT)
			return std_req_complete;
		return std_req_stall;
	case SetPortFeature:
		EPDBG(ep, "SetPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
		return ast_vhub_set_port_feature(ep, wIndex & 0xf, wValue);
	case ClearPortFeature:
		EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
		return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue);
	case ClearTTBuffer:
	case ResetTT:
	case StopTT:
		/* TT requests are accepted but are no-ops here. */
		return std_req_complete;
	case GetTTState:
		return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
	default:
		EPDBG(ep, "Unknown class request\n");
	}
	return std_req_stall;
}
/*
 * Bus suspend: propagate the suspend to every port that is not already
 * explicitly suspended.  Idempotent via the "suspended" flag.
 */
void ast_vhub_hub_suspend(struct ast_vhub *vhub)
{
	unsigned int port;

	UDCDBG(vhub, "USB bus suspend\n");

	if (vhub->suspended)
		return;
	vhub->suspended = true;

	/*
	 * Forward to unsuspended ports without changing
	 * their connection status.
	 */
	for (port = 0; port < vhub->max_ports; port++) {
		struct ast_vhub_port *p = &vhub->ports[port];

		if (!(p->status & USB_PORT_STAT_SUSPEND))
			ast_vhub_dev_suspend(&p->dev);
	}
}
/*
 * Bus resume: propagate the resume to every port that was not
 * explicitly suspended.  Idempotent via the "suspended" flag.
 */
void ast_vhub_hub_resume(struct ast_vhub *vhub)
{
	unsigned int port;

	UDCDBG(vhub, "USB bus resume\n");

	if (!vhub->suspended)
		return;
	vhub->suspended = false;

	/*
	 * Forward to unsuspended ports without changing
	 * their connection status.
	 */
	for (port = 0; port < vhub->max_ports; port++) {
		struct ast_vhub_port *p = &vhub->ports[port];

		if (!(p->status & USB_PORT_STAT_SUSPEND))
			ast_vhub_dev_resume(&p->dev);
	}
}
/*
 * Bus reset: clear per-port state (keeping only the connection flag),
 * suspend attached gadgets until the next port reset, and reinitialize
 * the hub's hardware endpoints.
 */
void ast_vhub_hub_reset(struct ast_vhub *vhub)
{
	unsigned int i;

	UDCDBG(vhub, "USB bus reset\n");

	/*
	 * Is the speed known ? If not we don't care, we aren't
	 * initialized yet and ports haven't been enabled.
	 */
	if (vhub->speed == USB_SPEED_UNKNOWN)
		return;

	/* We aren't suspended anymore obviously */
	vhub->suspended = false;

	/* No speed set */
	vhub->speed = USB_SPEED_UNKNOWN;

	/* Wakeup not enabled anymore */
	vhub->wakeup_en = false;

	/*
	 * Clear all port status, disable gadgets and "suspend"
	 * them. They will be woken up by a port reset.
	 */
	for (i = 0; i < vhub->max_ports; i++) {
		struct ast_vhub_port *p = &vhub->ports[i];

		/* Only keep the connected flag */
		p->status &= USB_PORT_STAT_CONNECTION;
		p->change = 0;

		/* Suspend the gadget if any */
		ast_vhub_dev_suspend(&p->dev);
	}

	/* Cleanup HW */
	writel(0, vhub->regs + AST_VHUB_CONF);
	writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
	writel(VHUB_EP1_CTRL_RESET_TOGGLE |
	       VHUB_EP1_CTRL_ENABLE,
	       vhub->regs + AST_VHUB_EP1_CTRL);
	writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
static void ast_vhub_of_parse_dev_desc(struct ast_vhub *vhub,
const struct device_node *vhub_np)
{
u16 id;
u32 data;
if (!of_property_read_u32(vhub_np, "vhub-vendor-id", &data)) {
id = (u16)data;
vhub->vhub_dev_desc.idVendor = cpu_to_le16(id);
}
if (!of_property_read_u32(vhub_np, "vhub-product-id", &data)) {
id = (u16)data;
vhub->vhub_dev_desc.idProduct = cpu_to_le16(id);
}
if (!of_property_read_u32(vhub_np, "vhub-device-revision", &data)) {
id = (u16)data;
vhub->vhub_dev_desc.bcdDevice = cpu_to_le16(id);
}
}
/* Downgrade the device descriptor to USB 1.1 when USB1 mode is forced. */
static void ast_vhub_fixup_usb1_dev_desc(struct ast_vhub *vhub)
{
	vhub->vhub_dev_desc.bcdUSB = cpu_to_le16(0x0100);
	vhub->vhub_dev_desc.bDeviceProtocol = 0;
}
/*
 * Allocate one string container sized for a usb_gadget_strings header
 * plus AST_VHUB_STR_INDEX_MAX usb_string entries, all devm-managed and
 * zeroed (so the string array ends in a terminator by construction).
 */
static struct usb_gadget_string_container*
ast_vhub_str_container_alloc(struct ast_vhub *vhub)
{
	unsigned int size;
	struct usb_string *str_array;
	struct usb_gadget_strings *lang_str;
	struct usb_gadget_string_container *container;

	size = sizeof(*container);
	size += sizeof(struct usb_gadget_strings);
	size += sizeof(struct usb_string) * AST_VHUB_STR_INDEX_MAX;
	container = devm_kzalloc(&vhub->pdev->dev, size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	/* The strings header and its entry array follow the container. */
	lang_str = ast_vhub_str_of_container(container);
	str_array = (struct usb_string *)(lang_str + 1);
	lang_str->strings = str_array;

	return container;
}
/*
 * Deep-copy a usb_gadget_strings table. Entries are copied up to (but
 * not including) the NULL terminator; the destination array came from
 * a zeroed allocation so it remains terminated. Note the first entry
 * is copied unconditionally, matching the original do/while shape.
 */
static void ast_vhub_str_deep_copy(struct usb_gadget_strings *dest,
				   const struct usb_gadget_strings *src)
{
	struct usb_string *from = src->strings;
	struct usb_string *to = dest->strings;

	dest->language = src->language;
	if (from && to) {
		do {
			*to++ = *from++;
		} while (from->s);
	}
}
/*
 * Allocate a string container, deep-copy @src_str into it and append
 * it to the vHub's string-descriptor list. Returns 0 or a negative
 * errno from the allocation.
 */
static int ast_vhub_str_alloc_add(struct ast_vhub *vhub,
				  const struct usb_gadget_strings *src_str)
{
	struct usb_gadget_string_container *container;

	container = ast_vhub_str_container_alloc(vhub);
	if (IS_ERR(container))
		return PTR_ERR(container);

	ast_vhub_str_deep_copy(ast_vhub_str_of_container(container), src_str);
	list_add_tail(&container->list, &vhub->vhub_str_desc);
	return 0;
}
/*
 * Maps "vhub-strings" device-tree property names to USB string
 * descriptor indices; terminated by the empty sentinel entry.
 */
static const struct {
	const char *name;
	u8 id;
} str_id_map[] = {
	{"manufacturer", AST_VHUB_STR_MANUF},
	{"product", AST_VHUB_STR_PRODUCT},
	{"serial-number", AST_VHUB_STR_SERIAL},
	{},
};
/*
 * Parse the "vhub-strings" DT node: each child node carries one
 * language's string descriptors (selected by a validated LANGID in
 * "reg"). Builds a NULL-terminated usb_string array per language and
 * adds it to the vHub's string list. Returns 0 or a negative errno.
 */
static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
				      const struct device_node *desc_np)
{
	u32 langid;
	int ret = 0;
	int i, offset;
	const char *str;
	struct device_node *child;
	struct usb_string str_array[AST_VHUB_STR_INDEX_MAX];
	struct usb_gadget_strings lang_str = {
		.strings = (struct usb_string *)str_array,
	};
	for_each_child_of_node(desc_np, child) {
		if (of_property_read_u32(child, "reg", &langid))
			continue; /* no language identifier specified */
		if (!usb_validate_langid(langid))
			continue; /* invalid language identifier */
		lang_str.language = langid;
		/* Collect whichever known string properties are present */
		for (i = offset = 0; str_id_map[i].name; i++) {
			str = of_get_property(child, str_id_map[i].name, NULL);
			if (str) {
				str_array[offset].s = str;
				str_array[offset].id = str_id_map[i].id;
				offset++;
			}
		}
		/* NULL-terminate the array for the deep-copy helper */
		str_array[offset].id = 0;
		str_array[offset].s = NULL;
		ret = ast_vhub_str_alloc_add(vhub, &lang_str);
		if (ret) {
			/* Drop the reference held by the iterator on early exit */
			of_node_put(child);
			break;
		}
	}
	return ret;
}
/*
 * Build all vHub descriptors from the compiled-in templates, then apply
 * device-tree overrides and the USB1 fixup where requested. Returns 0
 * or a negative errno from string-descriptor allocation.
 */
static int ast_vhub_init_desc(struct ast_vhub *vhub)
{
	const struct device_node *vhub_np = vhub->pdev->dev.of_node;
	struct device_node *desc_np;
	int ret;

	/* Device descriptor: template, DT overrides, optional USB1 fixup */
	memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc,
	       sizeof(vhub->vhub_dev_desc));
	ast_vhub_of_parse_dev_desc(vhub, vhub_np);
	if (vhub->force_usb1)
		ast_vhub_fixup_usb1_dev_desc(vhub);

	/* Configuration descriptor straight from the template */
	memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc,
	       sizeof(vhub->vhub_conf_desc));

	/* Hub descriptor, with the actual port count patched in */
	memcpy(&vhub->vhub_hub_desc, &ast_vhub_hub_desc,
	       sizeof(vhub->vhub_hub_desc));
	vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports;

	/* String descriptors: from DT when present, else built-in defaults */
	INIT_LIST_HEAD(&vhub->vhub_str_desc);
	desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
	if (desc_np) {
		ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
		of_node_put(desc_np);
	} else {
		ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
	}

	/* Qualifier descriptor straight from the template */
	memcpy(&vhub->vhub_qual_desc, &ast_vhub_qual_desc,
	       sizeof(vhub->vhub_qual_desc));
	return ret;
}
/*
 * One-time hub initialization: mark the bus speed unknown, set up the
 * remote-wakeup work item and build all descriptors. Returns 0 or a
 * negative errno from descriptor setup.
 */
int ast_vhub_init_hub(struct ast_vhub *vhub)
{
	vhub->speed = USB_SPEED_UNKNOWN;
	INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
	return ast_vhub_init_desc(vhub);
}
| linux-master | drivers/usb/gadget/udc/aspeed-vhub/hub.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* ep0.c - Endpoint 0 handling
*
* Copyright 2017 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
/*
 * Queue an IN reply on EP0 using the endpoint's internal request.
 * @ptr may be NULL when the payload is already in the EP buffer.
 * Returns std_req_data on success or std_req_stall on any sanity
 * failure or queueing error. Called with the vhub lock held; it is
 * temporarily dropped around the queue call.
 */
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc = std_req_data;

	/* Sanity: EP0 only, IN direction, single packet, request idle */
	if (WARN_ON(ep->d_idx != 0) ||
	    WARN_ON(!ep->ep0.dir_in) ||
	    WARN_ON(len > AST_VHUB_EP0_MAX_PACKET) ||
	    WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	spin_lock(&ep->vhub->lock);
	return rc;
}
/*
 * Build a small EP0 IN reply from varargs bytes written straight into
 * the EP buffer, then hand off to ast_vhub_reply().
 */
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *dst = ep->buf;
	unsigned int n;
	va_list args;

	va_start(args, len);
	/* Copy data directly into EP buffer */
	for (n = 0; n < len; n++)
		*dst++ = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
/*
 * Process a SETUP packet on EP0: read it from the chip, route it to
 * the vHub's own standard/class handlers (when this is the hub's EP0)
 * or to the gadget driver, and enter the resulting data, status or
 * stall phase.
 */
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;
	if (WARN_ON(ep->d_idx != 0))
		return;
	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));
	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);
	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}
	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);
	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}
	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	/* A negative driver return (or no driver) falls through to stall */
	if (rc >= 0)
		return;
 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;
 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
/*
 * Push the next chunk of an EP0 IN transfer into the endpoint buffer
 * and trigger transmission. Completes the request and moves to the
 * status phase once the final chunk (or a 0-length status reply) has
 * been handed to the hardware. Called with the vhub lock held.
 */
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;
	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;
	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}
	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;
	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);
	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
	vhub_dma_workaround(ep->buf);
	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}
/* Arm the EP0 receive buffer so the hardware can accept an OUT packet. */
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");
	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
/*
 * Consume @len bytes received on EP0 into @req. Crops over-long
 * packets (flagging -EOVERFLOW), compensates for the hardware
 * occasionally reporting a wrong length, and either completes the
 * request (short packet or request filled) or re-arms the receiver.
 */
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain = req->req.length - req->req.actual;
	int status = 0;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Host sent more than the request can hold: crop and flag it */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		status = -EOVERFLOW;
	}

	/* Hardware return wrong data len */
	if (len < ep->ep.maxpacket && len != remain) {
		EPDBG(ep, "using expected data len instead\n");
		len = remain;
	}

	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Short packet or request satisfied: enter status phase */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, status);
	} else {
		ast_vhub_ep0_rx_prime(ep);
	}
}
/*
 * EP0 ack interrupt handler: advance the EP0 state machine after the
 * hardware completed a buffer. @in_ack indicates which direction the
 * ack is for, letting us detect host/device phase disagreement and
 * stall when things go off the rails.
 */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;
	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);
	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);
	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}
		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}
	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else
		ep->ep0.state = ep0_state_token;
}
/*
 * usb_ep_ops.queue for EP0. Only a single request may be pending at a
 * time: IN requests start transmitting immediately, 0-length OUT
 * requests complete as a status phase, other OUT requests prime the
 * receiver. Returns 0 or a negative errno.
 */
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;
	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}
	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;
	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;
	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}
	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);
	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;
	spin_lock_irqsave(&vhub->lock, flags);
	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
	        EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}
	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return 0;
}
/*
 * usb_ep_ops.dequeue for EP0: cancel the (single) pending request if
 * it matches @u_req, completing it with -ECONNRESET and stalling the
 * endpoint to clean up the hardware state. Returns 0 on success,
 * -EINVAL if the request wasn't queued here.
 */
static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 holds at most one request; check it's the one to cancel */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
	if (!req || u_req != &req->req) {
		rc = -EINVAL;
	} else {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
/* EP0 operations; no enable/disable as EP0 is always active */
static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue = ast_vhub_ep0_queue,
	.dequeue = ast_vhub_ep0_dequeue,
	.alloc_request = ast_vhub_alloc_request,
	.free_request = ast_vhub_free_request,
};
/*
 * Reset a device's EP0: abort all pending requests with -EIO and
 * return the state machine to waiting-for-token.
 */
void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;
	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
struct ast_vhub_dev *dev)
{
memset(ep, 0, sizeof(*ep));
INIT_LIST_HEAD(&ep->ep.ep_list);
INIT_LIST_HEAD(&ep->queue);
ep->ep.ops = &ast_vhub_ep0_ops;
ep->ep.name = "ep0";
ep->ep.caps.type_control = true;
usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
ep->d_idx = 0;
ep->dev = dev;
ep->vhub = vhub;
ep->ep0.state = ep0_state_token;
INIT_LIST_HEAD(&ep->ep0.req.queue);
ep->ep0.req.internal = true;
/* Small difference between vHub and devices */
if (dev) {
ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
ep->ep0.setup = vhub->regs +
AST_VHUB_SETUP0 + 8 * (dev->index + 1);
ep->buf = vhub->ep0_bufs +
AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
ep->buf_dma = vhub->ep0_bufs_dma +
AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
} else {
ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
ep->buf = vhub->ep0_bufs;
ep->buf_dma = vhub->ep0_bufs_dma;
}
}
| linux-master | drivers/usb/gadget/udc/aspeed-vhub/ep0.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* epn.c - Generic endpoints management
*
* Copyright 2017 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
/*
 * Optional runtime consistency checks: with EXTRA_CHECKS defined,
 * CHECK() logs a debug message whenever the given expression is false;
 * otherwise it compiles to nothing.
 */
#define EXTRA_CHECKS
#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...) \
	do { \
		if (!(expr)) EPDBG(ep, "CHECK:" fmt); \
	} while(0)
#else
#define CHECK(ep, expr, fmt...) do { } while(0)
#endif
/*
 * Single-buffer mode kick: program the next chunk of @req into the EP
 * DMA engine. Requests without a DMA mapping bounce through the EP
 * staging buffer (IN data is copied in first). Called with the vhub
 * lock held.
 */
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;
	/* There should be no DMA ongoing */
	WARN_ON(req->active);
	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;
	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);
	/* If DMA unavailable, using staging EP buffer */
	if (!req->req.dma) {
		/* For IN transfers, copy data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}
	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
/*
 * Ack handler for single-buffer mode: account the completed chunk,
 * copy bounce-buffer data out for OUT endpoints, complete the request
 * when its last chunk is done and kick the next chunk or request.
 */
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	int status = 0;
	u32 stat;
	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);
	/* In absence of a request, bail out, must have been dequeued */
	if (!req)
		return;
	/*
	 * Request not active, move on to processing queue, active request
	 * was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;
	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}
	/* No current DMA ongoing */
	req->active = false;
	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);
	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len) {
		if (req->req.actual + len > req->req.length) {
			req->last_desc = 1;
			status = -EOVERFLOW;
			goto done;
		} else {
			memcpy(req->req.buf + req->req.actual, ep->buf, len);
		}
	}
	/* Adjust size */
	req->req.actual += len;
	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;
 done:
	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, status);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);
		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}
 next_chunk:
	ast_vhub_epn_kick(ep, req);
}
static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	unsigned int used;

	/*
	 * d_next == d_last means descriptor list empty to HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list. Count the in-flight entries modulo the (power
	 * of two) ring size, then subtract from the usable capacity.
	 */
	used = (ep->epn.d_next - ep->epn.d_last) & (AST_VHUB_DESCS_COUNT - 1);
	return AST_VHUB_DESCS_COUNT - 1 - used;
}
/*
 * Descriptor-mode kick (IN transfers): fill as many free DMA
 * descriptors as possible with chunks of @req and hand the updated
 * write pointer to the hardware. Called with the vhub lock held.
 */
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;
	/* Mark request active if not already */
	req->active = true;
	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;
	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));
	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;
		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}
		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));
		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);
		/* Interrupt if end of request or no more descriptors */
		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);
		/* Account packet */
		req->act_count = act = act + chunk;
	}
	if (likely(desc))
		vhub_dma_workaround(desc);
	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}
/*
 * Ack handler for descriptor mode: walk the descriptors the hardware
 * has consumed since last time, account their lengths, complete the
 * request on its final descriptor, then refill the ring.
 */
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;
	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while(stat != stat1);
	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);
	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);
	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;
		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));
		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);
		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;
		/* Adjust size */
		req->req.actual += len;
		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);
		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);
			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}
	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}
/* Dispatch an EPn ack IRQ to the handler matching the EP's DMA mode. */
void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (!ep->epn.desc_mode)
		ast_vhub_epn_handle_ack(ep);
	else
		ast_vhub_epn_handle_ack_desc(ep);
}
/*
 * usb_ep_ops.queue for generic endpoints: validate the request, map it
 * for DMA when alignment/size rules allow (otherwise fall back to the
 * bounce buffer), enqueue it, and kick the hardware if the endpoint
 * was idle. Returns 0 or a negative errno.
 */
static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;
	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}
	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}
	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 * * For single stage mode (no descriptors):
	 *
	 * - The buffer is aligned to a 8 bytes boundary (HW requirement)
	 * - For a OUT endpoint, the request size is a multiple of the EP
	 * packet size (otherwise the controller will DMA past the end
	 * of the buffer if the host is sending a too long packet).
	 *
	 * * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
						   ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		u_req->dma = 0;
	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);
	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);
	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return 0;
}
/*
 * Stop any DMA activity on the endpoint and busy-wait (up to ~1ms)
 * for the engine to go idle. If @restart_ep, also reset the
 * descriptor ring pointers and re-enable the DMA engine so the
 * endpoint can be used again.
 */
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;
	/* Stop DMA activity */
	if (ep->epn.desc_mode)
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");
	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;
	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptors.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}
/*
 * usb_ep_ops.dequeue for generic endpoints: find @u_req on this EP's
 * queue, stop the DMA if it is in flight, and complete it with
 * -ECONNRESET. Returns 0 on success, -EINVAL if it wasn't queued here.
 */
static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *found = NULL, *iter;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req == u_req) {
			found = iter;
			break;
		}
	}

	if (found) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       found, found->active);

		/* Stop the hardware first if the request is in flight */
		if (found->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, found, -ECONNRESET);
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
u32 reg;
if (WARN_ON(ep->d_idx == 0))
return;
reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
if (ep->epn.stalled || ep->epn.wedged)
reg |= VHUB_EP_CFG_STALL_CTRL;
else
reg &= ~VHUB_EP_CFG_STALL_CTRL;
writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
if (!ep->epn.stalled && !ep->epn.wedged)
writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}
/*
 * Common implementation for set_halt and set_wedge: update the EP's
 * stalled/wedged flags and push them to the hardware. EP0 silently
 * succeeds (its stall is handled by the EP0 state machine); iso
 * endpoints can't be halted; halting a busy IN endpoint fails with
 * -EAGAIN.
 *
 * Fix: validate u_ep BEFORE dereferencing. The original read
 * ep->vhub and logged through the endpoint prior to the
 * `if (!u_ep || !u_ep->desc)` check, so a NULL u_ep would have
 * dereferenced a (near-)NULL pointer before ever being rejected.
 */
static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt,
				       bool wedge)
{
	struct ast_vhub_ep *ep;
	struct ast_vhub *vhub;
	unsigned long flags;

	/* Validate first: nothing may be dereferenced before this check */
	if (!u_ep || !u_ep->desc)
		return -EINVAL;

	ep = to_ast_ep(u_ep);
	vhub = ep->vhub;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

	/* EP0 stall is driven by its own state machine */
	if (ep->d_idx == 0)
		return 0;

	/* Isochronous endpoints have no halt feature */
	if (ep->epn.is_iso)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);
	spin_unlock_irqrestore(&vhub->lock, flags);
	return 0;
}
/* usb_ep_ops.set_halt: set or clear the halt without wedging */
static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}
/* usb_ep_ops.set_wedge: halt the endpoint and latch the wedge flag */
static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}
/*
 * usb_ep_ops.disable: stop DMA, shut the endpoint off in hardware,
 * mask and clear its ACK interrupt, and fail all pending requests
 * with -ESHUTDOWN. Always returns 0.
 */
static int ast_vhub_epn_disable(struct usb_ep* u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 ack_bit, ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Mask, then clear, this EP's ACK interrupt */
	ack_bit = VHUB_EP_IRQ(ep->epn.g_idx);
	ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER) & ~ack_bit;
	writel(ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(ack_bit, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);
	return 0;
}
/*
 * usb_ep_ops ->enable: configure and start a generic endpoint.
 *
 * Validates the descriptor against the EP's capabilities, derives the
 * transfer type / direction / max packet, programs the EP config and
 * DMA registers (descriptor mode for IN EPs that have a descriptor
 * ring, single-stage otherwise), clears the data toggle and unmasks
 * the per-EP ACK interrupt.
 *
 * Returns 0 on success, -EINVAL on bad arguments or control-type
 * request, -EBUSY if already enabled, -ESHUTDOWN with no bound
 * gadget driver.
 */
static int ast_vhub_epn_enable(struct usb_ep* u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		      dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		/* 8 bytes per HW descriptor */
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * Large send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		/* Start at 8 packets, then trim to the largest multiple
		 * of maxpacket that still fits in 4095 bytes. */
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}

	switch(type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	/* HW device slots are 1-based */
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
/*
 * usb_ep_ops ->dispose: release a generic endpoint back to the pool.
 *
 * Unlinks it from the gadget's EP list, frees its address slot in
 * the owning device, then releases the name string and the combined
 * data + descriptor DMA buffer allocated by ast_vhub_alloc_epn().
 */
static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark free */
	ep->dev = NULL;
}
/* usb_ep_ops for generic (non-EP0) endpoints. */
static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};
/*
 * Grab a free endpoint from the vhub shared pool and bind it to
 * device @d at address @addr (1-based EP number).
 *
 * Returns the endpoint, or NULL if the pool is exhausted or the DMA
 * buffer allocation fails.
 *
 * Fix: on dma_alloc_coherent() failure, release everything claimed
 * earlier (the device address slot and the pool slot itself); the
 * original left ep->dev and d->epns[addr-1] set, permanently leaking
 * the shared endpoint.
 */
struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	d->epns[addr-1] = ep;
	ep->epn.g_idx = i;
	/* Per-EP register block: 16 bytes each, starting at 0x200 */
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

	/* One buffer holds both the data packet and the HW ring */
	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		/* Undo the address claim and return the EP to the pool */
		d->epns[addr - 1] = NULL;
		spin_lock_irqsave(&vhub->lock, flags);
		ep->dev = NULL;
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;

	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}
| linux-master | drivers/usb/gadget/udc/aspeed-vhub/epn.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* core.c - Top level support
*
* Copyright 2017 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
/*
 * Complete a request on @ep with @status.
 *
 * Caller holds the vhub lock. NOTE: for non-internal requests the
 * lock is dropped across usb_gadget_giveback_request() and
 * re-acquired afterwards — callers must tolerate the queue changing
 * underneath them (see ast_vhub_nuke()).
 */
void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
		   int status)
{
	bool internal = req->internal;
	struct ast_vhub *vhub = ep->vhub;

	EPVDBG(ep, "completing request @%p, status %d\n", req, status);

	list_del_init(&req->queue);

	/* Don't overwrite an already-set error, except for -EOVERFLOW */
	if ((req->req.status == -EINPROGRESS) || (status == -EOVERFLOW))
		req->req.status = status;

	if (req->req.dma) {
		if (!WARN_ON(!ep->dev))
			usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
							&req->req, ep->epn.is_in);
		req->req.dma = 0;
	}

	/*
	 * If this isn't an internal EP0 request, call the core
	 * to call the gadget completion.
	 */
	if (!internal) {
		spin_unlock(&ep->vhub->lock);
		usb_gadget_giveback_request(&ep->ep, &req->req);
		spin_lock(&ep->vhub->lock);
	}
}
/*
 * Complete every queued request on @ep with @status.
 * Caller holds the vhub lock; ast_vhub_done() may drop and
 * re-acquire it, so the queue is re-checked from the head on
 * every iteration rather than iterated with a cursor.
 */
void ast_vhub_nuke(struct ast_vhub_ep *ep, int status)
{
	int nuked = 0;

	/* Beware, lock will be dropped & req-acquired by done() */
	while (!list_empty(&ep->queue)) {
		struct ast_vhub_req *req;

		req = list_first_entry(&ep->queue, struct ast_vhub_req,
				       queue);
		ast_vhub_done(ep, req, status);
		nuked++;
	}

	if (nuked)
		EPDBG(ep, "Nuked %d request(s)\n", nuked);
}
/*
 * usb_ep_ops ->alloc_request: allocate a zeroed request wrapper.
 * Returns the embedded struct usb_request, or NULL on OOM.
 */
struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
					   gfp_t gfp_flags)
{
	struct ast_vhub_req *req = kzalloc(sizeof(*req), gfp_flags);

	return req ? &req->req : NULL;
}
/* usb_ep_ops ->free_request: release the containing wrapper. */
void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req)
{
	kfree(to_ast_req(u_req));
}
/*
 * Top-level vHub interrupt handler.
 *
 * Reads and acks the global ISR, then dispatches in order:
 * generic endpoint ACKs, per-port device interrupts, hub EP0
 * events, and finally bus events (resume/suspend/reset).
 */
static irqreturn_t ast_vhub_irq(int irq, void *data)
{
	struct ast_vhub *vhub = data;
	irqreturn_t iret = IRQ_NONE;
	u32 i, istat;

	/* Stale interrupt while tearing down */
	if (!vhub->ep0_bufs)
		return IRQ_NONE;

	spin_lock(&vhub->lock);

	/* Read and ACK interrupts */
	istat = readl(vhub->regs + AST_VHUB_ISR);
	if (!istat)
		goto bail;
	writel(istat, vhub->regs + AST_VHUB_ISR);
	iret = IRQ_HANDLED;

	UDCVDBG(vhub, "irq status=%08x, ep_acks=%08x ep_nacks=%08x\n",
		istat,
		readl(vhub->regs + AST_VHUB_EP_ACK_ISR),
		readl(vhub->regs + AST_VHUB_EP_NACK_ISR));

	/* Handle generic EPs first */
	if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) {
		u32 ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR);
		writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR);

		/* Bail out early once all latched acks are consumed */
		for (i = 0; ep_acks && i < vhub->max_epns; i++) {
			u32 mask = VHUB_EP_IRQ(i);
			if (ep_acks & mask) {
				ast_vhub_epn_ack_irq(&vhub->epns[i]);
				ep_acks &= ~mask;
			}
		}
	}

	/* Handle device interrupts */
	if (istat & vhub->port_irq_mask) {
		for (i = 0; i < vhub->max_ports; i++) {
			if (istat & VHUB_DEV_IRQ(i))
				ast_vhub_dev_irq(&vhub->ports[i].dev);
		}
	}

	/* Handle top-level vHub EP0 interrupts */
	if (istat & (VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
		     VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
		     VHUB_IRQ_HUB_EP0_SETUP)) {
		if (istat & VHUB_IRQ_HUB_EP0_IN_ACK_STALL)
			ast_vhub_ep0_handle_ack(&vhub->ep0, true);
		if (istat & VHUB_IRQ_HUB_EP0_OUT_ACK_STALL)
			ast_vhub_ep0_handle_ack(&vhub->ep0, false);
		if (istat & VHUB_IRQ_HUB_EP0_SETUP)
			ast_vhub_ep0_handle_setup(&vhub->ep0);
	}

	/* Various top level bus events */
	if (istat & (VHUB_IRQ_BUS_RESUME |
		     VHUB_IRQ_BUS_SUSPEND |
		     VHUB_IRQ_BUS_RESET)) {
		if (istat & VHUB_IRQ_BUS_RESUME)
			ast_vhub_hub_resume(vhub);
		if (istat & VHUB_IRQ_BUS_SUSPEND)
			ast_vhub_hub_suspend(vhub);
		if (istat & VHUB_IRQ_BUS_RESET)
			ast_vhub_hub_reset(vhub);
	}

 bail:
	spin_unlock(&vhub->lock);
	return iret;
}
/*
 * Bring the vHub hardware (back) up: enable the PHY, reset all
 * sub-blocks, clear and configure endpoint interrupts, set up EP0
 * and the HW hub EP1, connect upstream, and unmask the interrupts
 * the driver handles. Called from probe and on re-initialization.
 */
void ast_vhub_init_hw(struct ast_vhub *vhub)
{
	u32 ctrl, port_mask, epn_mask;

	UDCDBG(vhub,"(Re)Starting HW ...\n");

	/* Enable PHY */
	ctrl = VHUB_CTRL_PHY_CLK |
		VHUB_CTRL_PHY_RESET_DIS;

	/*
	 * We do *NOT* set the VHUB_CTRL_CLK_STOP_SUSPEND bit
	 * to stop the logic clock during suspend because
	 * it causes the registers to become inaccessible and
	 * we haven't yet figured out a good wayt to bring the
	 * controller back into life to issue a wakeup.
	 */

	/*
	 * Set some ISO & split control bits according to Aspeed
	 * recommendation
	 *
	 * VHUB_CTRL_ISO_RSP_CTRL: When set tells the HW to respond
	 * with 0 bytes data packet to ISO IN endpoints when no data
	 * is available.
	 *
	 * VHUB_CTRL_SPLIT_IN: This makes a SOF complete a split IN
	 * transaction.
	 */
	ctrl |= VHUB_CTRL_ISO_RSP_CTRL | VHUB_CTRL_SPLIT_IN;
	writel(ctrl, vhub->regs + AST_VHUB_CTRL);
	udelay(1);

	/* Set descriptor ring size */
	if (AST_VHUB_DESCS_COUNT == 256) {
		ctrl |= VHUB_CTRL_LONG_DESC;
		writel(ctrl, vhub->regs + AST_VHUB_CTRL);
	} else {
		/* Only 32 and 256 entry rings are supported by the HW */
		BUILD_BUG_ON(AST_VHUB_DESCS_COUNT != 32);
	}

	/* Reset all devices */
	port_mask = GENMASK(vhub->max_ports, 1);
	writel(VHUB_SW_RESET_ROOT_HUB |
	       VHUB_SW_RESET_DMA_CONTROLLER |
	       VHUB_SW_RESET_EP_POOL |
	       port_mask, vhub->regs + AST_VHUB_SW_RESET);
	udelay(1);
	writel(0, vhub->regs + AST_VHUB_SW_RESET);

	/* Disable and cleanup EP ACK/NACK interrupts */
	epn_mask = GENMASK(vhub->max_epns - 1, 0);
	writel(0, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(0, vhub->regs + AST_VHUB_EP_NACK_IER);
	writel(epn_mask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	writel(epn_mask, vhub->regs + AST_VHUB_EP_NACK_ISR);

	/* Default settings for EP0, enable HW hub EP1 */
	writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
	writel(VHUB_EP1_CTRL_RESET_TOGGLE |
	       VHUB_EP1_CTRL_ENABLE,
	       vhub->regs + AST_VHUB_EP1_CTRL);
	writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);

	/* Configure EP0 DMA buffer */
	writel(vhub->ep0.buf_dma, vhub->regs + AST_VHUB_EP0_DATA);

	/* Clear address */
	writel(0, vhub->regs + AST_VHUB_CONF);

	/* Pullup hub (activate on host) */
	if (vhub->force_usb1)
		ctrl |= VHUB_CTRL_FULL_SPEED_ONLY;

	ctrl |= VHUB_CTRL_UPSTREAM_CONNECT;
	writel(ctrl, vhub->regs + AST_VHUB_CTRL);

	/* Enable some interrupts */
	writel(VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
	       VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
	       VHUB_IRQ_HUB_EP0_SETUP |
	       VHUB_IRQ_EP_POOL_ACK_STALL |
	       VHUB_IRQ_BUS_RESUME |
	       VHUB_IRQ_BUS_SUSPEND |
	       VHUB_IRQ_BUS_RESET,
	       vhub->regs + AST_VHUB_IER);
}
/*
 * Platform ->remove: tear the vHub down. Also used as the error
 * path of probe, hence the defensive checks.
 *
 * Fix: clk_disable_unprepare() can sleep (the unprepare half), so
 * it must not be called under the vhub spinlock; moved after the
 * unlock.
 */
static void ast_vhub_remove(struct platform_device *pdev)
{
	struct ast_vhub *vhub = platform_get_drvdata(pdev);
	unsigned long flags;
	int i;

	/* Probe may call us before anything was mapped */
	if (!vhub || !vhub->regs)
		return;

	/* Remove devices */
	for (i = 0; i < vhub->max_ports; i++)
		ast_vhub_del_dev(&vhub->ports[i].dev);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Mask & ack all interrupts */
	writel(0, vhub->regs + AST_VHUB_IER);
	writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);

	/* Pull device, leave PHY enabled */
	writel(VHUB_CTRL_PHY_CLK |
	       VHUB_CTRL_PHY_RESET_DIS,
	       vhub->regs + AST_VHUB_CTRL);

	spin_unlock_irqrestore(&vhub->lock, flags);

	/* May sleep: must be outside the spinlock */
	if (vhub->clk)
		clk_disable_unprepare(vhub->clk);

	if (vhub->ep0_bufs)
		dma_free_coherent(&pdev->dev,
				  AST_VHUB_EP0_MAX_PACKET *
				  (vhub->max_ports + 1),
				  vhub->ep0_bufs,
				  vhub->ep0_bufs_dma);
	/* Also acts as the "tearing down" flag for the IRQ handler */
	vhub->ep0_bufs = NULL;
}
/*
 * Platform ->probe: allocate the vHub state, map registers, enable
 * the clock, install the IRQ handler, allocate EP0 DMA buffers,
 * initialize the per-port devices and hub emulation, and finally
 * start the hardware. All error paths funnel through
 * ast_vhub_remove() which tolerates partial initialization.
 */
static int ast_vhub_probe(struct platform_device *pdev)
{
	enum usb_device_speed max_speed;
	struct ast_vhub *vhub;
	struct resource *res;
	int i, rc = 0;
	const struct device_node *np = pdev->dev.of_node;

	vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL);
	if (!vhub)
		return -ENOMEM;

	/* Port/EP counts come from DT, with compile-time fallbacks */
	rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports",
				  &vhub->max_ports);
	if (rc < 0)
		vhub->max_ports = AST_VHUB_NUM_PORTS;

	vhub->ports = devm_kcalloc(&pdev->dev, vhub->max_ports,
				   sizeof(*vhub->ports), GFP_KERNEL);
	if (!vhub->ports)
		return -ENOMEM;

	rc = of_property_read_u32(np, "aspeed,vhub-generic-endpoints",
				  &vhub->max_epns);
	if (rc < 0)
		vhub->max_epns = AST_VHUB_NUM_GEN_EPs;

	vhub->epns = devm_kcalloc(&pdev->dev, vhub->max_epns,
				  sizeof(*vhub->epns), GFP_KERNEL);
	if (!vhub->epns)
		return -ENOMEM;

	spin_lock_init(&vhub->lock);
	vhub->pdev = pdev;
	/* Per-port IRQ bits are contiguous starting at VHUB_IRQ_DEV1_BIT */
	vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1,
				      VHUB_IRQ_DEV1_BIT);

	vhub->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(vhub->regs)) {
		dev_err(&pdev->dev, "Failed to map resources\n");
		return PTR_ERR(vhub->regs);
	}
	UDCDBG(vhub, "vHub@%pR mapped @%p\n", res, vhub->regs);

	platform_set_drvdata(pdev, vhub);

	vhub->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(vhub->clk)) {
		rc = PTR_ERR(vhub->clk);
		goto err;
	}
	rc = clk_prepare_enable(vhub->clk);
	if (rc) {
		dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", rc);
		goto err;
	}

	/* Check if we need to limit the HW to USB1 */
	max_speed = usb_get_maximum_speed(&pdev->dev);
	if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
		vhub->force_usb1 = true;

	/* Mask & ack all interrupts before installing the handler */
	writel(0, vhub->regs + AST_VHUB_IER);
	writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);

	/* Find interrupt and install handler */
	vhub->irq = platform_get_irq(pdev, 0);
	if (vhub->irq < 0) {
		rc = vhub->irq;
		goto err;
	}
	rc = devm_request_irq(&pdev->dev, vhub->irq, ast_vhub_irq, 0,
			      KBUILD_MODNAME, vhub);
	if (rc) {
		dev_err(&pdev->dev, "Failed to request interrupt\n");
		goto err;
	}

	/*
	 * Allocate DMA buffers for all EP0s in one chunk,
	 * one per port and one for the vHub itself
	 */
	vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev,
					    AST_VHUB_EP0_MAX_PACKET *
					    (vhub->max_ports + 1),
					    &vhub->ep0_bufs_dma, GFP_KERNEL);
	if (!vhub->ep0_bufs) {
		dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n");
		rc = -ENOMEM;
		goto err;
	}
	UDCVDBG(vhub, "EP0 DMA buffers @%p (DMA 0x%08x)\n",
		vhub->ep0_bufs, (u32)vhub->ep0_bufs_dma);

	/* Init vHub EP0 */
	ast_vhub_init_ep0(vhub, &vhub->ep0, NULL);

	/* Init devices */
	for (i = 0; i < vhub->max_ports && rc == 0; i++)
		rc = ast_vhub_init_dev(vhub, i);
	if (rc)
		goto err;

	/* Init hub emulation */
	rc = ast_vhub_init_hub(vhub);
	if (rc)
		goto err;

	/* Initialize HW */
	ast_vhub_init_hw(vhub);

	dev_info(&pdev->dev, "Initialized virtual hub in USB%d mode\n",
		 vhub->force_usb1 ? 1 : 2);

	return 0;
 err:
	/* Remove copes with partially-initialized state */
	ast_vhub_remove(pdev);
	return rc;
}
/* Device-tree match table: supported Aspeed SoC generations. */
static const struct of_device_id ast_vhub_dt_ids[] = {
	{
		.compatible = "aspeed,ast2400-usb-vhub",
	},
	{
		.compatible = "aspeed,ast2500-usb-vhub",
	},
	{
		.compatible = "aspeed,ast2600-usb-vhub",
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids);
/* Platform driver glue; module init/exit via module_platform_driver(). */
static struct platform_driver ast_vhub_driver = {
	.probe		= ast_vhub_probe,
	.remove_new	= ast_vhub_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table	= ast_vhub_dt_ids,
	},
};
module_platform_driver(ast_vhub_driver);
MODULE_DESCRIPTION("Aspeed vHub udc driver");
MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/udc/aspeed-vhub/core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* dev.c - Individual device/gadget management (ie, a port = a gadget)
*
* Copyright 2017 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "vhub.h"
/*
 * Per-port device interrupt: read-and-ack the device ISR, then
 * dispatch the three EP0 events (IN ack, OUT ack, SETUP).
 * Called from ast_vhub_irq() with the vhub lock held.
 */
void ast_vhub_dev_irq(struct ast_vhub_dev *d)
{
	u32 istat = readl(d->regs + AST_VHUB_DEV_ISR);

	writel(istat, d->regs + AST_VHUB_DEV_ISR);

	if (istat & VHUV_DEV_IRQ_EP0_IN_ACK_STALL)
		ast_vhub_ep0_handle_ack(&d->ep0, true);
	if (istat & VHUV_DEV_IRQ_EP0_OUT_ACK_STALL)
		ast_vhub_ep0_handle_ack(&d->ep0, false);
	if (istat & VHUV_DEV_IRQ_EP0_SETUP)
		ast_vhub_ep0_handle_setup(&d->ep0);
}
/*
 * Enable a downstream device port: reset its EP0 state, program the
 * port enable/EP0 IRQ bits and speed, unmask the port interrupt in
 * the hub, set the EP0 DMA buffer and clear any leftover stalls on
 * the device's generic endpoints. Idempotent via d->enabled.
 * Caller holds the vhub lock.
 */
static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
{
	u32 reg, hmsk, i;

	if (d->enabled)
		return;

	/* Cleanup EP0 state */
	ast_vhub_reset_ep0(d);

	/* Enable device and its EP0 interrupts */
	reg = VHUB_DEV_EN_ENABLE_PORT |
		VHUB_DEV_EN_EP0_IN_ACK_IRQEN |
		VHUB_DEV_EN_EP0_OUT_ACK_IRQEN |
		VHUB_DEV_EN_EP0_SETUP_IRQEN;
	if (d->gadget.speed == USB_SPEED_HIGH)
		reg |= VHUB_DEV_EN_SPEED_SEL_HIGH;
	writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);

	/* Enable device interrupt in the hub as well */
	hmsk = VHUB_IRQ_DEVICE1 << d->index;
	reg = readl(d->vhub->regs + AST_VHUB_IER);
	reg |= hmsk;
	writel(reg, d->vhub->regs + AST_VHUB_IER);

	/* Set EP0 DMA buffer address */
	writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA);

	/* Clear stall on all EPs */
	for (i = 0; i < d->max_epns; i++) {
		struct ast_vhub_ep *ep = d->epns[i];

		if (ep && (ep->epn.stalled || ep->epn.wedged)) {
			ep->epn.stalled = false;
			ep->epn.wedged = false;
			ast_vhub_update_epn_stall(ep);
		}
	}

	/* Additional cleanups */
	d->wakeup_en = false;
	d->enabled = true;
}
/*
 * Disable a downstream device port: mask its hub interrupt, clear
 * the port enable register (which also drops the assigned address),
 * and forget the negotiated speed. Idempotent via d->enabled.
 * Caller holds the vhub lock.
 */
static void ast_vhub_dev_disable(struct ast_vhub_dev *d)
{
	u32 reg, hmsk;

	if (!d->enabled)
		return;

	/* Disable device interrupt in the hub */
	hmsk = VHUB_IRQ_DEVICE1 << d->index;
	reg = readl(d->vhub->regs + AST_VHUB_IER);
	reg &= ~hmsk;
	writel(reg, d->vhub->regs + AST_VHUB_IER);

	/* Then disable device */
	writel(0, d->regs + AST_VHUB_DEV_EN_CTRL);
	d->gadget.speed = USB_SPEED_UNKNOWN;
	d->enabled = false;
}
/*
 * Handle device-recipient SET/CLEAR_FEATURE.
 *
 * REMOTE_WAKEUP toggles the wakeup-enable flag; TEST_MODE writes
 * the 3-bit test selector (wIndex high byte) into the hub CTRL
 * register. Anything else is left to the gadget driver.
 *
 * Returns std_req_complete when handled, std_req_driver otherwise.
 */
static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
				u16 wIndex, u16 wValue,
				bool is_set)
{
	u32 val;

	DDBG(d, "%s_FEATURE(dev val=%02x)\n",
	     is_set ? "SET" : "CLEAR", wValue);

	if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
		d->wakeup_en = is_set;
		return std_req_complete;
	}

	if (wValue == USB_DEVICE_TEST_MODE) {
		/* Test mode selector occupies bits 10:8 of CTRL */
		val = readl(d->vhub->regs + AST_VHUB_CTRL);
		val &= ~GENMASK(10, 8);
		val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
		writel(val, d->vhub->regs + AST_VHUB_CTRL);
		return std_req_complete;
	}

	return std_req_driver;
}
/*
 * Handle endpoint-recipient SET/CLEAR_FEATURE (ENDPOINT_HALT).
 *
 * Stalls the request for unknown/disabled/ISO endpoints or a
 * direction mismatch; EP0 halt requests complete as a no-op.
 * Returns std_req_complete / std_req_stall / std_req_driver.
 */
static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
			       u16 wIndex, u16 wValue, bool is_set)
{
	struct ast_vhub_ep *ep;
	int ep_num;

	ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
	DDBG(d, "%s_FEATURE(ep%d val=%02x)\n",
	     is_set ? "SET" : "CLEAR", ep_num, wValue);
	if (ep_num == 0)
		return std_req_complete;
	if (ep_num >= d->max_epns || !d->epns[ep_num - 1])
		return std_req_stall;
	if (wValue != USB_ENDPOINT_HALT)
		return std_req_driver;

	ep = d->epns[ep_num - 1];
	/* Defensive only: non-NULL was already established above */
	if (WARN_ON(!ep))
		return std_req_stall;

	if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
	    ep->epn.is_in != !!(wIndex & USB_DIR_IN))
		return std_req_stall;

	DDBG(d, "%s stall on EP %d\n",
	     is_set ? "setting" : "clearing", ep_num);
	ep->epn.stalled = is_set;
	ast_vhub_update_epn_stall(ep);

	return std_req_complete;
}
/*
 * Answer GET_STATUS(device): self-powered and remote-wakeup bits
 * in the first status byte, replied through EP0.
 */
static int ast_vhub_dev_status(struct ast_vhub_dev *d,
			       u16 wIndex, u16 wValue)
{
	u8 st0 = 0;

	DDBG(d, "GET_STATUS(dev)\n");

	if (d->gadget.is_selfpowered)
		st0 |= 1 << USB_DEVICE_SELF_POWERED;
	if (d->wakeup_en)
		st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP;

	return ast_vhub_simple_reply(&d->ep0, st0, 0);
}
/*
 * Answer GET_STATUS(endpoint): reports the HALT bit for a generic
 * endpoint, or all-zero status for EP0. Stalls for unknown,
 * disabled, ISO, or direction-mismatched endpoints.
 */
static int ast_vhub_ep_status(struct ast_vhub_dev *d,
			      u16 wIndex, u16 wValue)
{
	int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
	struct ast_vhub_ep *ep;
	u8 st0 = 0;

	DDBG(d, "GET_STATUS(ep%d)\n", ep_num);

	if (ep_num >= d->max_epns)
		return std_req_stall;
	if (ep_num != 0) {
		ep = d->epns[ep_num - 1];
		if (!ep)
			return std_req_stall;
		if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
		    ep->epn.is_in != !!(wIndex & USB_DIR_IN))
			return std_req_stall;
		if (ep->epn.stalled)
			st0 |= 1 << USB_ENDPOINT_HALT;
	}

	return ast_vhub_simple_reply(&d->ep0, st0, 0);
}
/*
 * Program the USB device address into the port enable/control
 * register (read-modify-write of the address field only).
 */
static void ast_vhub_dev_set_address(struct ast_vhub_dev *d, u8 addr)
{
	u32 reg;

	DDBG(d, "SET_ADDRESS: Got address %x\n", addr);

	reg = readl(d->regs + AST_VHUB_DEV_EN_CTRL);
	reg &= ~VHUB_DEV_EN_ADDR_MASK;
	reg |= VHUB_DEV_EN_SET_ADDR(addr);
	writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);
}
/*
 * Handle standard control requests targeting a downstream device.
 *
 * Decodes SET_ADDRESS, GET_STATUS and SET/CLEAR_FEATURE locally;
 * everything else is returned as std_req_driver so EP0 code hands
 * it to the gadget driver. On the first request after a reset, the
 * device speed is captured from the hub, clamped to the gadget
 * driver's max_speed.
 *
 * Fix: corrected "fist packet" typo in the debug message.
 */
int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
			     struct usb_ctrlrequest *crq)
{
	struct ast_vhub_dev *d = ep->dev;
	u16 wValue, wIndex;

	/* No driver, we shouldn't be enabled ... */
	if (!d->driver || !d->enabled) {
		EPDBG(ep,
		      "Device is wrong state driver=%p enabled=%d\n",
		      d->driver, d->enabled);
		return std_req_stall;
	}

	/*
	 * Note: we used to reject/stall requests while suspended,
	 * we don't do that anymore as we seem to have cases of
	 * mass storage getting very upset.
	 */

	/* First packet, grab speed */
	if (d->gadget.speed == USB_SPEED_UNKNOWN) {
		d->gadget.speed = ep->vhub->speed;
		if (d->gadget.speed > d->driver->max_speed)
			d->gadget.speed = d->driver->max_speed;
		DDBG(d, "first packet, captured speed %d\n",
		     d->gadget.speed);
	}

	wValue = le16_to_cpu(crq->wValue);
	wIndex = le16_to_cpu(crq->wIndex);

	switch ((crq->bRequestType << 8) | crq->bRequest) {
		/* SET_ADDRESS */
	case DeviceOutRequest | USB_REQ_SET_ADDRESS:
		ast_vhub_dev_set_address(d, wValue);
		return std_req_complete;

		/* GET_STATUS */
	case DeviceRequest | USB_REQ_GET_STATUS:
		return ast_vhub_dev_status(d, wIndex, wValue);
	case InterfaceRequest | USB_REQ_GET_STATUS:
		return ast_vhub_simple_reply(ep, 0, 0);
	case EndpointRequest | USB_REQ_GET_STATUS:
		return ast_vhub_ep_status(d, wIndex, wValue);

		/* SET/CLEAR_FEATURE */
	case DeviceOutRequest | USB_REQ_SET_FEATURE:
		return ast_vhub_dev_feature(d, wIndex, wValue, true);
	case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
		return ast_vhub_dev_feature(d, wIndex, wValue, false);
	case EndpointOutRequest | USB_REQ_SET_FEATURE:
		return ast_vhub_ep_feature(d, wIndex, wValue, true);
	case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
		return ast_vhub_ep_feature(d, wIndex, wValue, false);
	}
	return std_req_driver;
}
/*
 * usb_gadget_ops ->wakeup: request remote wakeup of the host.
 * Only allowed when the host enabled it via SET_FEATURE; all ports
 * share the bus, so the whole hub is woken.
 */
static int ast_vhub_udc_wakeup(struct usb_gadget *gadget)
{
	struct ast_vhub_dev *d = to_ast_dev(gadget);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&d->vhub->lock, flags);
	if (d->wakeup_en) {
		DDBG(d, "Device initiated wakeup\n");

		/* Wakeup the host */
		ast_vhub_hub_wake_all(d->vhub);
		rc = 0;
	} else {
		rc = -EINVAL;
	}
	spin_unlock_irqrestore(&d->vhub->lock, flags);

	return rc;
}
static int ast_vhub_udc_get_frame(struct usb_gadget* gadget)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
return (readl(d->vhub->regs + AST_VHUB_USBSTS) >> 16) & 0x7ff;
}
/* Fail every pending request on all of the device's endpoints. */
static void ast_vhub_dev_nuke(struct ast_vhub_dev *d)
{
	unsigned int i;

	for (i = 0; i < d->max_epns; i++) {
		struct ast_vhub_ep *ep = d->epns[i];

		if (ep)
			ast_vhub_nuke(ep, -ESHUTDOWN);
	}
}
/*
 * usb_gadget_ops ->pullup: (dis)connect the port on the emulated
 * hub. On disconnect (and also on connect, since the port was
 * previously off), any enabled state is torn down so the address
 * is cleared. Always returns 0.
 */
static int ast_vhub_udc_pullup(struct usb_gadget* gadget, int on)
{
	struct ast_vhub_dev *d = to_ast_dev(gadget);
	unsigned long flags;

	spin_lock_irqsave(&d->vhub->lock, flags);

	DDBG(d, "pullup(%d)\n", on);

	/* Mark disconnected in the hub */
	ast_vhub_device_connect(d->vhub, d->index, on);

	/*
	 * If enabled, nuke all requests if any (there shouldn't be)
	 * and disable the port. This will clear the address too.
	 */
	if (d->enabled) {
		ast_vhub_dev_nuke(d);
		ast_vhub_dev_disable(d);
	}

	spin_unlock_irqrestore(&d->vhub->lock, flags);

	return 0;
}
/*
 * usb_gadget_ops ->udc_start: bind a gadget driver to this port.
 * Just records the driver; the port becomes active only when the
 * hub enables it. Always returns 0.
 */
static int ast_vhub_udc_start(struct usb_gadget *gadget,
			      struct usb_gadget_driver *driver)
{
	struct ast_vhub_dev *d = to_ast_dev(gadget);
	unsigned long flags;

	spin_lock_irqsave(&d->vhub->lock, flags);

	DDBG(d, "start\n");

	/* We don't do much more until the hub enables us */
	d->gadget.is_selfpowered = 1;
	d->driver = driver;
	spin_unlock_irqrestore(&d->vhub->lock, flags);

	return 0;
}
/*
 * usb_gadget_ops ->match_ep: find or allocate an endpoint for a
 * descriptor. First tries endpoints already bound to this gadget,
 * then validates the request and claims one from the shared pool at
 * a free address of this device.
 *
 * Improvement: added a defensive `default` arm to the type switch.
 * usb_endpoint_type() yields one of the four enumerated values, but
 * without the default `max` is formally uninitialized on fall-out
 * and some compilers warn.
 */
static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget,
					    struct usb_endpoint_descriptor *desc,
					    struct usb_ss_ep_comp_descriptor *ss)
{
	struct ast_vhub_dev *d = to_ast_dev(gadget);
	struct ast_vhub_ep *ep;
	struct usb_ep *u_ep;
	unsigned int max, addr, i;

	DDBG(d, "Match EP type %d\n", usb_endpoint_type(desc));

	/*
	 * First we need to look for an existing unclaimed EP as another
	 * configuration may have already associated a bunch of EPs with
	 * this gadget. This duplicates the code in usb_ep_autoconfig_ss()
	 * unfortunately.
	 */
	list_for_each_entry(u_ep, &gadget->ep_list, ep_list) {
		if (usb_gadget_ep_match_desc(gadget, u_ep, desc, ss)) {
			DDBG(d, " -> using existing EP%d\n",
			     to_ast_ep(u_ep)->d_idx);
			return u_ep;
		}
	}

	/*
	 * We didn't find one, we need to grab one from the pool.
	 *
	 * First let's do some sanity checking
	 */
	switch(usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Only EP0 can be a control endpoint */
		return NULL;
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
		if (gadget_is_dualspeed(gadget))
			max = 1024;
		else
			max = 1023;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (gadget_is_dualspeed(gadget))
			max = 512;
		else
			max = 64;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (gadget_is_dualspeed(gadget))
			max = 1024;
		else
			max = 64;
		break;
	default:
		/* Unreachable: the type field only has 4 values */
		return NULL;
	}
	if (usb_endpoint_maxp(desc) > max)
		return NULL;

	/*
	 * Find a free EP address for that device. We can't
	 * let the generic code assign these as it would
	 * create overlapping numbers for IN and OUT which
	 * we don't support, so also create a suitable name
	 * that will allow the generic code to use our
	 * assigned address.
	 */
	for (i = 0; i < d->max_epns; i++)
		if (d->epns[i] == NULL)
			break;
	if (i >= d->max_epns)
		return NULL;
	addr = i + 1;

	/*
	 * Now grab an EP from the shared pool and associate
	 * it with our device
	 */
	ep = ast_vhub_alloc_epn(d, addr);
	if (!ep)
		return NULL;
	DDBG(d, "Allocated epn#%d for port EP%d\n",
	     ep->epn.g_idx, addr);

	return &ep->ep;
}
/*
 * usb_gadget_ops ->udc_stop: unbind the gadget driver, flush all
 * pending requests and turn the port off. Always returns 0.
 */
static int ast_vhub_udc_stop(struct usb_gadget *gadget)
{
	struct ast_vhub_dev *d = to_ast_dev(gadget);
	unsigned long flags;

	spin_lock_irqsave(&d->vhub->lock, flags);

	DDBG(d, "stop\n");

	d->gadget.speed = USB_SPEED_UNKNOWN;
	d->driver = NULL;

	ast_vhub_dev_nuke(d);

	if (d->enabled)
		ast_vhub_dev_disable(d);

	spin_unlock_irqrestore(&d->vhub->lock, flags);

	return 0;
}
/* usb_gadget_ops for each downstream port's gadget. */
static const struct usb_gadget_ops ast_vhub_udc_ops = {
	.get_frame	= ast_vhub_udc_get_frame,
	.wakeup		= ast_vhub_udc_wakeup,
	.pullup		= ast_vhub_udc_pullup,
	.udc_start	= ast_vhub_udc_start,
	.udc_stop	= ast_vhub_udc_stop,
	.match_ep	= ast_vhub_udc_match_ep,
};
/*
 * Propagate bus suspend to the gadget driver. Called with the vhub
 * lock held; it is dropped around the driver callback, which may
 * itself call back into the UDC.
 */
void ast_vhub_dev_suspend(struct ast_vhub_dev *d)
{
	if (d->driver && d->driver->suspend) {
		spin_unlock(&d->vhub->lock);
		d->driver->suspend(&d->gadget);
		spin_lock(&d->vhub->lock);
	}
}
/*
 * Propagate bus resume to the gadget driver. Same locking contract
 * as ast_vhub_dev_suspend(): vhub lock held, dropped around the
 * callback.
 */
void ast_vhub_dev_resume(struct ast_vhub_dev *d)
{
	if (d->driver && d->driver->resume) {
		spin_unlock(&d->vhub->lock);
		d->driver->resume(&d->gadget);
		spin_lock(&d->vhub->lock);
	}
}
/*
 * Handle a bus reset seen by this port. Without a bound driver the
 * port is simply disabled. A disabled port is (re)enabled; an
 * already-enabled one notifies the gadget driver (lock dropped for
 * the callback) and is then power-cycled to clear address & speed.
 * Caller holds the vhub lock.
 */
void ast_vhub_dev_reset(struct ast_vhub_dev *d)
{
	/* No driver, just disable the device and return */
	if (!d->driver) {
		ast_vhub_dev_disable(d);
		return;
	}

	/* If the port isn't enabled, just enable it */
	if (!d->enabled) {
		DDBG(d, "Reset of disabled device, enabling...\n");
		ast_vhub_dev_enable(d);
	} else {
		DDBG(d, "Reset of enabled device, resetting...\n");
		spin_unlock(&d->vhub->lock);
		usb_gadget_udc_reset(&d->gadget, d->driver);
		spin_lock(&d->vhub->lock);

		/*
		 * Disable and maybe re-enable HW, this will clear the address
		 * and speed setting.
		 */
		ast_vhub_dev_disable(d);
		ast_vhub_dev_enable(d);
	}
}
/*
 * Tear down one port's gadget: unregister the UDC, drop the
 * per-port parent device, and free the EP pointer array. Safe to
 * call on a port that never finished registering (checked under
 * the lock).
 */
void ast_vhub_del_dev(struct ast_vhub_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->vhub->lock, flags);
	if (!d->registered) {
		spin_unlock_irqrestore(&d->vhub->lock, flags);
		return;
	}
	d->registered = false;
	spin_unlock_irqrestore(&d->vhub->lock, flags);

	usb_del_gadget_udc(&d->gadget);
	device_unregister(d->port_dev);
	kfree(d->epns);
}
/* ->release for the per-port parent device allocated in init_dev. */
static void ast_vhub_dev_release(struct device *dev)
{
	kfree(dev);
}
/*
 * Initialize port @idx of the vhub: set up its EP0, allocate its
 * endpoint-pointer array, create the uniquely-named parent device
 * required by the UDC core, populate the gadget, and register it.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released.
 */
int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
{
	struct ast_vhub_dev *d = &vhub->ports[idx].dev;
	struct device *parent = &vhub->pdev->dev;
	int rc;

	d->vhub = vhub;
	d->index = idx;
	d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
	/* Per-port register block: 16 bytes each, starting at 0x100 */
	d->regs = vhub->regs + 0x100 + 0x10 * idx;

	ast_vhub_init_ep0(vhub, &d->ep0, d);

	/*
	 * A USB device can have up to 30 endpoints besides control
	 * endpoint 0.
	 */
	d->max_epns = min_t(u32, vhub->max_epns, 30);
	d->epns = kcalloc(d->max_epns, sizeof(*d->epns), GFP_KERNEL);
	if (!d->epns)
		return -ENOMEM;

	/*
	 * The UDC core really needs us to have separate and uniquely
	 * named "parent" devices for each port so we create a sub device
	 * here for that purpose
	 */
	d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!d->port_dev) {
		rc = -ENOMEM;
		goto fail_alloc;
	}
	device_initialize(d->port_dev);
	d->port_dev->release = ast_vhub_dev_release;
	d->port_dev->parent = parent;
	dev_set_name(d->port_dev, "%s:p%d", dev_name(parent), idx + 1);
	rc = device_add(d->port_dev);
	if (rc)
		goto fail_add;

	/* Populate gadget */
	INIT_LIST_HEAD(&d->gadget.ep_list);
	d->gadget.ops = &ast_vhub_udc_ops;
	d->gadget.ep0 = &d->ep0.ep;
	d->gadget.name = KBUILD_MODNAME;
	if (vhub->force_usb1)
		d->gadget.max_speed = USB_SPEED_FULL;
	else
		d->gadget.max_speed = USB_SPEED_HIGH;
	d->gadget.speed = USB_SPEED_UNKNOWN;
	/* All ports share the vhub's OF node */
	d->gadget.dev.of_node = vhub->pdev->dev.of_node;
	d->gadget.dev.of_node_reused = true;

	rc = usb_add_gadget_udc(d->port_dev, &d->gadget);
	if (rc != 0)
		goto fail_udc;
	d->registered = true;

	return 0;
 fail_udc:
	device_del(d->port_dev);
 fail_add:
	put_device(d->port_dev);
 fail_alloc:
	kfree(d->epns);

	return rc;
}
| linux-master | drivers/usb/gadget/udc/aspeed-vhub/dev.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_cmd.c - BRCM BDC USB3.0 device controller
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "bdc.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
/* Issues a cmd to cmd processor and waits for cmd completion */
/*
 * Writes the three command parameters, kicks the command with
 * CWS|SRD set, then busy-polls CMDSC (1us steps, up to
 * BDC_CMD_TIMEOUT) until the status field leaves BUSY.
 * Returns the raw BDC_CMDS_* status code — BDC_CMDS_BUSY means
 * timeout.
 */
static int bdc_issue_cmd(struct bdc *bdc, u32 cmd_sc, u32 param0,
			 u32 param1, u32 param2)
{
	u32 timeout = BDC_CMD_TIMEOUT;
	u32 cmd_status;
	u32 temp;

	bdc_writel(bdc->regs, BDC_CMDPAR0, param0);
	bdc_writel(bdc->regs, BDC_CMDPAR1, param1);
	bdc_writel(bdc->regs, BDC_CMDPAR2, param2);

	/* Issue the cmd */
	/* Make sure the cmd params are written before asking HW to exec cmd */
	wmb();
	bdc_writel(bdc->regs, BDC_CMDSC, cmd_sc | BDC_CMD_CWS | BDC_CMD_SRD);
	do {
		temp = bdc_readl(bdc->regs, BDC_CMDSC);
		dev_dbg_ratelimited(bdc->dev, "cmdsc=%x", temp);
		cmd_status =  BDC_CMD_CST(temp);
		if (cmd_status != BDC_CMDS_BUSY)  {
			dev_dbg(bdc->dev,
				"command completed cmd_sts:%x\n", cmd_status);
			return cmd_status;
		}
		udelay(1);
	} while (timeout--);

	dev_err(bdc->dev,
		"command operation timedout cmd_status=%d\n", cmd_status);

	return cmd_status;
}
/* Submits cmd and analyze the return value of bdc_issue_cmd */
/*
 * Wrapper around bdc_issue_cmd(): refuses to submit while the
 * command processor is busy and maps the hardware BDC_CMDS_*
 * completion codes onto negative errnos (0 on success).
 */
static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
			  u32 param0, u32 param1, u32 param2)
{
	u32 temp, cmd_status;
	int ret;

	temp = bdc_readl(bdc->regs, BDC_CMDSC);
	dev_dbg(bdc->dev,
		"%s:CMDSC:%08x cmdsc:%08x param0=%08x param1=%08x param2=%08x\n",
		__func__, temp, cmd_sc, param0, param1, param2);

	cmd_status = BDC_CMD_CST(temp);
	if (cmd_status == BDC_CMDS_BUSY) {
		dev_err(bdc->dev, "command processor busy: %x\n", cmd_status);
		return -EBUSY;
	}
	ret = bdc_issue_cmd(bdc, cmd_sc, param0, param1, param2);
	switch (ret) {
	case BDC_CMDS_SUCC:
		dev_dbg(bdc->dev, "command completed successfully\n");
		ret = 0;
		break;

	case BDC_CMDS_PARA:
		dev_err(bdc->dev, "command parameter error\n");
		ret = -EINVAL;
		break;

	case BDC_CMDS_STAT:
		dev_err(bdc->dev, "Invalid device/ep state\n");
		ret = -EINVAL;
		break;

	case BDC_CMDS_FAIL:
		dev_err(bdc->dev, "Command failed?\n");
		ret = -EAGAIN;
		break;

	case BDC_CMDS_INTL:
		dev_err(bdc->dev, "BDC Internal error\n");
		ret = -ECONNRESET;
		break;

	case BDC_CMDS_BUSY:
		/* BUSY after issue means bdc_issue_cmd() timed out */
		dev_err(bdc->dev,
			"command timedout waited for %dusec\n",
			BDC_CMD_TIMEOUT);
		ret = -ECONNRESET;
		break;
	default:
		dev_dbg(bdc->dev, "Unknown command completion code:%x\n", ret);
	}

	return ret;
}
/*
 * Deconfigure (drop) an endpoint from the hardware by issuing an EP
 * command with the DRP_EP sub-command.  Returns 0 or a negative errno.
 */
int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep)
{
	u32 cmd_sc = BDC_SUB_CMD_DRP_EP | BDC_CMD_EPN(ep->ep_num) | BDC_CMD_EPC;

	dev_dbg(bdc->dev, "%s ep->ep_num =%d cmd_sc=%x\n", __func__,
							ep->ep_num, cmd_sc);

	return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
}
/*
 * Reinitialize the bd list after a config-ep command: reset both ring
 * indices to the start and rewrite the first bd as an empty descriptor
 * with the stop-bd-fetch flag set, so HW will not fetch past it.
 */
static void ep_bd_list_reinit(struct bdc_ep *ep)
{
	struct bdc *bdc = ep->bdc;
	struct bdc_bd *first;

	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;
	first = ep->bd_list.bd_table_array[0]->start_bd;
	dev_dbg(bdc->dev, "%s ep:%p bd:%p\n", __func__, ep, first);
	memset(first, 0, sizeof(*first));
	first->offset[3] |= cpu_to_le32(BD_SBF);
}
/*
 * Configure an endpoint in hardware.
 * param0/param1 carry the DMA address of the first bd table; param2 packs
 * max packet size, endpoint type, service interval (SI), mult and max
 * burst, encoded per the connected speed.  On success the ep's bd list is
 * reinitialized.  Returns 0 or a negative errno from bdc_submit_cmd().
 */
int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor	*desc;
	u32 param0, param1, param2, cmd_sc;
	u32 mps, mbs, mul, si;
	int ret;

	desc = ep->desc;
	comp_desc = ep->comp_desc;
	cmd_sc = mul = mbs = param2 = 0;
	param0 = lower_32_bits(ep->bd_list.bd_table_array[0]->dma);
	param1 = upper_32_bits(ep->bd_list.bd_table_array[0]->dma);
	cpu_to_le32s(&param0);
	cpu_to_le32s(&param1);

	dev_dbg(bdc->dev, "%s: param0=%08x param1=%08x",
					__func__, param0, param1);
	/* HW expects SI as (bInterval - 1), clamped to the 2^0..2^15 range */
	si = desc->bInterval;
	si = clamp_val(si, 1, 16) - 1;

	mps = usb_endpoint_maxp(desc);
	param2 |= mps << MP_SHIFT;
	param2 |= usb_endpoint_type(desc) << EPT_SHIFT;

	switch (bdc->gadget.speed) {
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(desc) ||
					usb_endpoint_xfer_isoc(desc)) {
			param2 |= si;
			/* SS isoc mult comes from the companion descriptor */
			if (usb_endpoint_xfer_isoc(desc) && comp_desc)
				mul = comp_desc->bmAttributes;

		}
		param2 |= mul << EPM_SHIFT;
		if (comp_desc)
			mbs = comp_desc->bMaxBurst;
		param2 |= mbs << MB_SHIFT;
		break;

	case USB_SPEED_HIGH:
		if (usb_endpoint_xfer_isoc(desc) ||
					usb_endpoint_xfer_int(desc)) {
			param2 |= si;

			/* HS burst is the maxp high-bits transaction count */
			mbs = usb_endpoint_maxp_mult(desc);
			param2 |= mbs << MB_SHIFT;
		}
		break;

	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* the hardware accepts SI in 125usec range */
		if (usb_endpoint_xfer_isoc(desc))
			si += 3;

		/*
		 * FS Int endpoints can have si of 1-255ms but the controller
		 * accepts 2^bInterval*125usec, so convert ms to nearest power
		 * of 2
		 */
		if (usb_endpoint_xfer_int(desc))
			si = fls(desc->bInterval * 8) - 1;

		param2 |= si;
		break;
	default:
		dev_err(bdc->dev, "UNKNOWN speed ERR\n");
		return -EINVAL;
	}

	cmd_sc |= BDC_CMD_EPC|BDC_CMD_EPN(ep->ep_num)|BDC_SUB_CMD_ADD_EP;

	dev_dbg(bdc->dev, "cmd_sc=%x param2=%08x\n", cmd_sc, param2);
	ret = bdc_submit_cmd(bdc, cmd_sc, param0, param1, param2);
	if (ret) {
		dev_err(bdc->dev, "command failed :%x\n", ret);
		return ret;
	}
	/* HW now owns a fresh view of the ring; reset software indices too */
	ep_bd_list_reinit(ep);

	return ret;
}
/*
 * Change the HW dequeue pointer of an endpoint (BLA command).  On success
 * the controller resumes bd fetching from @dma_addr.
 */
int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr)
{
	u32 cmd_sc = BDC_CMD_EPN(ep->ep_num) | BDC_CMD_BLA;
	u32 addr_lo, addr_hi;

	dev_dbg(bdc->dev, "%s: add=%08llx\n", __func__,
					(unsigned long long)(dma_addr));

	addr_lo = lower_32_bits(dma_addr);
	addr_hi = upper_32_bits(dma_addr);
	cpu_to_le32s(&addr_lo);
	cpu_to_le32s(&addr_hi);

	dev_dbg(bdc->dev, "cmd_sc=%x\n", cmd_sc);

	return bdc_submit_cmd(bdc, cmd_sc, addr_lo, addr_hi, 0);
}
/* Program the device address received from the host in a SET_ADDRESS request */
int bdc_address_device(struct bdc *bdc, u32 add)
{
	u32 cmd_sc = BDC_SUB_CMD_ADD | BDC_CMD_DVC;

	dev_dbg(bdc->dev, "%s: add=%d\n", __func__, add);
	/* a USB device address is only 7 bits wide */
	return bdc_submit_cmd(bdc, cmd_sc, 0, 0, add & 0x7f);
}
/*
 * Send a Function Wake device-notification transaction packet using the
 * FH (frame/header) command.  @intf is only logged; the packet carries
 * the current device address and the FWK notification subtype.
 */
int bdc_function_wake_fh(struct bdc *bdc, u8 intf)
{
	u32 cmd_sc = BDC_CMD_FH;
	u32 pkt_lo, pkt_hi;

	dev_dbg(bdc->dev, "%s intf=%d\n", __func__, intf);
	pkt_lo = TRA_PACKET | (bdc->dev_addr << 25);
	pkt_hi = DEV_NOTF_TYPE | (FWK_SUBTYPE << 4);
	dev_dbg(bdc->dev, "param0=%08x param1=%08x\n", pkt_lo, pkt_hi);

	return bdc_submit_cmd(bdc, cmd_sc, pkt_lo, pkt_hi, 0);
}
/*
 * Send a Function Wake notification for interface @intf using the DNC
 * (device notification) command with the FWK sub-command.
 */
int bdc_function_wake(struct bdc *bdc, u8 intf)
{
	u32 cmd_sc = BDC_SUB_CMD_FWK | BDC_CMD_DNC;

	dev_dbg(bdc->dev, "%s intf=%d", __func__, intf);

	return bdc_submit_cmd(bdc, cmd_sc, 0, 0, (u32)intf);
}
/* Stall endpoint @epnum by issuing an EP operation (EPO) stall command */
int bdc_ep_set_stall(struct bdc *bdc, int epnum)
{
	u32 cmd_sc = BDC_SUB_CMD_EP_STL | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;

	dev_dbg(bdc->dev, "%s epnum=%d\n", __func__, epnum);

	return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
}
/*
 * Reset (un-stall) endpoint @epnum; called when the host sends
 * CLEAR_FEATURE(HALT).  For non-ep0 endpoints the ep is first stalled if
 * it is not already, so that the subsequent reset also resets the data
 * sequence number.  On success the transfer ring is re-notified to HW.
 */
int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
{
	struct bdc_ep *ep;
	u32 cmd_sc = 0;
	int ret;

	dev_dbg(bdc->dev, "%s: epnum=%d\n", __func__, epnum);
	ep = bdc->bdc_ep_array[epnum];
	/*
	 * If we are not stalled then stall the endpoint and issue clear
	 * stall; this will reset the seq number for non EP0.
	 */
	if (epnum != 1) {
		/* if the endpoint it not stalled */
		if (!(ep->flags & BDC_EP_STALL)) {
			ret = bdc_ep_set_stall(bdc, epnum);
			if (ret)
				return ret;
		}
	}
	/* Preserve the seq number for ep0 only */
	if (epnum != 1)
		cmd_sc |= BDC_CMD_EPO_RST_SN;

	/* issue a reset endpoint command */
	cmd_sc |= BDC_SUB_CMD_EP_RST | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;

	ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
	if (ret) {
		dev_err(bdc->dev, "command failed:%x\n", ret);
		return ret;
	}
	/* ring the doorbell so HW resumes fetching from this ep's bd list */
	bdc_notify_xfr(bdc, epnum);

	return ret;
}
/*
 * Stop endpoint @epnum; used before dequeuing requests so that HW stops
 * fetching bd's.  A no-op (returns 0) if the ep is already stalled or
 * stopped; returns -EINVAL if the ep is not enabled.  On success the
 * BDC_EP_STOP flag is set on the ep.
 */
int bdc_stop_ep(struct bdc *bdc, int epnum)
{
	struct bdc_ep *ep;
	u32 cmd_sc = 0;
	int ret;

	ep = bdc->bdc_ep_array[epnum];
	dev_dbg(bdc->dev, "%s: ep:%s ep->flags:%08x\n", __func__,
						ep->name, ep->flags);
	/* Endpoint has to be in running state to execute stop ep command */
	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "stop endpoint called for disabled ep\n");
		return -EINVAL;
	}
	/* already halted or halted-by-software: nothing to stop */
	if ((ep->flags & BDC_EP_STALL) || (ep->flags & BDC_EP_STOP))
		return 0;

	/* issue a stop endpoint command */
	cmd_sc |= BDC_CMD_EP0_XSD | BDC_SUB_CMD_EP_STP
				| BDC_CMD_EPN(epnum) | BDC_CMD_EPO;

	ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
	if (ret) {
		dev_err(bdc->dev,
			"stop endpoint command didn't complete:%d ep:%s\n",
			ret, ep->name);
		return ret;
	}
	ep->flags |= BDC_EP_STOP;
	bdc_dump_epsts(bdc);

	return ret;
}
| linux-master | drivers/usb/gadget/udc/bdc/bdc_cmd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
*/
#include "bdc.h"
#include "bdc_dbg.h"
/*
 * Dump the main BDC configuration/capability/status registers at
 * verbose-debug level.  Purely diagnostic; no side effects beyond the
 * register reads.
 *
 * Fix: the DVCSB line used the malformed format "0x%x08" (a plain %x
 * followed by the literal text "08"); corrected to "0x%08x" to match
 * every other line in this function.
 */
void bdc_dbg_regs(struct bdc *bdc)
{
	u32 temp;

	dev_vdbg(bdc->dev, "bdc->regs:%p\n", bdc->regs);
	temp = bdc_readl(bdc->regs, BDC_BDCCFG0);
	dev_vdbg(bdc->dev, "bdccfg0:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_BDCCFG1);
	dev_vdbg(bdc->dev, "bdccfg1:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_BDCCAP0);
	dev_vdbg(bdc->dev, "bdccap0:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
	dev_vdbg(bdc->dev, "bdccap1:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_USPC);
	dev_vdbg(bdc->dev, "uspc:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_DVCSA);
	dev_vdbg(bdc->dev, "dvcsa:0x%08x\n", temp);
	temp = bdc_readl(bdc->regs, BDC_DVCSB);
	dev_vdbg(bdc->dev, "dvcsb:0x%08x\n", temp);
}
/*
 * Dump all eight endpoint status registers at verbose-debug level.
 *
 * Fix: the EPSTS1 line used "0x%x" while every sibling uses the
 * zero-padded "0x%08x"; made consistent.
 */
void bdc_dump_epsts(struct bdc *bdc)
{
	u32 temp;

	temp = bdc_readl(bdc->regs, BDC_EPSTS0);
	dev_vdbg(bdc->dev, "BDC_EPSTS0:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS1);
	dev_vdbg(bdc->dev, "BDC_EPSTS1:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS2);
	dev_vdbg(bdc->dev, "BDC_EPSTS2:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS3);
	dev_vdbg(bdc->dev, "BDC_EPSTS3:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS4);
	dev_vdbg(bdc->dev, "BDC_EPSTS4:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS5);
	dev_vdbg(bdc->dev, "BDC_EPSTS5:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS6);
	dev_vdbg(bdc->dev, "BDC_EPSTS6:0x%08x\n", temp);

	temp = bdc_readl(bdc->regs, BDC_EPSTS7);
	dev_vdbg(bdc->dev, "BDC_EPSTS7:0x%08x\n", temp);
}
/*
 * Dump every entry of the status report ring (SRR) with its DMA address.
 *
 * NOTE(review): the @srr_num parameter is unused — the function always
 * dumps bdc->srr.  Looks intentional for a single-ring controller, but
 * confirm against callers before relying on it.
 */
void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
{
	struct bdc_sr *sr;
	dma_addr_t addr;
	int i;

	sr = bdc->srr.sr_bds;
	addr = bdc->srr.dma_addr;
	dev_vdbg(bdc->dev, "%s sr:%p dqp_index:%d\n", __func__,
						sr, bdc->srr.dqp_index);
	for (i = 0; i < NUM_SR_ENTRIES; i++) {
		sr = &bdc->srr.sr_bds[i];
		dev_vdbg(bdc->dev, "%llx %08x %08x %08x %08x\n",
					(unsigned long long)addr,
					le32_to_cpu(sr->offset[0]),
					le32_to_cpu(sr->offset[1]),
					le32_to_cpu(sr->offset[2]),
					le32_to_cpu(sr->offset[3]));
		addr += sizeof(*sr);
	}
}
/*
 * Dump every buffer descriptor of @ep's bd list, table by table, with a
 * running global bd index (gbdi), virtual and DMA addresses, and the four
 * little-endian dwords of each bd.  Diagnostic only.
 */
void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bd_table *table;
	struct bdc_bd *desc;
	int tbi, bdi, gbdi = 0;
	dma_addr_t dma;

	dev_vdbg(bdc->dev,
			"Dump bd list for %s epnum:%d\n",
			ep->name, ep->ep_num);

	dev_vdbg(bdc->dev,
			"tabs:%d max_bdi:%d eqp_bdi:%d hwd_bdi:%d num_bds_table:%d\n",
			bd_list->num_tabs, bd_list->max_bdi, bd_list->eqp_bdi,
			bd_list->hwd_bdi, bd_list->num_bds_table);

	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		table = bd_list->bd_table_array[tbi];
		for (bdi = 0; bdi < bd_list->num_bds_table; bdi++) {
			desc = table->start_bd + bdi;
			dma = table->dma + sizeof(struct bdc_bd) * bdi;
			dev_vdbg(bdc->dev,
				"tbi:%2d bdi:%2d gbdi:%2d virt:%p phys:%llx %08x %08x %08x %08x\n",
				tbi, bdi, gbdi++, desc, (unsigned long long)dma,
				le32_to_cpu(desc->offset[0]),
				le32_to_cpu(desc->offset[1]),
				le32_to_cpu(desc->offset[2]),
				le32_to_cpu(desc->offset[3]));
		}
		dev_vdbg(bdc->dev, "\n\n");
	}
}
| linux-master | drivers/usb/gadget/udc/bdc/bdc_dbg.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>
#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
/* Human-readable names for the ep0 state machine, indexed by bdc->ep0_state */
static const char * const ep0_state_string[] =  {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING"
};
/*
 * Free the bd list of @ep during ep disable: return each table's bd
 * memory to the dma pool, free each bd_table struct, then free the table
 * pointer array itself.  Tolerates partially-allocated lists (used as the
 * failure path of ep_bd_list_alloc()).
 *
 * Fix: the early-return guard tests bd_table_array for NULL, but the
 * pointer was never cleared after kfree(), so a second call would
 * double-free.  Now reset it to NULL once everything is released.
 */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated ?
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}
		dev_dbg(bdc->dev,
				"Free dma pool start_bd:%p dma:%llx\n",
				bd_table->start_bd,
				(unsigned long long)bd_table->dma);

		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array and mark the list as freed */
	kfree(ep->bd_list.bd_table_array);
	bd_list->bd_table_array = NULL;
}
/*
 * Chain two bd tables: overwrite the last bd of @prev_table with a chain
 * bd that points at the start of @next_table, so HW follows the link when
 * it reaches the end of the table.
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	struct bdc_bd *chain_bd = &prev_table->start_bd[bd_p_tab - 1];

	chain_bd->offset[0] = cpu_to_le32(lower_32_bits(next_table->dma));
	chain_bd->offset[1] = cpu_to_le32(upper_32_bits(next_table->dma));
	chain_bd->offset[2] = 0x0;
	chain_bd->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
}
/*
 * Allocate the bd list for @ep during config ep: an array of bd tables
 * (more for isoc endpoints), each table carved from the bd dma pool, with
 * every table chained to the next and the last chained back to the first
 * to form a ring.  On failure all partial allocations are released.
 * Returns 0 or -ENOMEM.
 *
 * Fix: removed a duplicated "ep->bd_list.num_tabs = num_tabs;"
 * assignment (the same field was stored twice in a row).
 */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kcalloc(num_tabs,
					     sizeof(struct bd_table *),
					     GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_zalloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	/* close the ring: last table chains back to the first */
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi  = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}
/*
 * Return how many bd's are needed for this request: one per full
 * BD_MAX_BUFF_SIZE chunk plus one for any remainder, and a single bd for
 * a zero-length transfer.
 */
static inline int bd_needed_req(struct bdc_req *req)
{
	unsigned int len = req->usb_req.length;
	int bd_needed;

	/* 1 bd needed for 0 byte transfer */
	if (len == 0)
		return 1;

	bd_needed = len / BD_MAX_BUFF_SIZE;
	if (len % BD_MAX_BUFF_SIZE)
		bd_needed++;

	return bd_needed;
}
/*
 * Translate a bd DMA address back into the global bd index (bdi).
 * Scans each table's [first_bd, last_bd] DMA range to locate the owning
 * table, then derives the intra-table index from the byte offset.
 * Returns the global bdi, or -EINVAL if the address belongs to no table.
 */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s  %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs?, go through the table
	 * array and compare addresses of first and last address of bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
					(unsigned long long)dma_first_bd,
					(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}
/* Map a global bd index (bdi) to the index (tbi) of the table holding it */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int table_index = bdi / ep->bd_list.num_bds_table;

	dev_vdbg(ep->bdc->dev,
			"bdi:%d num_bds_table:%d tbi:%d\n",
			bdi, ep->bd_list.num_bds_table, table_index);

	return table_index;
}
/*
 * Given the bdi HW would fetch next (next_hwd_bdi), return the bdi of the
 * last transfer bd of the just-completed xfr: step back one, wrapping to
 * the end of the ring, and skip backwards over a chain bd.
 *
 * NOTE(review): the chain-bd test uses "% (num_bds_table - 1)" while the
 * rest of the file places chain bds where (bdi + 1) % num_bds_table == 0
 * (see ep_bdlist_eqp_adv).  Looks inconsistent — confirm against HW
 * behavior before changing; left as-is here.
 */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	 else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
/*
 * How many transfer bd's are available on this ep bdl; chain bds are not
 * counted as available, and one slot is always reserved so that a full
 * ring can be distinguished from an empty one (eqp == dqp means empty).
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi -  bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
/*
 * Ring the transfer doorbell for @epnum after new bds were queued.  The
 * software BDC_EP_STOP flag is cleared first, since it is the only record
 * of the ep's run state.
 */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have anyway to check if ep state is running,
	 * except the software flags.
	 */
	ep->flags &= ~BDC_EP_STOP;
	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* Return a pointer to the bd at global index @bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = bdi - tbi * ep->bd_list.num_bds_table;

	dev_vdbg(ep->bdc->dev,
			"%s bdi:%d local_bdi:%d\n",
			__func__, bdi, local_bdi);

	return ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi;
}
/*
 * Advance the software enqueue pointer by one bd, skipping the chain bd
 * that sits at the end of each table and wrapping to 0 past max_bdi.
 * Note the order matters: the chain-bd test fires when the *next* slot is
 * the last one of a table.
 */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;

	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
/*
 * Fill in the ep0-specific bits of the first bd's dword3: bd type
 * (data-stage vs status-stage) and direction, derived from the stored
 * setup packet and the current ep0 state.  Also records whether a ZLP
 * will be needed at the end of an IN data stage.
 * Returns 0, or -EINVAL for an unexpected ep0 state.
 */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		/*
		 * NOTE(review): this compares against wValue, not wLength —
		 * the host-requested length is normally wLength; confirm
		 * before changing, callers may rely on current behavior.
		 */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		/* status stage runs opposite to the data stage direction */
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
/*
 * Write @num_bds buffer descriptors for @req into the ep's bd ring.
 * The first bd gets start-of-transfer, transfer-frame-count and (for ep0)
 * type/direction bits; the last bd gets end-of-transfer plus an IOC
 * (interrupt-on-completion).  The bd *after* the transfer is marked
 * stop-bd-fetch, and only then is SBF cleared on the first bd, so HW
 * never races past a half-written transfer.  Returns 0 or negative errno.
 */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	ep = req->ep;
	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc);
	/* number of max-packet-sized frames in this transfer */
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			dword2 |= BD_LTF;
			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		/* split the payload across BD_MAX_BUFF_SIZE chunks */
		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		/* fence: the bd after this transfer stops HW fetching */
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds  = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
/*
 * Queue a transfer: check ring space, write the bds, link the request on
 * the ep queue and ring the doorbell.  Returns -ENOMEM when the ring
 * cannot hold the transfer, or the error from setup_bd_list_xfr().
 */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	struct bdc_ep *ep = req->ep;
	int bds_needed, bds_free;
	int ret;

	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	bds_needed = bd_needed_req(req);
	bds_free = bd_available_ep(ep);

	/* how many bd's are avaialble on ep */
	if (bds_needed > bds_free)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, bds_needed);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}
/*
 * Complete @req back to the gadget layer with @status: unlink it from the
 * ep queue, unmap its DMA buffer, and invoke the completion callback.
 * Caller must hold bdc->lock; the lock is dropped around the giveback so
 * the gadget driver's complete() may re-queue without deadlocking.
 */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
/*
 * Disable @ep: stop it in HW, complete all pending requests with
 * -ESHUTDOWN, deconfigure it, and (except for ep0) free its bd list and
 * clear its descriptors.  Stop/dconfig failures are tolerated since they
 * are expected during disconnect.  Returns the dconfig result (0 for
 * ep0's early exit).
 */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}
/*
 * Enable @ep: allocate its bd list and, for non-ep0 endpoints, issue the
 * config-ep command and publish the usb_ep fields.  ep0 stops after the
 * allocation because its config-ep is issued from the connect event.
 * Returns 0 or a negative errno.
 */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc = ep->bdc;
	int ret;

	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
					__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return -ENOMEM;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	if (ep->ep_num == 1)
		return 0;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}
/* EP0 related code */

/*
 * Queue the zero-length status-stage bd on ep0 using the preallocated
 * bdc->status_req.
 *
 * Fix: the return value of bdc_queue_xfr() was discarded and the function
 * unconditionally returned 0, hiding ring-full/setup errors; now the
 * queueing result is propagated (callers that ignored 0 still work).
 */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;

	return bdc_queue_xfr(bdc, status_req);
}
/*
 * Queue a transfer on ep0.  Handles the delayed-status protocol: if a
 * status stage was deferred, queue it now instead of the request.  A
 * zero-length request with no delayed status is a status stage queued
 * elsewhere, so it returns success without queueing.  Otherwise the
 * request is DMA-mapped and placed on the ring.
 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * if delayed status is false and 0 length transfer is requested
		 * i.e. for status stage of some setup request, then just
		 * return from here the status stage is queued independently
		 */
		if (req->usb_req.length == 0)
			return 0;

	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Queue the data-stage transfer on ep0 using the preallocated ep0_req */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep0 = bdc->bdc_ep_array[1];

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = ep0;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep0, &bdc->ep0_req);
}
/*
 * Queue @req on a non-ep0 endpoint: initialize its status fields, DMA-map
 * the buffer and hand it to the ring.  Returns -EINVAL for a NULL request
 * or a disabled ep, the mapping error, or the queueing result.
 */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/*
 * Dequeue @req from @ep.  Stops the ep (only while still connected),
 * reads back the HW dequeue pointer, and decides whether the request's
 * bds are still owned by HW (pending).  If the start bd is pending and
 * the request is not first in line, its start bd is turned into a chain
 * bd that jumps over the transfer; if only the tail is pending, the HW
 * dequeue pointer is moved past it with a BLA command.  Returns 0 when
 * nothing needs doing (already processed / disconnected), -EINVAL when
 * the whole transfer already completed, or a command error.
 */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64   deq_ptr_64 = 0;
	struct bdc  *bdc;
	u32    tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
			__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
			ep, (void *)ep->usb_ep.desc);
	/* if still connected, stop the ep to see where the HW is ? */
	if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
		ret = bdc_stop_ep(bdc, ep->ep_num);
		/* if there is an issue, then no need to go further */
		if (ret)
			return 0;
	} else
		return 0;

	/*
	 * After endpoint is stopped, there can be 3 cases, the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
				end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start till end are processes, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;
	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma =  table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to HW limitation we need to bypass chain bd's and issue ep_bla;
	 * in case start is pending and this is the first request in the list
	 * then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_hwd_bdi
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
/*
 * Set (@value != 0) or clear (@value == 0) the halt state of @ep and keep
 * the software BDC_EP_STALL flag in sync.  Halting ep0 additionally
 * resets the ep0 state machine to WAIT_FOR_SETUP.
 */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc = ep->bdc;
	int ret;

	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;

		dev_dbg(bdc->dev, "After  Clear\n");
	}

	return ret;
}
/* Release every endpoint allocated by bdc_init_ep() */
void bdc_free_ep(struct bdc *bdc)
{
	u8 i;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (i = 1; i < bdc->num_eps; i++) {
		struct bdc_ep *ep = bdc->bdc_ep_array[i];

		if (!ep)
			continue;
		/* an enabled ep still owns its BD tables; free them first */
		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);
		/* ep0 (index 1) is never linked on the gadget's ep_list */
		if (i != 1)
			list_del(&ep->usb_ep.ep_list);
		kfree(ep);
	}
}
/* Program the PTC field; test selectors per USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);

	/* only the five standard test selectors are valid */
	switch (bdc->test_mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		break;
	default:
		return -EINVAL;
	}

	/* selector goes into the top nibble (PTC field) */
	usb2_pm |= bdc->test_mode << 28;
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
/*
 * Helper function to handle a Transfer status report with status as either
 * success or short.  Computes the number of bytes actually transferred and
 * completes the request at the head of the endpoint queue.
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
				   struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
	bdc_dbg_srr(bdc, 0);
	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	/* the SR always refers to the oldest queued request */
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);
	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * sr_status is short and this transfer has more than 1 bd then it needs
	 * special handling, this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is multi bd xfr, lets see which bd
		 * caused short transfer and how many bytes have been
		 * transferred so far.
		 */
		/* offset[0]/offset[1] hold the 64-bit dequeue pointer of the
		 * BD that went short */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);

		/* map the dma dequeue pointer back to a BD index */
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi =  bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between.  Chain BDs at table boundaries carry no
		 * data, so they are subtracted from the count.
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		/* if the short BD was not the last BD of the transfer, the HW
		 * emits one more SR for this xfr — arrange to skip it */
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		/* single-BD or fully successful xfr: residual count comes
		 * straight from the SR */
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
/* EP0 setup related packet handlers */

/*
 * A SETUP packet arrived: just store it away here; it is acted upon when
 * the subsequent data-stage or status-stage started SR comes in.
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt = &bdc->setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);

	/* keep a copy of the raw setup packet for later processing */
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));

	/* no data stage (wLength == 0) means status stage comes next */
	len = le16_to_cpu(setup_pkt->wLength);
	bdc->ep0_state = len ? WAIT_FOR_DATA_START : WAIT_FOR_STATUS_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}
/* Stall ep0 and flush any pending control requests */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* complete any still-queued requests with -ESHUTDOWN */
	while (!list_empty(&ep->queue)) {
		struct bdc_req *req = list_first_entry(&ep->queue,
						       struct bdc_req, queue);

		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}
/* SET_ADDRESS handler */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 addr = le16_to_cpu(ctrl->wValue);
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	/* USB device addresses are 7 bits wide */
	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue the Address Device command to the controller */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		/* address 0 moves the device back to the default state */
		usb_gadget_set_state(&bdc->gadget,
				     addr ? USB_STATE_ADDRESS :
					    USB_STATE_DEFAULT);
		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			 "SET Address in wrong device state %d\n",
			 state);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Handler for SET/CLEAR FEATURE requests with a device recipient.
 * @set: true for SET_FEATURE, false for CLEAR_FEATURE.
 * Returns 0 on success or a negative errno.
 */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
				  u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
		__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		/* track remote-wakeup permission in the cached devstatus */
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		/*
		 * Test mode is high-speed only, SET-only, and the low byte
		 * of wIndex must be zero; the selector is in the high byte.
		 * The mode is actually entered only after the status stage
		 * completes (see ep0_xsf_complete()/bdc_set_test_mode()).
		 */
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");
		/* U1/U2 only apply to a configured SuperSpeed device */
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms =  bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		/* LTM is accepted but not acted upon by this controller */
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}
/*
 * SET/CLEAR FEATURE handler: dispatches on the request recipient
 * (device, interface or endpoint).  @set selects SET vs CLEAR.
 */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d	devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;
		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		/* map USB ep address to driver index: OUT = num*2,
		 * IN = num*2+1; ep0 lives at index 1 (see init_ep()) */
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /*EP0*/
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * was received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * GET_STATUS request handler: builds a 2-byte status word in the
 * ep0 response buffer and queues it as the data stage.
 */
static int ep0_handle_status(struct bdc *bdc,
			     struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;
	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from func for Func remote wkup
			 * usb_status |=1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		} else {
			usb_status = 0;
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		/* map USB ep address to driver index (OUT = num*2,
		 * IN = num*2+1; ep0 is index 1) */
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;
		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}
/* Completion callback for the SET_SEL data stage; nothing to do here */
static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
}
/* Queue the data stage for the 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
		       struct usb_ctrlrequest *setup_pkt)
{
	u16 wLength = le16_to_cpu(setup_pkt->wLength);

	dev_dbg(bdc->dev, "%s\n", __func__);

	/* the SET_SEL data stage always carries exactly 6 bytes */
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}

	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}
/*
 * Queue a zero length BD on ep0; used when the host asked for more data
 * than was sent and the sent length is a multiple of MaxPacket.
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;

	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		return 0;
	}

	dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
	return ret;
}
/*
 * Control request handler: standard requests the driver can answer are
 * handled here; everything else is delegated to the gadget driver's
 * setup() callback (with bdc->lock dropped across the call).
 */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			/* track the gadget state; the request itself is
			 * still delegated to the gadget driver below */
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
						USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
			"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		/* class/vendor requests always go to the gadget driver */
		delegate_setup = 1;
	}

	if (delegate_setup) {
		/* drop the lock: the gadget driver may queue requests
		 * re-entering this driver */
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
/*
 * EP0: Data stage started SR handler.  Processes the SETUP packet stored
 * earlier; stalls ep0 on any failure.
 */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we received ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	/* any failure above aborts the control transfer with a stall */
	ep0_stall(bdc);
}
/*
 * EP0: status stage started SR handler.  For 2-stage (no-data) setups the
 * stored SETUP packet is processed here; for 3-stage setups the data stage
 * already handled it.  Queues the status stage BD on success.
 */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	/* if ep0 was stalled, clear it before continuing */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * 2 stage setup then only process the setup, for 3 stage setup the
	 * data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	/* any failure above aborts the control transfer with a stall */
	ep0_stall(bdc);
}
/*
 * Helper function to advance the ep0 state machine upon an SR with
 * xsf_succ or xsf_short status.
 */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		/* data stage done, status stage is next */
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		/* control transfer finished; re-arm for the next SETUP */
		bdc->ep0_state = WAIT_FOR_SETUP;
		/* a pending USB2 test mode is entered only now, after the
		 * status stage completed (USB2 spec 9.4.9) */
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret < 0) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		/* the status stage SR arrived before the data-stage SR */
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}
/*
 * Transfer-event status report handler: dispatches the SR to the proper
 * endpoint/ep0 handler based on its status code.
 */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	/* bits 4..8 of offset[3] carry the endpoint index */
	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
			__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
			__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		/* sr_xsf_ep0[] maps the three ep0 stage events to their
		 * handlers in order */
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
				bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength >Data Len,
			 * and Len is multiple of MaxPacket,then queue ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
/*
 * Gadget ep_queue op: validates the request and hands it to the ep0 or
 * generic endpoint queueing path under bdc->lock.
 */
static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete ||	!_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	/* NOTE(review): ep->usb_ep.desc is the same field as _ep->desc
	 * checked above; presumably this re-check guards against a racing
	 * ep_disable before the lock is taken — confirm */
	if (!ep->usb_ep.desc) {
		dev_warn(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_warn(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/*
 * Gadget ep_dequeue op: cancels a previously queued request.  Verifies the
 * request is still on this endpoint's queue before dequeuing it from the
 * hardware and completing it with -ECONNRESET.
 *
 * Fix: the "not found" error message was a broken literal
 * ("usb_req !=req n" — missing '\' in the newline escape and spacing);
 * it now reads "usb_req != req\n".
 */
static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	struct bdc_req *iter;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;
	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);

	/* make sure it's still queued on this endpoint */
	req = NULL;
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->usb_req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/* Gadget set_halt op: halt/clear an endpoint under the controller lock */
static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct bdc_ep *ep = to_bdc_ep(_ep);
	struct bdc *bdc = ep->bdc;
	unsigned long flags;
	int ret;

	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;		/* isoc eps cannot be halted */
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;		/* refuse while requests are queued */
	else
		ret = ep_set_halt(ep, value);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/* Gadget alloc_request op: allocate and pre-fill a bdc_req wrapper */
static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_ep *ep = to_bdc_ep(_ep);
	struct bdc_req *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
/* Gadget free_request op: release a request from bdc_gadget_alloc_request() */
static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	kfree(to_bdc_req(_req));
}
/* endpoint operations */

/* Gadget ep_enable op: validate the descriptor, then configure the ep */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("%s invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("%s missing wMaxPacketSize\n", __func__);
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver ||
	    bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/* Gadget ep_disable op: tear down a previously enabled endpoint */
static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}

	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	/* already disabled is a no-op (only worth a warning while the
	 * controller still has a known speed) */
	if (!(ep->flags & BDC_EP_ENABLED)) {
		if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_warn(bdc->dev, "%s is already disabled\n",
				 ep->name);
		return 0;
	}

	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/* Endpoint operation table handed to the gadget core via usb_ep.ops */
static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};
/*
 * Allocate and register one endpoint.  dir = 1 is IN, 0 is OUT.
 * Index mapping in bdc_ep_array: ep0 -> 1, OUT epN -> N*2 - 2,
 * IN epN -> N*2 - 1.
 */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		/* USB-visible name is "ep0" */
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		/* non-ep0 endpoints are exposed on the gadget's ep_list */
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
u8 epnum;
int ret;
dev_dbg(bdc->dev, "%s()\n", __func__);
INIT_LIST_HEAD(&bdc->gadget.ep_list);
/* init ep0 */
ret = init_ep(bdc, 1, 0);
if (ret) {
dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
return ret;
}
for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
/* OUT */
ret = init_ep(bdc, epnum, 0);
if (ret) {
dev_err(bdc->dev,
"init ep failed for:%d error: %d\n",
epnum, ret);
return ret;
}
/* IN */
ret = init_ep(bdc, epnum, 1);
if (ret) {
dev_err(bdc->dev,
"init ep failed for:%d error: %d\n",
epnum, ret);
return ret;
}
}
return 0;
}
| linux-master | drivers/usb/gadget/udc/bdc/bdc_ep.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_core.c - BRCM BDC USB3.0 device controller core operations
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/moduleparam.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/clk.h>
#include "bdc.h"
#include "bdc_dbg.h"
/*
 * Poll (10 us period, up to @usec total) till the controller status field
 * leaves the "operation in progress" (OIP) state.  Returns 0 on success or
 * the readl_poll_timeout() error on timeout.
 */
static int poll_oip(struct bdc *bdc, u32 usec)
{
	u32 status;
	int ret;

	ret = readl_poll_timeout(bdc->regs + BDC_BDCSC, status,
		(BDC_CSTS(status) != BDC_OIP), 10, usec);
	if (ret)
		dev_err(bdc->dev, "operation timedout BDCSC: 0x%08x\n", status);
	else
		dev_dbg(bdc->dev, "%s complete status=%d", __func__, BDC_CSTS(status));

	return ret;
}
/* Stop (halt) the BDC controller; no-op if it is already halted */
int bdc_stop(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);

	/* Check if BDC is already halted */
	if (BDC_CSTS(temp) == BDC_HLT) {
		dev_vdbg(bdc->dev, "BDC already halted\n");
		return 0;
	}

	/* issue the STOP command and wait for it to complete */
	temp = (temp & ~BDC_COP_MASK) | BDC_COS | BDC_COP_STP;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc stop operation failed");

	return ret;
}
/* Issue a reset to the BDC controller (which must be halted first) */
int bdc_reset(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);

	/* First halt the controller */
	ret = bdc_stop(bdc);
	if (ret)
		return ret;

	/* issue the RESET command and wait for it to complete */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	temp = (temp & ~BDC_COP_MASK) | BDC_COS | BDC_COP_RST;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc reset operation failed");

	return ret;
}
/* Start the BDC controller and verify it reached the normal state */
int bdc_run(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);

	/* if BDC is already in running state then do not do anything */
	if (BDC_CSTS(temp) == BDC_NOR) {
		dev_warn(bdc->dev, "bdc is already in running state\n");
		return 0;
	}

	/* issue the RUN command and wait for it to be accepted */
	temp = (temp & ~BDC_COP_MASK) | BDC_COS | BDC_COP_RUN;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret) {
		dev_err(bdc->dev, "bdc run operation failed:%d", ret);
		return ret;
	}

	/* confirm the controller actually entered normal mode */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	if (BDC_CSTS(temp) != BDC_NOR) {
		dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
			BDC_CSTS(temp));
		return -ESHUTDOWN;
	}

	return 0;
}
/*
 * Present the termination to the host, typically called from upstream port
 * event with Vbus present =1
 */
void bdc_softconn(struct bdc *bdc)
{
	u32 uspc = bdc_readl(bdc->regs, BDC_USPC);

	/* request the Rx.Detect link state and set BDC_SWS */
	uspc &= ~BDC_PST_MASK;
	uspc |= BDC_LINK_STATE_RX_DET | BDC_SWS;
	dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}
/* Remove the termination (soft-disconnect from the host) */
void bdc_softdisconn(struct bdc *bdc)
{
	u32 uspc = bdc_readl(bdc->regs, BDC_USPC);

	uspc = (uspc | BDC_SDC) & ~BDC_SCN;
	dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_setup(struct bdc *bdc)
{
	int sp_buff_size;
	u32 low32;
	u32 upp32;

	sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
	dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
	if (!sp_buff_size) {
		dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
		return 0;
	}

	/* Refer to BDC spec, Table 4 for description of SPB */
	sp_buff_size = 1 << (sp_buff_size + 5);
	dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
	bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
						  &bdc->scratchpad.sp_dma,
						  GFP_KERNEL);
	if (!bdc->scratchpad.buff)
		return -ENOMEM;

	bdc->sp_buff_size = sp_buff_size;
	bdc->scratchpad.size = sp_buff_size;

	/* program the scratchpad base address into the controller */
	low32 = lower_32_bits(bdc->scratchpad.sp_dma);
	upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);
	bdc_writel(bdc->regs, BDC_SPBBAL, low32);
	bdc_writel(bdc->regs, BDC_SPBBAH, upp32);

	return 0;
}
/* Allocate the status report ring */
static int setup_srr(struct bdc *bdc, int interrupter)
{
	dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);

	/* Reset the SRR before (re)allocating its descriptors */
	bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
	bdc->srr.dqp_index = 0;

	/* allocate the status report descriptors */
	bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
					     NUM_SR_ENTRIES * sizeof(struct bdc_bd),
					     &bdc->srr.dma_addr, GFP_KERNEL);
	return bdc->srr.sr_bds ? 0 : -ENOMEM;
}
/*
 * Initialize the HW regs and internal data structures.
 * @reinit: true when called from bdc_reinit() after a controller reset,
 *          false for the one-time init from bdc_hw_init().
 */
static void bdc_mem_init(struct bdc *bdc, bool reinit)
{
	u8 size = 0;
	u32 usb2_pm;
	u32 low32;
	u32 upp32;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* reset software state back to "waiting for a SETUP on ep0" */
	bdc->ep0_state = WAIT_FOR_SETUP;
	bdc->dev_addr = 0;
	bdc->srr.eqp_index = 0;
	bdc->srr.dqp_index = 0;
	bdc->zlp_needed = false;
	bdc->delayed_status = false;

	/*
	 * NOTE(review): only the low 32 bits of the scratchpad address are
	 * re-written here; SPBBAH was programmed in scratchpad_setup() and is
	 * presumably still valid across reinit -- confirm for >4GB DMA.
	 */
	bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
	/* Init the SRR */
	temp = BDC_SRR_RWS | BDC_SRR_RST;
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
	/* ring size is encoded in the low bits of the base address register */
	temp = lower_32_bits(bdc->srr.dma_addr);
	size = fls(NUM_SR_ENTRIES) - 2;
	temp |= size;
	dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
		temp, NUM_SR_ENTRIES, size);
	low32 = lower_32_bits(temp);
	upp32 = upper_32_bits(bdc->srr.dma_addr);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);
	/* Write the dma addresses into regs*/
	bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
	bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);
	/* take SRR 0 out of reset and enable its interrupt */
	temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
	temp |= BDC_SRR_IE;
	temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	/* Set the Interrupt Coalescence ~500 usec */
	temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
	temp &= ~0xffff;
	temp |= INT_CLS;
	bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	/* Enable hardware LPM Enable */
	usb2_pm |= BDC_HLE;
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
	/* readback for debug */
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
	/* Disable any unwanted SR's on SRR */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* We don't want Microframe counter wrap SR */
	temp |= BDC_MASK_MCW;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	/*
	 * In some error cases, driver has to reset the entire BDC controller
	 * in that case reinit is passed as 1
	 */
	if (reinit) {
		int i;
		/* Enable interrupts */
		temp = bdc_readl(bdc->regs, BDC_BDCSC);
		temp |= BDC_GIE;
		bdc_writel(bdc->regs, BDC_BDCSC, temp);
		/* Init scratchpad to 0 */
		memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
		/* Initialize SRR to 0 */
		memset(bdc->srr.sr_bds, 0,
		       NUM_SR_ENTRIES * sizeof(struct bdc_bd));
		/*
		 * clear ep flags to avoid post disconnect stops/deconfigs but
		 * not during S2 exit
		 */
		if (!bdc->gadget.speed)
			for (i = 1; i < bdc->num_eps; ++i)
				bdc->bdc_ep_array[i]->flags = 0;
	} else {
		/* One time initialization only */
		/* Enable status report function pointers */
		bdc->sr_handler[0] = bdc_sr_xsf;
		bdc->sr_handler[1] = bdc_sr_uspc;
		/* EP0 status report function pointers */
		bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
		bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
		bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
	}
}
/*
 * Free the dynamic memory allocated by bdc_mem_alloc().
 * Safe to call on a partially-initialized bdc: every free is guarded or
 * NULL-tolerant, and all pointers are cleared afterwards so a second call
 * is a no-op.
 */
static void bdc_mem_free(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	/* Free SRR */
	if (bdc->srr.sr_bds)
		dma_free_coherent(bdc->dev,
				  NUM_SR_ENTRIES * sizeof(struct bdc_bd),
				  bdc->srr.sr_bds, bdc->srr.dma_addr);
	/* Free scratchpad */
	if (bdc->scratchpad.buff)
		dma_free_coherent(bdc->dev, bdc->sp_buff_size,
				  bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
	/* Destroy the dma pools (dma_pool_destroy() accepts NULL) */
	dma_pool_destroy(bdc->bd_table_pool);
	/* Free the bdc_ep array */
	kfree(bdc->bdc_ep_array);
	/* clear everything so the function is idempotent */
	bdc->srr.sr_bds = NULL;
	bdc->scratchpad.buff = NULL;
	bdc->bd_table_pool = NULL;
	bdc->bdc_ep_array = NULL;
}
/*
 * bdc reinit gives a controller reset and reinitialize the registers,
 * called from disconnect/bus reset scenario's, to ensure proper HW cleanup
 *
 * Returns 0 on success or the error from bdc_stop()/bdc_reset()/bdc_run().
 * Clears bdc->reinit on all paths so the request is not retried blindly.
 */
int bdc_reinit(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ret = bdc_stop(bdc);
	if (ret)
		goto out;

	ret = bdc_reset(bdc);
	if (ret)
		goto out;

	/* the reinit flag is 1: re-zero scratchpad/SRR, keep one-time setup */
	bdc_mem_init(bdc, true);
	ret = bdc_run(bdc);
out:
	bdc->reinit = false;

	return ret;
}
/*
 * Allocate all the dynamic memory: the BD-table DMA pool, the scratchpad
 * (if the HW asks for one), the per-endpoint pointer array and SRR 0.
 * On any failure everything allocated so far is released via bdc_mem_free().
 */
static int bdc_mem_alloc(struct bdc *bdc)
{
	u32 page_size;
	unsigned int num_ieps, num_oeps;

	dev_dbg(bdc->dev,
		"%s() NUM_BDS_PER_TABLE:%d\n", __func__,
		NUM_BDS_PER_TABLE);
	page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
	/* page size is 2^pgs KB */
	page_size = 1 << page_size;
	/* KB */
	page_size <<= 10;
	dev_dbg(bdc->dev, "page_size=%d\n", page_size);
	/* Create a pool of bd tables; tables must not cross a HW page */
	bdc->bd_table_pool =
	    dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
			    16, page_size);
	if (!bdc->bd_table_pool)
		goto fail;

	if (scratchpad_setup(bdc))
		goto fail;

	/* read supported endpoint counts from regs */
	num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
	num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
	/* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
	bdc->num_eps = num_ieps + num_oeps + 2;
	dev_dbg(bdc->dev,
		"ieps:%d eops:%d num_eps:%d\n",
		num_ieps, num_oeps, bdc->num_eps);
	/* allocate array of ep pointers */
	bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
				    GFP_KERNEL);
	if (!bdc->bdc_ep_array)
		goto fail;

	dev_dbg(bdc->dev, "Allocating sr report0\n");
	if (setup_srr(bdc, 0))
		goto fail;

	return 0;

fail:
	dev_warn(bdc->dev, "Couldn't initialize memory\n");
	bdc_mem_free(bdc);

	return -ENOMEM;
}
/* opposite to bdc_hw_init: releases everything bdc_mem_alloc() set up */
static void bdc_hw_exit(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_mem_free(bdc);
}
/* Initialize the bdc HW and memory */
static int bdc_hw_init(struct bdc *bdc)
{
int ret;
dev_dbg(bdc->dev, "%s ()\n", __func__);
ret = bdc_reset(bdc);
if (ret) {
dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
return ret;
}
ret = bdc_mem_alloc(bdc);
if (ret) {
dev_err(bdc->dev, "Mem alloc failed, aborting\n");
return -ENOMEM;
}
bdc_mem_init(bdc, 0);
bdc_dbg_regs(bdc);
dev_dbg(bdc->dev, "HW Init done\n");
return 0;
}
/*
 * Init and power on every phy attached to the controller.
 * On failure, phys that were already brought up are powered off and
 * exited in reverse order before returning the error.
 */
static int bdc_phy_init(struct bdc *bdc)
{
	int phy_num;
	int ret;

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		ret = phy_init(bdc->phys[phy_num]);
		if (ret)
			goto err_exit_phy;
		ret = phy_power_on(bdc->phys[phy_num]);
		if (ret) {
			/* undo the phy_init() of the phy that just failed */
			phy_exit(bdc->phys[phy_num]);
			goto err_exit_phy;
		}
	}

	return 0;

err_exit_phy:
	/* unwind all fully-initialized phys before this one */
	while (--phy_num >= 0) {
		phy_power_off(bdc->phys[phy_num]);
		phy_exit(bdc->phys[phy_num]);
	}

	return ret;
}
/* Power down and tear down every phy that bdc_phy_init() brought up. */
static void bdc_phy_exit(struct bdc *bdc)
{
	int i;

	for (i = 0; i < bdc->num_phys; i++) {
		phy_power_off(bdc->phys[i]);
		phy_exit(bdc->phys[i]);
	}
}
/*
 * Platform probe: map registers, discover optional phys and clock,
 * pick a DMA mask, then bring up the HW (bdc_hw_init) and the gadget
 * layer (bdc_udc_init). Unwinds in reverse order on failure.
 */
static int bdc_probe(struct platform_device *pdev)
{
	struct bdc *bdc;
	int ret;
	int irq;
	u32 temp;
	struct device *dev = &pdev->dev;
	int phy_num;

	dev_dbg(dev, "%s()\n", __func__);
	bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
	if (!bdc)
		return -ENOMEM;

	bdc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bdc->regs))
		return PTR_ERR(bdc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	spin_lock_init(&bdc->lock);
	platform_set_drvdata(pdev, bdc);
	bdc->irq = irq;
	bdc->dev = dev;
	dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);

	/* phys are optional; a negative count (no "phys" property) means 0 */
	bdc->num_phys = of_count_phandle_with_args(dev->of_node,
						   "phys", "#phy-cells");
	if (bdc->num_phys > 0) {
		bdc->phys = devm_kcalloc(dev, bdc->num_phys,
					 sizeof(struct phy *), GFP_KERNEL);
		if (!bdc->phys)
			return -ENOMEM;
	} else {
		bdc->num_phys = 0;
	}
	dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		bdc->phys[phy_num] = devm_of_phy_get_by_index(
			dev, dev->of_node, phy_num);
		if (IS_ERR(bdc->phys[phy_num])) {
			ret = PTR_ERR(bdc->phys[phy_num]);
			dev_err(bdc->dev,
				"BDC phy specified but not found:%d\n", ret);
			return ret;
		}
	}

	/* clock is optional; devm_clk_get_optional() returns NULL if absent */
	bdc->clk = devm_clk_get_optional(dev, "sw_usbd");
	if (IS_ERR(bdc->clk))
		return PTR_ERR(bdc->clk);

	ret = clk_prepare_enable(bdc->clk);
	if (ret) {
		dev_err(dev, "could not enable clock\n");
		return ret;
	}

	ret = bdc_phy_init(bdc);
	if (ret) {
		dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
		goto disable_clk;
	}

	/* prefer 64-bit DMA when the controller advertises P64 support */
	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
	if ((temp & BDC_P64) &&
	    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		dev_dbg(dev, "Using 64-bit address\n");
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev,
				"No suitable DMA config available, abort\n");
			ret = -ENOTSUPP;
			goto phycleanup;
		}
		dev_dbg(dev, "Using 32-bit address\n");
	}

	ret = bdc_hw_init(bdc);
	if (ret) {
		dev_err(dev, "BDC init failure:%d\n", ret);
		goto phycleanup;
	}

	ret = bdc_udc_init(bdc);
	if (ret) {
		dev_err(dev, "BDC Gadget init failure:%d\n", ret);
		goto cleanup;
	}

	return 0;

cleanup:
	bdc_hw_exit(bdc);
phycleanup:
	bdc_phy_exit(bdc);
disable_clk:
	clk_disable_unprepare(bdc->clk);

	return ret;
}
/* Platform remove: unwind in the reverse order of bdc_probe(). */
static void bdc_remove(struct platform_device *pdev)
{
	struct bdc *bdc = platform_get_drvdata(pdev);

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_udc_exit(bdc);
	bdc_hw_exit(bdc);
	bdc_phy_exit(bdc);
	clk_disable_unprepare(bdc->clk);
}
#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: halt the controller, then gate the clock.
 * If bdc_stop() fails, the clock is intentionally left running and the
 * error is returned so the PM core can abort the suspend.
 */
static int bdc_suspend(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);
	int ret;

	/* Halt the controller */
	ret = bdc_stop(bdc);
	if (!ret)
		clk_disable_unprepare(bdc->clk);

	return ret;
}
/*
 * System-sleep resume: re-enable the clock and fully reinitialize the
 * controller via bdc_reinit(). The clock is disabled again if the
 * reinit fails, so clock state stays balanced on all paths.
 */
static int bdc_resume(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(bdc->clk);
	if (ret) {
		dev_err(bdc->dev, "err enabling the clock\n");
		return ret;
	}
	ret = bdc_reinit(bdc);
	if (ret) {
		dev_err(bdc->dev, "err in bdc reinit\n");
		clk_disable_unprepare(bdc->clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend,
bdc_resume);
static const struct of_device_id bdc_of_match[] = {
{ .compatible = "brcm,bdc-udc-v2" },
{ .compatible = "brcm,bdc" },
{ /* sentinel */ }
};
static struct platform_driver bdc_driver = {
.driver = {
.name = BRCM_BDC_NAME,
.pm = &bdc_pm_ops,
.of_match_table = bdc_of_match,
},
.probe = bdc_probe,
.remove_new = bdc_remove,
};
module_platform_driver(bdc_driver);
MODULE_AUTHOR("Ashwini Pahuja <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BRCM_BDC_DESC);
| linux-master | drivers/usb/gadget/udc/bdc/bdc_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/gadget/udc/
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
static const struct usb_gadget_ops bdc_gadget_ops;
static const char * const conn_speed_str[] = {
"Not connected",
"Full Speed",
"Low Speed",
"High Speed",
"Super Speed",
};
/* EP0 initial descriptor */
static struct usb_endpoint_descriptor bdc_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.bEndpointAddress = 0,
.wMaxPacketSize = cpu_to_le16(EP0_MAX_PKT_SIZE),
};
/*
 * Advance the srr dqp maintained by SW.
 * NOTE(review): @srr_num is currently unused -- the function always
 * operates on bdc->srr (SRR 0); callers pass 0. Confirm before adding
 * more interrupters.
 */
static void srr_dqp_index_advc(struct bdc *bdc, u32 srr_num)
{
	struct srr *srr;

	srr = &bdc->srr;
	dev_dbg_ratelimited(bdc->dev, "srr->dqp_index:%d\n", srr->dqp_index);
	srr->dqp_index++;
	/* rollback to 0 if we are past the last */
	if (srr->dqp_index == NUM_SR_ENTRIES)
		srr->dqp_index = 0;
}
/*
 * connect sr: a connect event was reported on the upstream port.
 * Reads the negotiated speed from USPC, fixes up the ep0 descriptor and
 * gadget speed accordingly, then configures/enables ep0 and moves the
 * gadget to USB_STATE_DEFAULT.
 */
static void bdc_uspc_connected(struct bdc *bdc)
{
	u32 speed, temp;
	u32 usppms;
	int ret;

	temp = bdc_readl(bdc->regs, BDC_USPC);
	speed = BDC_PSP(temp);
	dev_dbg(bdc->dev, "%s speed=%x\n", __func__, speed);
	switch (speed) {
	case BDC_SPEED_SS:
		bdc_gadget_ep0_desc.wMaxPacketSize =
						cpu_to_le16(EP0_MAX_PKT_SIZE);
		bdc->gadget.ep0->maxpacket = EP0_MAX_PKT_SIZE;
		bdc->gadget.speed = USB_SPEED_SUPER;
		/* Enable U1T in SS mode */
		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		usppms &= ~BDC_U1T(0xff);
		usppms |= BDC_U1T(U1_TIMEOUT);
		usppms |= BDC_PORT_W1S;
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case BDC_SPEED_HS:
		bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		bdc->gadget.ep0->maxpacket = 64;
		bdc->gadget.speed = USB_SPEED_HIGH;
		break;

	case BDC_SPEED_FS:
		bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		bdc->gadget.ep0->maxpacket = 64;
		bdc->gadget.speed = USB_SPEED_FULL;
		break;

	case BDC_SPEED_LS:
		bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		bdc->gadget.ep0->maxpacket = 8;
		bdc->gadget.speed = USB_SPEED_LOW;
		break;
	default:
		dev_err(bdc->dev, "UNDEFINED SPEED\n");
		return;
	}
	dev_dbg(bdc->dev, "connected at %s\n", conn_speed_str[speed]);
	/* Now we know the speed, configure ep0 (bdc_ep_array[1] is ep0) */
	bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
	ret = bdc_config_ep(bdc, bdc->bdc_ep_array[1]);
	if (ret)
		dev_err(bdc->dev, "EP0 config failed\n");
	bdc->bdc_ep_array[1]->usb_ep.desc = &bdc_gadget_ep0_desc;
	bdc->bdc_ep_array[1]->flags |= BDC_EP_ENABLED;
	usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
}
/*
 * device got disconnected.
 * @reinit: propagated to bdc->reinit so the irq handler performs a full
 *          controller reinit after this SR has been processed.
 */
static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	/*
	 * Only stop ep0 from here, rest of the endpoints will be disabled
	 * from gadget_disconnect
	 */
	ep = bdc->bdc_ep_array[1];
	if (ep && (ep->flags & BDC_EP_ENABLED))
		/* if enabled then stop and remove requests */
		bdc_ep_disable(ep);

	if (bdc->gadget_driver && bdc->gadget_driver->disconnect) {
		/* drop the lock: the gadget callback may sleep/re-enter */
		spin_unlock(&bdc->lock);
		bdc->gadget_driver->disconnect(&bdc->gadget);
		spin_lock(&bdc->lock);
	}
	/* Set Unknown speed */
	bdc->gadget.speed = USB_SPEED_UNKNOWN;
	bdc->devstatus &= DEVSTATUS_CLEAR;
	bdc->delayed_status = false;
	bdc->reinit = reinit;
	bdc->test_mode = false;
	usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
}
/*
 * TNotify wakeup timer.
 * Delayed work that re-sends a Function Wake notification every
 * BDC_TNOTIFY ms until the host starts a transfer (which clears
 * FUNC_WAKE_ISSUED elsewhere).
 */
static void bdc_func_wake_timer(struct work_struct *work)
{
	struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work);
	unsigned long flags;

	dev_dbg(bdc->dev, "%s\n", __func__);
	spin_lock_irqsave(&bdc->lock, flags);
	/*
	 * Check if host has started transferring on endpoints
	 * FUNC_WAKE_ISSUED is cleared when transfer has started after resume
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
		/* flag is still set, so again send func wake */
		bdc_function_wake_fh(bdc, 0);
		schedule_delayed_work(&bdc->func_wake_notify,
				      msecs_to_jiffies(BDC_TNOTIFY));
	}
	spin_unlock_irqrestore(&bdc->lock, flags);
}
/*
 * handler for Link state change condition.
 * Called from bdc_sr_uspc() with bdc->lock held. Notifies the gadget
 * driver of suspend/resume transitions and drives SS remote-wakeup
 * function-wake notifications on resume to U0.
 */
static void handle_link_state_change(struct bdc *bdc, u32 uspc)
{
	u32 link_state;

	dev_dbg(bdc->dev, "Link state change");
	link_state = BDC_PST(uspc);
	switch (link_state) {
	case BDC_LINK_STATE_U3:
		if ((bdc->gadget.speed != USB_SPEED_UNKNOWN) &&
		    bdc->gadget_driver->suspend) {
			dev_dbg(bdc->dev, "Entered Suspend mode\n");
			/* callback may sleep, drop the lock around it */
			spin_unlock(&bdc->lock);
			bdc->devstatus |= DEVICE_SUSPENDED;
			bdc->gadget_driver->suspend(&bdc->gadget);
			spin_lock(&bdc->lock);
		}
		break;
	case BDC_LINK_STATE_U0:
		if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
			bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
			if (bdc->gadget.speed == USB_SPEED_SUPER) {
				bdc_function_wake_fh(bdc, 0);
				bdc->devstatus |= FUNC_WAKE_ISSUED;
				/*
				 * Start a Notification timer and check if the
				 * Host transferred anything on any of the EPs,
				 * if not then send function wake again every
				 * TNotification secs until host initiates
				 * transfer to BDC, USB3 spec Table 8.13
				 */
				schedule_delayed_work(
						&bdc->func_wake_notify,
						msecs_to_jiffies(BDC_TNOTIFY));
				dev_dbg(bdc->dev, "sched func_wake_notify\n");
			}
		}
		break;
	case BDC_LINK_STATE_RESUME:
		dev_dbg(bdc->dev, "Resumed from Suspend\n");
		if (bdc->devstatus & DEVICE_SUSPENDED) {
			/*
			 * ->resume is optional: the previous code called it
			 * unconditionally and could dereference NULL. Also
			 * drop the lock around the callback, matching the
			 * suspend path above.
			 */
			if (bdc->gadget_driver && bdc->gadget_driver->resume) {
				spin_unlock(&bdc->lock);
				bdc->gadget_driver->resume(&bdc->gadget);
				spin_lock(&bdc->lock);
			}
			bdc->devstatus &= ~DEVICE_SUSPENDED;
		}
		break;
	default:
		dev_dbg(bdc->dev, "link state:%d\n", link_state);
	}
}
/*
 * something changes on upstream port, handle it here.
 * Status-report handler for USPC events, called from the irq handler
 * with bdc->lock held. Decodes connect change, Vbus change, port reset
 * and link-state change, then write-1-clears the handled change bits.
 */
void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport)
{
	u32 clear_flags = 0;
	u32 uspc;
	bool connected = false;
	bool disconn = false;

	uspc = bdc_readl(bdc->regs, BDC_USPC);
	dev_dbg(bdc->dev, "%s uspc=0x%08x\n", __func__, uspc);

	/* Port connect changed */
	if (uspc & BDC_PCC) {
		/* Vbus not present, and not connected to Downstream port */
		if ((uspc & BDC_VBC) && !(uspc & BDC_VBS) && !(uspc & BDC_PCS))
			disconn = true;
		else if ((uspc & BDC_PCS) && !BDC_PST(uspc))
			connected = true;
		clear_flags |= BDC_PCC;
	}

	/* Change in VBus and VBus is present */
	if ((uspc & BDC_VBC) && (uspc & BDC_VBS)) {
		/* only present termination if the gadget asked for pullup */
		if (bdc->pullup) {
			dev_dbg(bdc->dev, "Do a softconnect\n");
			/* Attached state, do a softconnect */
			bdc_softconn(bdc);
			usb_gadget_set_state(&bdc->gadget, USB_STATE_POWERED);
		}
		clear_flags |= BDC_VBC;
	} else if ((uspc & BDC_PRS) || (uspc & BDC_PRC) || disconn) {
		/* Hot reset, warm reset, 2.0 bus reset or disconn */
		dev_dbg(bdc->dev, "Port reset or disconn\n");
		bdc_uspc_disconnected(bdc, disconn);
		clear_flags |= BDC_PRC;
	} else if ((uspc & BDC_PSC) && (uspc & BDC_PCS)) {
		/* Change in Link state */
		handle_link_state_change(bdc, uspc);
		clear_flags |= BDC_PSC;
	}

	/*
	 * In SS we might not have PRC bit set before connection, but in 2.0
	 * the PRC bit is set before connection, so moving this condition out
	 * of bus reset to handle both SS/2.0 speeds.
	 */
	if (connected) {
		/* This is the connect event for U0/L0 */
		dev_dbg(bdc->dev, "Connected\n");
		bdc_uspc_connected(bdc);
		bdc->devstatus &= ~(DEVICE_SUSPENDED);
	}
	/* ack only the change bits we handled (register is W1C for these) */
	uspc = bdc_readl(bdc->regs, BDC_USPC);
	uspc &= (~BDC_USPSC_RW);
	dev_dbg(bdc->dev, "uspc=%x\n", uspc);
	bdc_writel(bdc->regs, BDC_USPC, clear_flags);
}
/*
 * Main interrupt handler for bdc.
 * Drains SRR 0 between the SW dequeue index and the HW enqueue index,
 * dispatching each status report by type, then writes the new dequeue
 * pointer back and acks the interrupt. Performs a deferred bdc_reinit()
 * if any handler requested one.
 */
static irqreturn_t bdc_udc_interrupt(int irq, void *_bdc)
{
	u32 eqp_index, dqp_index, sr_type, srr_int;
	struct bdc_sr *sreport;
	struct bdc *bdc = _bdc;
	u32 status;
	int ret;

	spin_lock(&bdc->lock);
	status = bdc_readl(bdc->regs, BDC_BDCSC);
	/* irq is shared: bail out if this device has nothing pending */
	if (!(status & BDC_GIP)) {
		spin_unlock(&bdc->lock);
		return IRQ_NONE;
	}
	srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
	/* Check if the SRR IP bit it set? */
	if (!(srr_int & BDC_SRR_IP)) {
		dev_warn(bdc->dev, "Global irq pending but SRR IP is 0\n");
		spin_unlock(&bdc->lock);
		return IRQ_NONE;
	}
	eqp_index = BDC_SRR_EPI(srr_int);
	dqp_index = BDC_SRR_DPI(srr_int);
	dev_dbg(bdc->dev,
		"%s eqp_index=%d dqp_index=%d srr.dqp_index=%d\n\n",
		__func__, eqp_index, dqp_index, bdc->srr.dqp_index);

	/* check for ring empty condition */
	if (eqp_index == dqp_index) {
		dev_dbg(bdc->dev, "SRR empty?\n");
		spin_unlock(&bdc->lock);
		return IRQ_HANDLED;
	}

	while (bdc->srr.dqp_index != eqp_index) {
		sreport = &bdc->srr.sr_bds[bdc->srr.dqp_index];
		/* sreport is read before using it */
		rmb();
		sr_type = le32_to_cpu(sreport->offset[3]) & BD_TYPE_BITMASK;
		dev_dbg_ratelimited(bdc->dev, "sr_type=%d\n", sr_type);
		switch (sr_type) {
		case SR_XSF:
			/* transfer status report -> bdc_sr_xsf */
			bdc->sr_handler[0](bdc, sreport);
			break;

		case SR_USPC:
			/* port status change -> bdc_sr_uspc */
			bdc->sr_handler[1](bdc, sreport);
			break;
		default:
			dev_warn(bdc->dev, "SR:%d not handled\n", sr_type);
		}
		/* Advance the srr dqp index */
		srr_dqp_index_advc(bdc, 0);
	}
	/* update the hw dequeue pointer */
	srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
	srr_int &= ~BDC_SRR_DPI_MASK;
	srr_int &= ~(BDC_SRR_RWS|BDC_SRR_RST|BDC_SRR_ISR);
	srr_int |= ((bdc->srr.dqp_index) << 16);
	/* acknowledge the interrupt (IP is write-1-clear, presumably) */
	srr_int |= BDC_SRR_IP;
	bdc_writel(bdc->regs, BDC_SRRINT(0), srr_int);
	/* read back to flush the posted write before re-enabling irqs */
	srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
	if (bdc->reinit) {
		ret = bdc_reinit(bdc);
		if (ret)
			dev_err(bdc->dev, "err in bdc reinit\n");
	}

	spin_unlock(&bdc->lock);

	return IRQ_HANDLED;
}
/* Gadget ops */
/*
 * udc_start: bind a gadget driver and start the controller. The actual
 * soft-connect happens later, when a USPC SR reports Vbus present.
 */
static int bdc_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct bdc *bdc = gadget_to_bdc(gadget);
	unsigned long flags;
	int ret = 0;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	spin_lock_irqsave(&bdc->lock, flags);
	if (bdc->gadget_driver) {
		/* only one gadget driver may be bound at a time */
		dev_err(bdc->dev, "%s is already bound to %s\n",
			bdc->gadget.name,
			bdc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err;
	}
	/*
	 * Run the controller from here and when BDC is connected to
	 * Host then driver will receive a USPC SR with VBUS present
	 * and then driver will do a softconnect.
	 */
	ret = bdc_run(bdc);
	if (ret) {
		dev_err(bdc->dev, "%s bdc run fail\n", __func__);
		goto err;
	}
	bdc->gadget_driver = driver;
	bdc->gadget.dev.driver = &driver->driver;
err:
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
/* udc_stop: halt the controller and drop the bound gadget driver. */
static int bdc_udc_stop(struct usb_gadget *gadget)
{
	struct bdc *bdc = gadget_to_bdc(gadget);
	unsigned long flags;

	dev_dbg(bdc->dev, "%s()\n", __func__);

	spin_lock_irqsave(&bdc->lock, flags);
	bdc_stop(bdc);
	bdc->gadget_driver = NULL;
	bdc->gadget.dev.driver = NULL;
	spin_unlock_irqrestore(&bdc->lock, flags);

	return 0;
}
/*
 * pullup: present/remove the termination on the upstream port.
 * When turning on for a self-powered device, the actual softconnect is
 * deferred until a Vbus-present USPC SR arrives; for bus-powered
 * devices Vbus is usually already up, so connect immediately.
 */
static int bdc_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bdc *bdc;
	unsigned long flags;
	u32 uspc;

	/*
	 * Validate before use: the previous code checked @gadget only
	 * after gadget_to_bdc() and a dev_dbg() had already used it.
	 */
	if (!gadget)
		return -EINVAL;

	bdc = gadget_to_bdc(gadget);
	dev_dbg(bdc->dev, "%s() is_on:%d\n", __func__, is_on);

	spin_lock_irqsave(&bdc->lock, flags);
	if (!is_on) {
		bdc_softdisconn(bdc);
		bdc->pullup = false;
	} else {
		/*
		 * For a self powered device, we need to wait till we receive
		 * a VBUS change and Vbus present event, then if pullup flag
		 * is set, then only we present the Termintation.
		 */
		bdc->pullup = true;
		/*
		 * Check if BDC is already connected to Host i.e Vbus=1,
		 * if yes, then present TERM now, this is typical for bus
		 * powered devices.
		 */
		uspc = bdc_readl(bdc->regs, BDC_USPC);
		if (uspc & BDC_VBS)
			bdc_softconn(bdc);
	}
	spin_unlock_irqrestore(&bdc->lock, flags);

	return 0;
}
/*
 * set_selfpowered: record self/bus-powered state for GET_STATUS.
 * bdc->devstatus is what ep0 reports to the host, so the
 * USB_DEVICE_SELF_POWERED bit must be SET when the device is
 * self-powered -- the previous code had the condition inverted,
 * contradicting the gadget->is_selfpowered assignment above it.
 */
static int bdc_udc_set_selfpowered(struct usb_gadget *gadget,
				   int is_self)
{
	struct bdc *bdc = gadget_to_bdc(gadget);
	unsigned long flags;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	gadget->is_selfpowered = (is_self != 0);
	spin_lock_irqsave(&bdc->lock, flags);
	if (is_self)
		bdc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
	else
		bdc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return 0;
}
/*
 * wakeup: initiate remote wakeup from U3.
 * Only valid when the host has enabled remote wakeup and the link is in
 * U3; requests a transition to U0 via the link-state write-strobe. For
 * SS, REMOTE_WAKEUP_ISSUED is set so the U0 handler sends Function Wake.
 */
static int bdc_udc_wakeup(struct usb_gadget *gadget)
{
	struct bdc *bdc = gadget_to_bdc(gadget);
	unsigned long flags;
	u8 link_state;
	u32 uspc;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s() bdc->devstatus=%08x\n",
		__func__, bdc->devstatus);

	if (!(bdc->devstatus & REMOTE_WAKE_ENABLE))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&bdc->lock, flags);
	uspc = bdc_readl(bdc->regs, BDC_USPC);
	link_state = BDC_PST(uspc);
	dev_dbg(bdc->dev, "link_state =%d portsc=%x", link_state, uspc);
	if (link_state != BDC_LINK_STATE_U3) {
		dev_warn(bdc->dev,
			 "can't wakeup from link state %d\n",
			 link_state);
		ret = -EINVAL;
		goto out;
	}
	if (bdc->gadget.speed == USB_SPEED_SUPER)
		bdc->devstatus |= REMOTE_WAKEUP_ISSUED;

	/* request U0: write the new link state with the strobe bit */
	uspc &= ~BDC_PST_MASK;
	uspc &= (~BDC_USPSC_RW);
	uspc |= BDC_PST(BDC_LINK_STATE_U0);
	uspc |= BDC_SWS;
	bdc_writel(bdc->regs, BDC_USPC, uspc);
	/* readback for debug only */
	uspc = bdc_readl(bdc->regs, BDC_USPC);
	link_state = BDC_PST(uspc);
	dev_dbg(bdc->dev, "link_state =%d portsc=%x", link_state, uspc);
out:
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static const struct usb_gadget_ops bdc_gadget_ops = {
.wakeup = bdc_udc_wakeup,
.set_selfpowered = bdc_udc_set_selfpowered,
.pullup = bdc_udc_pullup,
.udc_start = bdc_udc_start,
.udc_stop = bdc_udc_stop,
};
/*
 * Init the gadget interface and register the udc.
 * Sets up the gadget struct, requests the (shared) irq, initializes the
 * endpoint layer, registers with the UDC core, enables ep0's BD list and
 * finally turns on global interrupts. Returns 0 or a negative errno;
 * on failure everything done so far is unwound.
 */
int bdc_udc_init(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	bdc->gadget.ops = &bdc_gadget_ops;
	bdc->gadget.max_speed = USB_SPEED_SUPER;
	bdc->gadget.speed = USB_SPEED_UNKNOWN;
	bdc->gadget.dev.parent = bdc->dev;

	bdc->gadget.sg_supported = false;

	bdc->gadget.name = BRCM_BDC_NAME;

	ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
			       IRQF_SHARED, BRCM_BDC_NAME, bdc);
	if (ret) {
		dev_err(bdc->dev,
			"failed to request irq #%d %d\n",
			bdc->irq, ret);
		return ret;
	}

	ret = bdc_init_ep(bdc);
	if (ret) {
		dev_err(bdc->dev, "bdc init ep fail: %d\n", ret);
		return ret;
	}

	ret = usb_add_gadget_udc(bdc->dev, &bdc->gadget);
	if (ret) {
		dev_err(bdc->dev, "failed to register udc\n");
		goto err0;
	}
	usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
	/* bdc_ep_array[1] is ep0 (index 0 is reserved) */
	bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
	/*
	 * Allocate bd list for ep0 only, ep0 will be enabled on connect
	 * status report when the speed is known
	 */
	ret = bdc_ep_enable(bdc->bdc_ep_array[1]);
	if (ret) {
		dev_err(bdc->dev, "fail to enable %s\n",
			bdc->bdc_ep_array[1]->name);
		goto err1;
	}
	INIT_DELAYED_WORK(&bdc->func_wake_notify, bdc_func_wake_timer);
	/* Enable Interrupts */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	temp |= BDC_GIE;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	return 0;
err1:
	usb_del_gadget_udc(&bdc->gadget);
err0:
	bdc_free_ep(bdc);

	return ret;
}
/* Tear down the gadget layer: disable ep0, unregister, free endpoints. */
void bdc_udc_exit(struct bdc *bdc)
{
	unsigned long flags;

	dev_dbg(bdc->dev, "%s()\n", __func__);

	spin_lock_irqsave(&bdc->lock, flags);
	bdc_ep_disable(bdc->bdc_ep_array[1]);
	spin_unlock_irqrestore(&bdc->lock, flags);

	usb_del_gadget_udc(&bdc->gadget);
	bdc_free_ep(bdc);
}
| linux-master | drivers/usb/gadget/udc/bdc/bdc_udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* serial.c -- USB gadget serial driver
*
* Copyright (C) 2003 Al Borchers ([email protected])
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "u_serial.h"
/* Defines */
#define GS_VERSION_STR "v2.4"
#define GS_VERSION_NUM 0x2400
#define GS_LONG_NAME "Gadget Serial"
#define GS_VERSION_NAME GS_LONG_NAME " " GS_VERSION_STR
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
/* Thanks to NetChip Technologies for donating this product ID.
*
* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#define GS_VENDOR_ID 0x0525 /* NetChip */
#define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */
#define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */
#define GS_CDC_OBEX_PRODUCT_ID 0xa4a9 /* ... as CDC-OBEX */
/* string IDs are assigned dynamically */
#define STRING_DESCRIPTION_IDX USB_GADGET_FIRST_AVAIL_IDX
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = GS_VERSION_NAME,
[USB_GADGET_SERIAL_IDX].s = "",
[STRING_DESCRIPTION_IDX].s = NULL /* updated; f(use_acm) */,
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
/* .bDeviceClass = f(use_acm) */
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
.idVendor = cpu_to_le16(GS_VENDOR_ID),
/* .idProduct = f(use_acm) */
.bcdDevice = cpu_to_le16(GS_VERSION_NUM),
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
.bNumConfigurations = 1,
};
static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
/* Module */
MODULE_DESCRIPTION(GS_VERSION_NAME);
MODULE_AUTHOR("Al Borchers");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
static bool use_acm = true;
module_param(use_acm, bool, 0);
MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes");
static bool use_obex = false;
module_param(use_obex, bool, 0);
MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no");
static unsigned n_ports = 1;
module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
static bool enable = true;
static int switch_gserial_enable(bool do_enable);
static int enable_set(const char *s, const struct kernel_param *kp)
{
bool do_enable;
int ret;
if (!s) /* called for no-arg enable == default */
return 0;
ret = kstrtobool(s, &do_enable);
if (ret || enable == do_enable)
return ret;
ret = switch_gserial_enable(do_enable);
if (!ret)
enable = do_enable;
return ret;
}
static const struct kernel_param_ops enable_ops = {
.set = enable_set,
.get = param_get_bool,
};
module_param_cb(enable, &enable_ops, &enable, 0644);
/*-------------------------------------------------------------------------*/
static struct usb_configuration serial_config_driver = {
/* .label = f(use_acm) */
/* .bConfigurationValue = f(use_acm) */
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
static struct usb_function_instance *fi_serial[MAX_U_SERIAL_PORTS];
static struct usb_function *f_serial[MAX_U_SERIAL_PORTS];
/*
 * Add the configuration and instantiate n_ports copies of the function
 * named @f_name ("acm", "obex" or "gser") into it. On failure, every
 * fully-added port is removed and its function/instance released.
 */
static int serial_register_ports(struct usb_composite_dev *cdev,
		struct usb_configuration *c, const char *f_name)
{
	int i;
	int ret;

	ret = usb_add_config_only(cdev, c);
	if (ret)
		goto out;

	for (i = 0; i < n_ports; i++) {

		fi_serial[i] = usb_get_function_instance(f_name);
		if (IS_ERR(fi_serial[i])) {
			ret = PTR_ERR(fi_serial[i]);
			goto fail;
		}

		f_serial[i] = usb_get_function(fi_serial[i]);
		if (IS_ERR(f_serial[i])) {
			ret = PTR_ERR(f_serial[i]);
			goto err_get_func;
		}

		ret = usb_add_function(c, f_serial[i]);
		if (ret)
			goto err_add_func;
	}

	return 0;

	/* unwind the partially-created port i, then all earlier ports */
err_add_func:
	usb_put_function(f_serial[i]);
err_get_func:
	usb_put_function_instance(fi_serial[i]);

fail:
	i--;
	while (i >= 0) {
		usb_remove_function(c, f_serial[i]);
		usb_put_function(f_serial[i]);
		usb_put_function_instance(fi_serial[i]);
		i--;
	}
out:
	return ret;
}
/*
 * Composite bind: allocate string IDs, set up the optional OTG
 * descriptor, then register the single configuration with the function
 * type selected by the use_acm/use_obex module parameters.
 */
static int gs_bind(struct usb_composite_dev *cdev)
{
	int status;

	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */
	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto fail;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
	status = strings_dev[STRING_DESCRIPTION_IDX].id;
	serial_config_driver.iConfiguration = status;

	if (gadget_is_otg(cdev->gadget)) {
		if (!otg_desc[0]) {
			struct usb_descriptor_header *usb_desc;

			usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
			if (!usb_desc) {
				status = -ENOMEM;
				goto fail;
			}
			usb_otg_descriptor_init(cdev->gadget, usb_desc);
			otg_desc[0] = usb_desc;
			otg_desc[1] = NULL;
		}
		serial_config_driver.descriptors = otg_desc;
		serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	/* register our configuration */
	if (use_acm) {
		status  = serial_register_ports(cdev, &serial_config_driver,
				"acm");
		usb_ep_autoconfig_reset(cdev->gadget);
	} else if (use_obex)
		status = serial_register_ports(cdev, &serial_config_driver,
				"obex");
	else {
		status = serial_register_ports(cdev, &serial_config_driver,
				"gser");
	}
	if (status < 0)
		goto fail1;

	usb_composite_overwrite_options(cdev, &coverwrite);
	INFO(cdev, "%s\n", GS_VERSION_NAME);

	return 0;
fail1:
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
fail:
	return status;
}
static int gs_unbind(struct usb_composite_dev *cdev)
{
int i;
for (i = 0; i < n_ports; i++) {
usb_put_function(f_serial[i]);
usb_put_function_instance(fi_serial[i]);
}
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
static struct usb_composite_driver gserial_driver = {
.name = "g_serial",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = gs_bind,
.unbind = gs_unbind,
};
/* Register or unregister the composite driver for the "enable" param. */
static int switch_gserial_enable(bool do_enable)
{
	/* a NULL label means gserial_init() was not called, yet */
	if (!serial_config_driver.label)
		return 0;

	if (!do_enable) {
		usb_composite_unregister(&gserial_driver);
		return 0;
	}

	return usb_composite_probe(&gserial_driver);
}
/*
 * Module init: pick the config label, bConfigurationValue, device class
 * and product ID for the selected function type, then register the
 * composite driver unless "enable=0" was given.
 */
static int __init gserial_init(void)
{
	/* We *could* export two configs; that'd be much cleaner...
	 * but neither of these product IDs was defined that way.
	 */
	if (use_acm) {
		serial_config_driver.label = "CDC ACM config";
		serial_config_driver.bConfigurationValue = 2;
		device_desc.bDeviceClass = USB_CLASS_COMM;
		device_desc.idProduct =
				cpu_to_le16(GS_CDC_PRODUCT_ID);
	} else if (use_obex) {
		serial_config_driver.label = "CDC OBEX config";
		serial_config_driver.bConfigurationValue = 3;
		device_desc.bDeviceClass = USB_CLASS_COMM;
		device_desc.idProduct =
			cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID);
	} else {
		serial_config_driver.label = "Generic Serial config";
		serial_config_driver.bConfigurationValue = 1;
		device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
		device_desc.idProduct =
				cpu_to_le16(GS_PRODUCT_ID);
	}
	strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;

	if (!enable)
		return 0;

	return usb_composite_probe(&gserial_driver);
}
module_init(gserial_init);
/*
 * gserial_cleanup - module exit: tear down the composite driver, but
 * only if it was actually registered (the "enable" parameter was set).
 */
static void __exit gserial_cleanup(void)
{
	if (!enable)
		return;

	usb_composite_unregister(&gserial_driver);
}
module_exit(gserial_cleanup);
| linux-master | drivers/usb/gadget/legacy/serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* gmidi.c -- USB MIDI Gadget Driver
*
* Copyright (C) 2006 Thumtronics Pty Ltd.
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <[email protected]>
*
* This code is based in part on:
*
* Gadget Zero driver, Copyright (C) 2003-2004 David Brownell.
* USB Audio driver, Copyright (C) 2002 by Takashi Iwai.
* USB MIDI driver, Copyright (C) 2002-2005 Clemens Ladisch.
*
* Refer to the USB Device Class Definition for MIDI Devices:
* http://www.usb.org/developers/devclass_docs/midi10.pdf
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/initval.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include "u_midi.h"
/*-------------------------------------------------------------------------*/
MODULE_AUTHOR("Ben Williamson");
MODULE_LICENSE("GPL v2");
static const char longname[] = "MIDI Gadget";
USB_GADGET_COMPOSITE_OPTIONS();
static int index = SNDRV_DEFAULT_IDX1;
module_param(index, int, S_IRUGO);
MODULE_PARM_DESC(index, "Index value for the USB MIDI Gadget adapter.");
static char *id = SNDRV_DEFAULT_STR1;
module_param(id, charp, S_IRUGO);
MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter.");
static unsigned int buflen = 512;
module_param(buflen, uint, S_IRUGO);
MODULE_PARM_DESC(buflen, "MIDI buffer length");
static unsigned int qlen = 32;
module_param(qlen, uint, S_IRUGO);
MODULE_PARM_DESC(qlen, "USB read and write request queue length");
static unsigned int in_ports = 1;
module_param(in_ports, uint, S_IRUGO);
MODULE_PARM_DESC(in_ports, "Number of MIDI input ports");
static unsigned int out_ports = 1;
module_param(out_ports, uint, S_IRUGO);
MODULE_PARM_DESC(out_ports, "Number of MIDI output ports");
/* Thanks to Grey Innovation for donating this product ID.
*
* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#define DRIVER_VENDOR_NUM 0x17b3 /* Grey Innovation */
#define DRIVER_PRODUCT_NUM 0x0004 /* Linux-USB "MIDI Gadget" */
/* string IDs are assigned dynamically */
#define STRING_DESCRIPTION_IDX USB_GADGET_FIRST_AVAIL_IDX
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.idVendor = cpu_to_le16(DRIVER_VENDOR_NUM),
.idProduct = cpu_to_le16(DRIVER_PRODUCT_NUM),
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
.bNumConfigurations = 1,
};
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "Grey Innovation",
[USB_GADGET_PRODUCT_IDX].s = "MIDI Gadget",
[USB_GADGET_SERIAL_IDX].s = "",
[STRING_DESCRIPTION_IDX].s = "MIDI",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_function_instance *fi_midi;
static struct usb_function *f_midi;
/*
 * midi_unbind - composite unbind callback: release the MIDI function
 * and the function instance acquired during bind.
 */
static int midi_unbind(struct usb_composite_dev *dev)
{
	usb_put_function(f_midi);
	usb_put_function_instance(fi_midi);
	return 0;
}
static struct usb_configuration midi_config = {
.label = "MIDI Gadget",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE,
.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
};
/*
 * midi_bind_config - populate the configuration with the MIDI function.
 * Takes a usb_function reference from the "midi" instance and adds it to
 * @c, dropping the reference again if the add fails.
 */
static int midi_bind_config(struct usb_configuration *c)
{
	int err;

	f_midi = usb_get_function(fi_midi);
	if (IS_ERR(f_midi))
		return PTR_ERR(f_midi);

	err = usb_add_function(c, f_midi);
	if (!err)
		return 0;

	usb_put_function(f_midi);
	return err;
}
/*
 * midi_bind - composite bind callback: create the "midi" function
 * instance, propagate the module parameters into its options, allocate
 * string descriptor IDs and register the single MIDI configuration.
 */
static int midi_bind(struct usb_composite_dev *cdev)
{
	struct f_midi_opts *midi_opts;
	int status;
	fi_midi = usb_get_function_instance("midi");
	if (IS_ERR(fi_midi))
		return PTR_ERR(fi_midi);
	/* hand the module parameters over to the function driver */
	midi_opts = container_of(fi_midi, struct f_midi_opts, func_inst);
	midi_opts->index = index;
	midi_opts->id = id;
	midi_opts->in_ports = in_ports;
	midi_opts->out_ports = out_ports;
	midi_opts->buflen = buflen;
	midi_opts->qlen = qlen;
	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto put;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
	midi_config.iConfiguration = strings_dev[STRING_DESCRIPTION_IDX].id;
	status = usb_add_config(cdev, &midi_config, midi_bind_config);
	if (status < 0)
		goto put;
	usb_composite_overwrite_options(cdev, &coverwrite);
	pr_info("%s\n", longname);
	return 0;
put:
	/* error path: return the instance reference taken above */
	usb_put_function_instance(fi_midi);
	return status;
}
static struct usb_composite_driver midi_driver = {
.name = longname,
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = midi_bind,
.unbind = midi_unbind,
};
module_usb_composite_driver(midi_driver);
| linux-master | drivers/usb/gadget/legacy/gmidi.c |
// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/storage.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>
#include "u_tcm.h"
USB_GADGET_COMPOSITE_OPTIONS();
#define UAS_VENDOR_ID 0x0525 /* NetChip */
#define UAS_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
static struct usb_device_descriptor usbg_device_desc = {
.bLength = sizeof(usbg_device_desc),
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.idVendor = cpu_to_le16(UAS_VENDOR_ID),
.idProduct = cpu_to_le16(UAS_PRODUCT_ID),
.bNumConfigurations = 1,
};
#define USB_G_STR_CONFIG USB_GADGET_FIRST_AVAIL_IDX
static struct usb_string usbg_us_strings[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "Target Manufacturer",
[USB_GADGET_PRODUCT_IDX].s = "Target Product",
[USB_GADGET_SERIAL_IDX].s = "000000000001",
[USB_G_STR_CONFIG].s = "default config",
{ },
};
static struct usb_gadget_strings usbg_stringtab = {
.language = 0x0409,
.strings = usbg_us_strings,
};
static struct usb_gadget_strings *usbg_strings[] = {
&usbg_stringtab,
NULL,
};
static struct usb_function_instance *fi_tcm;
static struct usb_function *f_tcm;
/*
 * guas_unbind - composite unbind callback: drop the tcm function
 * reference only if tcm_do_config() managed to acquire a valid one.
 */
static int guas_unbind(struct usb_composite_dev *cdev)
{
	if (!IS_ERR_OR_NULL(f_tcm))
		usb_put_function(f_tcm);
	return 0;
}
/*
 * tcm_do_config - populate the single configuration with the tcm
 * function, releasing the freshly taken reference again on failure.
 */
static int tcm_do_config(struct usb_configuration *c)
{
	int err;

	f_tcm = usb_get_function(fi_tcm);
	if (IS_ERR(f_tcm))
		return PTR_ERR(f_tcm);

	err = usb_add_function(c, f_tcm);
	if (!err)
		return 0;

	usb_put_function(f_tcm);
	return err;
}
static struct usb_configuration usbg_config_driver = {
.label = "Linux Target",
.bConfigurationValue = 1,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
static int usbg_attach(struct usb_function_instance *f);
static void usbg_detach(struct usb_function_instance *f);
/*
 * usb_target_bind - composite bind callback: allocate string IDs, wire
 * them into the device and config descriptors, and add the single
 * self-powered configuration carrying the tcm function.
 */
static int usb_target_bind(struct usb_composite_dev *cdev)
{
	int ret;
	ret = usb_string_ids_tab(cdev, usbg_us_strings);
	if (ret)
		return ret;
	usbg_device_desc.iManufacturer =
		usbg_us_strings[USB_GADGET_MANUFACTURER_IDX].id;
	usbg_device_desc.iProduct = usbg_us_strings[USB_GADGET_PRODUCT_IDX].id;
	usbg_device_desc.iSerialNumber =
		usbg_us_strings[USB_GADGET_SERIAL_IDX].id;
	usbg_config_driver.iConfiguration =
		usbg_us_strings[USB_G_STR_CONFIG].id;
	ret = usb_add_config(cdev, &usbg_config_driver, tcm_do_config);
	if (ret)
		return ret;
	usb_composite_overwrite_options(cdev, &coverwrite);
	return 0;
}
static struct usb_composite_driver usbg_driver = {
.name = "g_target",
.dev = &usbg_device_desc,
.strings = usbg_strings,
.max_speed = USB_SPEED_SUPER,
.bind = usb_target_bind,
.unbind = guas_unbind,
};
/* tcm register callback: bring the composite gadget online. */
static int usbg_attach(struct usb_function_instance *f)
{
	return usb_composite_probe(&usbg_driver);
}
/* tcm unregister callback: take the composite gadget offline. */
static void usbg_detach(struct usb_function_instance *f)
{
	usb_composite_unregister(&usbg_driver);
}
/*
 * usb_target_gadget_init - module init: obtain the "tcm" function
 * instance and register attach/detach callbacks with it. The composite
 * driver itself is only probed later, from usbg_attach().
 */
static int __init usb_target_gadget_init(void)
{
	struct f_tcm_opts *tcm_opts;
	fi_tcm = usb_get_function_instance("tcm");
	if (IS_ERR(fi_tcm))
		return PTR_ERR(fi_tcm);
	tcm_opts = container_of(fi_tcm, struct f_tcm_opts, func_inst);
	/* dep_lock guards the callback/ownership fields set below */
	mutex_lock(&tcm_opts->dep_lock);
	tcm_opts->tcm_register_callback = usbg_attach;
	tcm_opts->tcm_unregister_callback = usbg_detach;
	tcm_opts->dependent = THIS_MODULE;
	tcm_opts->can_attach = true;
	tcm_opts->has_dep = true;
	mutex_unlock(&tcm_opts->dep_lock);
	fi_tcm->set_inst_name(fi_tcm, "tcm-legacy");
	return 0;
}
module_init(usb_target_gadget_init);
/*
 * usb_target_gadget_exit - module exit: return the "tcm" function
 * instance obtained at init time, if we actually hold a valid one.
 */
static void __exit usb_target_gadget_exit(void)
{
	if (IS_ERR_OR_NULL(fi_tcm))
		return;

	usb_put_function_instance(fi_tcm);
}
module_exit(usb_target_gadget_exit);
MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
MODULE_DESCRIPTION("usb-gadget fabric");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/gadget/legacy/tcm_usb_gadget.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* g_ffs.c -- user mode file system API for USB composite function controllers
*
* Copyright (C) 2010 Samsung Electronics
* Author: Michal Nazarewicz <[email protected]>
*/
#define pr_fmt(fmt) "g_ffs: " fmt
#include <linux/module.h>
#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
#include <linux/netdevice.h>
# if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
# endif
# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
# define USB_ETH_RNDIS y
# endif
# include "u_ecm.h"
# include "u_gether.h"
# ifdef USB_ETH_RNDIS
# include "u_rndis.h"
# include "rndis.h"
# endif
# include "u_ether.h"
USB_ETHERNET_MODULE_PARAMETERS();
# ifdef CONFIG_USB_FUNCTIONFS_ETH
static int eth_bind_config(struct usb_configuration *c);
static struct usb_function_instance *fi_ecm;
static struct usb_function *f_ecm;
static struct usb_function_instance *fi_geth;
static struct usb_function *f_geth;
# endif
# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
static int bind_rndis_config(struct usb_configuration *c);
static struct usb_function_instance *fi_rndis;
static struct usb_function *f_rndis;
# endif
#endif
#include "u_fs.h"
#define DRIVER_NAME "g_ffs"
#define DRIVER_DESC "USB Function Filesystem"
#define DRIVER_VERSION "24 Aug 2004"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
#define GFS_VENDOR_ID 0x1d6b /* Linux Foundation */
#define GFS_PRODUCT_ID 0x0105 /* FunctionFS Gadget */
#define GFS_MAX_DEVS 10
USB_GADGET_COMPOSITE_OPTIONS();
static struct usb_device_descriptor gfs_dev_desc = {
.bLength = sizeof gfs_dev_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.idVendor = cpu_to_le16(GFS_VENDOR_ID),
.idProduct = cpu_to_le16(GFS_PRODUCT_ID),
};
static char *func_names[GFS_MAX_DEVS];
static unsigned int func_num;
module_param_named(bDeviceClass, gfs_dev_desc.bDeviceClass, byte, 0644);
MODULE_PARM_DESC(bDeviceClass, "USB Device class");
module_param_named(bDeviceSubClass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
MODULE_PARM_DESC(bDeviceSubClass, "USB Device subclass");
module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
module_param_array_named(functions, func_names, charp, &func_num, 0);
MODULE_PARM_DESC(functions, "USB Functions list");
static const struct usb_descriptor_header *gfs_otg_desc[2];
/* String IDs are assigned dynamically */
static struct usb_string gfs_strings[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
{ .s = "FunctionFS + RNDIS" },
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
{ .s = "FunctionFS + ECM" },
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
{ .s = "FunctionFS" },
#endif
{ } /* end of list */
};
static struct usb_gadget_strings *gfs_dev_strings[] = {
&(struct usb_gadget_strings) {
.language = 0x0409, /* en-us */
.strings = gfs_strings,
},
NULL,
};
struct gfs_configuration {
struct usb_configuration c;
int (*eth)(struct usb_configuration *c);
int num;
};
static struct gfs_configuration gfs_configurations[] = {
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
{
.eth = bind_rndis_config,
},
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
{
.eth = eth_bind_config,
},
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
{
},
#endif
};
static void *functionfs_acquire_dev(struct ffs_dev *dev);
static void functionfs_release_dev(struct ffs_dev *dev);
static int functionfs_ready_callback(struct ffs_data *ffs);
static void functionfs_closed_callback(struct ffs_data *ffs);
static int gfs_bind(struct usb_composite_dev *cdev);
static int gfs_unbind(struct usb_composite_dev *cdev);
static int gfs_do_config(struct usb_configuration *c);
static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = gfs_bind,
.unbind = gfs_unbind,
};
static unsigned int missing_funcs;
static bool gfs_registered;
static bool gfs_single_func;
static struct usb_function_instance **fi_ffs;
static struct usb_function **f_ffs[] = {
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
NULL,
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
NULL,
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
NULL,
#endif
};
#define N_CONF ARRAY_SIZE(f_ffs)
/*
 * gfs_init - module init: allocate the per-config function pointer
 * matrix, create one "ffs" function instance per requested name (or a
 * single anonymous one when fewer than two names were given) and hook up
 * the FunctionFS lifecycle callbacks. The gadget itself is registered
 * later, from functionfs_ready_callback(), once all instances are ready.
 */
static int __init gfs_init(void)
{
	struct f_fs_opts *opts;
	int i;
	int ret = 0;
	/* no or one "functions=" entry: run in single-device mode */
	if (func_num < 2) {
		gfs_single_func = true;
		func_num = 1;
	}
	/*
	 * Allocate in one chunk for easier maintenance
	 */
	f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs), GFP_KERNEL);
	if (!f_ffs[0]) {
		ret = -ENOMEM;
		goto no_func;
	}
	/* point each configuration's row into the single allocation */
	for (i = 1; i < N_CONF; ++i)
		f_ffs[i] = f_ffs[0] + i * func_num;
	fi_ffs = kcalloc(func_num, sizeof(*fi_ffs), GFP_KERNEL);
	if (!fi_ffs) {
		ret = -ENOMEM;
		goto no_func;
	}
	for (i = 0; i < func_num; i++) {
		fi_ffs[i] = usb_get_function_instance("ffs");
		if (IS_ERR(fi_ffs[i])) {
			ret = PTR_ERR(fi_ffs[i]);
			--i;	/* slot i holds no valid instance to put */
			goto no_dev;
		}
		opts = to_f_fs_opts(fi_ffs[i]);
		if (gfs_single_func)
			ret = ffs_single_dev(opts->dev);
		else
			ret = ffs_name_dev(opts->dev, func_names[i]);
		if (ret)
			goto no_dev;
		opts->dev->ffs_ready_callback = functionfs_ready_callback;
		opts->dev->ffs_closed_callback = functionfs_closed_callback;
		opts->dev->ffs_acquire_dev_callback = functionfs_acquire_dev;
		opts->dev->ffs_release_dev_callback = functionfs_release_dev;
		opts->no_configfs = true;
	}
	/* gadget registration waits until this many instances check in */
	missing_funcs = func_num;
	return 0;
no_dev:
	/* put instances [0..i] that were successfully acquired */
	while (i >= 0)
		usb_put_function_instance(fi_ffs[i--]);
	kfree(fi_ffs);
no_func:
	kfree(f_ffs[0]);
	return ret;
}
module_init(gfs_init);
/*
 * gfs_exit - module exit: unregister the gadget if it is currently
 * bound, then free the function matrix and put every ffs instance.
 */
static void __exit gfs_exit(void)
{
	int i;
	if (gfs_registered)
		usb_composite_unregister(&gfs_driver);
	gfs_registered = false;
	kfree(f_ffs[0]);
	for (i = 0; i < func_num; i++)
		usb_put_function_instance(fi_ffs[i]);
	kfree(fi_ffs);
}
/*
 * functionfs_acquire_dev - pin this module while a FunctionFS instance
 * is in use. Returns NULL on success (no private data is attached), or
 * an ERR_PTR when the module is already on its way out.
 */
static void *functionfs_acquire_dev(struct ffs_dev *dev)
{
	return try_module_get(THIS_MODULE) ? NULL : ERR_PTR(-ENOENT);
}
/* Counterpart of the acquire callback: drop the module reference. */
static void functionfs_release_dev(struct ffs_dev *dev)
{
	module_put(THIS_MODULE);
}
/*
 * The caller of this function takes ffs_lock
 *
 * Invoked each time one FunctionFS instance becomes ready. Once the
 * last missing instance checks in, register the composite gadget.
 */
static int functionfs_ready_callback(struct ffs_data *ffs)
{
	int ret = 0;
	if (--missing_funcs)
		return 0;
	if (gfs_registered)
		return -EBUSY;
	gfs_registered = true;
	ret = usb_composite_probe(&gfs_driver);
	if (unlikely(ret < 0)) {
		/* registration failed: this instance counts as missing again */
		++missing_funcs;
		gfs_registered = false;
	}
	return ret;
}
/*
 * The caller of this function takes ffs_lock
 *
 * Invoked when a FunctionFS instance goes away; any closed instance
 * makes the gadget incomplete, so unregister it.
 */
static void functionfs_closed_callback(struct ffs_data *ffs)
{
	missing_funcs++;
	if (gfs_registered)
		usb_composite_unregister(&gfs_driver);
	gfs_registered = false;
}
/*
 * It is assumed that gfs_bind is called from a context where ffs_lock is held
 *
 * Instantiates the optional Ethernet (ECM or gether) and RNDIS
 * functions, allocates string descriptor IDs and, when needed, the OTG
 * descriptor, then registers one configuration per compiled-in variant.
 * The ffs functions themselves are added per-config in gfs_do_config().
 */
static int gfs_bind(struct usb_composite_dev *cdev)
{
#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
	struct net_device *net;
#endif
	int ret, i;
	if (missing_funcs)
		return -ENODEV;
#if defined CONFIG_USB_FUNCTIONFS_ETH
	if (can_support_ecm(cdev->gadget)) {
		struct f_ecm_opts *ecm_opts;
		fi_ecm = usb_get_function_instance("ecm");
		if (IS_ERR(fi_ecm))
			return PTR_ERR(fi_ecm);
		ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
		net = ecm_opts->net;
	} else {
		struct f_gether_opts *geth_opts;
		fi_geth = usb_get_function_instance("geth");
		if (IS_ERR(fi_geth))
			return PTR_ERR(fi_geth);
		geth_opts = container_of(fi_geth, struct f_gether_opts,
					 func_inst);
		net = geth_opts->net;
	}
#endif
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	{
		fi_rndis = usb_get_function_instance("rndis");
		if (IS_ERR(fi_rndis)) {
			ret = PTR_ERR(fi_rndis);
			goto error;
		}
#ifndef CONFIG_USB_FUNCTIONFS_ETH
		net = container_of(fi_rndis, struct f_rndis_opts,
				   func_inst)->net;
#endif
	}
#endif
#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
	/* apply the USB_ETHERNET_MODULE_PARAMETERS() settings */
	gether_set_qmult(net, qmult);
	if (!gether_set_host_addr(net, host_addr))
		pr_info("using host ethernet address: %s", host_addr);
	if (!gether_set_dev_addr(net, dev_addr))
		pr_info("using self ethernet address: %s", dev_addr);
#endif
#if defined CONFIG_USB_FUNCTIONFS_RNDIS && defined CONFIG_USB_FUNCTIONFS_ETH
	/* one net_device is shared: register it here and lend it to RNDIS */
	gether_set_gadget(net, cdev->gadget);
	ret = gether_register_netdev(net);
	if (ret)
		goto error_rndis;
	if (can_support_ecm(cdev->gadget)) {
		struct f_ecm_opts *ecm_opts;
		ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
		ecm_opts->bound = true;
	} else {
		struct f_gether_opts *geth_opts;
		geth_opts = container_of(fi_geth, struct f_gether_opts,
					 func_inst);
		geth_opts->bound = true;
	}
	rndis_borrow_net(fi_rndis, net);
#endif
	/* TODO: gstrings_attach? */
	ret = usb_string_ids_tab(cdev, gfs_strings);
	if (unlikely(ret < 0))
		goto error_rndis;
	gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
	if (gadget_is_otg(cdev->gadget) && !gfs_otg_desc[0]) {
		struct usb_descriptor_header *usb_desc;
		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
		if (!usb_desc) {
			ret = -ENOMEM;
			goto error_rndis;
		}
		usb_otg_descriptor_init(cdev->gadget, usb_desc);
		gfs_otg_desc[0] = usb_desc;
		gfs_otg_desc[1] = NULL;
	}
	/* one configuration per variant, labels/strings from gfs_strings */
	for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
		struct gfs_configuration *c = gfs_configurations + i;
		int sid = USB_GADGET_FIRST_AVAIL_IDX + i;
		c->c.label = gfs_strings[sid].s;
		c->c.iConfiguration = gfs_strings[sid].id;
		c->c.bConfigurationValue = 1 + i;
		c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER;
		c->num = i;
		ret = usb_add_config(cdev, &c->c, gfs_do_config);
		if (unlikely(ret < 0))
			goto error_unbind;
	}
	usb_composite_overwrite_options(cdev, &coverwrite);
	return 0;
/* TODO */
error_unbind:
	kfree(gfs_otg_desc[0]);
	gfs_otg_desc[0] = NULL;
error_rndis:
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	usb_put_function_instance(fi_rndis);
error:
#endif
#if defined CONFIG_USB_FUNCTIONFS_ETH
	if (can_support_ecm(cdev->gadget))
		usb_put_function_instance(fi_ecm);
	else
		usb_put_function_instance(fi_geth);
#endif
	return ret;
}
/*
 * It is assumed that gfs_unbind is called from a context where ffs_lock is held
 *
 * Releases the Ethernet/RNDIS functions and instances, every
 * per-configuration ffs function reference, and the OTG descriptor.
 */
static int gfs_unbind(struct usb_composite_dev *cdev)
{
	int i;
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	usb_put_function(f_rndis);
	usb_put_function_instance(fi_rndis);
#endif
#if defined CONFIG_USB_FUNCTIONFS_ETH
	if (can_support_ecm(cdev->gadget)) {
		usb_put_function(f_ecm);
		usb_put_function_instance(fi_ecm);
	} else {
		usb_put_function(f_geth);
		usb_put_function_instance(fi_geth);
	}
#endif
	/* drop the whole N_CONF x func_num matrix of ffs functions */
	for (i = 0; i < N_CONF * func_num; ++i)
		usb_put_function(*(f_ffs[0] + i));
	kfree(gfs_otg_desc[0]);
	gfs_otg_desc[0] = NULL;
	return 0;
}
/*
 * It is assumed that gfs_do_config is called from a context where
 * ffs_lock is held
 *
 * Per-configuration bind: add the optional Ethernet/RNDIS function for
 * this variant, then one ffs function per requested instance.
 */
static int gfs_do_config(struct usb_configuration *c)
{
	struct gfs_configuration *gc =
		container_of(c, struct gfs_configuration, c);
	int i;
	int ret;
	if (missing_funcs)
		return -ENODEV;
	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = gfs_otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}
	if (gc->eth) {
		ret = gc->eth(c);
		if (unlikely(ret < 0))
			return ret;
	}
	for (i = 0; i < func_num; i++) {
		f_ffs[gc->num][i] = usb_get_function(fi_ffs[i]);
		if (IS_ERR(f_ffs[gc->num][i])) {
			ret = PTR_ERR(f_ffs[gc->num][i]);
			goto error;
		}
		ret = usb_add_function(c, f_ffs[gc->num][i]);
		if (ret < 0) {
			usb_put_function(f_ffs[gc->num][i]);
			goto error;
		}
	}
	/*
	 * After previous do_configs there may be some invalid
	 * pointers in c->interface array. This happens every time
	 * a user space function with fewer interfaces than a user
	 * space function that was run before the new one is run. The
	 * composite's set_config() assumes that if there are no more
	 * than MAX_CONFIG_INTERFACES interfaces in a configuration
	 * then there is a NULL pointer after the last interface in
	 * c->interface array. We need to make sure this is true.
	 */
	if (c->next_interface_id < ARRAY_SIZE(c->interface))
		c->interface[c->next_interface_id] = NULL;
	return 0;
error:
	/* unwind: remove and put the ffs functions added so far */
	while (--i >= 0) {
		if (!IS_ERR(f_ffs[gc->num][i]))
			usb_remove_function(c, f_ffs[gc->num][i]);
		usb_put_function(f_ffs[gc->num][i]);
	}
	return ret;
}
#ifdef CONFIG_USB_FUNCTIONFS_ETH
/*
 * eth_bind_config - add the Ethernet function (ECM when the UDC can
 * support it, otherwise plain gether) to the configuration. On a failed
 * usb_add_function() the freshly taken reference is dropped again.
 */
static int eth_bind_config(struct usb_configuration *c)
{
	struct usb_function *func;
	int ret;

	if (can_support_ecm(c->cdev->gadget)) {
		func = usb_get_function(fi_ecm);
		f_ecm = func;
	} else {
		func = usb_get_function(fi_geth);
		f_geth = func;
	}
	if (IS_ERR(func))
		return PTR_ERR(func);

	ret = usb_add_function(c, func);
	if (ret < 0)
		usb_put_function(func);
	return ret;
}
#endif
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
/*
 * bind_rndis_config - add the RNDIS function to the configuration,
 * dropping the reference again if usb_add_function() fails.
 */
static int bind_rndis_config(struct usb_configuration *c)
{
	int err;

	f_rndis = usb_get_function(fi_rndis);
	if (IS_ERR(f_rndis))
		return PTR_ERR(f_rndis);

	err = usb_add_function(c, f_rndis);
	if (!err)
		return 0;

	usb_put_function(f_rndis);
	return err;
}
#endif
| linux-master | drivers/usb/gadget/legacy/g_ffs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* zero.c -- Gadget Zero, for USB development
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
/*
* Gadget Zero only needs two bulk endpoints, and is an example of how you
* can write a hardware-agnostic gadget driver running inside a USB device.
* Some hardware details are visible, but don't affect most of the driver.
*
* Use it with the Linux host side "usbtest" driver to get a basic functional
* test of your device-side usb stack, or with "usb-skeleton".
*
* It supports two similar configurations. One sinks whatever the usb host
* writes, and in return sources zeroes. The other loops whatever the host
* writes back, so the host can read it.
*
* Many drivers will only have one configuration, letting them be much
* simpler if they also don't support high speed operation (like this
* driver does).
*
* Why is *this* driver using two configurations, rather than setting up
* two interfaces with different functions? To help verify that multiple
* configuration infrastructure is working correctly; also, so that it can
* work with low capability USB controllers without four bulk endpoints.
*/
/*
* driver assumes self-powered hardware, and
* has no way for users to trigger remote wakeup.
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>
#include "g_zero.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
#define DRIVER_VERSION "Cinco de Mayo 2008"
static const char longname[] = "Gadget Zero";
/*
* Normally the "loopback" configuration is second (index 1) so
* it's not the default. Here's where to change that order, to
* work better with hosts where config changes are problematic or
* controllers (like original superh) that only support one config.
*/
static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
static struct usb_zero_options gzero_options = {
.isoc_interval = GZERO_ISOC_INTERVAL,
.isoc_maxpacket = GZERO_ISOC_MAXPACKET,
.bulk_buflen = GZERO_BULK_BUFLEN,
.qlen = GZERO_QLEN,
.ss_bulk_qlen = GZERO_SS_BULK_QLEN,
.ss_iso_qlen = GZERO_SS_ISO_QLEN,
};
/*-------------------------------------------------------------------------*/
/* Thanks to NetChip Technologies for donating this product ID.
*
* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#ifndef CONFIG_USB_ZERO_HNPTEST
#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */
#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */
#define DEFAULT_AUTORESUME 0
#else
#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */
#define DRIVER_PRODUCT_NUM 0xbadd
#define DEFAULT_AUTORESUME 5
#endif
/* If the optional "autoresume" mode is enabled, it provides good
* functional coverage for the "USBCV" test harness from USB-IF.
* It's always set if OTG mode is enabled.
*/
static unsigned autoresume = DEFAULT_AUTORESUME;
module_param(autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
/* Maximum Autoresume time */
static unsigned max_autoresume;
module_param(max_autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup");
/* Interval between two remote wakeups */
static unsigned autoresume_interval_ms;
module_param(autoresume_interval_ms, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume_interval_ms,
"milliseconds to increase successive wakeup delays");
static unsigned autoresume_step_ms;
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_VENDOR_SPEC,
.idVendor = cpu_to_le16(DRIVER_VENDOR_NUM),
.idProduct = cpu_to_le16(DRIVER_PRODUCT_NUM),
.bNumConfigurations = 2,
};
static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
/* default serial number takes at least two packets */
static char serial[] = "0123456789.0123456789.0123456789";
#define USB_GZERO_SS_DESC (USB_GADGET_FIRST_AVAIL_IDX + 0)
#define USB_GZERO_LB_DESC (USB_GADGET_FIRST_AVAIL_IDX + 1)
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = longname,
[USB_GADGET_SERIAL_IDX].s = serial,
[USB_GZERO_SS_DESC].s = "source and sink data",
[USB_GZERO_LB_DESC].s = "loop input to output",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
/*-------------------------------------------------------------------------*/
static struct timer_list autoresume_timer;
static struct usb_composite_dev *autoresume_cdev;
/*
 * zero_autoresume - timer handler armed by zero_suspend(): issue a
 * remote wakeup to the host once the configured delay has elapsed.
 */
static void zero_autoresume(struct timer_list *unused)
{
	struct usb_composite_dev *cdev = autoresume_cdev;
	struct usb_gadget *g = cdev->gadget;
	/* unconfigured devices can't issue wakeups */
	if (!cdev->config)
		return;
	/* Normally the host would be woken up for something
	 * more significant than just a timer firing; likely
	 * because of some direct user request.
	 */
	if (g->speed != USB_SPEED_UNKNOWN) {
		int status = usb_gadget_wakeup(g);
		INFO(cdev, "%s --> %d\n", __func__, status);
	}
}
/*
 * zero_suspend - suspend callback: if autoresume testing is enabled, arm
 * the wakeup timer. Successive suspends use a growing delay (stepped by
 * autoresume_interval_ms), reset to the base delay once it would exceed
 * max_autoresume seconds.
 */
static void zero_suspend(struct usb_composite_dev *cdev)
{
	if (cdev->gadget->speed == USB_SPEED_UNKNOWN)
		return;
	if (autoresume) {
		/* clamp: restart from the base delay past the cap */
		if (max_autoresume &&
			(autoresume_step_ms > max_autoresume * 1000))
			autoresume_step_ms = autoresume * 1000;
		mod_timer(&autoresume_timer, jiffies +
			msecs_to_jiffies(autoresume_step_ms));
		DBG(cdev, "suspend, wakeup in %d milliseconds\n",
			autoresume_step_ms);
		/* the next suspend waits a little longer */
		autoresume_step_ms += autoresume_interval_ms;
	} else
		DBG(cdev, "%s\n", __func__);
}
/* zero_resume - resume callback: cancel any pending autoresume wakeup. */
static void zero_resume(struct usb_composite_dev *cdev)
{
	DBG(cdev, "%s\n", __func__);
	del_timer(&autoresume_timer);
}
/*-------------------------------------------------------------------------*/
static struct usb_configuration loopback_driver = {
.label = "loopback",
.bConfigurationValue = 2,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
/* .iConfiguration = DYNAMIC */
};
static struct usb_function *func_ss;
static struct usb_function_instance *func_inst_ss;
/*
 * ss_config_setup - configuration-level control request hook: forward
 * the two vendor-specific requests 0x5b and 0x5c to the source/sink
 * function's setup handler, reject everything else.
 */
static int ss_config_setup(struct usb_configuration *c,
		const struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequest == 0x5b || ctrl->bRequest == 0x5c)
		return func_ss->setup(func_ss, ctrl);

	return -EOPNOTSUPP;
}
static struct usb_configuration sourcesink_driver = {
.label = "source/sink",
.setup = ss_config_setup,
.bConfigurationValue = 3,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
/* .iConfiguration = DYNAMIC */
};
module_param_named(buflen, gzero_options.bulk_buflen, uint, 0);
module_param_named(pattern, gzero_options.pattern, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(pattern, "0 = all zeroes, 1 = mod63, 2 = none");
module_param_named(isoc_interval, gzero_options.isoc_interval, uint,
S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(isoc_interval, "1 - 16");
module_param_named(isoc_maxpacket, gzero_options.isoc_maxpacket, uint,
S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(isoc_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
module_param_named(isoc_mult, gzero_options.isoc_mult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(isoc_mult, "0 - 2 (hs/ss only)");
module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
static struct usb_function *func_lb;
static struct usb_function_instance *func_inst_lb;
/* depth of the loopback request queue */
module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qlen, "depth of loopback queue");
module_param_named(ss_bulk_qlen, gzero_options.ss_bulk_qlen, uint,
		S_IRUGO|S_IWUSR);
/*
 * The MODULE_PARM_DESC name must match the parameter name exposed by
 * module_param_named() above ("ss_bulk_qlen"/"ss_iso_qlen"); previously
 * these said "bulk_qlen"/"iso_qlen", so modinfo attached the
 * descriptions to parameters that do not exist.
 */
MODULE_PARM_DESC(ss_bulk_qlen, "depth of sourcesink queue for bulk transfer");
module_param_named(ss_iso_qlen, gzero_options.ss_iso_qlen, uint,
		S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ss_iso_qlen, "depth of sourcesink queue for iso transfer");
/*
 * zero_bind - composite bind callback: allocate string IDs, create the
 * SourceSink and Loopback functions from the module parameters, set up
 * optional autoresume and OTG support, and register both configurations
 * (order controlled by the "loopdefault" parameter).
 */
static int zero_bind(struct usb_composite_dev *cdev)
{
	struct f_ss_opts *ss_opts;
	struct f_lb_opts *lb_opts;
	int status;
	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */
	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		return status;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
	device_desc.iSerialNumber = strings_dev[USB_GADGET_SERIAL_IDX].id;
	autoresume_cdev = cdev;
	timer_setup(&autoresume_timer, zero_autoresume, 0);
	/* hand the module parameters over to the SourceSink function */
	func_inst_ss = usb_get_function_instance("SourceSink");
	if (IS_ERR(func_inst_ss))
		return PTR_ERR(func_inst_ss);
	ss_opts = container_of(func_inst_ss, struct f_ss_opts, func_inst);
	ss_opts->pattern = gzero_options.pattern;
	ss_opts->isoc_interval = gzero_options.isoc_interval;
	ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
	ss_opts->isoc_mult = gzero_options.isoc_mult;
	ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
	ss_opts->bulk_buflen = gzero_options.bulk_buflen;
	ss_opts->bulk_qlen = gzero_options.ss_bulk_qlen;
	ss_opts->iso_qlen = gzero_options.ss_iso_qlen;
	func_ss = usb_get_function(func_inst_ss);
	if (IS_ERR(func_ss)) {
		status = PTR_ERR(func_ss);
		goto err_put_func_inst_ss;
	}
	/* same for the Loopback function */
	func_inst_lb = usb_get_function_instance("Loopback");
	if (IS_ERR(func_inst_lb)) {
		status = PTR_ERR(func_inst_lb);
		goto err_put_func_ss;
	}
	lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
	lb_opts->bulk_buflen = gzero_options.bulk_buflen;
	lb_opts->qlen = gzero_options.qlen;
	func_lb = usb_get_function(func_inst_lb);
	if (IS_ERR(func_lb)) {
		status = PTR_ERR(func_lb);
		goto err_put_func_inst_lb;
	}
	sourcesink_driver.iConfiguration = strings_dev[USB_GZERO_SS_DESC].id;
	loopback_driver.iConfiguration = strings_dev[USB_GZERO_LB_DESC].id;
	/* support autoresume for remote wakeup testing */
	sourcesink_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
	loopback_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
	sourcesink_driver.descriptors = NULL;
	loopback_driver.descriptors = NULL;
	if (autoresume) {
		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
		loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
		autoresume_step_ms = autoresume * 1000;
	}
	/* support OTG systems */
	if (gadget_is_otg(cdev->gadget)) {
		if (!otg_desc[0]) {
			struct usb_descriptor_header *usb_desc;
			usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
			if (!usb_desc) {
				status = -ENOMEM;
				goto err_conf_flb;
			}
			usb_otg_descriptor_init(cdev->gadget, usb_desc);
			otg_desc[0] = usb_desc;
			otg_desc[1] = NULL;
		}
		sourcesink_driver.descriptors = otg_desc;
		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
		loopback_driver.descriptors = otg_desc;
		loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}
	/* Register primary, then secondary configuration. Note that
	 * SH3 only allows one config...
	 */
	if (loopdefault) {
		usb_add_config_only(cdev, &loopback_driver);
		usb_add_config_only(cdev, &sourcesink_driver);
	} else {
		usb_add_config_only(cdev, &sourcesink_driver);
		usb_add_config_only(cdev, &loopback_driver);
	}
	status = usb_add_function(&sourcesink_driver, func_ss);
	if (status)
		goto err_free_otg_desc;
	usb_ep_autoconfig_reset(cdev->gadget);
	status = usb_add_function(&loopback_driver, func_lb);
	if (status)
		goto err_free_otg_desc;
	usb_ep_autoconfig_reset(cdev->gadget);
	usb_composite_overwrite_options(cdev, &coverwrite);
	INFO(cdev, "%s, version: " DRIVER_VERSION "\n", longname);
	return 0;
err_free_otg_desc:
	/* unwind in reverse acquisition order; NULL the globals so that
	 * zero_unbind() does not put them a second time */
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
err_conf_flb:
	usb_put_function(func_lb);
	func_lb = NULL;
err_put_func_inst_lb:
	usb_put_function_instance(func_inst_lb);
	func_inst_lb = NULL;
err_put_func_ss:
	usb_put_function(func_ss);
	func_ss = NULL;
err_put_func_inst_ss:
	usb_put_function_instance(func_inst_ss);
	func_inst_ss = NULL;
	return status;
}
/*
 * Undo zero_bind(): stop the autoresume timer, drop the function and
 * function-instance references, and free the OTG descriptor.
 *
 * Teardown order matters: the timer is stopped first so its callback
 * cannot fire while the functions are being released, and each
 * usb_put_function() precedes the matching usb_put_function_instance().
 */
static int zero_unbind(struct usb_composite_dev *cdev)
{
	/* Ensure the autoresume callback is not running and cannot re-arm. */
	del_timer_sync(&autoresume_timer);
	/* The function pointers may hold an ERR_PTR/NULL if they were
	 * never successfully obtained; only put real references. */
	if (!IS_ERR_OR_NULL(func_ss))
		usb_put_function(func_ss);
	usb_put_function_instance(func_inst_ss);
	if (!IS_ERR_OR_NULL(func_lb))
		usb_put_function(func_lb);
	usb_put_function_instance(func_inst_lb);
	/* Free the OTG descriptor allocated in zero_bind(); kfree(NULL)
	 * is a no-op when none was allocated. */
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
	return 0;
}
/*
 * Composite driver glue for the "zero" test gadget: two configurations
 * (source/sink and loopback) are registered in zero_bind(), and the
 * suspend/resume hooks drive the autoresume (remote wakeup) feature.
 */
static struct usb_composite_driver zero_driver = {
	.name		= "zero",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_SUPER,	/* advertise up to SuperSpeed */
	.bind		= zero_bind,
	.unbind		= zero_unbind,
	.suspend	= zero_suspend,
	.resume		= zero_resume,
};
module_usb_composite_driver(zero_driver);

MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/zero.c |
// SPDX-License-Identifier: GPL-2.0
/*
* nokia.c -- Nokia Composite Gadget Driver
*
* Copyright (C) 2008-2010 Nokia Corporation
* Contact: Felipe Balbi <[email protected]>
*
* This gadget driver borrows from serial.c which is:
*
* Copyright (C) 2003 Al Borchers ([email protected])
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include "u_serial.h"
#include "u_ether.h"
#include "u_phonet.h"
#include "u_ecm.h"
#include "f_mass_storage.h"
/* Defines */
#define NOKIA_VERSION_NUM 0x0211
#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
static struct fsg_module_parameters fsg_mod_data = {
.stall = 0,
.luns = 2,
.removable_count = 2,
.removable = { 1, 1, },
};
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
#else
/*
* Number of buffers we will use.
* 2 is usually enough for good buffering pipeline
*/
#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
#endif /* CONFIG_USB_DEBUG */
FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
/* string IDs are assigned dynamically */
#define STRING_DESCRIPTION_IDX USB_GADGET_FIRST_AVAIL_IDX
static char manufacturer_nokia[] = "Nokia";
static const char description_nokia[] = "PC-Suite Configuration";
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = manufacturer_nokia,
[USB_GADGET_PRODUCT_IDX].s = NOKIA_LONG_NAME,
[USB_GADGET_SERIAL_IDX].s = "",
[STRING_DESCRIPTION_IDX].s = description_nokia,
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_COMM,
.idVendor = cpu_to_le16(NOKIA_VENDOR_ID),
.idProduct = cpu_to_le16(NOKIA_PRODUCT_ID),
.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM),
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
.bNumConfigurations = 1,
};
/*-------------------------------------------------------------------------*/
/* Module */
MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
MODULE_AUTHOR("Felipe Balbi");
MODULE_LICENSE("GPL");
/*-------------------------------------------------------------------------*/
static struct usb_function *f_acm_cfg1;
static struct usb_function *f_acm_cfg2;
static struct usb_function *f_ecm_cfg1;
static struct usb_function *f_ecm_cfg2;
static struct usb_function *f_obex1_cfg1;
static struct usb_function *f_obex2_cfg1;
static struct usb_function *f_obex1_cfg2;
static struct usb_function *f_obex2_cfg2;
static struct usb_function *f_phonet_cfg1;
static struct usb_function *f_phonet_cfg2;
static struct usb_function *f_msg_cfg1;
static struct usb_function *f_msg_cfg2;
static struct usb_configuration nokia_config_500ma_driver = {
.label = "Bus Powered",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE,
.MaxPower = 500,
};
static struct usb_configuration nokia_config_100ma_driver = {
.label = "Self Powered",
.bConfigurationValue = 2,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
.MaxPower = 100,
};
static struct usb_function_instance *fi_acm;
static struct usb_function_instance *fi_ecm;
static struct usb_function_instance *fi_obex1;
static struct usb_function_instance *fi_obex2;
static struct usb_function_instance *fi_phonet;
static struct usb_function_instance *fi_msg;
/*
 * nokia_bind_config() - populate one configuration with the gadget's
 * functions: phonet, two OBEX ports, ACM, ECM and mass storage.
 *
 * Phonet and the two OBEX functions are optional: failure to get or
 * add one of them is only logged via pr_debug() and binding continues
 * with the remaining functions.  ACM, ECM and mass storage are
 * mandatory; any failure there unwinds everything added so far and
 * the error is returned.
 *
 * On success, the obtained struct usb_function pointers are stashed
 * in the per-configuration globals (f_*_cfg1 or f_*_cfg2) so that
 * nokia_bind()'s error path and nokia_unbind() can drop the
 * references later.
 */
static int nokia_bind_config(struct usb_configuration *c)
{
	struct usb_function *f_acm;
	struct usb_function *f_phonet = NULL;
	struct usb_function *f_obex1 = NULL;
	struct usb_function *f_ecm;
	struct usb_function *f_obex2 = NULL;
	struct usb_function *f_msg;
	int status = 0;
	/* -1 means "never added"; the error path only removes functions
	 * whose add actually succeeded (stat == 0). */
	int obex1_stat = -1;
	int obex2_stat = -1;
	int phonet_stat = -1;

	/* Optional functions: the fi_* instance may itself be an ERR_PTR
	 * if nokia_bind() could not find the function driver. */
	if (!IS_ERR(fi_phonet)) {
		f_phonet = usb_get_function(fi_phonet);
		if (IS_ERR(f_phonet))
			pr_debug("could not get phonet function\n");
	}

	if (!IS_ERR(fi_obex1)) {
		f_obex1 = usb_get_function(fi_obex1);
		if (IS_ERR(f_obex1))
			pr_debug("could not get obex function 0\n");
	}

	if (!IS_ERR(fi_obex2)) {
		f_obex2 = usb_get_function(fi_obex2);
		if (IS_ERR(f_obex2))
			pr_debug("could not get obex function 1\n");
	}

	/* Mandatory functions: a failure here aborts the configuration. */
	f_acm = usb_get_function(fi_acm);
	if (IS_ERR(f_acm)) {
		status = PTR_ERR(f_acm);
		goto err_get_acm;
	}

	f_ecm = usb_get_function(fi_ecm);
	if (IS_ERR(f_ecm)) {
		status = PTR_ERR(f_ecm);
		goto err_get_ecm;
	}

	f_msg = usb_get_function(fi_msg);
	if (IS_ERR(f_msg)) {
		status = PTR_ERR(f_msg);
		goto err_get_msg;
	}

	/* Add the optional functions first; failures are non-fatal. */
	if (!IS_ERR_OR_NULL(f_phonet)) {
		phonet_stat = usb_add_function(c, f_phonet);
		if (phonet_stat)
			pr_debug("could not add phonet function\n");
	}

	if (!IS_ERR_OR_NULL(f_obex1)) {
		obex1_stat = usb_add_function(c, f_obex1);
		if (obex1_stat)
			pr_debug("could not add obex function 0\n");
	}

	if (!IS_ERR_OR_NULL(f_obex2)) {
		obex2_stat = usb_add_function(c, f_obex2);
		if (obex2_stat)
			pr_debug("could not add obex function 1\n");
	}

	status = usb_add_function(c, f_acm);
	if (status)
		goto err_conf;

	status = usb_add_function(c, f_ecm);
	if (status) {
		pr_debug("could not bind ecm config %d\n", status);
		goto err_ecm;
	}

	status = usb_add_function(c, f_msg);
	if (status)
		goto err_msg;

	/* Record the functions under the slot for this configuration so
	 * they can be released at unbind time. */
	if (c == &nokia_config_500ma_driver) {
		f_acm_cfg1 = f_acm;
		f_ecm_cfg1 = f_ecm;
		f_phonet_cfg1 = f_phonet;
		f_obex1_cfg1 = f_obex1;
		f_obex2_cfg1 = f_obex2;
		f_msg_cfg1 = f_msg;
	} else {
		f_acm_cfg2 = f_acm;
		f_ecm_cfg2 = f_ecm;
		f_phonet_cfg2 = f_phonet;
		f_obex1_cfg2 = f_obex1;
		f_obex2_cfg2 = f_obex2;
		f_msg_cfg2 = f_msg;
	}

	return status;

	/* Unwind in reverse order of the adds/gets above. */
err_msg:
	usb_remove_function(c, f_ecm);
err_ecm:
	usb_remove_function(c, f_acm);
err_conf:
	if (!obex2_stat)
		usb_remove_function(c, f_obex2);
	if (!obex1_stat)
		usb_remove_function(c, f_obex1);
	if (!phonet_stat)
		usb_remove_function(c, f_phonet);
	usb_put_function(f_msg);
err_get_msg:
	usb_put_function(f_ecm);
err_get_ecm:
	usb_put_function(f_acm);
err_get_acm:
	if (!IS_ERR_OR_NULL(f_obex2))
		usb_put_function(f_obex2);
	if (!IS_ERR_OR_NULL(f_obex1))
		usb_put_function(f_obex1);
	if (!IS_ERR_OR_NULL(f_phonet))
		usb_put_function(f_phonet);
	return status;
}
/*
 * nokia_bind() - gadget bind callback: allocate string IDs, obtain the
 * function instances (phonet/obex are optional, acm/ecm/mass-storage
 * mandatory), configure the mass-storage common state, and register
 * both configurations via nokia_bind_config().
 *
 * Fix: the err_put_cfg1 unwind path (taken when registering the second
 * configuration fails) dropped the ACM/OBEX/phonet/ECM function
 * references taken for configuration 1 but leaked the mass-storage
 * function reference stored in f_msg_cfg1 by the first successful
 * nokia_bind_config() call.  Release it there as well.
 */
static int nokia_bind(struct usb_composite_dev *cdev)
{
	struct usb_gadget	*gadget = cdev->gadget;
	struct fsg_opts		*fsg_opts;
	struct fsg_config	fsg_config;
	int			status;

	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto err_usb;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
	/* Both configurations share the same iConfiguration string. */
	status = strings_dev[STRING_DESCRIPTION_IDX].id;
	nokia_config_500ma_driver.iConfiguration = status;
	nokia_config_100ma_driver.iConfiguration = status;

	if (!gadget_is_altset_supported(gadget)) {
		status = -ENODEV;
		goto err_usb;
	}

	/* Optional instances: a missing driver is only logged; the
	 * ERR_PTR left in fi_* is checked again in nokia_bind_config(). */
	fi_phonet = usb_get_function_instance("phonet");
	if (IS_ERR(fi_phonet))
		pr_debug("could not find phonet function\n");

	fi_obex1 = usb_get_function_instance("obex");
	if (IS_ERR(fi_obex1))
		pr_debug("could not find obex function 1\n");

	fi_obex2 = usb_get_function_instance("obex");
	if (IS_ERR(fi_obex2))
		pr_debug("could not find obex function 2\n");

	/* Mandatory instances. */
	fi_acm = usb_get_function_instance("acm");
	if (IS_ERR(fi_acm)) {
		status = PTR_ERR(fi_acm);
		goto err_obex2_inst;
	}

	fi_ecm = usb_get_function_instance("ecm");
	if (IS_ERR(fi_ecm)) {
		status = PTR_ERR(fi_ecm);
		goto err_acm_inst;
	}

	fi_msg = usb_get_function_instance("mass_storage");
	if (IS_ERR(fi_msg)) {
		status = PTR_ERR(fi_msg);
		goto err_ecm_inst;
	}

	/* set up mass storage function */
	fsg_config_from_params(&fsg_config, &fsg_mod_data, fsg_num_buffers);
	fsg_config.vendor_name = "Nokia";
	fsg_config.product_name = "N900";

	fsg_opts = fsg_opts_from_func_inst(fi_msg);
	fsg_opts->no_configfs = true;

	status = fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
	if (status)
		goto err_msg_inst;

	status = fsg_common_set_cdev(fsg_opts->common, cdev, fsg_config.can_stall);
	if (status)
		goto err_msg_buf;

	fsg_common_set_sysfs(fsg_opts->common, true);

	status = fsg_common_create_luns(fsg_opts->common, &fsg_config);
	if (status)
		goto err_msg_buf;

	fsg_common_set_inquiry_string(fsg_opts->common, fsg_config.vendor_name,
				      fsg_config.product_name);

	/* finally register the configuration */
	status = usb_add_config(cdev, &nokia_config_500ma_driver,
			nokia_bind_config);
	if (status < 0)
		goto err_msg_luns;

	status = usb_add_config(cdev, &nokia_config_100ma_driver,
			nokia_bind_config);
	if (status < 0)
		goto err_put_cfg1;

	usb_composite_overwrite_options(cdev, &coverwrite);
	dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);

	return 0;

err_put_cfg1:
	/* Drop the references stored by the successful first
	 * nokia_bind_config() call for configuration 1. */
	usb_put_function(f_acm_cfg1);
	if (!IS_ERR_OR_NULL(f_obex1_cfg1))
		usb_put_function(f_obex1_cfg1);
	if (!IS_ERR_OR_NULL(f_obex2_cfg1))
		usb_put_function(f_obex2_cfg1);
	if (!IS_ERR_OR_NULL(f_phonet_cfg1))
		usb_put_function(f_phonet_cfg1);
	usb_put_function(f_ecm_cfg1);
	/* Previously missing: the mass-storage function reference taken
	 * for configuration 1 was leaked on this path. */
	usb_put_function(f_msg_cfg1);
err_msg_luns:
	fsg_common_remove_luns(fsg_opts->common);
err_msg_buf:
	fsg_common_free_buffers(fsg_opts->common);
err_msg_inst:
	usb_put_function_instance(fi_msg);
err_ecm_inst:
	usb_put_function_instance(fi_ecm);
err_acm_inst:
	usb_put_function_instance(fi_acm);
err_obex2_inst:
	if (!IS_ERR(fi_obex2))
		usb_put_function_instance(fi_obex2);
	if (!IS_ERR(fi_obex1))
		usb_put_function_instance(fi_obex1);
	if (!IS_ERR(fi_phonet))
		usb_put_function_instance(fi_phonet);
err_usb:
	return status;
}
/*
 * nokia_unbind() - release every function and function-instance
 * reference taken by nokia_bind()/nokia_bind_config() for both
 * configurations.  Optional functions (obex/phonet) are guarded with
 * IS_ERR_OR_NULL since they may never have been obtained; the
 * mandatory ones (acm/ecm/msg) are put unconditionally.  Functions are
 * put before their instances, mirroring the acquisition order.
 */
static int nokia_unbind(struct usb_composite_dev *cdev)
{
	if (!IS_ERR_OR_NULL(f_obex1_cfg2))
		usb_put_function(f_obex1_cfg2);
	if (!IS_ERR_OR_NULL(f_obex2_cfg2))
		usb_put_function(f_obex2_cfg2);
	if (!IS_ERR_OR_NULL(f_obex1_cfg1))
		usb_put_function(f_obex1_cfg1);
	if (!IS_ERR_OR_NULL(f_obex2_cfg1))
		usb_put_function(f_obex2_cfg1);
	if (!IS_ERR_OR_NULL(f_phonet_cfg1))
		usb_put_function(f_phonet_cfg1);
	if (!IS_ERR_OR_NULL(f_phonet_cfg2))
		usb_put_function(f_phonet_cfg2);
	usb_put_function(f_acm_cfg1);
	usb_put_function(f_acm_cfg2);
	usb_put_function(f_ecm_cfg1);
	usb_put_function(f_ecm_cfg2);
	usb_put_function(f_msg_cfg1);
	usb_put_function(f_msg_cfg2);

	/* Now drop the function-instance references. */
	usb_put_function_instance(fi_msg);
	usb_put_function_instance(fi_ecm);
	if (!IS_ERR(fi_obex2))
		usb_put_function_instance(fi_obex2);
	if (!IS_ERR(fi_obex1))
		usb_put_function_instance(fi_obex1);
	if (!IS_ERR(fi_phonet))
		usb_put_function_instance(fi_phonet);
	usb_put_function_instance(fi_acm);

	return 0;
}
/*
 * Composite driver glue: two configurations (500 mA bus-powered and
 * 100 mA self-powered) are registered from nokia_bind().  The gadget
 * advertises at most high speed.
 */
static struct usb_composite_driver nokia_driver = {
	.name		= "g_nokia",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_HIGH,
	.bind		= nokia_bind,
	.unbind		= nokia_unbind,
};
module_usb_composite_driver(nokia_driver);
| linux-master | drivers/usb/gadget/legacy/nokia.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* mass_storage.c -- Mass Storage USB Gadget
*
* Copyright (C) 2003-2008 Alan Stern
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz <[email protected]>
* All rights reserved.
*/
/*
* The Mass Storage Gadget acts as a USB Mass Storage device,
* appearing to the host as a disk drive or as a CD-ROM drive. In
* addition to providing an example of a genuinely useful gadget
* driver for a USB device, it also illustrates a technique of
* double-buffering for increased throughput. Last but not least, it
* gives an easy way to probe the behavior of the Mass Storage drivers
* in a USB host.
*
* Since this file serves only administrative purposes and all the
* business logic is implemented in f_mass_storage.* file. Read
* comments in this file for more detailed description.
*/
#include <linux/kernel.h>
#include <linux/usb/ch9.h>
#include <linux/module.h>
/*-------------------------------------------------------------------------*/
#define DRIVER_DESC "Mass Storage Gadget"
#define DRIVER_VERSION "2009/09/11"
/*
* Thanks to NetChip Technologies for donating this product ID.
*
* DO NOT REUSE THESE IDs with any other driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#define FSG_VENDOR_ID 0x0525 /* NetChip */
#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
#include "f_mass_storage.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
static struct usb_device_descriptor msg_device_desc = {
.bLength = sizeof msg_device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
/* Vendor and product id can be overridden by module parameters. */
.idVendor = cpu_to_le16(FSG_VENDOR_ID),
.idProduct = cpu_to_le16(FSG_PRODUCT_ID),
.bNumConfigurations = 1,
};
static const struct usb_descriptor_header *otg_desc[2];
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_function_instance *fi_msg;
static struct usb_function *f_msg;
/****************************** Configurations ******************************/
static struct fsg_module_parameters mod_data = {
.stall = 1
};
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
#else
/*
* Number of buffers we will use.
* 2 is usually enough for good buffering pipeline
*/
#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
/*
 * msg_do_config() - bind the mass-storage function into the single
 * configuration.  On OTG-capable hardware the OTG descriptor is
 * attached and remote wakeup advertised.  The obtained function is
 * kept in the file-scope f_msg so msg_unbind() can release it.
 */
static int msg_do_config(struct usb_configuration *c)
{
	int status;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	/* Take a reference on the mass-storage function and add it to
	 * this configuration; drop the reference if the add fails. */
	f_msg = usb_get_function(fi_msg);
	if (IS_ERR(f_msg))
		return PTR_ERR(f_msg);

	status = usb_add_function(c, f_msg);
	if (status) {
		usb_put_function(f_msg);
		return status;
	}

	return 0;
}
static struct usb_configuration msg_config_driver = {
.label = "Linux File-Backed Storage",
.bConfigurationValue = 1,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/****************************** Gadget Bind ******************************/
/*
 * msg_bind() - gadget bind callback: obtain the mass_storage function
 * instance, configure its shared fsg_common state from the module
 * parameters (buffers, cdev, sysfs, LUNs, inquiry string), allocate
 * string IDs and, if needed, the OTG descriptor, then register the
 * single configuration.
 *
 * The goto ladder unwinds in strict reverse order of setup; note that
 * a fsg_common_create_luns() failure jumps to fail_set_cdev (no LUNs
 * to remove), while later failures go through fail_string_ids which
 * does remove them.
 */
static int msg_bind(struct usb_composite_dev *cdev)
{
	struct fsg_opts *opts;
	struct fsg_config config;
	int status;

	fi_msg = usb_get_function_instance("mass_storage");
	if (IS_ERR(fi_msg))
		return PTR_ERR(fi_msg);

	fsg_config_from_params(&config, &mod_data, fsg_num_buffers);
	opts = fsg_opts_from_func_inst(fi_msg);

	/* This legacy gadget owns the function; keep configfs out. */
	opts->no_configfs = true;
	status = fsg_common_set_num_buffers(opts->common, fsg_num_buffers);
	if (status)
		goto fail;

	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
	if (status)
		goto fail_set_cdev;

	fsg_common_set_sysfs(opts->common, true);
	status = fsg_common_create_luns(opts->common, &config);
	if (status)
		goto fail_set_cdev;

	fsg_common_set_inquiry_string(opts->common, config.vendor_name,
				      config.product_name);

	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto fail_string_ids;
	msg_device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

	if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
		struct usb_descriptor_header *usb_desc;

		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
		if (!usb_desc) {
			status = -ENOMEM;
			goto fail_string_ids;
		}
		usb_otg_descriptor_init(cdev->gadget, usb_desc);
		otg_desc[0] = usb_desc;
		otg_desc[1] = NULL;
	}

	status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
	if (status < 0)
		goto fail_otg_desc;

	usb_composite_overwrite_options(cdev, &coverwrite);
	dev_info(&cdev->gadget->dev,
		 DRIVER_DESC ", version: " DRIVER_VERSION "\n");
	return 0;

fail_otg_desc:
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
fail_string_ids:
	fsg_common_remove_luns(opts->common);
fail_set_cdev:
	fsg_common_free_buffers(opts->common);
fail:
	usb_put_function_instance(fi_msg);
	return status;
}
/*
 * msg_unbind() - release the function reference taken in
 * msg_do_config() and the instance reference taken in msg_bind(),
 * skipping either if it holds an ERR_PTR, then free the OTG
 * descriptor (kfree(NULL) is a no-op).
 */
static int msg_unbind(struct usb_composite_dev *cdev)
{
	if (!IS_ERR(f_msg))
		usb_put_function(f_msg);

	if (!IS_ERR(fi_msg))
		usb_put_function_instance(fi_msg);

	kfree(otg_desc[0]);
	otg_desc[0] = NULL;

	return 0;
}
/****************************** Some noise ******************************/
/*
 * Composite driver glue for the mass-storage gadget; a single
 * configuration is registered from msg_bind().
 * NOTE(review): needs_serial presumably makes the composite core
 * insist on / warn about a missing serial number, which hosts use to
 * identify storage devices -- confirm against composite.c.
 */
static struct usb_composite_driver msg_driver = {
	.name		= "g_mass_storage",
	.dev		= &msg_device_desc,
	.max_speed	= USB_SPEED_SUPER_PLUS,
	.needs_serial	= 1,
	.strings	= dev_strings,
	.bind		= msg_bind,
	.unbind		= msg_unbind,
};
module_usb_composite_driver(msg_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/mass_storage.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ether.c -- Ethernet gadget driver, with CDC and non-CDC options
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
#endif
#ifdef CONFIG_USB_ETH_RNDIS
# define USB_ETH_RNDIS y
#endif
#include "u_ether.h"
/*
* Ethernet gadget driver -- with CDC and non-CDC options
* Builds on hardware support for a full duplex link.
*
* CDC Ethernet is the standard USB solution for sending Ethernet frames
* using USB. Real hardware tends to use the same framing protocol but look
* different for control features. This driver strongly prefers to use
* this USB-IF standard as its open-systems interoperability solution;
* most host side USB stacks (except from Microsoft) support it.
*
* This is sometimes called "CDC ECM" (Ethernet Control Model) to support
* TLA-soup. "CDC ACM" (Abstract Control Model) is for modems, and a new
* "CDC EEM" (Ethernet Emulation Model) is starting to spread.
*
* There's some hardware that can't talk CDC ECM. We make that hardware
* implement a "minimalist" vendor-agnostic CDC core: same framing, but
* link-level setup only requires activating the configuration. Only the
* endpoint descriptors, and product/vendor IDs, are relevant; no control
* operations are available. Linux supports it, but other host operating
* systems may not. (This is a subset of CDC Ethernet.)
*
* It turns out that if you add a few descriptors to that "CDC Subset",
* (Windows) host side drivers from MCCI can treat it as one submode of
* a proprietary scheme called "SAFE" ... without needing to know about
* specific product/vendor IDs. So we do that, making it easier to use
* those MS-Windows drivers. Those added descriptors make it resemble a
* CDC MDLM device, but they don't change device behavior at all. (See
* MCCI Engineering report 950198 "SAFE Networking Functions".)
*
* A third option is also in use. Rather than CDC Ethernet, or something
* simpler, Microsoft pushes their own approach: RNDIS. The published
* RNDIS specs are ambiguous and appear to be incomplete, and are also
* needlessly complex. They borrow more from CDC ACM than CDC ECM.
*/
#define DRIVER_DESC "Ethernet Gadget"
#define DRIVER_VERSION "Memorial Day 2008"
#ifdef USB_ETH_RNDIS
#define PREFIX "RNDIS/"
#else
#define PREFIX ""
#endif
/*
* This driver aims for interoperability by using CDC ECM unless
*
* can_support_ecm()
*
* returns false, in which case it supports the CDC Subset. By default,
* that returns true; most hardware has no problems with CDC ECM, that's
* a good default. Previous versions of this driver had no default; this
* version changes that, removing overhead for new controller support.
*
* IF YOUR HARDWARE CAN'T SUPPORT CDC ECM, UPDATE THAT ROUTINE!
*/
/* Report whether the RNDIS configuration was compiled in
 * (USB_ETH_RNDIS is defined only when CONFIG_USB_ETH_RNDIS is set). */
static inline bool has_rndis(void)
{
#ifdef USB_ETH_RNDIS
	const bool rndis_built_in = true;
#else
	const bool rndis_built_in = false;
#endif
	return rndis_built_in;
}
#include <linux/module.h>
#include "u_ecm.h"
#include "u_gether.h"
#ifdef USB_ETH_RNDIS
#include "u_rndis.h"
#include "rndis.h"
#else
#define rndis_borrow_net(...) do {} while (0)
#endif
#include "u_eem.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
* It's for devices with only CDC Ethernet configurations.
*/
#define CDC_VENDOR_NUM 0x0525 /* NetChip */
#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */
/* For hardware that can't talk CDC, we use the same vendor ID that
* ARM Linux has used for ethernet-over-usb, both with sa1100 and
* with pxa250. We're protocol-compatible, if the host-side drivers
* use the endpoint descriptors. bcdDevice (version) is nonzero, so
* drivers that need to hard-wire endpoint numbers have a hook.
*
* The protocol is a minimal subset of CDC Ether, which works on any bulk
* hardware that's not deeply broken ... even on hardware that can't talk
* RNDIS (like SA-1100, with no interrupt endpoint, or anything that
* doesn't handle control-OUT).
*/
#define SIMPLE_VENDOR_NUM 0x049f
#define SIMPLE_PRODUCT_NUM 0x505a
/* For hardware that can talk RNDIS and either of the above protocols,
* use this ID ... the windows INF files will know it. Unless it's
* used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
* the non-RNDIS configuration.
*/
#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */
#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
/* For EEM gadgets */
#define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */
#define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_COMM,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id defaults change according to what configs
* we support. (As does bNumConfigurations.) These values can
* also be overridden by module parameters.
*/
.idVendor = cpu_to_le16 (CDC_VENDOR_NUM),
.idProduct = cpu_to_le16 (CDC_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
/* NO SERIAL NUMBER */
.bNumConfigurations = 1,
};
static const struct usb_descriptor_header *otg_desc[2];
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = PREFIX DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_function_instance *fi_ecm;
static struct usb_function *f_ecm;
static struct usb_function_instance *fi_eem;
static struct usb_function *f_eem;
static struct usb_function_instance *fi_geth;
static struct usb_function *f_geth;
static struct usb_function_instance *fi_rndis;
static struct usb_function *f_rndis;
/*-------------------------------------------------------------------------*/
/*
* We may not have an RNDIS configuration, but if we do it needs to be
* the first one present. That's to make Microsoft's drivers happy,
* and to follow DOCSIS 1.0 (cable modem standard).
*/
/*
 * rndis_do_config() - bind the RNDIS function into the RNDIS
 * configuration.  The obtained function is cached in the file-scope
 * f_rndis so eth_unbind() can release it.
 */
static int rndis_do_config(struct usb_configuration *c)
{
	int ret;

	/* FIXME alloc iConfiguration string, set it in c->strings */

	/* Attach OTG descriptor and advertise remote wakeup on
	 * OTG-capable hardware. */
	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_rndis = usb_get_function(fi_rndis);
	if (IS_ERR(f_rndis))
		return PTR_ERR(f_rndis);

	/* Drop the reference again if the add fails. */
	ret = usb_add_function(c, f_rndis);
	if (ret < 0)
		usb_put_function(f_rndis);

	return ret;
}
static struct usb_configuration rndis_config_driver = {
.label = "RNDIS",
.bConfigurationValue = 2,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_ETH_EEM
static bool use_eem = 1;
#else
static bool use_eem;
#endif
module_param(use_eem, bool, 0);
MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
/*
* We _always_ have an ECM, CDC Subset, or EEM configuration.
*/
/*
 * eth_do_config() - build the always-present ethernet configuration.
 * Exactly one of the three flavours is bound, selected the same way as
 * in eth_bind(): EEM when requested by the module parameter, else CDC
 * ECM when the UDC supports it, else the vendor-neutral CDC Subset.
 */
static int eth_do_config(struct usb_configuration *c)
{
	struct usb_function_instance *inst;
	struct usb_function **slot;
	int status;

	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	/* Pick the function instance plus the file-scope slot that
	 * caches the resulting usb_function for eth_unbind(). */
	if (use_eem) {
		inst = fi_eem;
		slot = &f_eem;
	} else if (can_support_ecm(c->cdev->gadget)) {
		inst = fi_ecm;
		slot = &f_ecm;
	} else {
		inst = fi_geth;
		slot = &f_geth;
	}

	*slot = usb_get_function(inst);
	if (IS_ERR(*slot))
		return PTR_ERR(*slot);

	status = usb_add_function(c, *slot);
	if (status < 0)
		usb_put_function(*slot);

	return status;
}
static struct usb_configuration eth_config_driver = {
/* .label = f(hardware) */
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
/*
 * eth_bind() - gadget bind callback.  Chooses one ethernet flavour
 * (EEM / ECM / CDC Subset) and obtains its function instance, applies
 * the module-parameter MAC addresses and qmult, optionally sets up a
 * second RNDIS configuration, allocates string IDs and the OTG
 * descriptor, and registers the configuration(s) -- RNDIS first when
 * present, per the comment above rndis_do_config().
 */
static int eth_bind(struct usb_composite_dev *cdev)
{
	struct usb_gadget	*gadget = cdev->gadget;
	struct f_eem_opts	*eem_opts = NULL;
	struct f_ecm_opts	*ecm_opts = NULL;
	struct f_gether_opts	*geth_opts = NULL;
	struct net_device	*net;
	int status;

	/* set up main config label and device descriptor */
	if (use_eem) {
		/* EEM */
		fi_eem = usb_get_function_instance("eem");
		if (IS_ERR(fi_eem))
			return PTR_ERR(fi_eem);

		eem_opts = container_of(fi_eem, struct f_eem_opts, func_inst);

		net = eem_opts->net;

		eth_config_driver.label = "CDC Ethernet (EEM)";
		device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
	} else if (can_support_ecm(gadget)) {
		/* ECM */

		fi_ecm = usb_get_function_instance("ecm");
		if (IS_ERR(fi_ecm))
			return PTR_ERR(fi_ecm);

		ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);

		net = ecm_opts->net;

		eth_config_driver.label = "CDC Ethernet (ECM)";
	} else {
		/* CDC Subset */

		fi_geth = usb_get_function_instance("geth");
		if (IS_ERR(fi_geth))
			return PTR_ERR(fi_geth);

		geth_opts = container_of(fi_geth, struct f_gether_opts,
					 func_inst);

		net = geth_opts->net;

		eth_config_driver.label = "CDC Subset/SAFE";

		device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM);
		if (!has_rndis())
			device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
	}

	/* Apply module-parameter overrides to the shared net_device. */
	gether_set_qmult(net, qmult);
	if (!gether_set_host_addr(net, host_addr))
		pr_info("using host ethernet address: %s", host_addr);
	if (!gether_set_dev_addr(net, dev_addr))
		pr_info("using self ethernet address: %s", dev_addr);

	if (has_rndis()) {
		/* RNDIS plus ECM-or-Subset: register the shared netdev
		 * here and mark the chosen instance as already bound,
		 * since two configurations will use the same netdev. */
		gether_set_gadget(net, cdev->gadget);
		status = gether_register_netdev(net);
		if (status)
			goto fail;

		if (use_eem)
			eem_opts->bound = true;
		else if (can_support_ecm(gadget))
			ecm_opts->bound = true;
		else
			geth_opts->bound = true;

		fi_rndis = usb_get_function_instance("rndis");
		if (IS_ERR(fi_rndis)) {
			status = PTR_ERR(fi_rndis);
			goto fail;
		}

		/* Let the RNDIS function reuse the same net_device. */
		rndis_borrow_net(fi_rndis, net);

		device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM);
		device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM);
		device_desc.bNumConfigurations = 2;
	}

	/* Allocate string descriptor numbers ... note that string
	 * contents can be overridden by the composite_dev glue.
	 */
	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto fail1;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

	if (gadget_is_otg(gadget) && !otg_desc[0]) {
		struct usb_descriptor_header *usb_desc;

		usb_desc = usb_otg_descriptor_alloc(gadget);
		if (!usb_desc) {
			status = -ENOMEM;
			goto fail1;
		}
		usb_otg_descriptor_init(gadget, usb_desc);
		otg_desc[0] = usb_desc;
		otg_desc[1] = NULL;
	}

	/* register our configuration(s); RNDIS first, if it's used */
	if (has_rndis()) {
		status = usb_add_config(cdev, &rndis_config_driver,
					rndis_do_config);
		if (status < 0)
			goto fail2;
	}

	status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
	if (status < 0)
		goto fail2;

	usb_composite_overwrite_options(cdev, &coverwrite);
	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
		 DRIVER_DESC);

	return 0;

	/* Unwind: OTG descriptor, then RNDIS instance, then the chosen
	 * ethernet instance. */
fail2:
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
fail1:
	if (has_rndis())
		usb_put_function_instance(fi_rndis);
fail:
	if (use_eem)
		usb_put_function_instance(fi_eem);
	else if (can_support_ecm(gadget))
		usb_put_function_instance(fi_ecm);
	else
		usb_put_function_instance(fi_geth);
	return status;
}
/*
 * eth_unbind() - release the references taken by eth_bind() and the
 * *_do_config() callbacks.  The branch structure repeats the flavour
 * selection from eth_bind() (module parameter, then UDC capability),
 * so it resolves to the same functions/instances.
 */
static int eth_unbind(struct usb_composite_dev *cdev)
{
	if (has_rndis()) {
		usb_put_function(f_rndis);
		usb_put_function_instance(fi_rndis);
	}
	if (use_eem) {
		usb_put_function(f_eem);
		usb_put_function_instance(fi_eem);
	} else if (can_support_ecm(cdev->gadget)) {
		usb_put_function(f_ecm);
		usb_put_function_instance(fi_ecm);
	} else {
		usb_put_function(f_geth);
		usb_put_function_instance(fi_geth);
	}
	/* Free the OTG descriptor, if one was allocated in eth_bind(). */
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
	return 0;
}
/*
 * Composite driver glue for the ethernet gadget; one or two
 * configurations (plus optional RNDIS) are registered from eth_bind().
 */
static struct usb_composite_driver eth_driver = {
	.name		= "g_ether",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_SUPER,
	.bind		= eth_bind,
	.unbind		= eth_unbind,
};
module_usb_composite_driver(eth_driver);

MODULE_DESCRIPTION(PREFIX DRIVER_DESC);
MODULE_AUTHOR("David Brownell, Benedikt Spanger");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/ether.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* multi.c -- Multifunction Composite driver
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "u_serial.h"
#if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
#endif
#ifdef CONFIG_USB_G_MULTI_RNDIS
# define USB_ETH_RNDIS y
#endif
#define DRIVER_DESC "Multifunction Composite Gadget"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
#include "f_mass_storage.h"
#include "u_ecm.h"
#ifdef USB_ETH_RNDIS
# include "u_rndis.h"
# include "rndis.h"
#endif
#include "u_ether.h"
USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
/***************************** Device Descriptor ****************************/

#define MULTI_VENDOR_NUM	0x1d6b	/* Linux Foundation */
#define MULTI_PRODUCT_NUM	0x0104	/* Multifunction Composite Gadget */

/* bConfigurationValue 0 is reserved, so real configurations start at 1 */
enum {
	__MULTI_NO_CONFIG,
#ifdef CONFIG_USB_G_MULTI_RNDIS
	MULTI_RNDIS_CONFIG_NUM,
#endif
#ifdef CONFIG_USB_G_MULTI_CDC
	MULTI_CDC_CONFIG_NUM,
#endif
};

/* Multi-interface (IAD-capable) device: class/subclass/protocol EF/02/01 */
static struct usb_device_descriptor device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	/* .bcdUSB = DYNAMIC */

	.bDeviceClass =		USB_CLASS_MISC /* 0xEF */,
	.bDeviceSubClass =	2,
	.bDeviceProtocol =	1,

	/* Vendor and product id can be overridden by module parameters.  */
	.idVendor =		cpu_to_le16(MULTI_VENDOR_NUM),
	.idProduct =		cpu_to_le16(MULTI_PRODUCT_NUM),
};

/* allocated at bind time if the gadget is OTG-capable; NULL-terminated */
static const struct usb_descriptor_header *otg_desc[2];

enum {
	MULTI_STRING_RNDIS_CONFIG_IDX = USB_GADGET_FIRST_AVAIL_IDX,
	MULTI_STRING_CDC_CONFIG_IDX,
};

static struct usb_string strings_dev[] = {
	[USB_GADGET_MANUFACTURER_IDX].s = "",
	[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
	[USB_GADGET_SERIAL_IDX].s = "",
	[MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS",
	[MULTI_STRING_CDC_CONFIG_IDX].s   = "Multifunction with CDC ECM",
	{  } /* end of list */
};

static struct usb_gadget_strings *dev_strings[] = {
	&(struct usb_gadget_strings){
		.language	= 0x0409,	/* en-us */
		.strings	= strings_dev,
	},
	NULL,
};

/****************************** Configurations ******************************/

static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;

#else

/*
 * Number of buffers we will use.
 * 2 is usually enough for good buffering pipeline
 */
#define fsg_num_buffers	CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS

#endif /* CONFIG_USB_GADGET_DEBUG_FILES */

FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);

/* function instances shared by both configurations */
static struct usb_function_instance *fi_acm;
static struct usb_function_instance *fi_msg;

/********** RNDIS **********/

#ifdef USB_ETH_RNDIS
/* RNDIS configuration carries its own net function plus shared ACM + MSC */
static struct usb_function_instance *fi_rndis;
static struct usb_function *f_acm_rndis;
static struct usb_function *f_rndis;
static struct usb_function *f_msg_rndis;
/*
 * rndis_do_config - populate the RNDIS configuration with three functions:
 * RNDIS networking, ACM serial, and mass storage.
 *
 * Each usb_get_function() is paired with usb_put_function(), and each
 * usb_add_function() with usb_remove_function(), on the error labels
 * below — unwound in exact reverse order of acquisition.
 */
static int rndis_do_config(struct usb_configuration *c)
{
	int ret;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_rndis = usb_get_function(fi_rndis);
	if (IS_ERR(f_rndis))
		return PTR_ERR(f_rndis);

	ret = usb_add_function(c, f_rndis);
	if (ret < 0)
		goto err_func_rndis;

	f_acm_rndis = usb_get_function(fi_acm);
	if (IS_ERR(f_acm_rndis)) {
		ret = PTR_ERR(f_acm_rndis);
		goto err_func_acm;
	}

	ret = usb_add_function(c, f_acm_rndis);
	if (ret)
		goto err_conf;

	f_msg_rndis = usb_get_function(fi_msg);
	if (IS_ERR(f_msg_rndis)) {
		ret = PTR_ERR(f_msg_rndis);
		goto err_fsg;
	}

	ret = usb_add_function(c, f_msg_rndis);
	if (ret)
		goto err_run;

	return 0;
err_run:
	usb_put_function(f_msg_rndis);
err_fsg:
	usb_remove_function(c, f_acm_rndis);
err_conf:
	usb_put_function(f_acm_rndis);
err_func_acm:
	usb_remove_function(c, f_rndis);
err_func_rndis:
	usb_put_function(f_rndis);
	return ret;
}
static int rndis_config_register(struct usb_composite_dev *cdev)
{
static struct usb_configuration config = {
.bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
config.label = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s;
config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id;
return usb_add_config(cdev, &config, rndis_do_config);
}
#else

/* RNDIS not configured in: registering its configuration is a no-op */
static int rndis_config_register(struct usb_composite_dev *cdev)
{
	return 0;
}

#endif

/********** CDC ECM **********/

#ifdef CONFIG_USB_G_MULTI_CDC
/* CDC configuration carries an ECM net function plus shared ACM + MSC */
static struct usb_function_instance *fi_ecm;
static struct usb_function *f_acm_multi;
static struct usb_function *f_ecm;
static struct usb_function *f_msg_multi;
/*
 * cdc_do_config - populate the CDC configuration with three functions:
 * CDC ECM networking, ACM serial, and mass storage.
 *
 * The error labels unwind gets/adds in exact reverse order, mirroring
 * rndis_do_config() above.
 */
static int cdc_do_config(struct usb_configuration *c)
{
	int ret;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_ecm = usb_get_function(fi_ecm);
	if (IS_ERR(f_ecm))
		return PTR_ERR(f_ecm);

	ret = usb_add_function(c, f_ecm);
	if (ret < 0)
		goto err_func_ecm;

	/* implicit port_num is zero */
	f_acm_multi = usb_get_function(fi_acm);
	if (IS_ERR(f_acm_multi)) {
		ret = PTR_ERR(f_acm_multi);
		goto err_func_acm;
	}

	ret = usb_add_function(c, f_acm_multi);
	if (ret)
		goto err_conf;

	f_msg_multi = usb_get_function(fi_msg);
	if (IS_ERR(f_msg_multi)) {
		ret = PTR_ERR(f_msg_multi);
		goto err_fsg;
	}

	ret = usb_add_function(c, f_msg_multi);
	if (ret)
		goto err_run;

	return 0;
err_run:
	usb_put_function(f_msg_multi);
err_fsg:
	usb_remove_function(c, f_acm_multi);
err_conf:
	usb_put_function(f_acm_multi);
err_func_acm:
	usb_remove_function(c, f_ecm);
err_func_ecm:
	usb_put_function(f_ecm);
	return ret;
}
static int cdc_config_register(struct usb_composite_dev *cdev)
{
static struct usb_configuration config = {
.bConfigurationValue = MULTI_CDC_CONFIG_NUM,
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
config.label = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s;
config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id;
return usb_add_config(cdev, &config, cdc_do_config);
}
#else

/* CDC ECM not configured in: registering its configuration is a no-op */
static int cdc_config_register(struct usb_composite_dev *cdev)
{
	return 0;
}

#endif
/****************************** Gadget Bind ******************************/

/*
 * multi_bind - acquire every function instance and register the RNDIS
 * and/or CDC configurations.
 *
 * Acquisition order: ECM instance (CONFIG_USB_G_MULTI_CDC), RNDIS
 * instance (USB_ETH_RNDIS), shared netdev registration (only when both
 * are built in), ACM instance, mass-storage instance plus its buffers
 * and LUNs, string IDs, the OTG descriptor, and finally the two
 * configurations.  The fail* labels unwind in exact reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int multi_bind(struct usb_composite_dev *cdev)
{
	struct usb_gadget *gadget = cdev->gadget;
#ifdef CONFIG_USB_G_MULTI_CDC
	struct f_ecm_opts *ecm_opts;
#endif
#ifdef USB_ETH_RNDIS
	struct f_rndis_opts *rndis_opts;
#endif
	struct fsg_opts *fsg_opts;
	struct fsg_config config;
	int status;

	if (!can_support_ecm(cdev->gadget)) {
		dev_err(&gadget->dev, "controller '%s' not usable\n",
			gadget->name);
		return -EINVAL;
	}

#ifdef CONFIG_USB_G_MULTI_CDC
	fi_ecm = usb_get_function_instance("ecm");
	if (IS_ERR(fi_ecm))
		return PTR_ERR(fi_ecm);

	ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);

	gether_set_qmult(ecm_opts->net, qmult);
	if (!gether_set_host_addr(ecm_opts->net, host_addr))
		pr_info("using host ethernet address: %s", host_addr);
	if (!gether_set_dev_addr(ecm_opts->net, dev_addr))
		pr_info("using self ethernet address: %s", dev_addr);
#endif

#ifdef USB_ETH_RNDIS
	fi_rndis = usb_get_function_instance("rndis");
	if (IS_ERR(fi_rndis)) {
		status = PTR_ERR(fi_rndis);
		goto fail;
	}

	rndis_opts = container_of(fi_rndis, struct f_rndis_opts, func_inst);

	gether_set_qmult(rndis_opts->net, qmult);
	if (!gether_set_host_addr(rndis_opts->net, host_addr))
		pr_info("using host ethernet address: %s", host_addr);
	if (!gether_set_dev_addr(rndis_opts->net, dev_addr))
		pr_info("using self ethernet address: %s", dev_addr);
#endif

#if (defined CONFIG_USB_G_MULTI_CDC && defined USB_ETH_RNDIS)
	/*
	 * If both ecm and rndis are selected then:
	 *	1) rndis borrows the net interface from ecm
	 *	2) since the interface is shared it must not be bound
	 *	twice - in ecm's _and_ rndis' binds, so do it here.
	 */
	gether_set_gadget(ecm_opts->net, cdev->gadget);
	status = gether_register_netdev(ecm_opts->net);
	if (status)
		goto fail0;
	rndis_borrow_net(fi_rndis, ecm_opts->net);
	ecm_opts->bound = true;
#endif

	/* set up serial link layer */
	fi_acm = usb_get_function_instance("acm");
	if (IS_ERR(fi_acm)) {
		status = PTR_ERR(fi_acm);
		goto fail0;
	}

	/* set up mass storage function */
	fi_msg = usb_get_function_instance("mass_storage");
	if (IS_ERR(fi_msg)) {
		status = PTR_ERR(fi_msg);
		goto fail1;
	}
	fsg_config_from_params(&config, &fsg_mod_data, fsg_num_buffers);
	fsg_opts = fsg_opts_from_func_inst(fi_msg);

	/* this gadget manages the fsg function itself, not via configfs */
	fsg_opts->no_configfs = true;
	status = fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
	if (status)
		goto fail2;

	status = fsg_common_set_cdev(fsg_opts->common, cdev, config.can_stall);
	if (status)
		goto fail_set_cdev;

	fsg_common_set_sysfs(fsg_opts->common, true);
	status = fsg_common_create_luns(fsg_opts->common, &config);
	if (status)
		goto fail_set_cdev;
	fsg_common_set_inquiry_string(fsg_opts->common, config.vendor_name,
				      config.product_name);

	/* allocate string IDs */
	status = usb_string_ids_tab(cdev, strings_dev);
	if (unlikely(status < 0))
		goto fail_string_ids;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

	if (gadget_is_otg(gadget) && !otg_desc[0]) {
		struct usb_descriptor_header *usb_desc;

		usb_desc = usb_otg_descriptor_alloc(gadget);
		if (!usb_desc) {
			status = -ENOMEM;
			goto fail_string_ids;
		}
		usb_otg_descriptor_init(gadget, usb_desc);
		otg_desc[0] = usb_desc;
		otg_desc[1] = NULL;
	}

	/* register configurations */
	status = rndis_config_register(cdev);
	if (unlikely(status < 0))
		goto fail_otg_desc;

	status = cdc_config_register(cdev);
	if (unlikely(status < 0))
		goto fail_otg_desc;
	usb_composite_overwrite_options(cdev, &coverwrite);

	/* we're done */
	dev_info(&gadget->dev, DRIVER_DESC "\n");
	return 0;


	/* error recovery: reverse order of the steps above */
fail_otg_desc:
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
fail_string_ids:
	fsg_common_remove_luns(fsg_opts->common);
fail_set_cdev:
	fsg_common_free_buffers(fsg_opts->common);
fail2:
	usb_put_function_instance(fi_msg);
fail1:
	usb_put_function_instance(fi_acm);
fail0:
#ifdef USB_ETH_RNDIS
	usb_put_function_instance(fi_rndis);
fail:
#endif
#ifdef CONFIG_USB_G_MULTI_CDC
	usb_put_function_instance(fi_ecm);
#endif
	return status;
}
/*
 * multi_unbind - release all functions and their instances in reverse
 * of the order the config callbacks and multi_bind() acquired them,
 * then free the OTG descriptor.
 */
static int multi_unbind(struct usb_composite_dev *cdev)
{
#ifdef CONFIG_USB_G_MULTI_CDC
	usb_put_function(f_msg_multi);
#endif
#ifdef USB_ETH_RNDIS
	usb_put_function(f_msg_rndis);
#endif
	usb_put_function_instance(fi_msg);
#ifdef CONFIG_USB_G_MULTI_CDC
	usb_put_function(f_acm_multi);
#endif
#ifdef USB_ETH_RNDIS
	usb_put_function(f_acm_rndis);
#endif
	usb_put_function_instance(fi_acm);
#ifdef USB_ETH_RNDIS
	usb_put_function(f_rndis);
	usb_put_function_instance(fi_rndis);
#endif
#ifdef CONFIG_USB_G_MULTI_CDC
	usb_put_function(f_ecm);
	usb_put_function_instance(fi_ecm);
#endif
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;

	return 0;
}
/****************************** Some noise ******************************/

/* Composite driver glue.  NOTE(review): needs_serial presumably makes the
 * composite core insist on a serial-number string — confirm in composite.c.
 */
static struct usb_composite_driver multi_driver = {
	.name		= "g_multi",
	.dev		= &device_desc,
	.strings	= dev_strings,
	.max_speed	= USB_SPEED_SUPER,
	.bind		= multi_bind,
	.unbind		= multi_unbind,
	.needs_serial	= 1,
};

module_usb_composite_driver(multi_driver);
| linux-master | drivers/usb/gadget/legacy/multi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* inode.c -- user mode filesystem api for usb gadget controllers
*
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
*/
/* #define VERBOSE_DEBUG */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/uts.h>
#include <linux/wait.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/usb/gadgetfs.h>
#include <linux/usb/gadget.h>
/*
* The gadgetfs API maps each endpoint to a file descriptor so that you
* can use standard synchronous read/write calls for I/O. There's some
* O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
* drivers show how this works in practice. You can also use AIO to
* eliminate I/O gaps between requests, to help when streaming data.
*
* Key parts that must be USB-specific are protocols defining how the
* read/write operations relate to the hardware state machines. There
* are two types of files. One type is for the device, implementing ep0.
* The other type is for each IN or OUT endpoint. In both cases, the
* user mode driver must configure the hardware before using it.
*
* - First, dev_config() is called when /dev/gadget/$CHIP is configured
* (by writing configuration and device descriptors). Afterwards it
* may serve as a source of device events, used to handle all control
* requests other than basic enumeration.
*
* - Then, after a SET_CONFIGURATION control request, ep_config() is
* called when each /dev/gadget/ep* file is configured (by writing
* endpoint descriptors). Afterwards these files are used to write()
* IN data or to read() OUT data. To halt the endpoint, a "wrong
* direction" request is issued (like reading an IN endpoint).
*
* Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
* not possible on all hardware. For example, precise fault handling with
* respect to data left in endpoint fifos after aborted operations; or
* selective clearing of endpoint halts, to implement SET_INTERFACE.
*/
#define	DRIVER_DESC	"USB Gadget filesystem"
#define	DRIVER_VERSION	"24 Aug 2004"

static const char driver_desc [] = DRIVER_DESC;
static const char shortname [] = "gadgetfs";

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");

/* forward declaration: all endpoint files share one open(), defined below */
static int ep_open(struct inode *, struct file *);

/*----------------------------------------------------------------------*/

#define GADGETFS_MAGIC		0xaee71ee7
/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
	/* DISABLED is the initial state. */
	STATE_DEV_DISABLED = 0,

	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
	 * ep0/device i/o modes and binding to the controller.  Driver
	 * must always write descriptors to initialize the device, then
	 * the device becomes UNCONNECTED until enumeration.
	 */
	STATE_DEV_OPENED,

	/* From then on, ep0 fd is in either of two basic modes:
	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
	 * - SETUP: read/write will transfer control data and succeed;
	 *   or if "wrong direction", performs protocol stall
	 */
	STATE_DEV_UNCONNECTED,
	STATE_DEV_CONNECTED,
	STATE_DEV_SETUP,

	/* UNBOUND means the driver closed ep0, so the device won't be
	 * accessible again (DEV_DISABLED) until all fds are closed.
	 */
	STATE_DEV_UNBOUND,
};

/* enough for the whole queue: most events invalidate others */
#define	N_EVENT			5

/* size of dev_data.rbuf, the built-in scratch buffer for ep0 data stages */
#define RBUF_SIZE		256
/* per-device state backing the /dev/gadget/$CHIP (ep0) file */
struct dev_data {
	spinlock_t			lock;
	refcount_t			count;
	/* nonzero while a thread has dropped 'lock' to call into the UDC;
	 * see the ++/-- pairs around usb_ep_queue() in the ep0 paths
	 */
	int				udc_usage;
	enum ep0_state			state;		/* P: lock */
	struct usb_gadgetfs_event	event [N_EVENT];	/* event ring */
	unsigned			ev_next;
	struct fasync_struct		*fasync;	/* for SIGIO delivery */
	u8				current_config;

	/* drivers reading ep0 MUST handle control requests (SETUP)
	 * reported that way; else the host will time out.
	 */
	unsigned			usermode_setup : 1,
					setup_in : 1,
					setup_can_stall : 1,
					setup_out_ready : 1,
					setup_out_error : 1,
					setup_abort : 1,
					gadget_registered : 1;
	unsigned			setup_wLength;

	/* the rest is basically write-once */
	struct usb_config_descriptor	*config, *hs_config;
	struct usb_device_descriptor	*dev;
	struct usb_request		*req;		/* the one ep0 request */
	struct usb_gadget		*gadget;
	struct list_head		epfiles;
	void				*buf;
	wait_queue_head_t		wait;
	struct super_block		*sb;
	struct dentry			*dentry;

	/* except this scratch i/o buffer for ep0 */
	u8				rbuf[RBUF_SIZE];
};
/* take a reference on the device state */
static inline void get_dev (struct dev_data *data)
{
	refcount_inc (&data->count);
}
/* drop a reference; the last put frees the device state */
static void put_dev (struct dev_data *data)
{
	if (refcount_dec_and_test (&data->count)) {
		/* last reference gone: nobody may still be waiting on us */
		BUG_ON (waitqueue_active (&data->wait));
		kfree (data);
	}
}
/* allocate and initialize one device-state object; refcount starts at 1 */
static struct dev_data *dev_new (void)
{
	struct dev_data *dev = kzalloc (sizeof *dev, GFP_KERNEL);

	if (dev) {
		dev->state = STATE_DEV_DISABLED;
		refcount_set (&dev->count, 1);
		spin_lock_init (&dev->lock);
		INIT_LIST_HEAD (&dev->epfiles);
		init_waitqueue_head (&dev->wait);
	}
	return dev;
}
/*----------------------------------------------------------------------*/

/* other /dev/gadget/$ENDPOINT files represent endpoints */
enum ep_state {
	STATE_EP_DISABLED = 0,		/* closed; ep_open() required first */
	STATE_EP_READY,			/* open, awaiting descriptors */
	STATE_EP_ENABLED,		/* configured; i/o may proceed */
	STATE_EP_UNBOUND,		/* gadget gone ("clean disconnect") */
};

/* per-endpoint state backing one /dev/gadget/$ENDPOINT file */
struct ep_data {
	struct mutex			lock;	/* serializes fd operations */
	enum ep_state			state;
	refcount_t			count;
	struct dev_data			*dev;
	/* must hold dev->lock before accessing ep or req */
	struct usb_ep			*ep;
	struct usb_request		*req;
	ssize_t				status;	/* last sync i/o result */
	char				name [16];
	struct usb_endpoint_descriptor	desc, hs_desc;
	struct list_head		epfiles;
	wait_queue_head_t		wait;
	struct dentry			*dentry;
};

/* take a reference on the endpoint state */
static inline void get_ep (struct ep_data *data)
{
	refcount_inc (&data->count);
}
/* drop a reference; the last put frees the endpoint state and drops
 * the device reference it holds
 */
static void put_ep (struct ep_data *data)
{
	if (refcount_dec_and_test (&data->count)) {
		put_dev (data->dev);
		/* endpoint must be fully idle by now */
		BUG_ON (!list_empty (&data->epfiles));
		BUG_ON (waitqueue_active (&data->wait));
		kfree (data);
	}
}
/*----------------------------------------------------------------------*/

/* most "how to use the hardware" policy choices are in userspace:
 * mapping endpoint roles (which the driver needs) to the capabilities
 * which the usb controller has.  most of those capabilities are exposed
 * implicitly, starting with the driver name and then endpoint names.
 */

static const char *CHIP;
static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */

/*----------------------------------------------------------------------*/

/* NOTE:  don't use dev_printk calls before binding to the gadget
 * at the end of ep0 configuration, or after unbind.
 */

/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
#define xprintk(d,level,fmt,args...) \
	printk(level "%s: " fmt , shortname , ## args)

#ifdef DEBUG
#define DBG(dev,fmt,args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDEBUG	DBG
#else
#define VDEBUG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev,fmt,args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev,fmt,args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)
/*----------------------------------------------------------------------*/
/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
*
* After opening, configure non-control endpoints. Then use normal
* stream read() and write() requests; and maybe ioctl() to get more
* precise FIFO status when recovering from cancellation.
*/
/* completion callback for synchronous ep_io(): record the result and
 * wake the sleeper.  Requests without a completion context are ignored.
 */
static void epio_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct ep_data		*epdata = ep->driver_data;
	struct completion	*done = req->context;

	if (!done)
		return;
	/* keep the two assignments separate: status is signed, actual isn't */
	if (req->status != 0)
		epdata->status = req->status;
	else
		epdata->status = req->actual;
	complete (done);
}
/* tasklock endpoint, returning when it's connected.
 * still need dev->lock to use epdata->ep.
 *
 * On success returns 0 with epdata->lock held; the caller must unlock.
 * is_write additionally accepts STATE_EP_READY, so a write() can carry
 * the configuring descriptors before the endpoint is enabled.
 */
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
	int	val;

	if (f_flags & O_NONBLOCK) {
		/* nonblocking: a busy mutex means -EAGAIN, not a sleep */
		if (!mutex_trylock(&epdata->lock))
			goto nonblock;
		if (epdata->state != STATE_EP_ENABLED &&
		    (!is_write || epdata->state != STATE_EP_READY)) {
			mutex_unlock(&epdata->lock);
nonblock:
			val = -EAGAIN;
		} else
			val = 0;
		return val;
	}

	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)
		return val;

	switch (epdata->state) {
	case STATE_EP_ENABLED:
		return 0;
	case STATE_EP_READY:			/* not configured yet */
		if (is_write)
			return 0;
		fallthrough;
	case STATE_EP_UNBOUND:			/* clean disconnect */
		break;
	// case STATE_EP_DISABLED:		/* "can't happen" */
	default:				/* error! */
		pr_debug ("%s: ep %p not available, state %d\n",
				shortname, epdata, epdata->state);
	}
	mutex_unlock(&epdata->lock);
	return -ENODEV;
}
/*
 * ep_io - perform one synchronous transfer on an endpoint.
 * Queues epdata->req and sleeps (interruptibly) for its completion.
 * If interrupted, dequeues the request and waits for the resulting
 * completion before reading epdata->status.  Returns bytes transferred
 * or a negative errno.  Caller holds epdata->lock.
 */
static ssize_t
ep_io (struct ep_data *epdata, void *buf, unsigned len)
{
	DECLARE_COMPLETION_ONSTACK (done);
	int value;

	spin_lock_irq (&epdata->dev->lock);
	if (likely (epdata->ep != NULL)) {
		struct usb_request	*req = epdata->req;

		req->context = &done;
		req->complete = epio_complete;
		req->buf = buf;
		req->length = len;
		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
	} else
		value = -ENODEV;
	spin_unlock_irq (&epdata->dev->lock);

	if (likely (value == 0)) {
		value = wait_for_completion_interruptible(&done);
		if (value != 0) {
			spin_lock_irq (&epdata->dev->lock);
			if (likely (epdata->ep != NULL)) {
				DBG (epdata->dev, "%s i/o interrupted\n",
						epdata->name);
				/* dequeue triggers the completion; wait for
				 * it before touching epdata->status
				 */
				usb_ep_dequeue (epdata->ep, epdata->req);
				spin_unlock_irq (&epdata->dev->lock);

				wait_for_completion(&done);
				if (epdata->status == -ECONNRESET)
					epdata->status = -EINTR;
			} else {
				spin_unlock_irq (&epdata->dev->lock);

				DBG (epdata->dev, "endpoint gone\n");
				wait_for_completion(&done);
				epdata->status = -ENODEV;
			}
		}
		return epdata->status;
	}
	return value;
}
/*
 * ep_release - close() for an endpoint file: disable the hardware
 * endpoint (unless the gadget already unbound) and drop the reference
 * taken in ep_open().
 */
static int
ep_release (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = fd->private_data;
	int value;

	value = mutex_lock_interruptible(&data->lock);
	if (value < 0)
		return value;

	/* clean up if this can be reopened */
	if (data->state != STATE_EP_UNBOUND) {
		data->state = STATE_EP_DISABLED;
		/* zeroed types make the descriptors invalid for reuse */
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
		usb_ep_disable(data->ep);
	}
	mutex_unlock(&data->lock);
	put_ep (data);
	return 0;
}
/* ioctl on an endpoint file: FIFO status/flush and CLEAR_HALT.
 * Takes the endpoint mutex (via get_ready_ep) and dev->lock, which
 * guards access to data->ep.
 */
static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
{
	struct ep_data *data = fd->private_data;
	int status;

	status = get_ready_ep (fd->f_flags, data, false);
	if (status < 0)
		return status;

	spin_lock_irq (&data->dev->lock);
	if (data->ep == NULL) {
		/* endpoint vanished while we waited for the mutex */
		status = -ENODEV;
	} else {
		switch (code) {
		case GADGETFS_FIFO_STATUS:
			status = usb_ep_fifo_status (data->ep);
			break;
		case GADGETFS_FIFO_FLUSH:
			/* no result: status stays 0 from get_ready_ep() */
			usb_ep_fifo_flush (data->ep);
			break;
		case GADGETFS_CLEAR_HALT:
			status = usb_ep_clear_halt (data->ep);
			break;
		default:
			status = -ENOTTY;
		}
	}
	spin_unlock_irq (&data->dev->lock);

	mutex_unlock(&data->lock);
	return status;
}
/*----------------------------------------------------------------------*/

/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */

/* per-iocb state linking an AIO kiocb to its usb_request */
struct kiocb_priv {
	struct usb_request	*req;		/* NULL once completed */
	struct ep_data		*epdata;
	struct kiocb		*iocb;
	struct mm_struct	*mm;		/* issuer's mm, for copy-out */
	struct work_struct	work;		/* runs ep_user_copy_worker() */
	void			*buf;		/* kernel bounce buffer */
	struct iov_iter		to;		/* destination iter (reads) */
	const void		*to_free;	/* dup_iter() allocation */
	unsigned		actual;		/* bytes transferred */
};
/* AIO cancellation: dequeue the in-flight request, if there is one.
 * NOTE(review): the irq-off section presumably guards priv->epdata/req
 * against the completion path; the spinlock alternative is left
 * commented out — confirm before changing.
 */
static int ep_aio_cancel(struct kiocb *iocb)
{
	struct kiocb_priv *priv = iocb->private;
	struct ep_data *epdata;
	int value;

	local_irq_disable();
	epdata = priv->epdata;
	// spin_lock(&epdata->dev->lock);
	if (likely(epdata && epdata->ep && priv->req))
		value = usb_ep_dequeue (epdata->ep, priv->req);
	else
		value = -EINVAL;
	// spin_unlock(&epdata->dev->lock);
	local_irq_enable();

	return value;
}
/* process-context worker: copy AIO read data into the issuing task's
 * address space (via its mm), complete the iocb, free per-iocb state
 */
static void ep_user_copy_worker(struct work_struct *work)
{
	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
	struct mm_struct *mm = priv->mm;
	struct kiocb *iocb = priv->iocb;
	size_t ret;

	kthread_use_mm(mm);
	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
	kthread_unuse_mm(mm);
	if (!ret)
		ret = -EFAULT;

	/* completing the iocb can drop the ctx and mm, don't touch mm after */
	iocb->ki_complete(iocb, ret);

	kfree(priv->buf);
	kfree(priv->to_free);
	kfree(priv);
}
/* usb_request completion for AIO: either finish the iocb right here
 * (writes, or reads that moved no data) or hand the buffer off to the
 * workqueue so the copy to userspace runs in process context.
 */
static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct kiocb		*iocb = req->context;
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata = priv->epdata;

	/* lock against disconnect (and ideally, cancel) */
	spin_lock(&epdata->dev->lock);
	priv->req = NULL;
	priv->epdata = NULL;

	/* if this was a write or a read returning no data then we
	 * don't need to copy anything to userspace, so we can
	 * complete the aio request immediately.
	 */
	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
		kfree(req->buf);
		kfree(priv->to_free);
		kfree(priv);
		iocb->private = NULL;
		iocb->ki_complete(iocb,
				req->actual ? req->actual : (long)req->status);
	} else {
		/* ep_copy_to_user() won't report both; we hide some faults */
		if (unlikely(0 != req->status))
			DBG(epdata->dev, "%s fault %d len %d\n",
				ep->name, req->status, req->actual);

		/* worker takes ownership of req->buf and frees priv */
		priv->buf = req->buf;
		priv->actual = req->actual;
		INIT_WORK(&priv->work, ep_user_copy_worker);
		schedule_work(&priv->work);
	}

	usb_ep_free_request(ep, req);
	spin_unlock(&epdata->dev->lock);
	put_ep(epdata);
}
/* common AIO submit path: allocate and queue one usb_request for the
 * iocb.  On success the queued request owns 'buf' and -EIOCBQUEUED is
 * returned; on failure per-iocb state is freed here.
 */
static ssize_t ep_aio(struct kiocb *iocb,
		      struct kiocb_priv *priv,
		      struct ep_data *epdata,
		      char *buf,
		      size_t len)
{
	struct usb_request *req;
	ssize_t value;

	iocb->private = priv;
	priv->iocb = iocb;

	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
	get_ep(epdata);
	priv->epdata = epdata;
	priv->actual = 0;
	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */

	/* each kiocb is coupled to one usb_request, but we can't
	 * allocate or submit those if the host disconnected.
	 */
	spin_lock_irq(&epdata->dev->lock);
	value = -ENODEV;
	if (unlikely(epdata->ep == NULL))
		goto fail;

	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
	value = -ENOMEM;
	if (unlikely(!req))
		goto fail;

	priv->req = req;
	req->buf = buf;
	req->length = len;
	req->complete = ep_aio_complete;
	req->context = iocb;
	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
	if (unlikely(0 != value)) {
		usb_ep_free_request(epdata->ep, req);
		goto fail;
	}
	spin_unlock_irq(&epdata->dev->lock);
	return -EIOCBQUEUED;

fail:
	spin_unlock_irq(&epdata->dev->lock);
	kfree(priv->to_free);
	kfree(priv);
	put_ep(epdata);
	return value;
}
/*
 * ep_read_iter - read OUT data from an endpoint file (sync or AIO).
 * Reading an IN endpoint instead halts it (the "wrong direction"
 * protocol) and returns -EBADMSG.
 */
static ssize_t
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(to);
	ssize_t value;
	char *buf;

	/* on success, returns with epdata->lock held */
	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}
	if (is_sync_kiocb(iocb)) {
		/* synchronous: block for the transfer, then copy out */
		value = ep_io(epdata, buf, len);
		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
			value = -EFAULT;
	} else {
		/* async: duplicate the iterator; completion copies later */
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (!priv)
			goto fail;
		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
		if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
			kfree(priv);
			goto fail;
		}
		value = ep_aio(iocb, priv, epdata, buf, len);
		if (value == -EIOCBQUEUED)
			buf = NULL;	/* queued request owns it now */
	}
fail:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}
static ssize_t ep_config(struct ep_data *, const char *, size_t);
/*
 * ep_write_iter - write IN data to an endpoint file (sync or AIO).
 * The first write on a READY (unconfigured) endpoint carries the
 * descriptors and goes to ep_config(); writing an OUT endpoint halts
 * it ("wrong direction") and returns -EBADMSG.
 */
static ssize_t
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(from);
	bool configured;
	ssize_t value;
	char *buf;

	/* on success, returns with epdata->lock held */
	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
		return value;

	configured = epdata->state == STATE_EP_ENABLED;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}

	if (unlikely(!copy_from_iter_full(buf, len, from))) {
		value = -EFAULT;
		goto out;
	}

	if (unlikely(!configured)) {
		/* first write: parse and apply the endpoint descriptors */
		value = ep_config(epdata, buf, len);
	} else if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (priv) {
			value = ep_aio(iocb, priv, epdata, buf, len);
			if (value == -EIOCBQUEUED)
				buf = NULL;	/* queued request owns it */
		}
	}
out:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}
/*----------------------------------------------------------------------*/

/* used after endpoint configuration */
/* file_operations backing every /dev/gadget/$ENDPOINT file */
static const struct file_operations ep_io_operations = {
	.owner =	THIS_MODULE,

	.open =		ep_open,
	.release =	ep_release,
	.llseek =	no_llseek,
	.unlocked_ioctl = ep_ioctl,
	.read_iter =	ep_read_iter,
	.write_iter =	ep_write_iter,
};
/* ENDPOINT INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the endpoint configuration, configuring
 * the controller to process bulk, interrupt, or isochronous transfers
 * at the right maxpacket size, and so on.
 *
 * The descriptors are message type 1, identified by a host order u32
 * at the beginning of what's written.  Descriptor order is: full/low
 * speed descriptor, then optional high speed descriptor.
 */
static ssize_t
ep_config (struct ep_data *data, const char *buf, size_t len)
{
	struct usb_ep		*ep;
	u32			tag;
	int			value, length = len;

	/* only a freshly opened (READY) endpoint may be configured */
	if (data->state != STATE_EP_READY) {
		value = -EL2HLT;
		goto fail;
	}

	value = len;
	if (len < USB_DT_ENDPOINT_SIZE + 4)
		goto fail0;

	/* we might need to change message format someday */
	memcpy(&tag, buf, 4);
	if (tag != 1) {
		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
		goto fail0;
	}
	buf += 4;
	len -= 4;

	/* NOTE:  audio endpoint extensions not accepted here;
	 * just don't include the extra bytes.
	 */

	/* full/low speed descriptor, then high speed */
	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
		goto fail0;
	if (len != USB_DT_ENDPOINT_SIZE) {
		if (len != 2 * USB_DT_ENDPOINT_SIZE)
			goto fail0;
		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
			USB_DT_ENDPOINT_SIZE);
		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
				|| data->hs_desc.bDescriptorType
					!= USB_DT_ENDPOINT) {
			DBG(data->dev, "config %s, bad hs length or type\n",
					data->name);
			goto fail0;
		}
	}

	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND) {
		value = -ENOENT;
		goto gone;
	} else {
		ep = data->ep;
		if (ep == NULL) {
			value = -ENODEV;
			goto gone;
		}
	}
	/* pick the descriptor matching the current connection speed */
	switch (data->dev->gadget->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		ep->desc = &data->desc;
		break;
	case USB_SPEED_HIGH:
		/* fails if caller didn't provide that descriptor... */
		ep->desc = &data->hs_desc;
		break;
	default:
		DBG(data->dev, "unconnected, %s init abandoned\n",
				data->name);
		value = -EINVAL;
		goto gone;
	}
	value = usb_ep_enable(ep);
	if (value == 0) {
		data->state = STATE_EP_ENABLED;
		value = length;		/* report the full write, tag included */
	}
gone:
	spin_unlock_irq (&data->dev->lock);
	if (value < 0) {
fail:
		/* invalidate both descriptors so configuration can be retried */
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
	}
	return value;
fail0:
	value = -EINVAL;
	goto fail;
}
/*
 * ep_open - open() for an endpoint file.  Succeeds only from the
 * DISABLED state, moving to READY and taking a reference; the first
 * write() must then supply descriptors (see ep_config above).
 */
static int
ep_open (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = inode->i_private;
	int			value = -EBUSY;

	if (mutex_lock_interruptible(&data->lock) != 0)
		return -EINTR;
	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND)
		value = -ENOENT;
	else if (data->state == STATE_EP_DISABLED) {
		value = 0;
		data->state = STATE_EP_READY;
		get_ep (data);
		fd->private_data = data;
		VDEBUG (data->dev, "%s ready\n", data->name);
	} else
		DBG (data->dev, "%s state %d\n",
			data->name, data->state);
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return value;
}
/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.
 *
 * Drivers that use this facility receive various events, including
 * control requests the kernel doesn't handle.  Drivers that don't
 * use this facility may be too simple-minded for real applications.
 */

/* wake any ep0 reader and raise SIGIO for O_ASYNC users */
static inline void ep0_readable (struct dev_data *dev)
{
	wake_up (&dev->wait);
	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
}

/* restore the ep0 request to its default scratch-buffer setup, freeing
 * any oversized buffer setup_req() allocated
 */
static void clean_req (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;

	if (req->buf != dev->rbuf) {
		kfree(req->buf);
		req->buf = dev->rbuf;
	}
	req->complete = epio_complete;
	dev->setup_out_ready = 0;
}
/* completion for ep0 data stages: on control-OUT, keep the buffer until
 * userspace reads it (setup_out_ready) unless the transfer failed;
 * otherwise release any oversized buffer right away
 */
static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;
	unsigned long		flags;
	int			free = 1;

	/* for control OUT, data must still get to userspace */
	spin_lock_irqsave(&dev->lock, flags);
	if (!dev->setup_in) {
		dev->setup_out_error = (req->status != 0);
		if (!dev->setup_out_error)
			free = 0;	/* ep0_read() will consume the data */
		dev->setup_out_ready = 1;
		ep0_readable (dev);
	}

	/* clean up as appropriate */
	if (free && req->buf != &dev->rbuf)
		clean_req (ep, req);
	req->complete = epio_complete;
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * Prepare dev->req for an ep0 data stage of @len bytes.  Buffers larger
 * than the preallocated dev->rbuf are kmalloc'd (freed later by
 * clean_req()).  Returns 0, -EBUSY if a previous OUT stage is still
 * pending, or -ENOMEM.  Caller must hold dev->lock (GFP_ATOMIC here).
 */
static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
{
	struct dev_data	*dev = ep->driver_data;

	if (dev->setup_out_ready) {
		DBG (dev, "ep0 request busy!\n");
		return -EBUSY;
	}
	if (len > sizeof (dev->rbuf))
		req->buf = kmalloc(len, GFP_ATOMIC);
	if (req->buf == NULL) {
		/* allocation failed: restore rbuf so req->buf stays valid */
		req->buf = dev->rbuf;
		return -ENOMEM;
	}
	req->complete = ep0_complete;
	req->length = len;
	req->zero = 0;
	return 0;
}
/*
 * read() on the ep0 file.  Two very different modes:
 *  - during STATE_DEV_SETUP this is the DATA/STATUS stage of a control
 *    request (stall an IN request, ack a zero-length status, or collect
 *    OUT data);
 *  - otherwise it returns queued usb_gadgetfs_event records, blocking
 *    unless O_NONBLOCK.
 * dev->lock is dropped and reacquired around UDC calls and user copies;
 * dev->udc_usage pins the UDC across those windows.
 */
static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data *dev = fd->private_data;
	ssize_t retval;
	enum ep0_state state;

	spin_lock_irq (&dev->lock);
	if (dev->state <= STATE_DEV_OPENED) {
		/* no descriptors written yet: nothing to read */
		retval = -EINVAL;
		goto done;
	}

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;
		goto done;
	}

	/* control DATA stage */
	if ((state = dev->state) == STATE_DEV_SETUP) {

		if (dev->setup_in) {		/* stall IN */
			VDEBUG(dev, "ep0in stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;

		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
			struct usb_ep		*ep = dev->gadget->ep0;
			struct usb_request	*req = dev->req;

			if ((retval = setup_req (ep, req, 0)) == 0) {
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				retval = usb_ep_queue (ep, req, GFP_KERNEL);
				spin_lock_irq (&dev->lock);
				--dev->udc_usage;
			}
			dev->state = STATE_DEV_CONNECTED;

			/* assume that was SET_CONFIGURATION */
			if (dev->current_config) {
				unsigned power;

				if (gadget_is_dualspeed(dev->gadget)
						&& (dev->gadget->speed
							== USB_SPEED_HIGH))
					power = dev->hs_config->bMaxPower;
				else
					power = dev->config->bMaxPower;
				usb_gadget_vbus_draw(dev->gadget, 2 * power);
			}

		} else {			/* collect OUT data */
			if ((fd->f_flags & O_NONBLOCK) != 0
					&& !dev->setup_out_ready) {
				retval = -EAGAIN;
				goto done;
			}
			spin_unlock_irq (&dev->lock);
			retval = wait_event_interruptible (dev->wait,
					dev->setup_out_ready != 0);

			/* FIXME state could change from under us */
			spin_lock_irq (&dev->lock);
			if (retval)
				goto done;

			if (dev->state != STATE_DEV_SETUP) {
				retval = -ECANCELED;
				goto done;
			}
			dev->state = STATE_DEV_CONNECTED;

			if (dev->setup_out_error)
				retval = -EIO;
			else {
				/* never copy more than the UDC received */
				len = min (len, (size_t)dev->req->actual);
				++dev->udc_usage;
				spin_unlock_irq(&dev->lock);
				if (copy_to_user (buf, dev->req->buf, len))
					retval = -EFAULT;
				else
					retval = len;
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				clean_req (dev->gadget->ep0, dev->req);
				/* NOTE userspace can't yet choose to stall */
			}
		}
		goto done;
	}

	/* else normal: return event data */
	if (len < sizeof dev->event [0]) {
		retval = -EINVAL;
		goto done;
	}
	/* deliver only whole events */
	len -= len % sizeof (struct usb_gadgetfs_event);
	dev->usermode_setup = 1;

scan:
	/* return queued events right away */
	if (dev->ev_next != 0) {
		unsigned		i, n;

		n = len / sizeof (struct usb_gadgetfs_event);
		if (dev->ev_next < n)
			n = dev->ev_next;

		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
		for (i = 0; i < n; i++) {
			if (dev->event [i].type == GADGETFS_SETUP) {
				/* stop after a SETUP event so userspace
				 * answers it before seeing later events */
				dev->state = STATE_DEV_SETUP;
				n = i + 1;
				break;
			}
		}
		spin_unlock_irq (&dev->lock);
		len = n * sizeof (struct usb_gadgetfs_event);
		if (copy_to_user (buf, &dev->event, len))
			retval = -EFAULT;
		else
			retval = len;
		if (len > 0) {
			/* NOTE this doesn't guard against broken drivers;
			 * concurrent ep0 readers may lose events.
			 */
			spin_lock_irq (&dev->lock);
			if (dev->ev_next > n) {
				/* shift unconsumed events to the front */
				memmove(&dev->event[0], &dev->event[n],
					sizeof (struct usb_gadgetfs_event)
						* (dev->ev_next - n));
			}
			dev->ev_next -= n;
			spin_unlock_irq (&dev->lock);
		}
		return retval;
	}
	if (fd->f_flags & O_NONBLOCK) {
		retval = -EAGAIN;
		goto done;
	}

	switch (state) {
	default:
		DBG (dev, "fail %s, state %d\n", __func__, state);
		retval = -ESRCH;
		break;
	case STATE_DEV_UNCONNECTED:
	case STATE_DEV_CONNECTED:
		spin_unlock_irq (&dev->lock);
		DBG (dev, "%s wait\n", __func__);

		/* wait for events */
		retval = wait_event_interruptible (dev->wait,
				dev->ev_next != 0);
		if (retval < 0)
			return retval;
		spin_lock_irq (&dev->lock);
		goto scan;
	}

done:
	spin_unlock_irq (&dev->lock);
	return retval;
}
/*
 * Append an event of @type to dev->event[] and return it (zeroed, with
 * type filled in) so the caller can set the union payload.  CONNECT and
 * DISCONNECT purge the whole queue; SETUP and SUSPEND first discard any
 * stale event of the same type.  Caller holds dev->lock.
 */
static struct usb_gadgetfs_event *
next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
{
	struct usb_gadgetfs_event	*event;
	unsigned			i;

	switch (type) {
	/* these events purge the queue */
	case GADGETFS_DISCONNECT:
		if (dev->state == STATE_DEV_SETUP)
			dev->setup_abort = 1;
		fallthrough;
	case GADGETFS_CONNECT:
		dev->ev_next = 0;
		break;
	case GADGETFS_SETUP:		/* previous request timed out */
	case GADGETFS_SUSPEND:		/* same effect */
		/* these events can't be repeated */
		for (i = 0; i != dev->ev_next; i++) {
			if (dev->event [i].type != type)
				continue;
			DBG(dev, "discard old event[%d] %d\n", i, type);
			dev->ev_next--;
			if (i == dev->ev_next)
				break;
			/* indices start at zero, for simplicity */
			memmove (&dev->event [i], &dev->event [i + 1],
				sizeof (struct usb_gadgetfs_event)
					* (dev->ev_next - i));
		}
		break;
	default:
		BUG ();
	}
	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
	event = &dev->event [dev->ev_next++];
	BUG_ON (dev->ev_next > N_EVENT);
	memset (event, 0, sizeof *event);
	event->type = type;
	return event;
}
/*
 * write() on the ep0 file: supplies the DATA stage of a control IN
 * request, or stalls a control OUT request.  Called with dev->lock held
 * (see dev_config(), which dispatches here once descriptors are set);
 * the lock is dropped around the user copy and usb_ep_queue(), with
 * dev->udc_usage pinning the UDC meanwhile.
 */
static ssize_t
ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data *dev = fd->private_data;
	ssize_t retval = -ESRCH;

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;

	/* data and/or status stage for control request */
	} else if (dev->state == STATE_DEV_SETUP) {

		/* host never gets more than it asked for */
		len = min_t(size_t, len, dev->setup_wLength);
		if (dev->setup_in) {
			retval = setup_req (dev->gadget->ep0, dev->req, len);
			if (retval == 0) {
				dev->state = STATE_DEV_CONNECTED;
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				if (copy_from_user (dev->req->buf, buf, len))
					retval = -EFAULT;
				else {
					/* short reply: close with a ZLP */
					if (len < dev->setup_wLength)
						dev->req->zero = 1;
					retval = usb_ep_queue (
						dev->gadget->ep0, dev->req,
						GFP_KERNEL);
				}
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				if (retval < 0) {
					clean_req (dev->gadget->ep0, dev->req);
				} else
					retval = len;

				return retval;
			}

		/* can stall some OUT transfers */
		} else if (dev->setup_can_stall) {
			VDEBUG(dev, "ep0out stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;

		} else {
			DBG(dev, "bogus ep0out stall!\n");
		}
	} else
		DBG (dev, "fail %s, state %d\n", __func__, dev->state);

	return retval;
}
/* fasync() on ep0: arm/disarm SIGIO delivery for queued events. */
static int
ep0_fasync (int f, struct file *fd, int on)
{
	struct dev_data *dev = fd->private_data;

	// caller must F_SETOWN before signal delivery happens
	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
	return fasync_helper (f, fd, on, &dev->fasync);
}
static struct usb_gadget_driver gadgetfs_driver;
/*
 * release() for the ep0 file: closing ep0 shuts the whole gadget down.
 * Unregisters the gadget driver (if bound), frees the descriptor blob,
 * and drops the reference taken at open time.
 */
static int
dev_release (struct inode *inode, struct file *fd)
{
	struct dev_data *dev = fd->private_data;

	/* closing ep0 === shutdown all */

	if (dev->gadget_registered) {
		usb_gadget_unregister_driver (&gadgetfs_driver);
		dev->gadget_registered = false;
	}

	/* at this point "good" hardware has disconnected the
	 * device from USB; the host won't see it any more.
	 * alternatively, all host requests will time out.
	 */

	kfree (dev->buf);
	dev->buf = NULL;

	/* other endpoints were all decoupled from this device */
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_DISABLED;	/* allows a fresh open */
	spin_unlock_irq(&dev->lock);

	put_dev (dev);
	return 0;
}
/*
 * poll() on ep0: EPOLLOUT while a writable control request is pending,
 * EPOLLIN while events are queued, EPOLLHUP once after a setup abort.
 */
static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
	struct dev_data *dev = fd->private_data;
	__poll_t events = 0;

	if (dev->state <= STATE_DEV_OPENED)
		return DEFAULT_POLLMASK;

	poll_wait(fd, &dev->wait, wait);

	spin_lock_irq(&dev->lock);

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		events = EPOLLHUP;
	} else if (dev->state == STATE_DEV_SETUP) {
		if (dev->setup_in || dev->setup_can_stall)
			events = EPOLLOUT;
	} else if (dev->ev_next != 0) {
		events = EPOLLIN;
	}

	spin_unlock_irq(&dev->lock);
	return events;
}
/*
 * ioctl() on ep0: pass anything we don't understand to the UDC's own
 * ioctl hook, if it has one.  dev->udc_usage keeps the UDC alive while
 * dev->lock is dropped around the call.
 */
static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
	struct dev_data		*dev = fd->private_data;
	struct usb_gadget	*gadget = dev->gadget;
	long ret = -ENOTTY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_OPENED ||
			dev->state == STATE_DEV_UNBOUND) {
		/* Not bound to a UDC */
	} else if (gadget->ops->ioctl) {
		++dev->udc_usage;
		spin_unlock_irq(&dev->lock);

		ret = gadget->ops->ioctl (gadget, code, value);

		spin_lock_irq(&dev->lock);
		--dev->udc_usage;
	}
	spin_unlock_irq(&dev->lock);

	return ret;
}
/*----------------------------------------------------------------------*/
/* The in-kernel gadget driver handles most ep0 issues, in particular
* enumerating the single configuration (as provided from user space).
*
* Unrecognized ep0 requests may be handled in user space.
*/
static void make_qualifier (struct dev_data *dev)
{
struct usb_qualifier_descriptor qual;
struct usb_device_descriptor *desc;
qual.bLength = sizeof qual;
qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
qual.bcdUSB = cpu_to_le16 (0x0200);
desc = dev->dev;
qual.bDeviceClass = desc->bDeviceClass;
qual.bDeviceSubClass = desc->bDeviceSubClass;
qual.bDeviceProtocol = desc->bDeviceProtocol;
/* assumes ep0 uses the same value for both speeds ... */
qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
qual.bNumConfigurations = 1;
qual.bRESERVED = 0;
memcpy (dev->rbuf, &qual, sizeof qual);
}
/*
 * Stage the requested configuration descriptor in dev->req->buf and
 * return its total length, or -EINVAL for an unknown config index.
 * @type is USB_DT_CONFIG or USB_DT_OTHER_SPEED_CONFIG.
 */
static int
config_buf (struct dev_data *dev, u8 type, unsigned index)
{
	struct usb_config_descriptor *config;
	int use_hs = 0;

	/* only one configuration */
	if (index > 0)
		return -EINVAL;

	if (gadget_is_dualspeed(dev->gadget)) {
		use_hs = (dev->gadget->speed == USB_SPEED_HIGH);
		/* "other speed" describes the speed we are NOT running at */
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			use_hs = !use_hs;
	}

	config = use_hs ? dev->hs_config : dev->config;
	dev->req->buf = config;
	/* patch bDescriptorType in place (CONFIG vs OTHER_SPEED_CONFIG) */
	((u8 *)dev->req->buf) [1] = type;
	return le16_to_cpu(config->wTotalLength);
}
/*
 * ep0 setup dispatch, called by the UDC in irq context (hence the
 * non-irqsave spin_lock).  Standard GET_DESCRIPTOR / SET_CONFIGURATION
 * requests are answered here; anything else is delegated to the ep0
 * reader as a GADGETFS_SETUP event when one is attached, else stalled.
 * Returns 0/positive on success, negative errno to stall.
 */
static int
gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct dev_data			*dev = get_gadget_data (gadget);
	struct usb_request		*req = dev->req;
	int				value = -EOPNOTSUPP;
	struct usb_gadgetfs_event	*event;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);

	/* clamp oversized IN requests to our buffer; reject oversized OUT */
	if (w_length > RBUF_SIZE) {
		if (ctrl->bRequestType & USB_DIR_IN) {
			/* Cast away the const, we are going to overwrite on purpose. */
			__le16 *temp = (__le16 *)&ctrl->wLength;

			*temp = cpu_to_le16(RBUF_SIZE);
			w_length = RBUF_SIZE;
		} else {
			return value;
		}
	}

	spin_lock (&dev->lock);
	dev->setup_abort = 0;
	if (dev->state == STATE_DEV_UNCONNECTED) {
		if (gadget_is_dualspeed(gadget)
				&& gadget->speed == USB_SPEED_HIGH
				&& dev->hs_config == NULL) {
			spin_unlock(&dev->lock);
			ERROR (dev, "no high speed config??\n");
			return -EINVAL;
		}

		dev->state = STATE_DEV_CONNECTED;

		INFO (dev, "connected\n");
		event = next_event (dev, GADGETFS_CONNECT);
		event->u.speed = gadget->speed;
		ep0_readable (dev);

	/* host may have given up waiting for response.  we can miss control
	 * requests handled lower down (device/endpoint status and features);
	 * then ep0_{read,write} will report the wrong status. controller
	 * driver will have aborted pending i/o.
	 */
	} else if (dev->state == STATE_DEV_SETUP)
		dev->setup_abort = 1;

	req->buf = dev->rbuf;
	req->context = NULL;
	switch (ctrl->bRequest) {

	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unrecognized;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			value = min (w_length, (u16) sizeof *dev->dev);
			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
			req->buf = dev->dev;
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!dev->hs_config)
				break;
			value = min (w_length, (u16)
				sizeof (struct usb_qualifier_descriptor));
			make_qualifier (dev);
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
		case USB_DT_CONFIG:
			value = config_buf (dev,
					w_value >> 8,
					w_value & 0xff);
			if (value >= 0)
				value = min (w_length, (u16) value);
			break;
		case USB_DT_STRING:
			/* strings are always delegated to userspace */
			goto unrecognized;

		default:		// all others are errors
			break;
		}
		break;

	/* currently one config, two speeds */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unrecognized;
		if (0 == (u8) w_value) {
			value = 0;
			dev->current_config = 0;
			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
			// user mode expected to disable endpoints
		} else {
			u8	config, power;

			if (gadget_is_dualspeed(gadget)
					&& gadget->speed == USB_SPEED_HIGH) {
				config = dev->hs_config->bConfigurationValue;
				power = dev->hs_config->bMaxPower;
			} else {
				config = dev->config->bConfigurationValue;
				power = dev->config->bMaxPower;
			}

			if (config == (u8) w_value) {
				value = 0;
				dev->current_config = config;
				usb_gadget_vbus_draw(gadget, 2 * power);
			}
		}

		/* report SET_CONFIGURATION like any other control request,
		 * except that usermode may not stall this.  the next
		 * request mustn't be allowed start until this finishes:
		 * endpoints and threads set up, etc.
		 *
		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
		 * has bad/racey automagic that prevents synchronizing here.
		 * even kernel mode drivers often miss them.
		 */
		if (value == 0) {
			INFO (dev, "configuration #%d\n", dev->current_config);
			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
			if (dev->usermode_setup) {
				dev->setup_can_stall = 0;
				goto delegate;
			}
		}
		break;

#ifndef	CONFIG_USB_PXA25X
	/* PXA automagically handles this request too */
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != 0x80)
			goto unrecognized;
		*(u8 *)req->buf = dev->current_config;
		value = min (w_length, (u16) 1);
		break;
#endif

	default:
unrecognized:
		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
			dev->usermode_setup ? "delegate" : "fail",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, le16_to_cpu(ctrl->wIndex), w_length);

		/* if there's an ep0 reader, don't stall */
		if (dev->usermode_setup) {
			dev->setup_can_stall = 1;
delegate:
			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
						? 1 : 0;
			dev->setup_wLength = w_length;
			dev->setup_out_ready = 0;
			dev->setup_out_error = 0;

			/* read DATA stage for OUT right away */
			if (unlikely (!dev->setup_in && w_length)) {
				value = setup_req (gadget->ep0, dev->req,
							w_length);
				if (value < 0)
					break;

				++dev->udc_usage;
				spin_unlock (&dev->lock);
				value = usb_ep_queue (gadget->ep0, dev->req,
							GFP_KERNEL);
				spin_lock (&dev->lock);
				--dev->udc_usage;
				if (value < 0) {
					clean_req (gadget->ep0, dev->req);
					break;
				}

				/* we can't currently stall these */
				dev->setup_can_stall = 0;
			}

			/* state changes when reader collects event */
			event = next_event (dev, GADGETFS_SETUP);
			event->u.setup = *ctrl;
			ep0_readable (dev);
			spin_unlock (&dev->lock);
			return 0;
		}
	}

	/* proceed with data transfer and status phases? */
	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
		req->length = value;
		req->zero = value < w_length;

		++dev->udc_usage;
		spin_unlock (&dev->lock);
		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
		spin_lock(&dev->lock);
		--dev->udc_usage;
		spin_unlock(&dev->lock);
		if (value < 0) {
			DBG (dev, "ep_queue --> %d\n", value);
			req->status = 0;
		}
		return value;
	}

	/* device stalls when value < 0 */
	spin_unlock (&dev->lock);
	return value;
}
/*
 * Tear down all per-endpoint files: unlink each ep_data from the list
 * and filesystem, disable the hardware endpoint, and drop references.
 * dev->lock is released while sleeping operations (mutex, dcache) run,
 * then retaken to fetch the next list entry.
 */
static void destroy_ep_files (struct dev_data *dev)
{
	DBG (dev, "%s %d\n", __func__, dev->state);

	/* dev->state must prevent interference */
	spin_lock_irq (&dev->lock);
	while (!list_empty(&dev->epfiles)) {
		struct ep_data	*ep;
		struct inode	*parent;
		struct dentry	*dentry;

		/* break link to FS */
		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
		list_del_init (&ep->epfiles);
		spin_unlock_irq (&dev->lock);

		dentry = ep->dentry;
		ep->dentry = NULL;
		parent = d_inode(dentry->d_parent);

		/* break link to controller */
		mutex_lock(&ep->lock);
		if (ep->state == STATE_EP_ENABLED)
			(void) usb_ep_disable (ep->ep);
		ep->state = STATE_EP_UNBOUND;
		usb_ep_free_request (ep->ep, ep->req);
		ep->ep = NULL;
		mutex_unlock(&ep->lock);

		wake_up (&ep->wait);
		put_ep (ep);

		/* break link to dcache */
		inode_lock(parent);
		d_delete (dentry);
		dput (dentry);
		inode_unlock(parent);

		spin_lock_irq (&dev->lock);
	}
	spin_unlock_irq (&dev->lock);
}
static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
void *data, const struct file_operations *fops);
/*
 * Create one gadgetfs file per hardware endpoint of the bound gadget,
 * each backed by a freshly allocated ep_data with a preallocated
 * request.  On any failure the partial state is unwound via the goto
 * chain plus destroy_ep_files().  Returns 0 or -ENOMEM.
 */
static int activate_ep_files (struct dev_data *dev)
{
	struct usb_ep	*ep;
	struct ep_data	*data;

	gadget_for_each_ep (ep, dev->gadget) {

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto enomem0;
		data->state = STATE_EP_DISABLED;
		mutex_init(&data->lock);
		init_waitqueue_head (&data->wait);

		strncpy (data->name, ep->name, sizeof (data->name) - 1);
		refcount_set (&data->count, 1);
		data->dev = dev;
		get_dev (dev);

		data->ep = ep;
		ep->driver_data = data;

		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
		if (!data->req)
			goto enomem1;

		data->dentry = gadgetfs_create_file (dev->sb, data->name,
				data, &ep_io_operations);
		if (!data->dentry)
			goto enomem2;
		list_add_tail (&data->epfiles, &dev->epfiles);
	}
	return 0;

enomem2:
	usb_ep_free_request (ep, data->req);
enomem1:
	put_dev (dev);
	kfree (data);
enomem0:
	DBG (dev, "%s enomem\n", __func__);
	/* also reap any endpoints set up on earlier iterations */
	destroy_ep_files (dev);
	return -ENOMEM;
}
/*
 * Gadget-driver unbind: mark the device unbound, wait for any code that
 * pinned the UDC (dev->udc_usage) to drain, then free endpoint files
 * and the ep0 request, and drop the bind-time reference.
 */
static void
gadgetfs_unbind (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);

	DBG (dev, "%s\n", __func__);

	spin_lock_irq (&dev->lock);
	dev->state = STATE_DEV_UNBOUND;
	while (dev->udc_usage > 0) {
		/* poll until in-flight UDC calls finish */
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq (&dev->lock);

	destroy_ep_files (dev);
	gadget->ep0->driver_data = NULL;
	set_gadget_data (gadget, NULL);

	/* we've already been disconnected ... no i/o is active */
	if (dev->req)
		usb_ep_free_request (gadget->ep0, dev->req);
	DBG (dev, "%s done\n", __func__);
	put_dev (dev);
}
static struct dev_data *the_device;
/*
 * Gadget-driver bind: attach the singleton dev to the UDC named by
 * CHIP, preallocate the ep0 request, and create the per-endpoint files.
 * Failures unwind through gadgetfs_unbind().
 */
static int gadgetfs_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct dev_data		*dev = the_device;

	if (!dev)
		return -ESRCH;
	if (0 != strcmp (CHIP, gadget->name)) {
		pr_err("%s expected %s controller not %s\n",
			shortname, CHIP, gadget->name);
		return -ENODEV;
	}

	set_gadget_data (gadget, dev);
	dev->gadget = gadget;
	gadget->ep0->driver_data = dev;

	/* preallocate control response and buffer */
	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
	if (!dev->req)
		goto enomem;
	dev->req->context = NULL;
	dev->req->complete = epio_complete;

	if (activate_ep_files (dev) < 0)
		goto enomem;

	INFO (dev, "bound to %s driver\n", gadget->name);
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_UNCONNECTED;
	spin_unlock_irq(&dev->lock);
	get_dev (dev);	/* dropped by gadgetfs_unbind() */
	return 0;

enomem:
	gadgetfs_unbind (gadget);
	return -ENOMEM;
}
/*
 * Gadget-driver disconnect (also wired to .reset): on a real state
 * change, queue a GADGETFS_DISCONNECT event and wake the ep0 reader.
 */
static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
	struct dev_data *dev = get_gadget_data (gadget);
	unsigned long flags;

	spin_lock_irqsave (&dev->lock, flags);
	if (dev->state != STATE_DEV_UNCONNECTED) {
		dev->state = STATE_DEV_UNCONNECTED;
		INFO (dev, "disconnected\n");
		next_event (dev, GADGETFS_DISCONNECT);
		ep0_readable (dev);
	}
	spin_unlock_irqrestore (&dev->lock, flags);
}
/*
 * Gadget-driver suspend: report GADGETFS_SUSPEND to userspace, but only
 * from states where the host could legitimately suspend us.
 */
static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
	struct dev_data *dev = get_gadget_data (gadget);
	unsigned long flags;

	INFO (dev, "suspended from state %d\n", dev->state);
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state == STATE_DEV_SETUP ||	/* VERY odd... host died?? */
	    dev->state == STATE_DEV_CONNECTED ||
	    dev->state == STATE_DEV_UNCONNECTED) {
		next_event (dev, GADGETFS_SUSPEND);
		ep0_readable (dev);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
/* the single gadget driver instance, registered from dev_config()
 * once userspace has written valid descriptors */
static struct usb_gadget_driver gadgetfs_driver = {
	.function	= (char *) driver_desc,
	.bind		= gadgetfs_bind,
	.unbind		= gadgetfs_unbind,
	.setup		= gadgetfs_setup,
	.reset		= gadgetfs_disconnect,	/* bus reset handled as disconnect */
	.disconnect	= gadgetfs_disconnect,
	.suspend	= gadgetfs_suspend,

	.driver	= {
		.name		= shortname,
	},
};
/*----------------------------------------------------------------------*/
/* DEVICE INITIALIZATION
*
* fd = open ("/dev/gadget/$CHIP", O_RDWR)
* status = write (fd, descriptors, sizeof descriptors)
*
* That write establishes the device configuration, so the kernel can
* bind to the controller ... guaranteeing it can handle enumeration
* at all necessary speeds. Descriptor order is:
*
* . message tag (u32, host order) ... for now, must be zero; it
* would change to support features like multi-config devices
* . full/low speed config ... all wTotalLength bytes (with interface,
* class, altsetting, endpoint, and other descriptors)
* . high speed config ... all descriptors, for high speed operation;
* this one's optional except for high-speed hardware
* . device descriptor
*
* Endpoints are not yet enabled. Drivers must wait until device
* configuration and interface altsetting changes create
* the need to configure (or unconfigure) them.
*
* After initialization, the device stays active for as long as that
* $CHIP file is open. Events must then be read from that descriptor,
* such as configuration notifications.
*/
/*
 * Sanity-check a userspace-supplied configuration descriptor header.
 * Returns nonzero when it is plausible: correct type/length fields, a
 * nonzero bConfigurationValue, the mandatory ATT_ONE bit set, and
 * remote wakeup not advertised.
 */
static int is_valid_config(struct usb_config_descriptor *config,
		unsigned int total)
{
	if (config->bDescriptorType != USB_DT_CONFIG)
		return 0;
	if (config->bLength != USB_DT_CONFIG_SIZE)
		return 0;
	if (total < USB_DT_CONFIG_SIZE)
		return 0;
	if (config->bConfigurationValue == 0)
		return 0;
	if ((config->bmAttributes & USB_CONFIG_ATT_ONE) == 0)
		return 0;
	if ((config->bmAttributes & USB_CONFIG_ATT_WAKEUP) != 0)
		return 0;
	return 1;
	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
	/* FIXME check lengths: walk to end */
}
/*
 * write() on ep0.  Before descriptors are set (state <= OPENED) this
 * parses the "tag + config [+ hs config] + device descriptor" blob and
 * registers the gadget driver; afterwards it dispatches to ep0_write()
 * for control-request data (note: ep0_write() then runs under
 * dev->lock).  Returns len on success or a negative errno.
 */
static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			value, length = len;
	unsigned		total;
	u32			tag;
	char			*kbuf;

	spin_lock_irq(&dev->lock);
	if (dev->state > STATE_DEV_OPENED) {
		/* already configured: this is ep0 control-request data */
		value = ep0_write(fd, buf, len, ptr);
		spin_unlock_irq(&dev->lock);
		return value;
	}
	spin_unlock_irq(&dev->lock);

	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
	    (len > PAGE_SIZE * 4))
		return -EINVAL;

	/* we might need to change message format someday */
	if (copy_from_user (&tag, buf, 4))
		return -EFAULT;
	if (tag != 0)
		return -EINVAL;
	buf += 4;
	length -= 4;

	kbuf = memdup_user(buf, length);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	spin_lock_irq (&dev->lock);
	value = -EINVAL;
	if (dev->buf) {
		/* descriptors were already written once */
		spin_unlock_irq(&dev->lock);
		kfree(kbuf);
		return value;
	}
	dev->buf = kbuf;

	/* full or low speed config */
	dev->config = (void *) kbuf;
	total = le16_to_cpu(dev->config->wTotalLength);
	if (!is_valid_config(dev->config, total) ||
			total > length - USB_DT_DEVICE_SIZE)
		goto fail;
	kbuf += total;
	length -= total;

	/* optional high speed config */
	if (kbuf [1] == USB_DT_CONFIG) {
		dev->hs_config = (void *) kbuf;
		total = le16_to_cpu(dev->hs_config->wTotalLength);
		if (!is_valid_config(dev->hs_config, total) ||
				total > length - USB_DT_DEVICE_SIZE)
			goto fail;
		kbuf += total;
		length -= total;
	} else {
		dev->hs_config = NULL;
	}

	/* could support multiple configs, using another encoding! */

	/* device descriptor (tweaked for paranoia) */
	if (length != USB_DT_DEVICE_SIZE)
		goto fail;
	dev->dev = (void *)kbuf;
	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
			|| dev->dev->bDescriptorType != USB_DT_DEVICE
			|| dev->dev->bNumConfigurations != 1)
		goto fail;
	dev->dev->bcdUSB = cpu_to_le16 (0x0200);

	/* triggers gadgetfs_bind(); then we can enumerate. */
	spin_unlock_irq (&dev->lock);
	if (dev->hs_config)
		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
	else
		gadgetfs_driver.max_speed = USB_SPEED_FULL;

	value = usb_gadget_register_driver(&gadgetfs_driver);
	if (value != 0) {
		spin_lock_irq(&dev->lock);
		goto fail;
	} else {
		/* at this point "good" hardware has for the first time
		 * let the USB the host see us.  alternatively, if users
		 * unplug/replug that will clear all the error state.
		 *
		 * note:  everything running before here was guaranteed
		 * to choke driver model style diagnostics.  from here
		 * on, they can work ... except in cleanup paths that
		 * kick in after the ep0 descriptor is closed.
		 */
		value = len;
		dev->gadget_registered = true;
	}
	return value;

fail:
	dev->config = NULL;
	dev->hs_config = NULL;
	dev->dev = NULL;
	spin_unlock_irq (&dev->lock);
	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
	kfree (dev->buf);
	dev->buf = NULL;
	return value;
}
/*
 * Open the ep0 (controller) file.  Only one opener at a time: succeeds
 * only from STATE_DEV_DISABLED, resetting the event queue and taking a
 * device reference that dev_release() drops.
 */
static int
gadget_dev_open (struct inode *inode, struct file *fd)
{
	struct dev_data *dev = inode->i_private;
	int ret = -EBUSY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_DISABLED) {
		dev->ev_next = 0;
		dev->state = STATE_DEV_OPENED;
		get_dev (dev);
		fd->private_data = dev;
		ret = 0;
	}
	spin_unlock_irq(&dev->lock);
	return ret;
}
/* file operations for the ep0 (controller) file: the first write()
 * supplies descriptors (dev_config), later reads/writes carry events
 * and control-request data */
static const struct file_operations ep0_operations = {
	.llseek =	no_llseek,

	.open =		gadget_dev_open,
	.read =		ep0_read,
	.write =	dev_config,
	.fasync =	ep0_fasync,
	.poll =		ep0_poll,
	.unlocked_ioctl = gadget_dev_ioctl,
	.release =	dev_release,
};
/*----------------------------------------------------------------------*/
/* FILESYSTEM AND SUPERBLOCK OPERATIONS
*
* Mounting the filesystem creates a controller file, used first for
* device configuration then later for event monitoring.
*/
/* FIXME PAM etc could set this security policy without mount options
* if epfiles inherited ownership and permissons from ep0 ...
*/
/* ownership and permission bits applied to files gadgetfs creates;
 * settable at module load time (and 0644 in sysfs) */
static unsigned default_uid;
static unsigned default_gid;
static unsigned default_perm = S_IRUSR | S_IWUSR;

module_param (default_uid, uint, 0644);
module_param (default_gid, uint, 0644);
module_param (default_perm, uint, 0644);
static struct inode *
gadgetfs_make_inode (struct super_block *sb,
void *data, const struct file_operations *fops,
int mode)
{
struct inode *inode = new_inode (sb);
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, default_uid);
inode->i_gid = make_kgid(&init_user_ns, default_gid);
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
inode->i_fop = fops;
}
return inode;
}
/* creates in fs root directory, so non-renamable and non-linkable.
* so inode and dentry are paired, until device reconfig.
*/
/* creates in fs root directory, so non-renamable and non-linkable.
 * so inode and dentry are paired, until device reconfig.
 */
static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops)
{
	struct inode	*inode;
	struct dentry	*dentry = d_alloc_name(sb->s_root, name);

	if (!dentry)
		return NULL;

	inode = gadgetfs_make_inode (sb, data, fops,
			S_IFREG | (default_perm & S_IRWXUGO));
	if (!inode) {
		dput(dentry);
		return NULL;
	}
	d_add (dentry, inode);
	return dentry;
}
/* minimal superblock ops: library statfs, inodes freed on last unlink */
static const struct super_operations gadget_fs_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};
/*
 * Populate the gadgetfs superblock: root directory plus one ep0 file
 * named after the UDC (CHIP).  sb_mutex serializes against
 * gadgetfs_kill_sb() and enforces a single mount at a time
 * (the_device is a singleton).
 */
static int
gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
{
	struct inode	*inode;
	struct dev_data	*dev;
	int		rc;

	mutex_lock(&sb_mutex);

	if (the_device) {
		/* only one gadgetfs mount may exist */
		rc = -ESRCH;
		goto Done;
	}

	CHIP = usb_get_gadget_udc_name();
	if (!CHIP) {
		rc = -ENODEV;
		goto Done;
	}

	/* superblock */
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = GADGETFS_MAGIC;
	sb->s_op = &gadget_fs_operations;
	sb->s_time_gran = 1;

	/* root inode */
	inode = gadgetfs_make_inode (sb,
			NULL, &simple_dir_operations,
			S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto Enomem;
	inode->i_op = &simple_dir_inode_operations;
	if (!(sb->s_root = d_make_root (inode)))
		goto Enomem;

	/* the ep0 file is named after the controller we expect;
	 * user mode code can use it for sanity checks, like we do.
	 */
	dev = dev_new ();
	if (!dev)
		goto Enomem;

	dev->sb = sb;
	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
	if (!dev->dentry) {
		put_dev(dev);
		goto Enomem;
	}

	/* other endpoint files are available after hardware setup,
	 * from binding to a controller.
	 */
	the_device = dev;
	rc = 0;
	goto Done;

Enomem:
	kfree(CHIP);
	CHIP = NULL;
	rc = -ENOMEM;

Done:
	mutex_unlock(&sb_mutex);
	return rc;
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
/* "mount -t gadgetfs path /dev/gadget" ends up here; single shared
 * superblock, filled by gadgetfs_fill_super() */
static int gadgetfs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, gadgetfs_fill_super);
}
static const struct fs_context_operations gadgetfs_context_ops = {
	.get_tree	= gadgetfs_get_tree,
};

/* new-style mount API entry point: no options to parse, just wire up ops */
static int gadgetfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &gadgetfs_context_ops;
	return 0;
}
/*
 * Unmount: tear down the superblock, release the singleton device
 * reference, and free the cached UDC name so a later mount can rebind.
 */
static void
gadgetfs_kill_sb (struct super_block *sb)
{
	mutex_lock(&sb_mutex);
	kill_litter_super (sb);
	if (the_device) {
		put_dev (the_device);
		the_device = NULL;
	}
	kfree(CHIP);
	CHIP = NULL;
	mutex_unlock(&sb_mutex);
}
/*----------------------------------------------------------------------*/
/* filesystem type registration; "gadgetfs" alias enables auto-loading */
static struct file_system_type gadgetfs_type = {
	.owner		= THIS_MODULE,
	.name		= shortname,
	.init_fs_context = gadgetfs_init_fs_context,
	.kill_sb	= gadgetfs_kill_sb,
};
MODULE_ALIAS_FS("gadgetfs");
/*----------------------------------------------------------------------*/
/* module init: just register the filesystem type */
static int __init gadgetfs_init (void)
{
	int status;

	status = register_filesystem (&gadgetfs_type);
	if (status == 0)
		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
			shortname, driver_desc);
	return status;
}
module_init (gadgetfs_init);
/* module exit: unregister the filesystem (mounts hold the module busy) */
static void __exit gadgetfs_cleanup (void)
{
	pr_debug ("unregister %s\n", shortname);
	unregister_filesystem (&gadgetfs_type);
}
module_exit (gadgetfs_cleanup);
| linux-master | drivers/usb/gadget/legacy/inode.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* acm_ms.c -- Composite driver, with ACM and mass storage support
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Author: David Brownell
* Modified: Klaus Schwarzkopf <[email protected]>
*
* Heavily based on multi.c and cdc2.c
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "u_serial.h"
#define DRIVER_DESC "Composite Gadget (ACM + MS)"
#define DRIVER_VERSION "2011/10/10"
/*-------------------------------------------------------------------------*/
/*
* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#define ACM_MS_VENDOR_NUM 0x1d6b /* Linux Foundation */
#define ACM_MS_PRODUCT_NUM 0x0106 /* Composite Gadget: ACM + MS*/
#include "f_mass_storage.h"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();

/* device descriptor template; DYNAMIC fields are filled by the
 * composite framework at bind/enumeration time */
static struct usb_device_descriptor device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	/* .bcdUSB = DYNAMIC */

	.bDeviceClass =		USB_CLASS_MISC /* 0xEF */,
	.bDeviceSubClass =	2,
	.bDeviceProtocol =	1,	/* Interface Association Descriptor */

	/* .bMaxPacketSize0 = f(hardware) */

	/* Vendor and product id can be overridden by module parameters.  */
	.idVendor =		cpu_to_le16(ACM_MS_VENDOR_NUM),
	.idProduct =		cpu_to_le16(ACM_MS_PRODUCT_NUM),
	/* .bcdDevice = f(hardware) */
	/* .iManufacturer = DYNAMIC */
	/* .iProduct = DYNAMIC */
	/* NO SERIAL NUMBER */
	/*.bNumConfigurations =	DYNAMIC*/
};

/* filled with an OTG descriptor when the UDC is OTG-capable */
static const struct usb_descriptor_header *otg_desc[2];

/* string IDs are assigned dynamically */

static struct usb_string strings_dev[] = {
	[USB_GADGET_MANUFACTURER_IDX].s = "",
	[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
	[USB_GADGET_SERIAL_IDX].s = "",
	{  } /* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
	.language	= 0x0409,	/* en-us */
	.strings	= strings_dev,
};

static struct usb_gadget_strings *dev_strings[] = {
	&stringtab_dev,
	NULL,
};

/****************************** Configurations ******************************/

/* mass-storage function configuration; "stall" enabled by default */
static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;

#else

/*
 * Number of buffers we will use.
 * 2 is usually enough for good buffering pipeline
 */
#define fsg_num_buffers	CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS

#endif /* CONFIG_USB_GADGET_DEBUG_FILES */

FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);

/*-------------------------------------------------------------------------*/
/* function instances/handles shared between bind and config callbacks */
static struct usb_function *f_acm;
static struct usb_function_instance *f_acm_inst;

static struct usb_function_instance *fi_msg;
static struct usb_function *f_msg;
/*
* We _always_ have both ACM and mass storage functions.
*/
/*
 * Configuration callback: instantiate the ACM and mass-storage
 * functions and add both to the configuration.  On any failure the
 * already-acquired pieces are released in reverse order.
 */
static int acm_ms_do_config(struct usb_configuration *c)
{
	int	status;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_acm = usb_get_function(f_acm_inst);
	if (IS_ERR(f_acm))
		return PTR_ERR(f_acm);

	f_msg = usb_get_function(fi_msg);
	if (IS_ERR(f_msg)) {
		status = PTR_ERR(f_msg);
		goto put_acm;
	}

	status = usb_add_function(c, f_acm);
	if (status < 0)
		goto put_msg;

	status = usb_add_function(c, f_msg);
	if (status)
		goto remove_acm;

	return 0;
remove_acm:
	usb_remove_function(c, f_acm);
put_msg:
	usb_put_function(f_msg);
put_acm:
	usb_put_function(f_acm);
	return status;
}
/* The one and only configuration; iConfiguration is assigned at bind time. */
static struct usb_configuration acm_ms_config_driver = {
.label = DRIVER_DESC,
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
/*
 * acm_ms_bind - composite bind callback.
 * Acquires the ACM and mass-storage function instances, configures the
 * mass-storage backing store (buffers, cdev, LUNs, inquiry string),
 * allocates string descriptor IDs and registers the configuration.
 * The error labels unwind in exact reverse order of acquisition.
 */
static int acm_ms_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct fsg_opts *opts;
struct fsg_config config;
int status;
f_acm_inst = usb_get_function_instance("acm");
if (IS_ERR(f_acm_inst))
return PTR_ERR(f_acm_inst);
fi_msg = usb_get_function_instance("mass_storage");
if (IS_ERR(fi_msg)) {
status = PTR_ERR(fi_msg);
goto fail_get_msg;
}
/* set up mass storage function */
fsg_config_from_params(&config, &fsg_mod_data, fsg_num_buffers);
opts = fsg_opts_from_func_inst(fi_msg);
opts->no_configfs = true;
status = fsg_common_set_num_buffers(opts->common, fsg_num_buffers);
if (status)
goto fail;
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
fsg_common_set_sysfs(opts->common, true);
status = fsg_common_create_luns(opts->common, &config);
if (status)
goto fail_set_cdev;
fsg_common_set_inquiry_string(opts->common, config.vendor_name,
config.product_name);
/*
* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
goto fail_string_ids;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* OTG controllers need an OTG descriptor; allocate it once */
if (gadget_is_otg(gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc) {
status = -ENOMEM;
goto fail_string_ids;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
/* register our configuration */
status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
if (status < 0)
goto fail_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
DRIVER_DESC);
return 0;
/* error recovery */
fail_otg_desc:
kfree(otg_desc[0]);
otg_desc[0] = NULL;
fail_string_ids:
fsg_common_remove_luns(opts->common);
fail_set_cdev:
fsg_common_free_buffers(opts->common);
fail:
usb_put_function_instance(fi_msg);
fail_get_msg:
usb_put_function_instance(f_acm_inst);
return status;
}
/*
 * acm_ms_unbind - release everything acquired at bind/config time.
 * Each function handle is dropped before its owning instance; the OTG
 * descriptor allocated in acm_ms_bind() is freed last.
 */
static int acm_ms_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_msg);
usb_put_function_instance(fi_msg);
usb_put_function(f_acm);
usb_put_function_instance(f_acm_inst);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
/* Composite driver glue: descriptor, strings and bind/unbind callbacks. */
static struct usb_composite_driver acm_ms_driver = {
.name = "g_acm_ms",
.dev = &device_desc,
.max_speed = USB_SPEED_SUPER,
.strings = dev_strings,
.bind = acm_ms_bind,
.unbind = acm_ms_unbind,
};
module_usb_composite_driver(acm_ms_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Klaus Schwarzkopf <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/gadget/legacy/acm_ms.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* printer.c -- Printer gadget driver
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/g_printer.h>
USB_GADGET_COMPOSITE_OPTIONS();
#define DRIVER_DESC "Printer Gadget"
#define DRIVER_VERSION "2015 FEB 17"
static const char shortname [] = "printer";
#include "u_printer.h"
/*-------------------------------------------------------------------------*/
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
*/
#define PRINTER_VENDOR_NUM 0x0525 /* NetChip */
#define PRINTER_PRODUCT_NUM 0xa4a8 /* Linux-USB Printer Gadget */
/* Some systems will want different product identifiers published in the
* device descriptor, either numbers or strings or both. These string
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
/* Module parameters: serial number, IEEE 1284 PNP string, queue depth. */
module_param_named(iSerialNum, coverwrite.serial_number, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "1");
static char *iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
/* Number of requests to allocate per endpoint, not used for ep0. */
static unsigned qlen = 10;
module_param(qlen, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qlen, "The number of 8k buffers to use per endpoint");
#define QLEN qlen
/* Acquired in printer_bind() / printer_do_config() */
static struct usb_function_instance *fi_printer;
static struct usb_function *f_printer;
/*-------------------------------------------------------------------------*/
/*
* DESCRIPTORS ... most are static, but strings and (full) configuration
* descriptors are built on demand.
*/
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
.idVendor = cpu_to_le16(PRINTER_VENDOR_NUM),
.idProduct = cpu_to_le16(PRINTER_PRODUCT_NUM),
.bNumConfigurations = 1
};
static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
/* descriptors that are built on-demand */
static char product_desc [40] = DRIVER_DESC;
static char serial_num [40] = "1";
/* default PNP string used when the iPNPstring parameter is not given */
static char *pnp_string =
"MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;";
/* static strings, in UTF-8 */
static struct usb_string strings [] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = product_desc,
[USB_GADGET_SERIAL_IDX].s = serial_num,
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
static struct usb_configuration printer_cfg_driver = {
.label = "printer",
.bConfigurationValue = 1,
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
};
/*
 * Populate the configuration with the single printer function.
 * Endpoint bookkeeping is reset first so autoconfig starts from a
 * clean slate, and the gadget reports itself as self-powered.
 */
static int printer_do_config(struct usb_configuration *c)
{
	struct usb_gadget *gadget = c->cdev->gadget;
	int ret;

	usb_ep_autoconfig_reset(gadget);
	usb_gadget_set_selfpowered(gadget);

	if (gadget_is_otg(gadget)) {
		printer_cfg_driver.descriptors = otg_desc;
		printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_printer = usb_get_function(fi_printer);
	if (IS_ERR(f_printer))
		return PTR_ERR(f_printer);

	ret = usb_add_function(c, f_printer);
	if (ret < 0)
		usb_put_function(f_printer);

	return ret;
}
/*
 * printer_bind - composite bind callback.
 * Acquires the printer function instance, configures its minor number,
 * queue depth and PNP string, allocates string IDs and registers the
 * configuration.  On failure the function instance is released.
 */
static int printer_bind(struct usb_composite_dev *cdev)
{
struct f_printer_opts *opts;
int ret;
fi_printer = usb_get_function_instance("printer");
if (IS_ERR(fi_printer))
return PTR_ERR(fi_printer);
opts = container_of(fi_printer, struct f_printer_opts, func_inst);
opts->minor = 0;
opts->q_len = QLEN;
if (iPNPstring) {
/* module-supplied PNP string: duplicate so the function owns it */
opts->pnp_string = kstrdup(iPNPstring, GFP_KERNEL);
if (!opts->pnp_string) {
ret = -ENOMEM;
goto fail_put_func_inst;
}
opts->pnp_string_allocated = true;
/*
* we don't free this memory in case of error
* as printer cleanup func will do this for us
*/
} else {
opts->pnp_string = pnp_string;
}
ret = usb_string_ids_tab(cdev, strings);
if (ret < 0)
goto fail_put_func_inst;
device_desc.iManufacturer = strings[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings[USB_GADGET_PRODUCT_IDX].id;
device_desc.iSerialNumber = strings[USB_GADGET_SERIAL_IDX].id;
/* OTG controllers need an OTG descriptor; allocate it once */
if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
if (!usb_desc) {
ret = -ENOMEM;
goto fail_put_func_inst;
}
usb_otg_descriptor_init(cdev->gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
ret = usb_add_config(cdev, &printer_cfg_driver, printer_do_config);
if (ret)
goto fail_free_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
return ret;
fail_free_otg_desc:
kfree(otg_desc[0]);
otg_desc[0] = NULL;
fail_put_func_inst:
usb_put_function_instance(fi_printer);
return ret;
}
/*
 * printer_unbind - drop the function handle, then its instance, then the
 * lazily allocated OTG descriptor.
 */
static int printer_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_printer);
usb_put_function_instance(fi_printer);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
/* Composite driver glue: descriptor, strings and bind/unbind callbacks. */
static struct usb_composite_driver printer_driver = {
.name = shortname,
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = printer_bind,
.unbind = printer_unbind,
};
module_usb_composite_driver(printer_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Craig Nadler");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/printer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ncm.c -- NCM gadget driver
*
* Copyright (C) 2010 Nokia Corporation
* Contact: Yauheni Kaliuta <[email protected]>
*
* The driver borrows from ether.c which is:
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
*/
/* #define DEBUG */
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
#include "u_ether.h"
#include "u_ncm.h"
#define DRIVER_DESC "NCM Gadget"
/*-------------------------------------------------------------------------*/
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
* It's for devices with only CDC Ethernet configurations.
*/
#define CDC_VENDOR_NUM 0x0525 /* NetChip */
#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
/* Device descriptor for the CDC NCM gadget (communications class). */
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_COMM,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id defaults change according to what configs
* we support. (As does bNumConfigurations.) These values can
* also be overridden by module parameters.
*/
.idVendor = cpu_to_le16 (CDC_VENDOR_NUM),
.idProduct = cpu_to_le16 (CDC_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
/* NO SERIAL NUMBER */
.bNumConfigurations = 1,
};
/* otg_desc[0] is allocated lazily in gncm_bind() for OTG controllers */
static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
/* Acquired in gncm_bind() / ncm_do_config() */
static struct usb_function_instance *f_ncm_inst;
static struct usb_function *f_ncm;
/*-------------------------------------------------------------------------*/
/*
 * Attach the single NCM function to the configuration; on failure the
 * freshly acquired function handle is dropped again.
 */
static int ncm_do_config(struct usb_configuration *c)
{
	int ret;

	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_ncm = usb_get_function(f_ncm_inst);
	if (IS_ERR(f_ncm))
		return PTR_ERR(f_ncm);

	ret = usb_add_function(c, f_ncm);
	if (ret < 0) {
		usb_put_function(f_ncm);
		return ret;
	}

	return 0;
}
/* The one and only configuration; iConfiguration is assigned at bind time. */
static struct usb_configuration ncm_config_driver = {
/* .label = f(hardware) */
.label = "CDC Ethernet (NCM)",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
/*
 * gncm_bind - composite bind callback.
 * Acquires the NCM function instance, applies the ethernet module
 * parameters (qmult, host/dev MAC), allocates string IDs and registers
 * the configuration.  Error labels unwind in reverse order.
 */
static int gncm_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct f_ncm_opts *ncm_opts;
int status;
f_ncm_inst = usb_get_function_instance("ncm");
if (IS_ERR(f_ncm_inst))
return PTR_ERR(f_ncm_inst);
ncm_opts = container_of(f_ncm_inst, struct f_ncm_opts, func_inst);
gether_set_qmult(ncm_opts->net, qmult);
if (!gether_set_host_addr(ncm_opts->net, host_addr))
pr_info("using host ethernet address: %s", host_addr);
if (!gether_set_dev_addr(ncm_opts->net, dev_addr))
pr_info("using self ethernet address: %s", dev_addr);
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
goto fail;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* OTG controllers need an OTG descriptor; allocate it once */
if (gadget_is_otg(gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc) {
status = -ENOMEM;
goto fail;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
status = usb_add_config(cdev, &ncm_config_driver,
ncm_do_config);
if (status < 0)
goto fail1;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s\n", DRIVER_DESC);
return 0;
fail1:
kfree(otg_desc[0]);
otg_desc[0] = NULL;
fail:
usb_put_function_instance(f_ncm_inst);
return status;
}
/*
 * gncm_unbind - release the function handle and its instance.
 * Bind/config may have failed part-way, leaving error pointers behind,
 * hence the IS_ERR_OR_NULL guards before each put.
 */
static int gncm_unbind(struct usb_composite_dev *cdev)
{
if (!IS_ERR_OR_NULL(f_ncm))
usb_put_function(f_ncm);
if (!IS_ERR_OR_NULL(f_ncm_inst))
usb_put_function_instance(f_ncm_inst);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
/* Composite driver glue: descriptor, strings and bind/unbind callbacks. */
static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = gncm_bind,
.unbind = gncm_unbind,
};
module_usb_composite_driver(ncm_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Yauheni Kaliuta");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/ncm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* cdc2.c -- CDC Composite driver, with ECM and ACM support
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "u_ether.h"
#include "u_serial.h"
#include "u_ecm.h"
#define DRIVER_DESC "CDC Composite Gadget"
#define DRIVER_VERSION "King Kamehameha Day 2008"
/*-------------------------------------------------------------------------*/
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
* It's for devices with only this composite CDC configuration.
*/
#define CDC_VENDOR_NUM 0x0525 /* NetChip */
#define CDC_PRODUCT_NUM 0xa4aa /* CDC Composite: ECM + ACM */
USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
/*-------------------------------------------------------------------------*/
/* Device descriptor for the ECM + ACM composite (communications class). */
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_COMM,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id can be overridden by module parameters. */
.idVendor = cpu_to_le16(CDC_VENDOR_NUM),
.idProduct = cpu_to_le16(CDC_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
/* NO SERIAL NUMBER */
.bNumConfigurations = 1,
};
/* otg_desc[0] is allocated lazily in cdc_bind() for OTG controllers */
static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
/*-------------------------------------------------------------------------*/
/* Function instances/handles acquired in cdc_bind()/cdc_do_config() */
static struct usb_function *f_acm;
static struct usb_function_instance *fi_serial;
static struct usb_function *f_ecm;
static struct usb_function_instance *fi_ecm;
/*
* We _always_ have both CDC ECM and CDC ACM functions.
*/
/*
 * Wire both functions into the configuration: ECM first, then ACM.
 * Failures unwind whatever was acquired so far, in reverse order.
 */
static int cdc_do_config(struct usb_configuration *c)
{
	int ret;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	f_ecm = usb_get_function(fi_ecm);
	if (IS_ERR(f_ecm))
		return PTR_ERR(f_ecm);

	ret = usb_add_function(c, f_ecm);
	if (ret)
		goto put_ecm;

	f_acm = usb_get_function(fi_serial);
	if (IS_ERR(f_acm)) {
		ret = PTR_ERR(f_acm);
		goto remove_ecm;
	}

	ret = usb_add_function(c, f_acm);
	if (ret)
		goto put_acm;

	return 0;

put_acm:
	usb_put_function(f_acm);
remove_ecm:
	usb_remove_function(c, f_ecm);
put_ecm:
	usb_put_function(f_ecm);
	return ret;
}
/* The one and only configuration; iConfiguration is assigned at bind time. */
static struct usb_configuration cdc_config_driver = {
.label = "CDC Composite (ECM + ACM)",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
/*
 * cdc_bind - composite bind callback.
 * Verifies the controller can host full CDC ECM, acquires the ECM and
 * ACM function instances, applies ethernet module parameters, allocates
 * string IDs and registers the configuration.  Error labels unwind in
 * reverse order of acquisition.
 */
static int cdc_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct f_ecm_opts *ecm_opts;
int status;
if (!can_support_ecm(cdev->gadget)) {
dev_err(&gadget->dev, "controller '%s' not usable\n",
gadget->name);
return -EINVAL;
}
fi_ecm = usb_get_function_instance("ecm");
if (IS_ERR(fi_ecm))
return PTR_ERR(fi_ecm);
ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
gether_set_qmult(ecm_opts->net, qmult);
if (!gether_set_host_addr(ecm_opts->net, host_addr))
pr_info("using host ethernet address: %s", host_addr);
if (!gether_set_dev_addr(ecm_opts->net, dev_addr))
pr_info("using self ethernet address: %s", dev_addr);
fi_serial = usb_get_function_instance("acm");
if (IS_ERR(fi_serial)) {
status = PTR_ERR(fi_serial);
goto fail;
}
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
goto fail1;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* OTG controllers need an OTG descriptor; allocate it once */
if (gadget_is_otg(gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc) {
status = -ENOMEM;
goto fail1;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
/* register our configuration */
status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
if (status < 0)
goto fail2;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
DRIVER_DESC);
return 0;
fail2:
kfree(otg_desc[0]);
otg_desc[0] = NULL;
fail1:
usb_put_function_instance(fi_serial);
fail:
usb_put_function_instance(fi_ecm);
return status;
}
/*
 * cdc_unbind - release functions and instances acquired at bind time.
 * The ECM handles may hold error pointers if config setup failed
 * part-way, hence the IS_ERR_OR_NULL guards.
 */
static int cdc_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_acm);
usb_put_function_instance(fi_serial);
if (!IS_ERR_OR_NULL(f_ecm))
usb_put_function(f_ecm);
if (!IS_ERR_OR_NULL(fi_ecm))
usb_put_function_instance(fi_ecm);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
/* Composite driver glue: descriptor, strings and bind/unbind callbacks. */
static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = cdc_bind,
.unbind = cdc_unbind,
};
module_usb_composite_driver(cdc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/cdc2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* hid.c -- HID Composite driver
*
* Based on multi.c
*
* Copyright (C) 2010 Fabien Chouteau <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
#include <linux/usb/g_hid.h>
#define DRIVER_DESC "HID Gadget"
#define DRIVER_VERSION "2010/03/16"
#include "u_hid.h"
/*-------------------------------------------------------------------------*/
#define HIDG_VENDOR_NUM 0x0525 /* XXX NetChip */
#define HIDG_PRODUCT_NUM 0xa4ac /* Linux-USB HID gadget */
/*-------------------------------------------------------------------------*/
/*
 * Bookkeeping for one HID function: its configfs-style instance and
 * handle plus the platform-data descriptor it was created from.
 * Nodes live on hidg_func_list, populated by hidg_plat_driver_probe().
 */
struct hidg_func_node {
struct usb_function_instance *fi;
struct usb_function *f;
struct list_head node;
struct hidg_func_descriptor *func;
};
static LIST_HEAD(hidg_func_list);
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
/* .bDeviceClass = USB_CLASS_COMM, */
/* .bDeviceSubClass = 0, */
/* .bDeviceProtocol = 0, */
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
/* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id can be overridden by module parameters. */
.idVendor = cpu_to_le16(HIDG_VENDOR_NUM),
.idProduct = cpu_to_le16(HIDG_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
/* NO SERIAL NUMBER */
.bNumConfigurations = 1,
};
/* otg_desc[0] is allocated lazily in hid_bind() for OTG controllers */
static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
[USB_GADGET_SERIAL_IDX].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
.strings = strings_dev,
};
static struct usb_gadget_strings *dev_strings[] = {
&stringtab_dev,
NULL,
};
/****************************** Configurations ******************************/
/*
 * do_config - add every registered HID function to the configuration.
 * On failure, the cleanup loop walks the list again and removes/puts
 * only the nodes before 'e' (the node that failed); 'e' itself either
 * holds an error pointer or was already put above.
 */
static int do_config(struct usb_configuration *c)
{
struct hidg_func_node *e, *n;
int status = 0;
if (gadget_is_otg(c->cdev->gadget)) {
c->descriptors = otg_desc;
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
list_for_each_entry(e, &hidg_func_list, node) {
e->f = usb_get_function(e->fi);
if (IS_ERR(e->f)) {
status = PTR_ERR(e->f);
goto put;
}
status = usb_add_function(c, e->f);
if (status < 0) {
usb_put_function(e->f);
goto put;
}
}
return 0;
put:
list_for_each_entry(n, &hidg_func_list, node) {
if (n == e)
break;
usb_remove_function(c, n->f);
usb_put_function(n->f);
}
return status;
}
/* The one and only configuration; iConfiguration is assigned at bind time. */
static struct usb_configuration config_driver = {
.label = "HID Gadget",
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
/****************************** Gadget Bind ******************************/
/*
 * hid_bind - composite bind callback.
 * Creates one function instance per platform-registered HID descriptor,
 * copies each descriptor's parameters into its instance, allocates
 * string IDs and registers the configuration.
 * 'n' marks the first node whose instance allocation failed; when NULL
 * the cleanup loop at 'put' releases every node's instance.
 */
static int hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct hidg_func_node *n = NULL, *m, *iter_n;
struct f_hid_opts *hid_opts;
int status, funcs;
funcs = list_count_nodes(&hidg_func_list);
if (!funcs)
return -ENODEV;
list_for_each_entry(iter_n, &hidg_func_list, node) {
iter_n->fi = usb_get_function_instance("hid");
if (IS_ERR(iter_n->fi)) {
status = PTR_ERR(iter_n->fi);
n = iter_n;
goto put;
}
hid_opts = container_of(iter_n->fi, struct f_hid_opts, func_inst);
hid_opts->subclass = iter_n->func->subclass;
hid_opts->protocol = iter_n->func->protocol;
hid_opts->report_length = iter_n->func->report_length;
hid_opts->report_desc_length = iter_n->func->report_desc_length;
hid_opts->report_desc = iter_n->func->report_desc;
}
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
*/
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
goto put;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* OTG controllers need an OTG descriptor; allocate it once */
if (gadget_is_otg(gadget) && !otg_desc[0]) {
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc) {
status = -ENOMEM;
goto put;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
}
/* register our configuration */
status = usb_add_config(cdev, &config_driver, do_config);
if (status < 0)
goto free_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
return 0;
free_otg_desc:
kfree(otg_desc[0]);
otg_desc[0] = NULL;
put:
list_for_each_entry(m, &hidg_func_list, node) {
if (m == n)
break;
usb_put_function_instance(m->fi);
}
return status;
}
/*
 * hid_unbind - release each node's function handle and instance, then
 * the lazily allocated OTG descriptor.
 */
static int hid_unbind(struct usb_composite_dev *cdev)
{
struct hidg_func_node *n;
list_for_each_entry(n, &hidg_func_list, node) {
usb_put_function(n->f);
usb_put_function_instance(n->fi);
}
kfree(otg_desc[0]);
otg_desc[0] = NULL;
return 0;
}
/*
 * Platform-device probe: record the HID function described by the
 * board's platform data so hid_bind() can instantiate it later.
 */
static int hidg_plat_driver_probe(struct platform_device *pdev)
{
	struct hidg_func_descriptor *desc = dev_get_platdata(&pdev->dev);
	struct hidg_func_node *node;

	if (!desc) {
		dev_err(&pdev->dev, "Platform data missing\n");
		return -ENODEV;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->func = desc;
	list_add_tail(&node->node, &hidg_func_list);

	return 0;
}
static void hidg_plat_driver_remove(struct platform_device *pdev)
{
struct hidg_func_node *e, *n;
list_for_each_entry_safe(e, n, &hidg_func_list, node) {
list_del(&e->node);
kfree(e);
}
}
/****************************** Some noise ******************************/
/* Composite driver glue: descriptor, strings and bind/unbind callbacks. */
static struct usb_composite_driver hidg_driver = {
.name = "g_hid",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = hid_bind,
.unbind = hid_unbind,
};
/* Collects per-board HID descriptors via platform data ("hidg" devices). */
static struct platform_driver hidg_plat_driver = {
.remove_new = hidg_plat_driver_remove,
.driver = {
.name = "hidg",
},
};
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Fabien Chouteau, Peter Korsgaard");
MODULE_LICENSE("GPL");
static int __init hidg_init(void)
{
	int ret;

	/* Collect the platform-data HID descriptors first ... */
	ret = platform_driver_probe(&hidg_plat_driver,
				    hidg_plat_driver_probe);
	if (ret < 0)
		return ret;

	/* ... then register the composite gadget that consumes them. */
	ret = usb_composite_probe(&hidg_driver);
	if (ret < 0)
		platform_driver_unregister(&hidg_plat_driver);

	return ret;
}
module_init(hidg_init);

static void __exit hidg_cleanup(void)
{
	usb_composite_unregister(&hidg_driver);
	platform_driver_unregister(&hidg_plat_driver);
}
module_exit(hidg_cleanup);
| linux-master | drivers/usb/gadget/legacy/hid.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* webcam.c -- USB webcam gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart ([email protected])
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb/video.h>
#include "u_uvc.h"
USB_GADGET_COMPOSITE_OPTIONS();
/*-------------------------------------------------------------------------*/
/* module parameters specific to the Video streaming endpoint */
/* Tunables for the UVC video streaming endpoint. */
static unsigned int streaming_interval = 1;
module_param(streaming_interval, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(streaming_interval, "1 - 16");
static unsigned int streaming_maxpacket = 1024;
module_param(streaming_maxpacket, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(streaming_maxpacket, "1 - 1023 (FS), 1 - 3072 (hs/ss)");
static unsigned int streaming_maxburst;
module_param(streaming_maxburst, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(streaming_maxburst, "0 - 15 (ss only)");
/* --------------------------------------------------------------------------
* Device descriptor
*/
#define WEBCAM_VENDOR_ID 0x1d6b /* Linux Foundation */
#define WEBCAM_PRODUCT_ID 0x0102 /* Webcam A/V gadget */
#define WEBCAM_DEVICE_BCD 0x0010 /* 0.10 */
static char webcam_vendor_label[] = "Linux Foundation";
static char webcam_product_label[] = "Webcam gadget";
static char webcam_config_label[] = "Video";
/* string IDs are assigned dynamically */
#define STRING_DESCRIPTION_IDX USB_GADGET_FIRST_AVAIL_IDX
static struct usb_string webcam_strings[] = {
[USB_GADGET_MANUFACTURER_IDX].s = webcam_vendor_label,
[USB_GADGET_PRODUCT_IDX].s = webcam_product_label,
[USB_GADGET_SERIAL_IDX].s = "",
[STRING_DESCRIPTION_IDX].s = webcam_config_label,
{ }
};
static struct usb_gadget_strings webcam_stringtab = {
.language = 0x0409, /* en-us */
.strings = webcam_strings,
};
static struct usb_gadget_strings *webcam_device_strings[] = {
&webcam_stringtab,
NULL,
};
/* UVC function instance/handle acquired at bind time */
static struct usb_function_instance *fi_uvc;
static struct usb_function *f_uvc;
/* Device descriptor: IAD-capable device (Misc / 0x02 / 0x01). */
static struct usb_device_descriptor webcam_device_descriptor = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
/* .bcdUSB = DYNAMIC */
.bDeviceClass = USB_CLASS_MISC,
.bDeviceSubClass = 0x02,
.bDeviceProtocol = 0x01,
.bMaxPacketSize0 = 0, /* dynamic */
.idVendor = cpu_to_le16(WEBCAM_VENDOR_ID),
.idProduct = cpu_to_le16(WEBCAM_PRODUCT_ID),
.bcdDevice = cpu_to_le16(WEBCAM_DEVICE_BCD),
.iManufacturer = 0, /* dynamic */
.iProduct = 0, /* dynamic */
.iSerialNumber = 0, /* dynamic */
.bNumConfigurations = 0, /* dynamic */
};
DECLARE_UVC_HEADER_DESCRIPTOR(1);
static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
.bLength = UVC_DT_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_HEADER,
.bcdUVC = cpu_to_le16(0x0110),
.wTotalLength = 0, /* dynamic */
.dwClockFrequency = cpu_to_le32(48000000),
.bInCollection = 0, /* dynamic */
.baInterfaceNr[0] = 0, /* dynamic */
};
static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
.bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_INPUT_TERMINAL,
.bTerminalID = 1,
.wTerminalType = cpu_to_le16(0x0201),
.bAssocTerminal = 0,
.iTerminal = 0,
.wObjectiveFocalLengthMin = cpu_to_le16(0),
.wObjectiveFocalLengthMax = cpu_to_le16(0),
.wOcularFocalLength = cpu_to_le16(0),
.bControlSize = 3,
.bmControls[0] = 2,
.bmControls[1] = 0,
.bmControls[2] = 0,
};
static const struct uvc_processing_unit_descriptor uvc_processing = {
.bLength = UVC_DT_PROCESSING_UNIT_SIZE(2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_PROCESSING_UNIT,
.bUnitID = 2,
.bSourceID = 1,
.wMaxMultiplier = cpu_to_le16(16*1024),
.bControlSize = 2,
.bmControls[0] = 1,
.bmControls[1] = 0,
.iProcessing = 0,
.bmVideoStandards = 0,
};
static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
.bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL,
.bTerminalID = 3,
.wTerminalType = cpu_to_le16(0x0101),
.bAssocTerminal = 0,
.bSourceID = 2,
.iTerminal = 0,
};
DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
.bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_INPUT_HEADER,
.bNumFormats = 2,
.wTotalLength = 0, /* dynamic */
.bEndpointAddress = 0, /* dynamic */
.bmInfo = 0,
.bTerminalLink = 3,
.bStillCaptureMethod = 0,
.bTriggerSupport = 0,
.bTriggerUsage = 0,
.bControlSize = 1,
.bmaControls[0][0] = 0,
.bmaControls[1][0] = 4,
};
/*
 * Uncompressed format 1: 16-bit YUY2 (GUID below is the standard YUY2
 * format GUID), with the two frame descriptors that follow (360p, 720p).
 */
static const struct uvc_format_uncompressed uvc_format_yuv = {
	.bLength		= UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FORMAT_UNCOMPRESSED,
	.bFormatIndex		= 1,
	.bNumFrameDescriptors	= 2,
	.guidFormat		=
		{ 'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00,
		 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
	.bBitsPerPixel		= 16,
	.bDefaultFrameIndex	= 1,
	.bAspectRatioX		= 0,
	.bAspectRatioY		= 0,
	.bmInterlaceFlags	= 0,
	.bCopyProtect		= 0,
};

DECLARE_UVC_FRAME_UNCOMPRESSED(1);
DECLARE_UVC_FRAME_UNCOMPRESSED(3);

/*
 * 640x360 YUY2 frame offering three discrete frame intervals
 * (values are in the UVC 100 ns unit; 666666 corresponds to ~15 fps).
 */
static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
	.bLength		= UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FRAME_UNCOMPRESSED,
	.bFrameIndex		= 1,
	.bmCapabilities		= 0,
	.wWidth			= cpu_to_le16(640),
	.wHeight		= cpu_to_le16(360),
	.dwMinBitRate		= cpu_to_le32(18432000),
	.dwMaxBitRate		= cpu_to_le32(55296000),
	.dwMaxVideoFrameBufferSize	= cpu_to_le32(460800),
	.dwDefaultFrameInterval	= cpu_to_le32(666666),
	.bFrameIntervalType	= 3,
	.dwFrameInterval[0]	= cpu_to_le32(666666),
	.dwFrameInterval[1]	= cpu_to_le32(1000000),
	.dwFrameInterval[2]	= cpu_to_le32(5000000),
};

/* 1280x720 YUY2 frame with a single fixed frame interval. */
static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
	.bLength		= UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FRAME_UNCOMPRESSED,
	.bFrameIndex		= 2,
	.bmCapabilities		= 0,
	.wWidth			= cpu_to_le16(1280),
	.wHeight		= cpu_to_le16(720),
	.dwMinBitRate		= cpu_to_le32(29491200),
	.dwMaxBitRate		= cpu_to_le32(29491200),
	.dwMaxVideoFrameBufferSize	= cpu_to_le32(1843200),
	.dwDefaultFrameInterval	= cpu_to_le32(5000000),
	.bFrameIntervalType	= 1,
	.dwFrameInterval[0]	= cpu_to_le32(5000000),
};
/*
 * MJPEG format 2, mirroring the uncompressed format above: the same two
 * resolutions and frame intervals, just carried as JPEG payloads.
 */
static const struct uvc_format_mjpeg uvc_format_mjpg = {
	.bLength		= UVC_DT_FORMAT_MJPEG_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FORMAT_MJPEG,
	.bFormatIndex		= 2,
	.bNumFrameDescriptors	= 2,
	.bmFlags		= 0,
	.bDefaultFrameIndex	= 1,
	.bAspectRatioX		= 0,
	.bAspectRatioY		= 0,
	.bmInterlaceFlags	= 0,
	.bCopyProtect		= 0,
};

DECLARE_UVC_FRAME_MJPEG(1);
DECLARE_UVC_FRAME_MJPEG(3);

/* 640x360 MJPEG frame, three discrete intervals (100 ns units). */
static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
	.bLength		= UVC_DT_FRAME_MJPEG_SIZE(3),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FRAME_MJPEG,
	.bFrameIndex		= 1,
	.bmCapabilities		= 0,
	.wWidth			= cpu_to_le16(640),
	.wHeight		= cpu_to_le16(360),
	.dwMinBitRate		= cpu_to_le32(18432000),
	.dwMaxBitRate		= cpu_to_le32(55296000),
	.dwMaxVideoFrameBufferSize	= cpu_to_le32(460800),
	.dwDefaultFrameInterval	= cpu_to_le32(666666),
	.bFrameIntervalType	= 3,
	.dwFrameInterval[0]	= cpu_to_le32(666666),
	.dwFrameInterval[1]	= cpu_to_le32(1000000),
	.dwFrameInterval[2]	= cpu_to_le32(5000000),
};

/* 1280x720 MJPEG frame, single fixed interval. */
static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
	.bLength		= UVC_DT_FRAME_MJPEG_SIZE(1),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_FRAME_MJPEG,
	.bFrameIndex		= 2,
	.bmCapabilities		= 0,
	.wWidth			= cpu_to_le16(1280),
	.wHeight		= cpu_to_le16(720),
	.dwMinBitRate		= cpu_to_le32(29491200),
	.dwMaxBitRate		= cpu_to_le32(29491200),
	.dwMaxVideoFrameBufferSize	= cpu_to_le32(1843200),
	.dwDefaultFrameInterval	= cpu_to_le32(5000000),
	.bFrameIntervalType	= 1,
	.dwFrameInterval[0]	= cpu_to_le32(5000000),
};
/*
 * Color matching descriptor appended after each format's frames in the
 * streaming class lists below.
 */
static const struct uvc_color_matching_descriptor uvc_color_matching = {
	.bLength		= UVC_DT_COLOR_MATCHING_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VS_COLORFORMAT,
	.bColorPrimaries	= 1,
	.bTransferCharacteristics	= 1,
	.bMatrixCoefficients	= 4,
};
/*
 * NULL-terminated class-specific descriptor lists handed to f_uvc in
 * webcam_bind(). The control lists are identical for full and super
 * speed, and all three streaming lists carry the same format/frame
 * layout: YUV format + frames + color matching, then MJPEG likewise.
 */
static const struct uvc_descriptor_header * const uvc_fs_control_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_control_header,
	(const struct uvc_descriptor_header *) &uvc_camera_terminal,
	(const struct uvc_descriptor_header *) &uvc_processing,
	(const struct uvc_descriptor_header *) &uvc_output_terminal,
	NULL,
};

static const struct uvc_descriptor_header * const uvc_ss_control_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_control_header,
	(const struct uvc_descriptor_header *) &uvc_camera_terminal,
	(const struct uvc_descriptor_header *) &uvc_processing,
	(const struct uvc_descriptor_header *) &uvc_output_terminal,
	NULL,
};

static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_input_header,
	(const struct uvc_descriptor_header *) &uvc_format_yuv,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	NULL,
};

static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_input_header,
	(const struct uvc_descriptor_header *) &uvc_format_yuv,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	NULL,
};

static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_input_header,
	(const struct uvc_descriptor_header *) &uvc_format_yuv,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
	(const struct uvc_descriptor_header *) &uvc_color_matching,
	NULL,
};
/* --------------------------------------------------------------------------
* USB configuration
*/
/*
 * Configuration bind callback: take a reference on the UVC function and
 * add it to the configuration, dropping the reference again if the add
 * fails.
 */
static int
webcam_config_bind(struct usb_configuration *c)
{
	int ret;

	f_uvc = usb_get_function(fi_uvc);
	if (IS_ERR(f_uvc))
		return PTR_ERR(f_uvc);

	ret = usb_add_function(c, f_uvc);
	if (ret < 0)
		usb_put_function(f_uvc);

	return ret;
}
/*
 * The single configuration: self-powered, drawing the Kconfig-selected
 * VBUS current. iConfiguration is assigned in webcam_bind() once string
 * IDs are known.
 */
static struct usb_configuration webcam_config_driver = {
	.label			= webcam_config_label,
	.bConfigurationValue	= 1,
	.iConfiguration		= 0, /* dynamic */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
	.MaxPower		= CONFIG_USB_GADGET_VBUS_DRAW,
};
/*
 * Composite unbind: release the UVC function and its instance. The
 * IS_ERR_OR_NULL checks cover bind failures where only part of the
 * setup succeeded.
 */
static int
webcam_unbind(struct usb_composite_dev *cdev)
{
	if (!IS_ERR_OR_NULL(f_uvc))
		usb_put_function(f_uvc);
	if (!IS_ERR_OR_NULL(fi_uvc))
		usb_put_function_instance(fi_uvc);
	return 0;
}
/*
 * Composite bind: obtain the "uvc" function instance, configure it with
 * the module-parameter streaming settings and the static descriptor
 * lists above, allocate string IDs, and register the configuration.
 *
 * Fix vs. original: the assignment buried inside the usb_add_config()
 * `if` condition is split into a separate statement (kernel coding
 * style / checkpatch forbids assignments in conditions). Behavior is
 * unchanged.
 */
static int
webcam_bind(struct usb_composite_dev *cdev)
{
	struct f_uvc_opts *uvc_opts;
	int ret;

	fi_uvc = usb_get_function_instance("uvc");
	if (IS_ERR(fi_uvc))
		return PTR_ERR(fi_uvc);

	uvc_opts = container_of(fi_uvc, struct f_uvc_opts, func_inst);

	uvc_opts->streaming_interval = streaming_interval;
	uvc_opts->streaming_maxpacket = streaming_maxpacket;
	uvc_opts->streaming_maxburst = streaming_maxburst;

	uvc_opts->fs_control = uvc_fs_control_cls;
	uvc_opts->ss_control = uvc_ss_control_cls;
	uvc_opts->fs_streaming = uvc_fs_streaming_cls;
	uvc_opts->hs_streaming = uvc_hs_streaming_cls;
	uvc_opts->ss_streaming = uvc_ss_streaming_cls;

	/* Allocate string descriptor numbers ... note that string contents
	 * can be overridden by the composite_dev glue.
	 */
	ret = usb_string_ids_tab(cdev, webcam_strings);
	if (ret < 0)
		goto error;
	webcam_device_descriptor.iManufacturer =
		webcam_strings[USB_GADGET_MANUFACTURER_IDX].id;
	webcam_device_descriptor.iProduct =
		webcam_strings[USB_GADGET_PRODUCT_IDX].id;
	webcam_config_driver.iConfiguration =
		webcam_strings[STRING_DESCRIPTION_IDX].id;

	/* Register our configuration. */
	ret = usb_add_config(cdev, &webcam_config_driver, webcam_config_bind);
	if (ret < 0)
		goto error;
	usb_composite_overwrite_options(cdev, &coverwrite);
	INFO(cdev, "Webcam Video Gadget\n");
	return 0;

error:
	usb_put_function_instance(fi_uvc);
	return ret;
}
/* --------------------------------------------------------------------------
* Driver
*/
/* Composite driver glue: registers g_webcam up to SuperSpeed. */
static struct usb_composite_driver webcam_driver = {
	.name		= "g_webcam",
	.dev		= &webcam_device_descriptor,
	.strings	= webcam_device_strings,
	.max_speed	= USB_SPEED_SUPER,
	.bind		= webcam_bind,
	.unbind		= webcam_unbind,
};

module_usb_composite_driver(webcam_driver);

MODULE_AUTHOR("Laurent Pinchart");
MODULE_DESCRIPTION("Webcam Video Gadget");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/webcam.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dbgp.c -- EHCI Debug Port device gadget
*
* Copyright (C) 2010 Stephane Duverger
*
* Released under the GPLv2.
*/
/* verbose messages */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "u_serial.h"
#define DRIVER_VENDOR_ID	0x0525  /* NetChip */
#define DRIVER_PRODUCT_ID	0xc0de  /* undefined */

/* EHCI debug devices use fixed 8-byte bulk packets. */
#define USB_DEBUG_MAX_PACKET_SIZE	8
#define DBGP_REQ_EP0_LEN	128	/* ep0 control buffer size */
#define DBGP_REQ_LEN		512	/* bulk OUT request buffer size */

/* Singleton driver state: one gadget, one ep0 request, two bulk eps. */
static struct dbgp {
	struct usb_gadget	*gadget;
	struct usb_request	*req;
	struct usb_ep		*i_ep;
	struct usb_ep		*o_ep;
#ifdef CONFIG_USB_G_DBGP_SERIAL
	struct gserial		*serial;
#endif
} dbgp;

static struct usb_device_descriptor device_desc = {
	.bLength = sizeof device_desc,
	.bDescriptorType = USB_DT_DEVICE,
	.bcdUSB = cpu_to_le16(0x0200),
	.bDeviceClass =	USB_CLASS_VENDOR_SPEC,
	.idVendor = cpu_to_le16(DRIVER_VENDOR_ID),
	.idProduct = cpu_to_le16(DRIVER_PRODUCT_ID),
	.bNumConfigurations = 1,
};

/*
 * Debug descriptor; its bDebugInEndpoint/bDebugOutEndpoint fields are
 * filled in by dbgp_configure_endpoints() once endpoints are assigned.
 */
static struct usb_debug_descriptor dbg_desc = {
	.bLength = sizeof dbg_desc,
	.bDescriptorType = USB_DT_DEBUG,
};

static struct usb_endpoint_descriptor i_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.bEndpointAddress = USB_DIR_IN,
};

static struct usb_endpoint_descriptor o_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.bEndpointAddress = USB_DIR_OUT,
};
#ifdef CONFIG_USB_G_DBGP_PRINTK
/*
 * Print a received buffer via printk. The final byte is printed
 * separately so the buffer can be NUL-terminated in place for "%s"
 * without losing that character.
 */
static int dbgp_consume(char *buf, unsigned len)
{
	char last;

	if (len == 0)
		return 0;

	last = buf[len - 1];
	if (last)
		buf[len - 1] = 0;

	printk(KERN_NOTICE "%s%c", buf, last);
	return 0;
}
/* Thin wrapper so both endpoints are disabled through one helper. */
static void __disable_ep(struct usb_ep *ep)
{
	usb_ep_disable(ep);
}
/* Disable both bulk endpoints (printk mode teardown). */
static void dbgp_disable_ep(void)
{
	__disable_ep(dbgp.i_ep);
	__disable_ep(dbgp.o_ep);
}
/*
 * Bulk request completion (printk mode). IN completions are unexpected
 * and only logged. A successful OUT completion prints the data and
 * re-queues the same request; on any error the request is torn down and
 * both endpoints disabled.
 */
static void dbgp_complete(struct usb_ep *ep, struct usb_request *req)
{
	int stp;	/* step number for the debug trace below */
	int err = 0;
	int status = req->status;

	if (ep == dbgp.i_ep) {
		stp = 1;
		goto fail;
	}

	if (status != 0) {
		stp = 2;
		goto release_req;
	}

	dbgp_consume(req->buf, req->actual);

	req->length = DBGP_REQ_LEN;
	err = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (err < 0) {
		stp = 3;
		goto release_req;
	}

	return;

release_req:
	kfree(req->buf);
	usb_ep_free_request(dbgp.o_ep, req);
	dbgp_disable_ep();
fail:
	dev_dbg(&dbgp.gadget->dev,
		"complete: failure (%d:%d) ==> %d\n", stp, err, status);
}
/*
 * Allocate a DBGP_REQ_LEN receive request on @ep and queue it; the
 * completion handler (dbgp_complete) consumes and re-queues it.
 *
 * Fix vs. original: the fail_2 path freed the request on the global
 * dbgp.o_ep instead of the @ep it was allocated from. They coincide for
 * the only current caller (dbgp_enable_ep), but freeing on the wrong
 * endpoint is a latent bug if another caller ever appears.
 */
static int dbgp_enable_ep_req(struct usb_ep *ep)
{
	int err, stp;
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		stp = 1;
		goto fail_1;
	}

	req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
	if (!req->buf) {
		err = -ENOMEM;
		stp = 2;
		goto fail_2;
	}

	req->complete = dbgp_complete;
	req->length = DBGP_REQ_LEN;
	err = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (err < 0) {
		stp = 3;
		goto fail_3;
	}

	return 0;

fail_3:
	kfree(req->buf);
fail_2:
	/* Free on the endpoint the request was allocated from. */
	usb_ep_free_request(ep, req);
fail_1:
	dev_dbg(&dbgp.gadget->dev,
		"enable ep req: failure (%d:%d)\n", stp, err);
	return err;
}
/* Attach @desc to @ep and enable it; returns usb_ep_enable()'s result. */
static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc)
{
	int err;
	ep->desc = desc;
	err = usb_ep_enable(ep);
	return err;
}
/*
 * Printk-mode enable: bring up IN then OUT endpoint, then prime the OUT
 * endpoint with a receive request. Unwinds in reverse order on failure;
 * stp records which step failed for the debug trace.
 */
static int dbgp_enable_ep(void)
{
	int err, stp;

	err = __enable_ep(dbgp.i_ep, &i_desc);
	if (err < 0) {
		stp = 1;
		goto fail_1;
	}

	err = __enable_ep(dbgp.o_ep, &o_desc);
	if (err < 0) {
		stp = 2;
		goto fail_2;
	}

	err = dbgp_enable_ep_req(dbgp.o_ep);
	if (err < 0) {
		stp = 3;
		goto fail_3;
	}

	return 0;

fail_3:
	__disable_ep(dbgp.o_ep);
fail_2:
	__disable_ep(dbgp.i_ep);
fail_1:
	dev_dbg(&dbgp.gadget->dev, "enable ep: failure (%d:%d)\n", stp, err);
	return err;
}
#endif
/* Also used as the .reset callback: tear down the active data path. */
static void dbgp_disconnect(struct usb_gadget *gadget)
{
#ifdef CONFIG_USB_G_DBGP_PRINTK
	dbgp_disable_ep();
#else
	gserial_disconnect(dbgp.serial);
#endif
}
/*
 * Unbind: release the serial glue (if compiled in) and the ep0 request,
 * dequeuing nothing — ep0 traffic has stopped by the time this runs.
 * Also called from dbgp_bind()'s error path, hence the NULL checks.
 */
static void dbgp_unbind(struct usb_gadget *gadget)
{
#ifdef CONFIG_USB_G_DBGP_SERIAL
	kfree(dbgp.serial);
	dbgp.serial = NULL;
#endif
	if (dbgp.req) {
		kfree(dbgp.req->buf);
		usb_ep_free_request(gadget->ep0, dbgp.req);
		dbgp.req = NULL;
	}
}
#ifdef CONFIG_USB_G_DBGP_SERIAL
/* TTY line number allocated by gserial_alloc_line() in dbgp_bind(). */
static unsigned char tty_line;
#endif
/*
 * Autoconfigure the two bulk endpoints, pin their packet size to the
 * 8-byte debug-device maximum, and publish their addresses in the debug
 * descriptor (and, in serial mode, in the gserial port).
 */
static int dbgp_configure_endpoints(struct usb_gadget *gadget)
{
	int stp;

	usb_ep_autoconfig_reset(gadget);

	dbgp.i_ep = usb_ep_autoconfig(gadget, &i_desc);
	if (!dbgp.i_ep) {
		stp = 1;
		goto fail_1;
	}

	i_desc.wMaxPacketSize =
		cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);

	dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc);
	if (!dbgp.o_ep) {
		stp = 2;
		goto fail_1;
	}

	o_desc.wMaxPacketSize =
		cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);

	dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress;
	dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress;

#ifdef CONFIG_USB_G_DBGP_SERIAL
	dbgp.serial->in = dbgp.i_ep;
	dbgp.serial->out = dbgp.o_ep;

	dbgp.serial->in->desc = &i_desc;
	dbgp.serial->out->desc = &o_desc;
#endif

	return 0;

fail_1:
	dev_dbg(&dbgp.gadget->dev, "ep config: failure (%d)\n", stp);
	return -ENODEV;
}
/*
 * Gadget bind: allocate the ep0 control request and buffer, (serial
 * mode) the gserial port and TTY line, then configure endpoints. On any
 * failure, dbgp_unbind() releases whatever was already set up.
 */
static int dbgp_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	int err, stp;

	dbgp.gadget = gadget;

	dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!dbgp.req) {
		err = -ENOMEM;
		stp = 1;
		goto fail;
	}

	dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL);
	if (!dbgp.req->buf) {
		err = -ENOMEM;
		stp = 2;
		goto fail;
	}

	dbgp.req->length = DBGP_REQ_EP0_LEN;

#ifdef CONFIG_USB_G_DBGP_SERIAL
	dbgp.serial = kzalloc(sizeof(struct gserial), GFP_KERNEL);
	if (!dbgp.serial) {
		stp = 3;
		err = -ENOMEM;
		goto fail;
	}

	if (gserial_alloc_line(&tty_line)) {
		stp = 4;
		err = -ENODEV;
		goto fail;
	}
#endif

	err = dbgp_configure_endpoints(gadget);
	if (err < 0) {
		stp = 5;
		goto fail;
	}

	dev_dbg(&dbgp.gadget->dev, "bind: success\n");
	return 0;

fail:
	dev_dbg(&gadget->dev, "bind: failure (%d:%d)\n", stp, err);
	dbgp_unbind(gadget);
	return err;
}
/* ep0 completion: nothing to do beyond logging the outcome. */
static void dbgp_setup_complete(struct usb_ep *ep,
				struct usb_request *req)
{
	dev_dbg(&dbgp.gadget->dev, "setup complete: %d, %d/%d\n",
		req->status, req->actual, req->length);
}
/*
 * ep0 setup handler. Serves GET_DESCRIPTOR for the device and debug
 * descriptors, and SET_FEATURE(DEBUG_MODE), which brings up the data
 * path (printk or serial, depending on config). Everything else is
 * rejected with -EOPNOTSUPP.
 */
static int dbgp_setup(struct usb_gadget *gadget,
		      const struct usb_ctrlrequest *ctrl)
{
	struct usb_request *req = dbgp.req;
	u8 request = ctrl->bRequest;
	u16 value = le16_to_cpu(ctrl->wValue);
	u16 length = le16_to_cpu(ctrl->wLength);
	int err = -EOPNOTSUPP;
	void *data = NULL;
	u16 len = 0;

	/* Clamp oversized IN transfers; reject oversized OUT transfers. */
	if (length > DBGP_REQ_LEN) {
		if (ctrl->bRequestType & USB_DIR_IN) {
			/* Cast away the const, we are going to overwrite on purpose. */
			__le16 *temp = (__le16 *)&ctrl->wLength;

			*temp = cpu_to_le16(DBGP_REQ_LEN);
			length = DBGP_REQ_LEN;
		} else {
			return err;
		}
	}

	if (request == USB_REQ_GET_DESCRIPTOR) {
		switch (value>>8) {
		case USB_DT_DEVICE:
			dev_dbg(&dbgp.gadget->dev, "setup: desc device\n");
			len = sizeof device_desc;
			data = &device_desc;
			device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
			break;
		case USB_DT_DEBUG:
			dev_dbg(&dbgp.gadget->dev, "setup: desc debug\n");
			len = sizeof dbg_desc;
			data = &dbg_desc;
			break;
		default:
			goto fail;
		}
		err = 0;
	} else if (request == USB_REQ_SET_FEATURE &&
		   value == USB_DEVICE_DEBUG_MODE) {
		dev_dbg(&dbgp.gadget->dev, "setup: feat debug\n");
#ifdef CONFIG_USB_G_DBGP_PRINTK
		err = dbgp_enable_ep();
#else
		err = dbgp_configure_endpoints(gadget);
		if (err < 0) {
			goto fail;
		}
		err = gserial_connect(dbgp.serial, tty_line);
#endif
		if (err < 0)
			goto fail;
	} else
		goto fail;

	/* Never send more than the host asked for (len may exceed length). */
	req->length = min(length, len);
	req->zero = len < req->length;
	if (data && req->length)
		memcpy(req->buf, data, req->length);

	req->complete = dbgp_setup_complete;
	return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);

fail:
	dev_dbg(&dbgp.gadget->dev,
		"setup: failure req %x v %x\n", request, value);
	return err;
}
/* Gadget driver ops; reset and disconnect share the same teardown. */
static struct usb_gadget_driver dbgp_driver = {
	.function = "dbgp",
	.max_speed = USB_SPEED_HIGH,
	.bind = dbgp_bind,
	.unbind = dbgp_unbind,
	.setup = dbgp_setup,
	.reset = dbgp_disconnect,
	.disconnect = dbgp_disconnect,
	.driver	= {
		.owner = THIS_MODULE,
		.name = "dbgp"
	},
};
static int __init dbgp_init(void)
{
	return usb_gadget_register_driver(&dbgp_driver);
}

static void __exit dbgp_exit(void)
{
	usb_gadget_unregister_driver(&dbgp_driver);
#ifdef CONFIG_USB_G_DBGP_SERIAL
	/* Release the TTY line allocated in dbgp_bind(). */
	gserial_free_line(tty_line);
#endif
}

MODULE_AUTHOR("Stephane Duverger");
MODULE_LICENSE("GPL");
module_init(dbgp_init);
module_exit(dbgp_exit);
| linux-master | drivers/usb/gadget/legacy/dbgp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* audio.c -- Audio gadget driver
*
* Copyright (C) 2008 Bryan Wu <[email protected]>
* Copyright (C) 2008 Analog Devices, Inc
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
#define DRIVER_DESC "Linux USB Audio Gadget"
#define DRIVER_VERSION "Feb 2, 2012"
USB_GADGET_COMPOSITE_OPTIONS();
/*
 * Module parameters. Exactly one of three mutually exclusive sets is
 * compiled in, selected by CONFIG_GADGET_UAC1 / CONFIG_GADGET_UAC1_LEGACY:
 * UAC2 (the default), the modern UAC1 function, or the legacy UAC1
 * function (which streams through ALSA device files instead).
 */
#ifndef CONFIG_GADGET_UAC1
#include "u_uac2.h"

/* Playback(USB-IN) Default Stereo - Fl/Fr */
static int p_chmask = UAC2_DEF_PCHMASK;
module_param(p_chmask, uint, 0444);
MODULE_PARM_DESC(p_chmask, "Playback Channel Mask");

/* Playback Default 48 KHz */
static int p_srates[UAC_MAX_RATES] = {UAC2_DEF_PSRATE};
static int p_srates_cnt = 1;
module_param_array_named(p_srate, p_srates, uint, &p_srates_cnt, 0444);
MODULE_PARM_DESC(p_srate, "Playback Sampling Rates (array)");

/* Playback Default 16bits/sample */
static int p_ssize = UAC2_DEF_PSSIZE;
module_param(p_ssize, uint, 0444);
MODULE_PARM_DESC(p_ssize, "Playback Sample Size(bytes)");

/* Playback bInterval for HS/SS (1-4: fixed, 0: auto) */
static u8 p_hs_bint = UAC2_DEF_PHSBINT;
module_param(p_hs_bint, byte, 0444);
MODULE_PARM_DESC(p_hs_bint,
		"Playback bInterval for HS/SS (1-4: fixed, 0: auto)");

/* Capture(USB-OUT) Default Stereo - Fl/Fr */
static int c_chmask = UAC2_DEF_CCHMASK;
module_param(c_chmask, uint, 0444);
MODULE_PARM_DESC(c_chmask, "Capture Channel Mask");

/* Capture Default 64 KHz */
static int c_srates[UAC_MAX_RATES] = {UAC2_DEF_CSRATE};
static int c_srates_cnt = 1;
module_param_array_named(c_srate, c_srates, uint, &c_srates_cnt, 0444);
MODULE_PARM_DESC(c_srate, "Capture Sampling Rates (array)");

/* Capture Default 16bits/sample */
static int c_ssize = UAC2_DEF_CSSIZE;
module_param(c_ssize, uint, 0444);
MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");

/* capture bInterval for HS/SS (1-4: fixed, 0: auto) */
static u8 c_hs_bint = UAC2_DEF_CHSBINT;
module_param(c_hs_bint, byte, 0444);
MODULE_PARM_DESC(c_hs_bint,
		"Capture bInterval for HS/SS (1-4: fixed, 0: auto)");

#else
#ifndef CONFIG_GADGET_UAC1_LEGACY
#include "u_uac1.h"

/* Playback(USB-IN) Default Stereo - Fl/Fr */
static int p_chmask = UAC1_DEF_PCHMASK;
module_param(p_chmask, uint, 0444);
MODULE_PARM_DESC(p_chmask, "Playback Channel Mask");

/* Playback Default 48 KHz */
static int p_srates[UAC_MAX_RATES] = {UAC1_DEF_PSRATE};
static int p_srates_cnt = 1;
module_param_array_named(p_srate, p_srates, uint, &p_srates_cnt, 0444);
MODULE_PARM_DESC(p_srate, "Playback Sampling Rates (array)");

/* Playback Default 16bits/sample */
static int p_ssize = UAC1_DEF_PSSIZE;
module_param(p_ssize, uint, 0444);
MODULE_PARM_DESC(p_ssize, "Playback Sample Size(bytes)");

/* Capture(USB-OUT) Default Stereo - Fl/Fr */
static int c_chmask = UAC1_DEF_CCHMASK;
module_param(c_chmask, uint, 0444);
MODULE_PARM_DESC(c_chmask, "Capture Channel Mask");

/* Capture Default 48 KHz */
static int c_srates[UAC_MAX_RATES] = {UAC1_DEF_CSRATE};
static int c_srates_cnt = 1;
module_param_array_named(c_srate, c_srates, uint, &c_srates_cnt, 0444);
MODULE_PARM_DESC(c_srate, "Capture Sampling Rates (array)");

/* Capture Default 16bits/sample */
static int c_ssize = UAC1_DEF_CSSIZE;
module_param(c_ssize, uint, 0444);
MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
#else /* CONFIG_GADGET_UAC1_LEGACY */
#include "u_uac1_legacy.h"

static char *fn_play = FILE_PCM_PLAYBACK;
module_param(fn_play, charp, 0444);
MODULE_PARM_DESC(fn_play, "Playback PCM device file name");

static char *fn_cap = FILE_PCM_CAPTURE;
module_param(fn_cap, charp, 0444);
MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");

static char *fn_cntl = FILE_CONTROL;
module_param(fn_cntl, charp, 0444);
MODULE_PARM_DESC(fn_cntl, "Control device file name");

static int req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
module_param(req_buf_size, int, 0444);
MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");

static int req_count = UAC1_REQ_COUNT;
module_param(req_count, int, 0444);
MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");

static int audio_buf_size = UAC1_AUDIO_BUF_SIZE;
module_param(audio_buf_size, int, 0444);
MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
#endif /* CONFIG_GADGET_UAC1_LEGACY */
#endif
/* string IDs are assigned dynamically by usb_string_ids_tab() in bind */

static struct usb_string strings_dev[] = {
	[USB_GADGET_MANUFACTURER_IDX].s = "",
	[USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC,
	[USB_GADGET_SERIAL_IDX].s = "",
	{  } /* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
	.language = 0x0409,	/* en-us */
	.strings = strings_dev,
};

static struct usb_gadget_strings *audio_strings[] = {
	&stringtab_dev,
	NULL,
};

/* Function instance/handle for whichever UAC flavor was compiled in. */
#ifndef CONFIG_GADGET_UAC1
static struct usb_function_instance *fi_uac2;
static struct usb_function *f_uac2;
#else
static struct usb_function_instance *fi_uac1;
static struct usb_function *f_uac1;
#endif
/*-------------------------------------------------------------------------*/

/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
 * Instead:  allocate your own, using normal USB-IF procedures.
 */

/* Thanks to Linux Foundation for donating this product ID. */
#define AUDIO_VENDOR_NUM		0x1d6b	/* Linux Foundation */
#define AUDIO_PRODUCT_NUM		0x0101	/* Linux-USB Audio Gadget */

/*-------------------------------------------------------------------------*/

static struct usb_device_descriptor device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	/* .bcdUSB = DYNAMIC */

#ifdef CONFIG_GADGET_UAC1_LEGACY
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
	.bDeviceSubClass =	0,
	.bDeviceProtocol =	0,
#else
	/* Interface Association Descriptor device class (composite). */
	.bDeviceClass =		USB_CLASS_MISC,
	.bDeviceSubClass =	0x02,
	.bDeviceProtocol =	0x01,
#endif
	/* .bMaxPacketSize0 = f(hardware) */

	/* Vendor and product id defaults change according to what configs
	 * we support.  (As does bNumConfigurations.)  These values can
	 * also be overridden by module parameters.
	 */
	.idVendor =		cpu_to_le16(AUDIO_VENDOR_NUM),
	.idProduct =		cpu_to_le16(AUDIO_PRODUCT_NUM),
	/* .bcdDevice = f(hardware) */
	/* .iManufacturer = DYNAMIC */
	/* .iProduct = DYNAMIC */
	/* NO SERIAL NUMBER */
	.bNumConfigurations =	1,
};

/* OTG descriptor slot, allocated in audio_bind() when the UDC is OTG. */
static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
/*
 * Configuration bind: attach OTG descriptors when applicable, then take
 * a reference on the compiled-in UAC function and add it to the config.
 */
static int audio_do_config(struct usb_configuration *c)
{
	int status;

	/* FIXME alloc iConfiguration string, set it in c->strings */

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

#ifdef CONFIG_GADGET_UAC1
	f_uac1 = usb_get_function(fi_uac1);
	if (IS_ERR(f_uac1)) {
		status = PTR_ERR(f_uac1);
		return status;
	}

	status = usb_add_function(c, f_uac1);
	if (status < 0) {
		usb_put_function(f_uac1);
		return status;
	}
#else
	f_uac2 = usb_get_function(fi_uac2);
	if (IS_ERR(f_uac2)) {
		status = PTR_ERR(f_uac2);
		return status;
	}

	status = usb_add_function(c, f_uac2);
	if (status < 0) {
		usb_put_function(f_uac2);
		return status;
	}
#endif

	return 0;
}
/* The single, self-powered configuration. */
static struct usb_configuration audio_config_driver = {
	.label			= DRIVER_DESC,
	.bConfigurationValue	= 1,
	/* .iConfiguration = DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};
/*-------------------------------------------------------------------------*/
/*
 * Composite bind: obtain the function instance matching the compiled-in
 * UAC flavor, copy the module parameters into its opts, allocate string
 * IDs and the OTG descriptor (if needed), and register the configuration.
 */
static int audio_bind(struct usb_composite_dev *cdev)
{
#ifndef CONFIG_GADGET_UAC1
	struct f_uac2_opts	*uac2_opts;
	int			i;
#else
#ifndef CONFIG_GADGET_UAC1_LEGACY
	struct f_uac1_opts	*uac1_opts;
	int			i;
#else
	struct f_uac1_legacy_opts	*uac1_opts;
#endif
#endif
	int			status;

#ifndef CONFIG_GADGET_UAC1
	fi_uac2 = usb_get_function_instance("uac2");
	if (IS_ERR(fi_uac2))
		return PTR_ERR(fi_uac2);
#else
#ifndef CONFIG_GADGET_UAC1_LEGACY
	fi_uac1 = usb_get_function_instance("uac1");
#else
	fi_uac1 = usb_get_function_instance("uac1_legacy");
#endif
	if (IS_ERR(fi_uac1))
		return PTR_ERR(fi_uac1);
#endif

#ifndef CONFIG_GADGET_UAC1
	uac2_opts = container_of(fi_uac2, struct f_uac2_opts, func_inst);
	uac2_opts->p_chmask = p_chmask;

	for (i = 0; i < p_srates_cnt; ++i)
		uac2_opts->p_srates[i] = p_srates[i];

	uac2_opts->p_ssize = p_ssize;
	uac2_opts->p_hs_bint = p_hs_bint;
	uac2_opts->c_chmask = c_chmask;

	for (i = 0; i < c_srates_cnt; ++i)
		uac2_opts->c_srates[i] = c_srates[i];

	uac2_opts->c_ssize = c_ssize;
	uac2_opts->c_hs_bint = c_hs_bint;
	uac2_opts->req_number = UAC2_DEF_REQ_NUM;
#else
#ifndef CONFIG_GADGET_UAC1_LEGACY
	uac1_opts = container_of(fi_uac1, struct f_uac1_opts, func_inst);
	uac1_opts->p_chmask = p_chmask;

	for (i = 0; i < p_srates_cnt; ++i)
		uac1_opts->p_srates[i] = p_srates[i];

	uac1_opts->p_ssize = p_ssize;
	uac1_opts->c_chmask = c_chmask;

	for (i = 0; i < c_srates_cnt; ++i)
		uac1_opts->c_srates[i] = c_srates[i];

	uac1_opts->c_ssize = c_ssize;
	uac1_opts->req_number = UAC1_DEF_REQ_NUM;
#else /* CONFIG_GADGET_UAC1_LEGACY */
	uac1_opts = container_of(fi_uac1, struct f_uac1_legacy_opts, func_inst);
	uac1_opts->fn_play = fn_play;
	uac1_opts->fn_cap = fn_cap;
	uac1_opts->fn_cntl = fn_cntl;
	uac1_opts->req_buf_size = req_buf_size;
	uac1_opts->req_count = req_count;
	uac1_opts->audio_buf_size = audio_buf_size;
#endif /* CONFIG_GADGET_UAC1_LEGACY */
#endif

	status = usb_string_ids_tab(cdev, strings_dev);
	if (status < 0)
		goto fail;
	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;

	if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
		struct usb_descriptor_header *usb_desc;

		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
		if (!usb_desc) {
			status = -ENOMEM;
			goto fail;
		}
		usb_otg_descriptor_init(cdev->gadget, usb_desc);
		otg_desc[0] = usb_desc;
		otg_desc[1] = NULL;
	}

	status = usb_add_config(cdev, &audio_config_driver, audio_do_config);
	if (status < 0)
		goto fail_otg_desc;
	usb_composite_overwrite_options(cdev, &coverwrite);

	INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
	return 0;

fail_otg_desc:
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;
fail:
#ifndef CONFIG_GADGET_UAC1
	usb_put_function_instance(fi_uac2);
#else
	usb_put_function_instance(fi_uac1);
#endif
	return status;
}
/*
 * Composite unbind: release the function and its instance (NULL/ERR
 * checks cover partial bind failures), then free the OTG descriptor.
 */
static int audio_unbind(struct usb_composite_dev *cdev)
{
#ifdef CONFIG_GADGET_UAC1
	if (!IS_ERR_OR_NULL(f_uac1))
		usb_put_function(f_uac1);
	if (!IS_ERR_OR_NULL(fi_uac1))
		usb_put_function_instance(fi_uac1);
#else
	if (!IS_ERR_OR_NULL(f_uac2))
		usb_put_function(f_uac2);
	if (!IS_ERR_OR_NULL(fi_uac2))
		usb_put_function_instance(fi_uac2);
#endif
	kfree(otg_desc[0]);
	otg_desc[0] = NULL;

	return 0;
}
/* Composite driver glue: registers g_audio up to HighSpeed. */
static struct usb_composite_driver audio_driver = {
	.name		= "g_audio",
	.dev		= &device_desc,
	.strings	= audio_strings,
	.max_speed	= USB_SPEED_HIGH,
	.bind		= audio_bind,
	.unbind		= audio_unbind,
};

module_usb_composite_driver(audio_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Bryan Wu <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/legacy/audio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Raw Gadget driver.
* See Documentation/usb/raw-gadget.rst for more details.
*
* Copyright (c) 2020 Google, Inc.
* Author: Andrey Konovalov <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>
#include <uapi/linux/usb/raw_gadget.h>
#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");
/*----------------------------------------------------------------------*/
/* IDA handing out unique suffixes for per-device driver names. */
static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX	32

#define RAW_EVENT_QUEUE_SIZE	16

/*
 * Fixed-capacity FIFO of heap-allocated events. The semaphore counts
 * queued events (consumers block on it); the spinlock protects the
 * array and size.
 */
struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t		lock;
	struct semaphore	sema;
	struct usb_raw_event	*events[RAW_EVENT_QUEUE_SIZE];
	int			size;
};
/* Initialize an empty queue: semaphore at 0, no events. */
static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}
/*
 * Append an event (type + optional payload copy) to the queue and wake
 * one waiter via up(). Returns -ENOMEM if the queue is full (a WARN
 * condition — producers are expected to stay within capacity) or if the
 * atomic allocation fails.
 */
static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}
/*
 * Block (interruptibly) until an event is available, then pop the head
 * of the FIFO. The caller owns the returned event and must kfree() it.
 * Returns ERR_PTR(-EINTR) if interrupted, ERR_PTR(-ENODEV) on the
 * should-never-happen sema/size mismatch.
 */
static struct usb_raw_event *raw_event_queue_fetch(
				struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}
/* Free every event still queued and leave the queue empty. */
static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	while (queue->size > 0)
		kfree(queue->events[--queue->size]);
}
/*----------------------------------------------------------------------*/
struct raw_dev;

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

/* Per-endpoint state; one slot per hardware endpoint of the UDC. */
struct raw_ep {
	struct raw_dev		*dev;
	enum ep_state		state;
	struct usb_ep		*ep;
	u8			addr;
	struct usb_request	*req;
	bool			urb_queued;
	bool			disabling;
	ssize_t			status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

/*
 * Per-open-file device state. Refcounted: userspace fd and the gadget
 * driver each hold a reference (see dev_new()/dev_free()).
 */
struct raw_dev {
	struct kref			count;
	spinlock_t			lock;

	const char			*udc_name;
	struct usb_gadget_driver	driver;

	/* Reference to misc device: */
	struct device			*dev;

	/* Make driver names unique */
	int				driver_id_number;

	/* Protected by lock: */
	enum dev_state			state;
	bool				gadget_registered;
	struct usb_gadget		*gadget;
	struct usb_request		*req;
	bool				ep0_in_pending;
	bool				ep0_out_pending;
	bool				ep0_urb_queued;
	ssize_t				ep0_status;
	struct raw_ep			eps[USB_RAW_EPS_NUM_MAX];
	int				eps_num;

	struct completion		ep0_done;
	struct raw_event_queue		queue;
};
/* Allocate and initialize a raw_dev with one reference held. */
static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	dev->driver_id_number = -1;	/* no IDA id allocated yet */
	return dev;
}
/*
 * kref release: tear down everything dev_new()/gadget_bind() set up —
 * name strings, IDA id, ep0 request (dequeuing it first if still
 * queued), queued events, and any still-enabled endpoints.
 */
static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	kfree(dev->driver.driver.name);
	if (dev->driver_id_number >= 0)
		ida_free(&driver_id_numbers, dev->driver_id_number);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < dev->eps_num; i++) {
		if (dev->eps[i].state == STATE_EP_DISABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}
/*----------------------------------------------------------------------*/
/*
 * Add an event to the device's queue for userspace to fetch.  A failure
 * to queue (overflow / allocation error) is fatal for the device.
 */
static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	int ret;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret >= 0)
		return ret;
	/* Could not report the event; mark the device unusable. */
	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_FAILED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * Completion callback for ep0 transfers: record the outcome, clear the
 * pending-direction flag, and wake up the waiter in raw_process_ep0_io().
 */
static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	/* On success report the transferred length, otherwise the error. */
	if (!req->status)
		dev->ep0_status = req->actual;
	else
		dev->ep0_status = req->status;
	/* Exactly one direction was pending; clear it. */
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	complete(&dev->ep0_done);
}
/*
 * Derive the endpoint number from a UDC endpoint name, or return
 * USB_RAW_EP_ADDR_ANY for endpoints whose function is configurable.
 */
static u8 get_ep_addr(const char *name)
{
	/* If the endpoint has fixed function (named as e.g. "ep12out-bulk"),
	 * parse the endpoint address from its name. We deliberately use
	 * deprecated simple_strtoul() function here, as the number isn't
	 * followed by '\0' nor '\n'.
	 */
	if (isdigit(name[2]))
		return simple_strtoul(&name[2], NULL, 10);
	/* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
	return USB_RAW_EP_ADDR_ANY;
}
/*
 * UDC driver bind callback: match the gadget against the UDC name the
 * user requested, allocate the ep0 request, snapshot the UDC's endpoint
 * list into dev->eps[], and queue a CONNECT event for userspace.
 */
static int gadget_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	int ret = 0, i = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;
	/* Only bind to the UDC chosen via USB_RAW_IOCTL_INIT. */
	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;
	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}
	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	/* Record every UDC endpoint so ioctls can address them by index. */
	gadget_for_each_ep(ep, dev->gadget) {
		dev->eps[i].ep = ep;
		dev->eps[i].addr = get_ep_addr(ep->name);
		dev->eps[i].state = STATE_EP_DISABLED;
		i++;
	}
	dev->eps_num = i;
	spin_unlock_irqrestore(&dev->lock, flags);
	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0) {
		dev_err(&gadget->dev, "failed to queue event\n");
		set_gadget_data(gadget, NULL);
		return ret;
	}
	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);
	return ret;
}
/* UDC driver unbind callback: drop the reference taken in gadget_bind(). */
static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}
/*
 * UDC driver setup callback: forward the control request to userspace as
 * a USB_RAW_EVENT_CONTROL event and record which ep0 direction userspace
 * is expected to serve.  An error return makes the UDC stall ep0 (hence
 * the "stalling" message below).
 */
static int gadget_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	/* An IN data stage only exists when wLength is non-zero. */
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	/* Queue the event after dropping the lock. */
	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");
	goto out;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	return ret;
}
/*
 * These are currently unused but present in case UDC driver requires them.
 * No events are reported to userspace for these callbacks.
 */
static void gadget_disconnect(struct usb_gadget *gadget) { }
static void gadget_suspend(struct usb_gadget *gadget) { }
static void gadget_resume(struct usb_gadget *gadget) { }
static void gadget_reset(struct usb_gadget *gadget) { }
/*----------------------------------------------------------------------*/
static struct miscdevice raw_misc_device;
static int raw_open(struct inode *inode, struct file *fd)
{
struct raw_dev *dev;
/* Nonblocking I/O is not supported yet. */
if (fd->f_flags & O_NONBLOCK)
return -EINVAL;
dev = dev_new();
if (!dev)
return -ENOMEM;
fd->private_data = dev;
dev->state = STATE_DEV_OPENED;
dev->dev = raw_misc_device.this_device;
return 0;
}
/*
 * Close the raw-gadget device node: unregister the gadget driver if it
 * was registered and drop the file's reference.  The unregistration is
 * performed after dropping dev->lock (see the unlock above the call).
 */
static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;
	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	/* Decide under the lock; unregister outside of it. */
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}
out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}
/*----------------------------------------------------------------------*/
/*
 * USB_RAW_IOCTL_INIT: validate the usb_raw_init argument, allocate a
 * unique gadget driver name plus copies of the UDC driver/device names,
 * and fill in dev->driver.  Moves the device from STATE_DEV_OPENED to
 * STATE_DEV_INITIALIZED.  On error every allocation is unwound in
 * reverse order via the goto chain at the bottom.
 */
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	int driver_id_number;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	char *driver_driver_name;
	unsigned long flags;
	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;
	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		/* Default to high-speed when userspace does not care. */
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}
	/* Unique id so several raw-gadget instances can coexist. */
	driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
	if (driver_id_number < 0)
		return driver_id_number;
	driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!driver_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_id_number;
	}
	snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
			DRIVER_NAME ".%d", driver_id_number);
	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_driver_name;
	}
	/* strscpy() returns an error when the name does not fit. */
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
				UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_driver_name;
	ret = 0;
	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		ret = -ENOMEM;
		goto out_free_udc_driver_name;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
				UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_device_name;
	ret = 0;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	/* Ownership of all three name buffers moves to dev here. */
	dev->udc_name = udc_driver_name;
	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = driver_driver_name;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;
	dev->driver_id_number = driver_id_number;
	dev->state = STATE_DEV_INITIALIZED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out_free_udc_device_name:
	kfree(udc_device_name);
out_free_udc_driver_name:
	kfree(udc_driver_name);
out_free_driver_driver_name:
	kfree(driver_driver_name);
out_free_driver_id_number:
	ida_free(&driver_id_numbers, driver_id_number);
	return ret;
}
/*
 * USB_RAW_IOCTL_RUN: register the gadget driver prepared by
 * raw_ioctl_init().  The lock is dropped around the registration call,
 * with STATE_DEV_REGISTERING guarding against concurrent ioctls.
 */
static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;
	/* This ioctl takes no argument. */
	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->state = STATE_DEV_REGISTERING;
	spin_unlock_irqrestore(&dev->lock, flags);
	ret = usb_gadget_register_driver(&dev->driver);
	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_register_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_EVENT_FETCH: wait for the next queued event and copy it
 * to userspace.  At most arg.length bytes of the event payload are
 * copied.  The fetched event is a kmalloc'ed copy and is freed here.
 */
static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;
	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	/* The lock must be dropped: fetching below can block. */
	spin_unlock_irqrestore(&dev->lock, flags);
	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	/* Don't copy more payload than userspace's buffer can take. */
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		kfree(event);
		return -EFAULT;
	}
	kfree(event);
	return 0;
}
/*
 * Copy a usb_raw_ep_io header from userspace into *io, validate it, and
 * return a kernel buffer of io->length bytes: filled from the userspace
 * payload when get_from_user is set, uninitialized otherwise.  Returns
 * an ERR_PTR on failure.
 */
static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	/* Reject bad endpoint indices, unknown flags and oversized I/O. */
	if (io->ep >= USB_RAW_EPS_NUM_MAX ||
			!usb_raw_io_flags_valid(io->flags) ||
			io->length > PAGE_SIZE)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		return memdup_user(ptr + sizeof(*io), io->length);
	data = kmalloc(io->length, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);
	return data;
}
/*
 * Perform one ep0 data-stage transfer on behalf of userspace.  Checks
 * that a control request of the matching direction is pending, queues
 * dev->req, and sleeps until gadget_ep0_complete() fires.  Returns the
 * number of bytes transferred or a negative error.  Must be called
 * without dev->lock held.
 */
static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	/* Both directions pending at once means the state is corrupted. */
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	/* Queue to the UDC and wait outside the lock. */
	spin_unlock_irqrestore(&dev->lock, flags);
	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		/*
		 * Dequeue and wait for the completion so the UDC no longer
		 * references dev->req or the data buffer when we return.
		 */
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}
	spin_lock_irqsave(&dev->lock, flags);
	ret = dev->ep0_status;
out_done:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP0_WRITE: send the userspace-supplied payload as the
 * IN data stage of the pending control request.
 */
static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_ep_io io;
	void *data;
	int ret;

	/* Copies both the io header and the payload from userspace. */
	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP0_READ: receive the OUT data stage of the pending
 * control request and copy it back to userspace.  Returns the number of
 * bytes copied or a negative error.
 */
static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_ep_io io;
	void *data;
	int ret;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret >= 0) {
		/* Copy no more than the caller asked for or we received. */
		unsigned int len = min(io.length, (unsigned int)ret);

		if (copy_to_user((void __user *)(value + sizeof(io)), data,
					len))
			ret = -EFAULT;
		else
			ret = len;
	}
	kfree(data);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP0_STALL: answer the currently pending control request
 * with a protocol stall instead of a data/status stage, then clear the
 * pending-direction flag.
 */
static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;
	/* This ioctl takes no argument. */
	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
		dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	ret = usb_ep_set_halt(dev->gadget->ep0);
	if (ret < 0)
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_set_halt returned %d\n", ret);
	/* The pending control request is now answered (with a stall). */
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP_ENABLE: enable the first matching UDC endpoint for
 * the endpoint descriptor supplied by userspace.  On success returns the
 * endpoint's index in dev->eps[] (the handle used by subsequent endpoint
 * ioctls); ownership of the descriptor passes to the endpoint and it is
 * freed on disable.  On failure the descriptor is freed here, and any
 * reference to it installed in the UDC endpoint is cleared first so no
 * dangling ep->desc pointer is left behind.
 */
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct raw_ep *ep;
	bool ep_props_matched = false;
	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);
	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}
	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		/* Fixed-function endpoints must match the descriptor number. */
		if (ep->addr != usb_endpoint_num(desc) &&
				ep->addr != USB_RAW_EP_ADDR_ANY)
			continue;
		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
			continue;
		ep_props_matched = true;
		if (ep->state != STATE_EP_DISABLED)
			continue;
		ep->ep->desc = desc;
		ret = usb_ep_enable(ep->ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			/* desc is freed below; don't leave a dangling ref. */
			ep->ep->desc = NULL;
			goto out_free;
		}
		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
		if (!ep->req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep->ep);
			/* desc is freed below; don't leave a dangling ref. */
			ep->ep->desc = NULL;
			ret = -ENOMEM;
			goto out_free;
		}
		ep->state = STATE_EP_ENABLED;
		ep->ep->driver_data = ep;
		/* Success: report the endpoint index as the handle. */
		ret = i;
		goto out_unlock;
	}
	if (!ep_props_matched) {
		dev_dbg(&dev->gadget->dev, "fail, bad endpoint descriptor\n");
		ret = -EINVAL;
	} else {
		dev_dbg(&dev->gadget->dev, "fail, no endpoints available\n");
		ret = -EBUSY;
	}
out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP_DISABLE: disable a previously enabled endpoint given
 * its index in dev->eps[], and free the request and descriptor that
 * raw_ioctl_ep_enable() allocated.  The "disabling" flag, set under the
 * lock, keeps other endpoint ioctls away while the lock is dropped
 * around usb_ep_disable().
 */
static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
			"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	/* Drop the lock around usb_ep_disable(); "disabling" guards us. */
	spin_unlock_irqrestore(&dev->lock, flags);
	usb_ep_disable(dev->eps[i].ep);
	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	/* Descriptor ownership was taken in raw_ioctl_ep_enable(). */
	kfree(dev->eps[i].ep->desc);
	dev->eps[i].state = STATE_EP_DISABLED;
	dev->eps[i].disabling = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * Common implementation of USB_RAW_IOCTL_EP_SET_HALT / EP_CLEAR_HALT /
 * EP_SET_WEDGE on the endpoint at index 'value' in dev->eps[]:
 *   set && halt   -> usb_ep_set_halt()
 *   !set && halt  -> usb_ep_clear_halt()
 *   set && !halt  -> usb_ep_set_wedge()
 * ISO endpoints cannot be halted or wedged.
 */
static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
		unsigned long value, bool set, bool halt)
{
	int ret = 0, i = value;
	unsigned long flags;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, disable is in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
			"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
		dev_dbg(&dev->gadget->dev,
			"fail, can't halt/wedge ISO endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (set && halt) {
		ret = usb_ep_set_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_halt returned %d\n", ret);
	} else if (!set && halt) {
		ret = usb_ep_clear_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_clear_halt returned %d\n", ret);
	} else if (set && !halt) {
		ret = usb_ep_set_wedge(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_wedge returned %d\n", ret);
	}
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * Completion callback for non-control endpoint transfers: record the
 * outcome in the raw_ep and wake the waiter in raw_process_ep_io() via
 * the on-stack completion stored in req->context.
 */
static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	/* On success report the transferred length, otherwise the error. */
	if (!req->status)
		r_ep->status = req->actual;
	else
		r_ep->status = req->status;
	spin_unlock_irqrestore(&dev->lock, flags);
	complete((struct completion *)req->context);
}
/*
 * Perform one transfer on a non-control endpoint on behalf of userspace.
 * Validates the endpoint index, state and direction, queues ep->req, and
 * sleeps until gadget_ep_complete() signals the on-stack completion.
 * Returns the number of bytes transferred or a negative error.  Must be
 * called without dev->lock held.
 */
static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep;
	DECLARE_COMPLETION_ONSTACK(done);
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (io->ep >= dev->eps_num) {
		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep = &dev->eps[io->ep];
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	/* The requested direction must match the endpoint's. */
	if (in != usb_endpoint_dir_in(ep->ep->desc)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	/* Queue to the UDC and wait outside the lock. */
	spin_unlock_irqrestore(&dev->lock, flags);
	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		/*
		 * Dequeue and wait so the UDC no longer references the
		 * request or the on-stack completion when we return.
		 */
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}
	spin_lock_irqsave(&dev->lock, flags);
	ret = ep->status;
out_done:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP_WRITE: send the userspace-supplied payload on an IN
 * endpoint identified by io.ep.
 */
static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_ep_io io;
	char *data;
	int ret;

	/* Copies both the io header and the payload from userspace. */
	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}
/*
 * USB_RAW_IOCTL_EP_READ: receive data on an OUT endpoint identified by
 * io.ep and copy it back to userspace.  Returns the number of bytes
 * copied or a negative error.
 */
static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_ep_io io;
	char *data;
	int ret;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret >= 0) {
		/* Copy no more than the caller asked for or we received. */
		unsigned int len = min(io.length, (unsigned int)ret);

		if (copy_to_user((void __user *)(value + sizeof(io)), data,
					len))
			ret = -EFAULT;
		else
			ret = len;
	}
	kfree(data);
	return ret;
}
/*
 * USB_RAW_IOCTL_CONFIGURE: mark the gadget as configured after userspace
 * has handled a SET_CONFIGURATION request.
 */
static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	unsigned long flags;
	int ret = 0;

	/* This ioctl takes no argument. */
	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
	} else if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
	} else {
		usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/*
 * USB_RAW_IOCTL_VBUS_DRAW: report the device's bus power draw to the
 * UDC.  The argument is doubled before being passed on — presumably it
 * is in bMaxPower units (2 mA); confirm against the UAPI docs.
 */
static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
	} else if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
	} else {
		usb_gadget_vbus_draw(dev->gadget, 2 * value);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}
/* Copy in-kernel endpoint capability bits into the UAPI representation. */
static void fill_ep_caps(struct usb_ep_caps *caps,
		struct usb_raw_ep_caps *raw_caps)
{
	raw_caps->type_control = caps->type_control;
	raw_caps->type_iso = caps->type_iso;
	raw_caps->type_bulk = caps->type_bulk;
	raw_caps->type_int = caps->type_int;
	raw_caps->dir_in = caps->dir_in;
	raw_caps->dir_out = caps->dir_out;
}
/* Copy endpoint hardware limits into the UAPI representation. */
static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
{
	limits->maxpacket_limit = ep->maxpacket_limit;
	limits->max_streams = ep->max_streams;
}
/*
 * USB_RAW_IOCTL_EPS_INFO: report the name, address, capabilities and
 * limits of every UDC endpoint to userspace.  Returns the number of
 * endpoints.  The info buffer is kzalloc'ed so no uninitialized kernel
 * memory is copied out.
 */
static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_raw_eps_info *info;
	struct raw_ep *ep;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	/* Fill the snapshot under the lock, copy it out after dropping it. */
	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		strscpy(&info->eps[i].name[0], ep->ep->name,
				USB_RAW_EP_NAME_MAX);
		info->eps[i].addr = ep->addr;
		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
		fill_ep_limits(ep->ep, &info->eps[i].limits);
	}
	ret = dev->eps_num;
	spin_unlock_irqrestore(&dev->lock, flags);
	if (copy_to_user((void __user *)value, info, sizeof(*info)))
		ret = -EFAULT;
out_free:
	kfree(info);
out:
	return ret;
}
/*
 * ioctl dispatcher for the raw-gadget device node.  'value' is
 * command-specific: a userspace pointer for most commands, an endpoint
 * index for the EP_DISABLE/HALT/WEDGE family, and must be 0 for the
 * argument-less commands.
 */
static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;
	if (!dev)
		return -EBUSY;
	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	case USB_RAW_IOCTL_EPS_INFO:
		ret = raw_ioctl_eps_info(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_STALL:
		ret = raw_ioctl_ep0_stall(dev, value);
		break;
	case USB_RAW_IOCTL_EP_SET_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, true);
		break;
	case USB_RAW_IOCTL_EP_CLEAR_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, false, true);
		break;
	case USB_RAW_IOCTL_EP_SET_WEDGE:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, false);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*----------------------------------------------------------------------*/
/* File operations for the raw-gadget misc character device. */
static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl, /* all arguments are layout-compatible */
	.release = raw_release,
	.llseek = no_llseek,
};
/* Misc device with a dynamically assigned minor; registered on module load. */
static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};
module_misc_device(raw_misc_device);
/* end of drivers/usb/gadget/legacy/raw_gadget.c */
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* f_mass_storage.c -- Mass Storage USB Composite Function
*
* Copyright (C) 2003-2008 Alan Stern
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz <[email protected]>
* All rights reserved.
*/
/*
* The Mass Storage Function acts as a USB Mass Storage device,
* appearing to the host as a disk drive or as a CD-ROM drive. In
* addition to providing an example of a genuinely useful composite
* function for a USB device, it also illustrates a technique of
* double-buffering for increased throughput.
*
* For more information about MSF and in particular its module
* parameters and sysfs interface read the
* <Documentation/usb/mass-storage.rst> file.
*/
/*
* MSF is configured by specifying a fsg_config structure. It has the
* following fields:
*
 * nluns		Number of LUNs the function has (anywhere from 1
 *			to FSG_MAX_LUNS).
* luns An array of LUN configuration values. This
* should be filled for each LUN that
* function will include (ie. for "nluns"
* LUNs). Each element of the array has
* the following fields:
* ->filename The path to the backing file for the LUN.
* Required if LUN is not marked as
* removable.
* ->ro Flag specifying access to the LUN shall be
* read-only. This is implied if CD-ROM
* emulation is enabled as well as when
* it was impossible to open "filename"
* in R/W mode.
* ->removable Flag specifying that LUN shall be indicated as
* being removable.
* ->cdrom Flag specifying that LUN shall be reported as
* being a CD-ROM.
* ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
* commands for this LUN shall be ignored.
*
* vendor_name
* product_name
* release Information used as a reply to INQUIRY
* request. To use default set to NULL,
* NULL, 0xffff respectively. The first
* field should be 8 and the second 16
* characters or less.
*
* can_stall Set to permit function to halt bulk endpoints.
* Disabled on some USB devices known not
* to work correctly. You should set it
* to true.
*
* If "removable" is not set for a LUN then a backing file must be
* specified. If it is set, then NULL filename means the LUN's medium
* is not loaded (an empty string as "filename" in the fsg_config
* structure causes error). The CD-ROM emulation includes a single
* data track and no audio tracks; hence there need be only one
* backing file per LUN.
*
* This function is heavily based on "File-backed Storage Gadget" by
* Alan Stern which in turn is heavily based on "Gadget Zero" by David
* Brownell. The driver's SCSI command interface was based on the
* "Information technology - Small Computer System Interface - 2"
* document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
* available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
* The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
* was based on the "Universal Serial Bus Mass Storage Class UFI
* Command Specification" document, Revision 1.0, December 14, 1998,
* available at
* <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
*/
/*
* Driver Design
*
* The MSF is fairly straightforward. There is a main kernel
* thread that handles most of the work. Interrupt routines field
* callbacks from the controller driver: bulk- and interrupt-request
* completion notifications, endpoint-0 events, and disconnect events.
* Completion events are passed to the main thread by wakeup calls. Many
* ep0 requests are handled at interrupt time, but SetInterface,
* SetConfiguration, and device reset requests are forwarded to the
* thread in the form of "exceptions" using SIGUSR1 signals (since they
* should interrupt any ongoing file I/O operations).
*
* The thread's main routine implements the standard command/data/status
* parts of a SCSI interaction. It and its subroutines are full of tests
* for pending signals/exceptions -- all this polling is necessary since
* the kernel has no setjmp/longjmp equivalents. (Maybe this is an
* indication that the driver really wants to be running in userspace.)
* An important point is that so long as the thread is alive it keeps an
* open reference to the backing file. This will prevent unmounting
* the backing file's underlying filesystem and could cause problems
* during system shutdown, for example. To prevent such problems, the
* thread catches INT, TERM, and KILL signals and converts them into
* an EXIT exception.
*
* In normal operation the main thread is started during the gadget's
* fsg_bind() callback and stopped during fsg_unbind(). But it can
* also exit when it receives a signal, and there's no point leaving
* the gadget running when the thread is dead. As of this moment, MSF
* provides no way to deregister the gadget when thread dies -- maybe
 * a callback function is needed.
*
* To provide maximum throughput, the driver uses a circular pipeline of
* buffer heads (struct fsg_buffhd). In principle the pipeline can be
* arbitrarily long; in practice the benefits don't justify having more
* than 2 stages (i.e., double buffering). But it helps to think of the
* pipeline as being a long one. Each buffer head contains a bulk-in and
* a bulk-out request pointer (since the buffer can be used for both
* output and input -- directions always are given from the host's
* point of view) as well as a pointer to the buffer and various state
* variables.
*
* Use of the pipeline follows a simple protocol. There is a variable
* (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
* At any time that buffer head may still be in use from an earlier
* request, so each buffer head has a state variable indicating whether
* it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
* buffer head to be EMPTY, filling the buffer either by file I/O or by
* USB I/O (during which the buffer head is BUSY), and marking the buffer
* head FULL when the I/O is complete. Then the buffer will be emptied
* (again possibly by USB I/O, during which it is marked BUSY) and
* finally marked EMPTY again (possibly by a completion routine).
*
* A module parameter tells the driver to avoid stalling the bulk
* endpoints wherever the transport specification allows. This is
* necessary for some UDCs like the SuperH, which cannot reliably clear a
* halt on a bulk endpoint. However, under certain circumstances the
* Bulk-only specification requires a stall. In such cases the driver
* will halt the endpoint and set a flag indicating that it should clear
* the halt in software during the next device reset. Hopefully this
* will permit everything to work correctly. Furthermore, although the
* specification allows the bulk-out endpoint to halt when the host sends
* too much data, implementing this would cause an unavoidable race.
* The driver will always use the "no-stall" approach for OUT transfers.
*
* One subtle point concerns sending status-stage responses for ep0
* requests. Some of these requests, such as device reset, can involve
* interrupting an ongoing file I/O operation, which might take an
* arbitrarily long time. During that delay the host might give up on
* the original ep0 request and issue a new one. When that happens the
* driver should not notify the host about completion of the original
* request, as the host will no longer be waiting for it. So the driver
* assigns to each ep0 request a unique tag, and it keeps track of the
* tag value of the request associated with a long-running exception
* (device-reset, interface-change, or configuration-change). When the
* exception handler is finished, the status-stage response is submitted
* only if the current ep0 request tag is equal to the exception request
* tag. Thus only the most recently received ep0 request will get a
* status-stage response.
*
* Warning: This driver source file is too long. It ought to be split up
* into a header file plus about 3 separate .c files, to handle the details
* of the Gadget, USB Mass Storage, and SCSI protocols.
*/
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include <linux/nospec.h>
#include "configfs.h"
/*------------------------------------------------------------------------*/
#define FSG_DRIVER_DESC "Mass Storage Function"
#define FSG_DRIVER_VERSION "2009/09/11"
static const char fsg_string_interface[] = "Mass Storage";
#include "storage_common.h"
#include "f_mass_storage.h"
/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string fsg_strings[] = {
	{FSG_STRING_INTERFACE, fsg_string_interface},
	{}	/* Terminating (empty) entry */
};

/* String table for US English (0x0409) */
static struct usb_gadget_strings fsg_stringtab = {
	.language = 0x0409, /* en-us */
	.strings = fsg_strings,
};

/* NULL-terminated array of string tables, one per supported language */
static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};
/*-------------------------------------------------------------------------*/
struct fsg_dev;
struct fsg_common;
/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget *gadget;
	struct usb_composite_dev *cdev;
	struct fsg_dev *fsg;	/* May be NULL; always check via fsg_is_set() */
	wait_queue_head_t io_wait;	/* Woken by the bulk completion handlers */
	wait_queue_head_t fsg_wait;
	/* filesem protects: backing files in use */
	struct rw_semaphore filesem;
	/* lock protects: state and thread_task */
	spinlock_t lock;
	struct usb_ep *ep0; /* Copy of gadget->ep0 */
	struct usb_request *ep0req; /* Copy of cdev->req */
	unsigned int ep0_req_tag;	/* Bumped in fsg_setup() per ep0 request */
	struct fsg_buffhd *next_buffhd_to_fill;	/* Advanced via bh->next */
	struct fsg_buffhd *next_buffhd_to_drain;
	struct fsg_buffhd *buffhds;
	unsigned int fsg_num_buffers;
	int cmnd_size;	/* Length of the current SCSI command block */
	u8 cmnd[MAX_COMMAND_SIZE];
	unsigned int lun;	/* LUN addressed by the current command */
	struct fsg_lun *luns[FSG_MAX_LUNS];	/* NULL for unconfigured LUNs */
	struct fsg_lun *curlun;	/* LUN of the current command; may be NULL */
	unsigned int bulk_out_maxpacket;
	enum fsg_state state; /* For exception handling */
	unsigned int exception_req_tag;	/* ep0_req_tag that raised the exception */
	void *exception_arg;	/* Argument recorded by __raise_exception() */
	enum data_direction data_dir;
	u32 data_size;
	u32 data_size_from_cmnd;	/* Transfer length implied by the CDB */
	u32 tag;
	u32 residue;
	u32 usb_amount_left;
	unsigned int can_stall:1;
	unsigned int free_storage_on_release:1;
	unsigned int phase_error:1;
	unsigned int short_packet_received:1;	/* Host stopped an OUT early */
	unsigned int bad_lun_okay:1;
	unsigned int running:1;
	unsigned int sysfs:1;
	struct completion thread_notifier;
	struct task_struct *thread_task;	/* Worker thread; signalled on exceptions */
	/* Gadget's private data. */
	void *private_data;
	char inquiry_string[INQUIRY_STRING_LEN];
};
/* Per-function-instance state, tied to one USB configuration/interface. */
struct fsg_dev {
	struct usb_function function;	/* Composite-framework linkage */
	struct usb_gadget *gadget; /* Copy of cdev->gadget */
	struct fsg_common *common;	/* Shared state */
	u16 interface_number;	/* Checked against wIndex in class requests */
	unsigned int bulk_in_enabled:1;
	unsigned int bulk_out_enabled:1;
	unsigned long atomic_bitflags;	/* Holds IGNORE_BULK_OUT */
#define IGNORE_BULK_OUT 0
	struct usb_ep *bulk_in;
	struct usb_ep *bulk_out;
};
/*
 * Sanity-check that common->fsg is non-NULL before it is used.  On
 * failure, log the calling function and line and fire a WARN.
 * Returns 1 when fsg is set, 0 otherwise.
 */
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (!common->fsg) {
		ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
		WARN_ON(1);
		return 0;
	}
	return 1;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
/* Map a composite-framework usb_function back to its containing fsg_dev. */
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}
/*
 * Nonzero while an exception is pending: every state above
 * FSG_STATE_NORMAL denotes an exceptional condition.
 */
static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_NORMAL;
}
/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int partial;

	/* Remember how much the command actually wants... */
	bh->bulk_out_intended_length = length;

	/* ...then pad the USB request up to a whole number of packets */
	partial = length % common->bulk_out_maxpacket;
	if (partial)
		length += common->bulk_out_maxpacket - partial;
	bh->outreq->length = length;
}
/*-------------------------------------------------------------------------*/
/* Halt @ep, logging a friendly name for the two bulk endpoints. */
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char *name = ep->name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}
/*-------------------------------------------------------------------------*/
/* These routines may be called in process context or in_irq */

/*
 * Record an exception of priority @new_state (with optional argument
 * @arg for the handler) and poke the worker thread with SIGUSR1 so it
 * drops out of any interruptible wait.  A pending higher-priority
 * exception is left untouched.
 */
static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
			      void *arg)
{
	unsigned long flags;

	/*
	 * Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal.
	 */
	spin_lock_irqsave(&common->lock, flags);
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		common->exception_arg = arg;
		if (common->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_PRIV,
				      common->thread_task);
	}
	spin_unlock_irqrestore(&common->lock, flags);
}
/* Convenience wrapper: raise an exception that carries no argument. */
static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	__raise_exception(common, new_state, NULL);
}
/*-------------------------------------------------------------------------*/
/*
 * Submit the shared ep0 request.  Any error other than -ESHUTDOWN
 * (disconnect) is logged; recovery is left to the next bus reset.
 */
static int ep0_queue(struct fsg_common *common)
{
	int rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);

	common->ep0->driver_data = common;
	if (rc == 0 || rc == -ESHUTDOWN)
		return rc;

	/* We can't do much more than wait for a reset */
	WARNING(common, "error in submission: %s --> %d\n",
		common->ep0->name, rc);
	return rc;
}
/*-------------------------------------------------------------------------*/
/* Completion handlers. These always run in_irq. */

/* A bulk-in (device-to-host) request finished: hand the buffer back. */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Synchronize with the smp_load_acquire() in sleep_thread() */
	smp_store_release(&bh->state, BUF_STATE_EMPTY);
	wake_up(&common->io_wait);
}
/* A bulk-out (host-to-device) request finished: mark the buffer full. */
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	/* Compare against the intended length, not the padded one */
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Synchronize with the smp_load_acquire() in sleep_thread() */
	smp_store_release(&bh->state, BUF_STATE_FULL);
	wake_up(&common->io_wait);
}
/* Return the index of the highest configured LUN, or -1 if there are none. */
static int _fsg_common_get_max_lun(struct fsg_common *common)
{
	int i;

	for (i = ARRAY_SIZE(common->luns) - 1; i >= 0; --i)
		if (common->luns[i])
			break;
	return i;
}
/*
 * Handle the class-specific ep0 requests of the Bulk-Only Transport:
 * US_BULK_RESET_REQUEST and US_BULK_GET_MAX_LUN.  Anything else gets
 * -EOPNOTSUPP so the composite core can stall ep0.
 */
static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct usb_request *req = fsg->common->ep0req;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
	req->context = NULL;
	req->length = 0;
	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	switch (ctrl->bRequest) {

	case US_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 0)
			return -EDOM;

		/*
		 * Raise an exception to stop the current operation
		 * and reinitialize our state.
		 */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET);
		/* Status stage is deferred until the reset completes */
		return USB_GADGET_DELAYED_STATUS;

	case US_BULK_GET_MAX_LUN:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 1)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     le16_to_cpu(ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/* All the following routines run in process context */
/* Use this for bulk or interrupt transfers, not ep0 */
static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			  struct usb_request *req)
{
	int rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc == 0)
		return 0;

	/* We can't do much more than wait for a reset */
	req->status = rc;

	/*
	 * Note: currently the net2280 driver fails zero-length
	 * submissions if DMA is enabled.
	 */
	if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0))
		WARNING(fsg, "error in submission: %s --> %d\n",
			ep->name, rc);
	return rc;
}
/*
 * Queue a bulk-in request for @bh.  Returns false only when
 * common->fsg is NULL; a submission failure simply leaves the
 * buffer marked empty again.
 */
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	int rc;

	if (!fsg_is_set(common))
		return false;

	bh->state = BUF_STATE_SENDING;
	rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
	if (rc)
		bh->state = BUF_STATE_EMPTY;
	return true;
}
/*
 * Queue a bulk-out request for @bh.  Returns false only when
 * common->fsg is NULL; a submission failure marks the buffer
 * full so the drain path can report the error.
 */
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	int rc;

	if (!fsg_is_set(common))
		return false;

	bh->state = BUF_STATE_RECEIVING;
	rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
	if (rc)
		bh->state = BUF_STATE_FULL;
	return true;
}
/*
 * Sleep until a signal arrives or, when @bh is non-NULL, until its
 * state reaches at least BUF_STATE_EMPTY (set by the completion
 * handlers).  With a NULL @bh only a signal can wake us.  Returns
 * -EINTR if interrupted, 0 otherwise.
 */
static int sleep_thread(struct fsg_common *common, bool can_freeze,
		struct fsg_buffhd *bh)
{
	int rc;

	/* Wait until a signal arrives or bh is no longer busy */
	if (can_freeze)
		/*
		 * synchronize with the smp_store_release(&bh->state) in
		 * bulk_in_complete() or bulk_out_complete()
		 */
		rc = wait_event_freezable(common->io_wait,
				bh && smp_load_acquire(&bh->state) >=
					BUF_STATE_EMPTY);
	else
		rc = wait_event_interruptible(common->io_wait,
				bh && smp_load_acquire(&bh->state) >=
					BUF_STATE_EMPTY);
	return rc ? -EINTR : 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Handle READ(6)/(10)/(12)/(16): stream data from the backing file to
 * the host, double-buffering through the buffhd ring.  Sense data is
 * set on error; always returns a negative value since the data sent
 * constitutes the whole reply.
 */
static int do_read(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u64 lba;
	struct fsg_buffhd *bh;
	int rc;
	u32 amount_left;
	loff_t file_offset, file_offset_tmp;
	unsigned int amount;
	ssize_t nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	if (common->cmnd[0] == READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		if (common->cmnd[0] == READ_16)
			lba = get_unaligned_be64(&common->cmnd[2]);
		else	/* READ_10 or READ_12 */
			lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them.
		 */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		rc = sleep_thread(common, false, bh);
		if (rc)
			return rc;

		/*
		 * If we were asked to read past the end of file,
		 * end with an empty buffer.
		 */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = kernel_read(curlun->filp, bh->buf, amount,
				&file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			/* Never hand the host a partial block */
			nread = round_down(nread, curlun->blksize);
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;

		/*
		 * Except at the end of the transfer, nread will be
		 * equal to the buffer size, which is divisible by the
		 * bulk-in maxpacket size.
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}
/*-------------------------------------------------------------------------*/
/*
 * Handle WRITE(6)/(10)/(12)/(16): receive data from the host and write
 * it to the backing file, pipelining USB reception and file writes via
 * the buffhd ring.  FUA is implemented by toggling O_SYNC on the
 * backing file.  Sense data is set on error; always returns a negative
 * value since the data received constitutes the whole reply.
 */
static int do_write(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u64 lba;
	struct fsg_buffhd *bh;
	int get_some_more;
	u32 amount_left_to_req, amount_left_to_write;
	loff_t usb_offset, file_offset, file_offset_tmp;
	unsigned int amount;
	ssize_t nwritten;
	int rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		if (common->cmnd[0] == WRITE_16)
			lba = get_unaligned_be64(&common->cmnd[2]);
		else	/* WRITE_10 or WRITE_12 */
			lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output.
		 */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */

		/* Wait for the data to be received */
		rc = sleep_thread(common, false, bh);
		if (rc)
			return rc;

		common->next_buffhd_to_drain = bh->next;
		bh->state = BUF_STATE_EMPTY;

		/* Did something go wrong with the transfer? */
		if (bh->outreq->status != 0) {
			curlun->sense_data = SS_COMMUNICATION_FAILURE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		amount = bh->outreq->actual;
		if (curlun->file_length - file_offset < amount) {
			LERROR(curlun, "write %u @ %llu beyond end %llu\n",
			       amount, (unsigned long long)file_offset,
			       (unsigned long long)curlun->file_length);
			amount = curlun->file_length - file_offset;
		}

		/*
		 * Don't accept excess data.  The spec doesn't say
		 * what to do in this case.  We'll ignore the error.
		 */
		amount = min(amount, bh->bulk_out_intended_length);

		/* Don't write a partial block */
		amount = round_down(amount, curlun->blksize);
		if (amount == 0)
			goto empty_write;

		/* Perform the write */
		file_offset_tmp = file_offset;
		nwritten = kernel_write(curlun->filp, bh->buf, amount,
				&file_offset_tmp);
		VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nwritten);
		if (signal_pending(current))
			return -EINTR;		/* Interrupted! */

		if (nwritten < 0) {
			LDBG(curlun, "error in file write: %d\n",
			     (int) nwritten);
			nwritten = 0;
		} else if (nwritten < amount) {
			LDBG(curlun, "partial file write: %d/%u\n",
			     (int) nwritten, amount);
			nwritten = round_down(nwritten, curlun->blksize);
		}
		file_offset += nwritten;
		amount_left_to_write -= nwritten;
		common->residue -= nwritten;

		/* If an error occurred, report it and its position */
		if (nwritten < amount) {
			curlun->sense_data = SS_WRITE_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

 empty_write:
		/* Did the host decide to stop early? */
		if (bh->outreq->actual < bh->bulk_out_intended_length) {
			common->short_packet_received = 1;
			break;
		}
	}

	return -EIO;		/* No default reply */
}
/*-------------------------------------------------------------------------*/
/*
 * SYNCHRONIZE CACHE: flush the LUN's dirty pages to the backing file.
 * The LBA range in the CDB is ignored; everything is written out.
 * Always returns 0 (success), recording a WRITE ERROR in the sense
 * data if the flush fails.
 */
static int do_synchronize_cache(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;

	if (fsg_lun_fsync_sub(curlun))
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}
/*-------------------------------------------------------------------------*/
/* Drop the page-cache pages of the LUN's backing file (best effort). */
static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file *filp = curlun->filp;
	struct inode *inode = file_inode(filp);
	unsigned long __maybe_unused rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
/*
 * VERIFY: flush and invalidate the cache, then read the requested
 * block range from the backing store (into a scratch buffer) to
 * check that it is readable.  Sense data is set on error; returns 0
 * on completion, negative on interruption or when no blocks were
 * requested.
 */
static int do_verify(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba;
	u32 verification_length;
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	loff_t file_offset, file_offset_tmp;
	u32 amount_left;
	unsigned int amount;
	ssize_t nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = kernel_read(curlun->filp, bh->buf, amount,
				&file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			nread = round_down(nread, curlun->blksize);
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * INQUIRY: fill in a 36-byte SCSI-2 standard INQUIRY response.
 * Unsupported LUNs still get a (zeroed, TYPE_NO_LUN) response.
 */
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = TYPE_NO_LUN;	/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	if (curlun->cdrom)
		buf[0] = TYPE_ROM;
	else
		buf[0] = TYPE_DISK;
	if (curlun->removable)
		buf[1] = 0x80;
	else
		buf[1] = 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
	buf[6] = 0;
	buf[7] = 0;

	/* Per-LUN inquiry string takes precedence over the common one */
	if (curlun->inquiry_string[0])
		memcpy(buf + 8, curlun->inquiry_string,
		       sizeof(curlun->inquiry_string));
	else
		memcpy(buf + 8, common->inquiry_string,
		       sizeof(common->inquiry_string));
	return 36;
}
/*
 * REQUEST SENSE: report and then clear the current LUN's sense data
 * in fixed (18-byte) format.  Unsupported LUNs report LOGICAL UNIT
 * NOT SUPPORTED without failing the command.
 */
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;
	u32 sd, sdinfo;
	int valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		/* Reporting the sense data clears it */
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
/*
 * READ CAPACITY(10): report the last LBA (saturated to 32 bits for
 * devices larger than 2 TiB) and the block length.
 */
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	int pmi = common->cmnd[8];
	u8 *buf = (u8 *)bh->buf;
	u32 max_lba = 0xffffffff;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->num_sectors < 0x100000000ULL)
		max_lba = curlun->num_sectors - 1;
	put_unaligned_be32(max_lba, &buf[0]);		/* Max logical block */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	return 8;
}
/*
 * READ CAPACITY(16): report the full 64-bit last LBA and the block
 * length in a 32-byte parameter block (all other fields zero).
 */
static int do_read_capacity_16(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u64 lba = get_unaligned_be64(&common->cmnd[2]);
	int pmi = common->cmnd[14];
	u8 *buf = (u8 *)bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* It is safe to keep the fields not filled in below zeroed */
	memset(buf, 0, 32);
	put_unaligned_be64(curlun->num_sectors - 1, &buf[0]); /* Max LBA */
	put_unaligned_be32(curlun->blksize, &buf[8]);	/* Block length */
	return 32;
}
/* READ HEADER (CD-ROM): return the 8-byte data-mode header for one block. */
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int msf = common->cmnd[1] & 0x02;	/* MSF vs. LBA address format */
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	u8 *buf = (u8 *)bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}
/*
 * READ TOC (CD-ROM): synthesize a table of contents for a disc with a
 * single data track.  Formats 0 (formatted TOC), 1 (multi-session)
 * and 2 (raw TOC) are supported.
 */
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int msf = common->cmnd[1] & 0x02;
	int start_track = common->cmnd[6];
	u8 *buf = (u8 *)bh->buf;
	u8 format;
	int i, len;

	format = common->cmnd[2] & 0xf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    (start_track > 1 && format != 0x1)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*
	 * Check if CDB is old style SFF-8020i
	 * i.e. format is in 2 MSBs of byte 9
	 * Mac OS-X host sends us this.
	 */
	if (format == 0)
		format = (common->cmnd[9] >> 6) & 0x3;

	switch (format) {
	case 0:	/* Formatted TOC */
	case 1:	/* Multi-session info */
		len = 4 + 2*8;		/* 4 byte header + 2 descriptors */
		memset(buf, 0, len);
		buf[1] = len - 2;	/* TOC Length excludes length field */
		buf[2] = 1;		/* First track number */
		buf[3] = 1;		/* Last track number */
		buf[5] = 0x16;		/* Data track, copying allowed */
		buf[6] = 0x01;		/* Only track is number 1 */
		store_cdrom_address(&buf[8], msf, 0);

		buf[13] = 0x16;		/* Lead-out track is data */
		buf[14] = 0xAA;		/* Lead-out track number */
		store_cdrom_address(&buf[16], msf, curlun->num_sectors);
		return len;

	case 2:
		/* Raw TOC */
		len = 4 + 3*11;		/* 4 byte header + 3 descriptors */
		memset(buf, 0, len);	/* Header + A0, A1 & A2 descriptors */
		buf[1] = len - 2;	/* TOC Length excludes length field */
		buf[2] = 1;		/* First complete session */
		buf[3] = 1;		/* Last complete session */

		buf += 4;
		/* fill in A0, A1 and A2 points */
		for (i = 0; i < 3; i++) {
			buf[0] = 1;	/* Session number */
			buf[1] = 0x16;	/* Data track, copying allowed */
			/* 2 - Track number 0 ->  TOC */
			buf[3] = 0xA0 + i; /* A0, A1, A2 point */
			/* 4, 5, 6 - Min, sec, frame is zero */
			buf[8] = 1;	/* Pmin: last track number */
			buf += 11;	/* go to next track descriptor */
		}
		buf -= 11;		/* go back to A2 descriptor */

		/* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */
		store_cdrom_address(&buf[7], msf, curlun->num_sectors);
		return len;

	default:
		/* PMA, ATIP, CD-TEXT not supported/required */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
}
/*
 * MODE SENSE(6)/(10): build a mode parameter header plus the Caching
 * mode page (0x08), the only page we support.  The header layout and
 * length-field encoding differ between the 6- and 10-byte commands.
 */
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	int mscmnd = common->cmnd[0];
	u8 *buf = (u8 *) bh->buf;
	u8 *buf0 = buf;		/* Start of the response, for the length */
	int pc, page_code;
	int changeable_values, all_pages;
	int valid_page = 0;
	int len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;		/* Page control */
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {				/* Saved values not supported */
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/*
	 * Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later.
	 */
	memset(buf, 0, 8);
	if (mscmnd == MODE_SENSE) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/*
	 * The mode pages, in numerical order.  The only page we support
	 * is the Caching page.
	 */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/*
	 * Check that a valid page was requested and the mode data length
	 * isn't too long.
	 */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*  Store the mode data length */
	if (mscmnd == MODE_SENSE)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}
/*
 * START STOP UNIT: for removable LUNs, "start" just checks that a
 * medium is loaded; "stop" with LoEj ejects by closing the backing
 * file (unless removal is prevented).
 */
static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	int loej, start;

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	loej  = common->cmnd[4] & 0x02;
	start = common->cmnd[4] & 0x01;

	/*
	 * Our emulation doesn't support mounting; the medium is
	 * available for use as soon as it is loaded.
	 */
	if (start) {
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
		return 0;
	}

	/* Are we allowed to unload the media? */
	if (curlun->prevent_medium_removal) {
		LDBG(curlun, "unload attempt prevented\n");
		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
		return -EINVAL;
	}

	if (!loej)
		return 0;

	/*
	 * Temporarily trade our read-lock on filesem for a write-lock
	 * so the backing file can be closed, then restore the read-lock
	 * for the caller.
	 */
	up_read(&common->filesem);
	down_write(&common->filesem);
	fsg_lun_close(curlun);
	up_write(&common->filesem);
	down_read(&common->filesem);

	return 0;
}
/*
 * PREVENT ALLOW MEDIUM REMOVAL: latch or release the medium-removal
 * lock for a removable LUN; releasing it flushes dirty data first.
 */
static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	int prevent;

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Flush pending writes when the removal lock is being released */
	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}
/*
 * READ FORMAT CAPACITIES: report a single Current/Maximum Capacity
 * Descriptor for the LUN (12-byte response).
 */
static int do_read_format_capacities(struct fsg_common *common,
			struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
	buf[4] = 0x02;	/* Current capacity */
	/*
	 * NOTE(review): the assignment above deliberately overwrites the
	 * most significant byte of the 32-bit block length just stored at
	 * &buf[4] -- presumably descriptor byte 4 carries the type code
	 * (0x02 = formatted media) and the block length occupies only
	 * bytes 5-7; confirm against the READ FORMAT CAPACITIES layout.
	 */
	return 12;
}
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
/* We don't support MODE SELECT */
if (curlun)
curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
}
/*-------------------------------------------------------------------------*/
/*
 * Halt the bulk-in endpoint, retrying every 100 ms while the UDC
 * reports -EAGAIN. Returns 0 once halted (or on a non-retryable error,
 * which is logged and swallowed), or -EINTR if the sleep is interrupted.
 */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");

	for (;;) {
		if (rc == 0)
			return 0;
		if (rc != -EAGAIN) {
			/* Non-retryable failure: warn and pretend success */
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			return 0;
		}
		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
}
/*
 * Wedge the bulk-in endpoint (halt it so that Clear-Feature(HALT) is
 * ignored), retrying every 100 ms while the UDC reports -EAGAIN.
 * Returns 0 on success or after logging a non-retryable error, or
 * -EINTR if the retry sleep is interrupted.
 */
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");

	for (;;) {
		if (rc == 0)
			return 0;
		if (rc != -EAGAIN) {
			/* Non-retryable failure: warn and pretend success */
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			return 0;
		}
		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
}
/*
 * Receive and discard the rest of an unwanted bulk-out transfer.
 * Keeps the OUT request pipeline full until usb_amount_left reaches
 * zero, draining and emptying buffers as they complete.
 *
 * Returns 0 when all the data has been thrown away, -EIO if a transfer
 * could not be queued, -EINTR on abort (short packet or request error
 * raises FSG_STATE_ABORT_BULK_OUT), or a sleep_thread() error.
 */
static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd *bh, *bh2;
	u32 amount;
	int rc;

	/* Loop while there is still a non-empty buffer to drain or data
	 * left to request from the host. */
	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Try to submit another request if we need one */
		bh2 = common->next_buffhd_to_fill;
		if (bh2->state == BUF_STATE_EMPTY &&
				common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh2, amount);
			if (!start_out_transfer(common, bh2))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh2->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Wait for the data to be received */
		rc = sleep_thread(common, false, bh);
		if (rc)
			return rc;

		/* Throw away the data in a filled buffer */
		bh->state = BUF_STATE_EMPTY;
		common->next_buffhd_to_drain = bh->next;

		/* A short packet or an error ends everything */
		if (bh->outreq->actual < bh->bulk_out_intended_length ||
				bh->outreq->status != 0) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			return -EINTR;
		}
	}
	return 0;
}
/*
 * Complete the data stage of the current command according to
 * common->data_dir: send the final IN buffer (DATA_DIR_TO_HOST, with
 * residue handling and optional halt), drain or abort leftover OUT data
 * (DATA_DIR_FROM_HOST), stall both pipes for an unknown direction, or
 * do nothing for DATA_DIR_NONE.
 *
 * Returns 0 on success or a negative errno (-EIO, -EINTR, ...).
 */
static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	int rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break; /* Nothing to send */

	/*
	 * If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command. We mustn't
	 * try to send or receive any data. So stall both bulk pipes
	 * if we can and wait for a reset.
	 */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* Don't know what to do if common->fsg is NULL */
		} else if (!fsg_is_set(common)) {
			rc = -EIO;

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			if (!start_in_transfer(common, bh))
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet. If we are allowed to stall, halt the bulk-in
		 * endpoint. (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint. Presumably nobody will mind.)
		 */
		} else {
			bh->inreq->zero = 1;
			if (!start_in_transfer(common, bh))
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->can_stall)
				rc = halt_bulk_in_endpoint(common->fsg);
		}
		break;

	/*
	 * We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests.
	 */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/*
		 * We haven't processed all the incoming data. Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL. Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on.
		 */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/*
		 * We can't stall. Read in the excess data and throw it
		 * all away.
		 */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}
/*
 * Build and queue the Bulk-only Command Status Wrapper (CSW) for the
 * command just processed. Status and sense data come from the current
 * LUN (or from common->bad_lun_okay when no LUN is addressed) and from
 * any phase error detected earlier.
 *
 * Gives up silently if no buffer becomes available or the IN transfer
 * cannot be started (nothing sensible can be done in either case).
 *
 * Fix: dropped the redundant bare "return;" that ended this void
 * function.
 */
static void send_status(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	struct fsg_buffhd *bh;
	struct bulk_cs_wrap *csw;
	int rc;
	u8 status = US_BULK_STAT_OK;
	u32 sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	rc = sleep_thread(common, false, bh);
	if (rc)
		return;

	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = US_BULK_STAT_PHASE;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = US_BULK_STAT_FAIL;
		VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
				" info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;
	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = US_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	if (!start_in_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return;

	common->next_buffhd_to_fill = bh->next;
}
/*-------------------------------------------------------------------------*/
/*
 * Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have.
 *
 * @cmnd_size:    expected CDB length for this command
 * @data_dir:     transfer direction implied by the command
 * @mask:         bitmap of CDB byte positions allowed to be non-zero
 *                (byte 0, the opcode, is never checked)
 * @needs_medium: non-zero if the command requires a mounted medium
 * @name:         command name for debug logging
 *
 * Side effects: sets common->residue/usb_amount_left, may set
 * common->phase_error, may shrink data_size_from_cmnd, clears or sets
 * sense data on the current LUN. Returns 0 if the command may proceed,
 * -EINVAL otherwise.
 */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int i;
	unsigned int lun = common->cmnd[1] >> 5;
	static const char dirletter[4] = {'u', 'o', 'i', 'n'};
	char hdlen[20];
	struct fsg_lun *curlun;

	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/*
	 * We can't reply at all until we know the correct data direction
	 * and size.
	 */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/*
		 * Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed.
		 */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/*
		 * Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	curlun = common->curlun;
	if (curlun) {
		/* Any command except REQUEST SENSE clears old sense data */
		if (common->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		common->bad_lun_okay = 0;

		/*
		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not.
		 */
		if (common->cmnd[0] != INQUIRY &&
		    common->cmnd[0] != REQUEST_SENSE) {
			DBG(common, "unsupported LUN %u\n", common->lun);
			return -EINVAL;
		}
	}

	/*
	 * If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail.
	 */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != INQUIRY &&
	    common->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}
/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_common *common,
int cmnd_size, enum data_direction data_dir,
unsigned int mask, int needs_medium, const char *name)
{
if (common->curlun)
common->data_size_from_cmnd <<= common->curlun->blkbits;
return check_command(common, cmnd_size, data_dir,
mask, needs_medium, name);
}
/*
 * Decode and execute the SCSI command stored in common->cmnd.
 *
 * For each recognized opcode: compute data_size_from_cmnd from the CDB,
 * validate the command with check_command()/check_command_size_in_blocks()
 * (the mask argument lists which CDB bytes may be non-zero), then invoke
 * the matching do_*() handler. Unrecognized or unsupported opcodes fall
 * through to the "unknown" path, which fails with INVALID COMMAND.
 *
 * On success, sets up the single reply buffer for finish_reply() when
 * data flows to the host. Returns 0 normally, -EINTR on abort/signal,
 * or a sleep_thread() error.
 */
static int do_scsi_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc;
	int reply = -EINVAL;
	int i;
	/* Scratch for the "Unknown xNN" name; NOTE(review): appears safe as
	 * a static because only the single worker thread reaches here. */
	static char unknown[16];

	dump_cdb(common);

	/* Wait for the next buffer to become available for data or status */
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
	rc = sleep_thread(common, false, bh);
	if (rc)
		return rc;

	common->phase_error = 0;
	common->short_packet_received = 0;

	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {

	case INQUIRY:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "INQUIRY");
		if (reply == 0)
			reply = do_inquiry(common, bh);
		break;

	case MODE_SELECT:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
				      (1<<1) | (1<<4), 0,
				      "MODE SELECT(6)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case MODE_SELECT_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (3<<7), 0,
				      "MODE SELECT(10)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case MODE_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case MODE_SENSE_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
			reply = do_prevent_allow(common);
		break;

	case READ_6:
		/* READ(6): a transfer length of 0 means 256 blocks */
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_TO_HOST,
				      (7<<1) | (1<<4), 1,
				      "READ(6)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_16:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[10]);
		reply = check_command_size_in_blocks(common, 16,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xff<<2) | (0xf<<10), 1,
				      "READ(16)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_CAPACITY:
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
			reply = do_read_capacity(common, bh);
		break;

	case READ_HEADER:
		/* CD-ROM only command */
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
			reply = do_read_header(common, bh);
		break;

	case READ_TOC:
		/* CD-ROM only command */
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<6) | (3<<1), 1,
				      "READ TOC");
		if (reply == 0)
			reply = do_read_toc(common, bh);
		break;

	case READ_FORMAT_CAPACITIES:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
			reply = do_read_format_capacities(common, bh);
		break;

	case REQUEST_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
			reply = do_request_sense(common, bh);
		break;

	case SERVICE_ACTION_IN_16:
		/* Service action is in the low 5 bits of CDB byte 1 */
		switch (common->cmnd[1] & 0x1f) {

		case SAI_READ_CAPACITY_16:
			common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[10]);
			reply = check_command(common, 16, DATA_DIR_TO_HOST,
					      (1<<1) | (0xff<<2) | (0xf<<10) |
					      (1<<14), 1,
					      "READ CAPACITY(16)");
			if (reply == 0)
				reply = do_read_capacity_16(common, bh);
			break;

		default:
			goto unknown_cmnd;
		}
		break;

	case START_STOP:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
			reply = do_start_stop(common);
		break;

	case SYNCHRONIZE_CACHE:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
			reply = do_synchronize_cache(common);
		break;

	case TEST_UNIT_READY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      0, 1,
				      "TEST UNIT READY");
		break;

	/*
	 * Although optional, this command is used by MS-Windows. We
	 * support a minimal version: BytChk must be 0.
	 */
	case VERIFY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
			reply = do_verify(common);
		break;

	case WRITE_6:
		/* WRITE(6): a transfer length of 0 means 256 blocks */
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_FROM_HOST,
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_16:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[10]);
		reply = check_command_size_in_blocks(common, 16,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xff<<2) | (0xf<<10), 1,
				      "WRITE(16)");
		if (reply == 0)
			reply = do_write(common);
		break;

	/*
	 * Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting. It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks.
	 */
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:

	default:
unknown_cmnd:
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
		if (reply == 0) {
			common->curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&common->filesem);

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		/* Error reply length */
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32)reply, common->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		common->residue -= reply;
	}				/* Otherwise it's already set */

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Validate a received Command Block Wrapper (CBW) and, if it is both
 * valid and meaningful, copy the CDB and transfer parameters into
 * common (cmnd, cmnd_size, data_dir, data_size, lun, curlun, tag).
 *
 * An invalid CBW (bad length or signature) wedges the bulk-in endpoint
 * and makes us ignore bulk-out data until the next reset, as the
 * Bulk-Only spec requires. A merely non-meaningful CBW just stalls the
 * pipes when stalling is allowed. Returns 0 on success, -EINVAL on any
 * rejection.
 */
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request *req = bh->outreq;
	struct bulk_cb_wrap *cbw = req->buf;
	struct fsg_common *common = fsg->common;

	/* Was this a real packet? Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != US_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				US_BULK_CB_SIGN)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/*
		 * The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable. It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset.
		 */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
			cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
			cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/*
		 * We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to.
		 */
		if (common->can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
	if (cbw->Flags & US_BULK_FLAG_IN)
		common->data_dir = DATA_DIR_TO_HOST;
	else
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	/* Zero-length transfers override the direction bit */
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
	if (common->lun < ARRAY_SIZE(common->luns))
		common->curlun = common->luns[common->lun];
	else
		common->curlun = NULL;
	common->tag = cbw->Tag;
	return 0;
}
/*
 * Queue a bulk-out request for the next CBW, wait for it to arrive,
 * and hand it to received_cbw() for validation and decoding.
 *
 * The buffer is drained in software, so next_buffhd_to_fill is not
 * advanced and the same buffer is reused for the next CBW. Returns 0
 * on success, -EIO if the transfer can't be queued, or an error from
 * sleep_thread()/received_cbw().
 */
static int get_next_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	rc = sleep_thread(common, true, bh);
	if (rc)
		return rc;

	/* Queue a request to read a Bulk-only CBW */
	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
	if (!start_out_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	/*
	 * We will drain the buffer in software, which means we
	 * can reuse it for the next filling. No need to advance
	 * next_buffhd_to_fill.
	 */

	/* Wait for the CBW to arrive */
	rc = sleep_thread(common, true, bh);
	if (rc)
		return rc;

	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
	bh->state = BUF_STATE_EMPTY;

	return rc;
}
/*-------------------------------------------------------------------------*/
static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
struct usb_request **preq)
{
*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (*preq)
return 0;
ERROR(common, "can't allocate request for %s\n", ep->name);
return -ENOMEM;
}
/*
 * Reset interface setting and re-init endpoint state (toggle etc).
 *
 * First tears down the old state on common->fsg (free requests, disable
 * endpoints, wake anyone waiting in fsg_wait), then — if new_fsg is
 * non-NULL and nothing has failed — installs new_fsg: configures and
 * enables both bulk endpoints, allocates one IN and one OUT request per
 * buffer head, marks the gadget running, and posts a RESET OCCURRED
 * unit attention on every LUN.
 *
 * Any failure during bring-up jumps back to "reset:" so the partially
 * configured state is torn down again before returning the error.
 * Returns 0 on success or a negative errno.
 */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
	struct fsg_dev *fsg;
	int i, rc = 0;

	if (common->running)
		DBG(common, "reset interface\n");

reset:
	/* Deallocate the requests */
	if (common->fsg) {
		fsg = common->fsg;

		for (i = 0; i < common->fsg_num_buffers; ++i) {
			struct fsg_buffhd *bh = &common->buffhds[i];

			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
		}

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
		}

		common->fsg = NULL;
		/* Let anyone waiting for the old interface know it's gone */
		wake_up(&common->fsg_wait);
	}

	common->running = 0;
	if (!new_fsg || rc)
		return rc;

	common->fsg = new_fsg;
	fsg = common->fsg;

	/* Enable the endpoints */
	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_in);
	if (rc)
		goto reset;
	fsg->bulk_in->driver_data = common;
	fsg->bulk_in_enabled = 1;

	rc = config_ep_by_speed(common->gadget, &(fsg->function),
				fsg->bulk_out);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_out);
	if (rc)
		goto reset;
	fsg->bulk_out->driver_data = common;
	fsg->bulk_out_enabled = 1;
	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
	for (i = 0; i < common->fsg_num_buffers; ++i) {
		struct fsg_buffhd	*bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
		if (rc)
			goto reset;
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
		if (rc)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}

	common->running = 1;
	/* A (re)configuration counts as a reset for SCSI purposes */
	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
		if (common->luns[i])
			common->luns[i]->unit_attention_data =
				SS_RESET_OCCURRED;
	return rc;
}
/****************************** ALT CONFIGS ******************************/
/*
 * set_alt() callback: defer the actual interface (re)configuration to
 * the worker thread via a CONFIG_CHANGE exception and tell the
 * composite core the status stage will be completed later.
 */
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *dev = fsg_from_func(f);

	__raise_exception(dev->common, FSG_STATE_CONFIG_CHANGE, dev);
	return USB_GADGET_DELAYED_STATUS;
}
/*
 * disable() callback: shut down both bulk endpoints immediately, then
 * let the worker thread finish the teardown via a CONFIG_CHANGE
 * exception with a NULL new interface.
 */
static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	/* Disable the endpoints */
	if (fsg->bulk_in_enabled) {
		usb_ep_disable(fsg->bulk_in);
		fsg->bulk_in_enabled = 0;
	}
	if (fsg->bulk_out_enabled) {
		usb_ep_disable(fsg->bulk_out);
		fsg->bulk_out_enabled = 0;
	}

	/* NULL arg means "no new config": do_set_interface() only frees */
	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
/*-------------------------------------------------------------------------*/
/*
 * Handle an exception raised against the worker thread: drain pending
 * signals (anything but SIGUSR1 escalates to FSG_STATE_EXIT), cancel
 * and idle all outstanding transfers, reset the buffer ring and SCSI
 * state, then perform the state-specific action (send status after an
 * abort, clear halts after a protocol reset, switch interfaces, or
 * shut the thread down).
 */
static void handle_exception(struct fsg_common *common)
{
	int i;
	struct fsg_buffhd *bh;
	enum fsg_state old_state;
	struct fsg_lun *curlun;
	unsigned int exception_req_tag;
	struct fsg_dev *new_fsg;

	/*
	 * Clear the existing signals. Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception.
	 */
	for (;;) {
		int sig = kernel_dequeue_signal();
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			spin_lock_irq(&common->lock);
			if (common->state < FSG_STATE_EXIT)
				DBG(common, "Main thread exiting on signal\n");
			common->state = FSG_STATE_EXIT;
			spin_unlock_irq(&common->lock);
		}
	}

	/* Cancel all the pending transfers */
	if (likely(common->fsg)) {
		for (i = 0; i < common->fsg_num_buffers; ++i) {
			bh = &common->buffhds[i];
			if (bh->state == BUF_STATE_SENDING)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->state == BUF_STATE_RECEIVING)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);

			/* Wait for a transfer to become idle */
			if (sleep_thread(common, false, bh))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}

	/*
	 * Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception. Then invoke the handler.
	 */
	spin_lock_irq(&common->lock);

	for (i = 0; i < common->fsg_num_buffers; ++i) {
		bh = &common->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	new_fsg = common->exception_arg;
	old_state = common->state;
	common->state = FSG_STATE_NORMAL;

	/* An aborted bulk-out keeps the LUNs' sense data intact */
	if (old_state != FSG_STATE_ABORT_BULK_OUT) {
		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
			curlun = common->luns[i];
			if (!curlun)
				continue;
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = SS_NO_SENSE;
			curlun->unit_attention_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	}
	spin_unlock_irq(&common->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_NORMAL:
		break;

	case FSG_STATE_ABORT_BULK_OUT:
		send_status(common);
		break;

	case FSG_STATE_PROTOCOL_RESET:
		/*
		 * In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now. (The SuperH UDC
		 * requires this.)
		 */
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);

		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */

		/*
		 * Technically this should go here, but it would only be
		 * a waste of time. Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases.
		 */
		/* for (i = 0; i < common->ARRAY_SIZE(common->luns); ++i) */
		/*	if (common->luns[i]) */
		/*		common->luns[i]->unit_attention_data = */
		/*			SS_RESET_OCCURRED; */
		break;

	case FSG_STATE_CONFIG_CHANGE:
		do_set_interface(common, new_fsg);
		if (new_fsg)
			usb_composite_setup_continue(common->cdev);
		break;

	case FSG_STATE_EXIT:
		do_set_interface(common, NULL);		/* Free resources */
		spin_lock_irq(&common->lock);
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		spin_unlock_irq(&common->lock);
		break;

	case FSG_STATE_TERMINATED:
		break;
	}
}
/*-------------------------------------------------------------------------*/
/*
 * Main worker thread: loops fetching a CBW, executing the SCSI command,
 * finishing the data stage, and sending the CSW, interrupted only by
 * exceptions or signals. On termination it ejects all open LUNs and
 * signals fsg_unbind() via thread_notifier.
 */
static int fsg_main_thread(void *common_)
{
	struct fsg_common *common = common_;
	int i;

	/*
	 * Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1.
	 */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/* The main loop */
	while (common->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(common) || signal_pending(current)) {
			handle_exception(common);
			continue;
		}

		if (!common->running) {
			/* Idle until an interface is configured */
			sleep_thread(common, true, NULL);
			continue;
		}

		/* Each stage is skipped if an exception arrives meanwhile */
		if (get_next_command(common) || exception_in_progress(common))
			continue;
		if (do_scsi_command(common) || exception_in_progress(common))
			continue;
		if (finish_reply(common) || exception_in_progress(common))
			continue;
		send_status(common);
	}

	spin_lock_irq(&common->lock);
	common->thread_task = NULL;
	spin_unlock_irq(&common->lock);

	/* Eject media from all LUNs */

	down_write(&common->filesem);
	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
		struct fsg_lun *curlun = common->luns[i];

		if (curlun && fsg_lun_is_open(curlun))
			fsg_lun_close(curlun);
	}
	up_write(&common->filesem);

	/* Let fsg_unbind() know the thread has exited */
	kthread_complete_and_exit(&common->thread_notifier, 0);
}
/*************************** DEVICE ATTRIBUTES ***************************/
/* sysfs "ro" read: delegate to the shared LUN helper */
static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return fsg_show_ro(fsg_lun_from_dev(dev), buf);
}
/* sysfs "nofua" read: delegate to the shared LUN helper */
static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return fsg_show_nofua(fsg_lun_from_dev(dev), buf);
}
/* sysfs "file" read: drvdata holds the common filesem for locking */
static ssize_t file_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rw_semaphore *sem = dev_get_drvdata(dev);

	return fsg_show_file(fsg_lun_from_dev(dev), sem, buf);
}
/* sysfs "ro" write: drvdata holds the common filesem for locking */
static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct rw_semaphore *sem = dev_get_drvdata(dev);

	return fsg_store_ro(fsg_lun_from_dev(dev), sem, buf, count);
}
/* sysfs "nofua" write: delegate to the shared LUN helper */
static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	return fsg_store_nofua(fsg_lun_from_dev(dev), buf, count);
}
/* sysfs "file" write (change backing file); drvdata holds the filesem */
static ssize_t file_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rw_semaphore *sem = dev_get_drvdata(dev);

	return fsg_store_file(fsg_lun_from_dev(dev), sem, buf, count);
}
/* sysfs "forced_eject" write: force-close the LUN's backing file */
static ssize_t forced_eject_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rw_semaphore *sem = dev_get_drvdata(dev);

	return fsg_store_forced_eject(fsg_lun_from_dev(dev), sem, buf, count);
}
/* Per-LUN sysfs attributes, wired to the show/store handlers above */
static DEVICE_ATTR_RW(nofua);
/* mode wil be set in fsg_lun_dev_is_visible() */
static DEVICE_ATTR_WO(forced_eject);

/*
 * Mode of the ro and file attribute files will be overridden in
 * fsg_lun_dev_is_visible() depending on if this is a cdrom, or if it is a
 * removable device.
 */
static DEVICE_ATTR_RW(ro);
static DEVICE_ATTR_RW(file);
/****************************** FSG COMMON ******************************/
/*
 * Device-model release callback for LUN devices. The struct fsg_lun is
 * freed explicitly by fsg_common_remove_lun(), so nothing happens here;
 * the callback only exists to satisfy the driver core.
 */
static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}
/*
 * Initialize a struct fsg_common, allocating a zeroed one when the
 * caller passes NULL. free_storage_on_release records whether we own
 * the memory so the release path knows whether to kfree() it.
 * Returns the (possibly new) common, or ERR_PTR(-ENOMEM).
 */
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
	if (common) {
		common->free_storage_on_release = 0;
	} else {
		common = kzalloc(sizeof(*common), GFP_KERNEL);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	}

	init_rwsem(&common->filesem);
	spin_lock_init(&common->lock);
	init_completion(&common->thread_notifier);
	init_waitqueue_head(&common->io_wait);
	init_waitqueue_head(&common->fsg_wait);
	memset(common->luns, 0, sizeof(common->luns));
	common->state = FSG_STATE_TERMINATED;

	return common;
}
/* Record whether per-LUN sysfs attributes should be registered. */
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
{
	common->sysfs = sysfs;
}
EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
/*
 * Free the data buffers of the first n buffer heads, then the head
 * array itself. Safe to call with buffhds == NULL or with heads whose
 * buf pointers are still NULL (kfree(NULL) is a no-op).
 */
static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
{
	unsigned i;

	if (!buffhds)
		return;

	for (i = 0; i < n; ++i)
		kfree(buffhds[i].buf);
	kfree(buffhds);
}
/*
 * (Re)allocate the ring of n I/O buffer heads, each with an FSG_BUFLEN
 * data buffer, and install it in common (freeing any previous ring).
 * Returns 0 on success or -ENOMEM.
 *
 * Fix: the original used a goto that jumped into the middle of a
 * do-while body — hard to read, and for n == 0 it wrote through a
 * zero-size allocation. A plain for loop does the same work (and is a
 * no-op for n == 0).
 */
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
	struct fsg_buffhd *buffhds;
	unsigned int i;

	buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
	if (!buffhds)
		return -ENOMEM;

	/*
	 * Allocate each data buffer and link the heads into a cyclic
	 * list. On failure, heads past the failing index still have
	 * NULL "buf" pointers, so releasing them is harmless.
	 */
	for (i = 0; i < n; ++i) {
		buffhds[i].next = &buffhds[(i + 1) % n];
		buffhds[i].buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
		if (unlikely(!buffhds[i].buf))
			goto error_release;
	}

	/* Swap in the new ring, dropping any previously installed one */
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->fsg_num_buffers = n;
	common->buffhds = buffhds;

	return 0;

error_release:
	_fsg_common_free_buffers(buffhds, n);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
/* Unregister (if registered), close the backing file and free one LUN. */
void fsg_common_remove_lun(struct fsg_lun *lun)
{
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	kfree(lun);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
/* Remove the first @n LUN slots of @common; NULL slots are skipped. */
static void _fsg_common_remove_luns(struct fsg_common *common, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		if (common->luns[i]) {
			fsg_common_remove_lun(common->luns[i]);
			common->luns[i] = NULL;
		}
}
/* Remove every LUN of @common. */
void fsg_common_remove_luns(struct fsg_common *common)
{
	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
/* Free the buffer-head set installed by fsg_common_set_num_buffers(). */
void fsg_common_free_buffers(struct fsg_common *common)
{
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->buffhds = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
/*
 * Bind @common to a composite device: record gadget/ep0 handles, attach
 * the function strings and decide whether bulk-endpoint stalling may be
 * used.  Returns 0 or a negative errno from string attachment.
 */
int fsg_common_set_cdev(struct fsg_common *common,
			struct usb_composite_dev *cdev, bool can_stall)
{
	struct usb_string *us;

	common->gadget = cdev->gadget;
	common->ep0 = cdev->gadget->ep0;
	common->ep0req = cdev->req;
	common->cdev = cdev;
	us = usb_gstrings_attach(cdev, fsg_strings_array,
				 ARRAY_SIZE(fsg_strings));
	if (IS_ERR(us))
		return PTR_ERR(us);
	fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;
	/*
	 * Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly. If one of them is present,
	 * disable stalls.
	 */
	common->can_stall = can_stall &&
			gadget_is_stall_supported(common->gadget);
	return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
/* sysfs attributes exposed on each registered LUN device */
static struct attribute *fsg_lun_dev_attrs[] = {
	&dev_attr_ro.attr,
	&dev_attr_file.attr,
	&dev_attr_nofua.attr,
	&dev_attr_forced_eject.attr,
	NULL
};
/*
 * Adjust attribute permissions per LUN: "ro" is read-only for CD-ROMs,
 * "file" is writable only for removable media.
 */
static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
				      struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fsg_lun *lun = fsg_lun_from_dev(dev);

	if (attr == &dev_attr_ro.attr)
		return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
	if (attr == &dev_attr_file.attr)
		return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
	return attr->mode;
}
static const struct attribute_group fsg_lun_dev_group = {
	.attrs = fsg_lun_dev_attrs,
	.is_visible = fsg_lun_dev_is_visible,
};
static const struct attribute_group *fsg_lun_dev_groups[] = {
	&fsg_lun_dev_group,
	NULL
};
/*
 * Create LUN @id of @common from @cfg.  If sysfs is enabled the LUN is
 * registered as a child device of the gadget; otherwise @name is borrowed
 * (not copied) for the LUN's name.  An optional backing file is opened.
 * Returns 0 or a negative errno; on failure nothing is left installed.
 */
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
			  unsigned int id, const char *name,
			  const char **name_pfx)
{
	struct fsg_lun *lun;
	char *pathbuf = NULL, *p = "(no medium)";
	int rc = -ENOMEM;

	if (id >= ARRAY_SIZE(common->luns))
		return -ENODEV;
	if (common->luns[id])
		return -EBUSY;
	/* a non-removable LUN must have a medium from the start */
	if (!cfg->filename && !cfg->removable) {
		pr_err("no file given for LUN%d\n", id);
		return -EINVAL;
	}
	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun)
		return -ENOMEM;
	lun->name_pfx = name_pfx;
	lun->cdrom = !!cfg->cdrom;
	lun->ro = cfg->cdrom || cfg->ro;	/* CD-ROMs are always read-only */
	lun->initially_ro = lun->ro;
	lun->removable = !!cfg->removable;
	if (!common->sysfs) {
		/* we DON'T own the name!*/
		lun->name = name;
	} else {
		lun->dev.release = fsg_lun_release;
		lun->dev.parent = &common->gadget->dev;
		lun->dev.groups = fsg_lun_dev_groups;
		dev_set_drvdata(&lun->dev, &common->filesem);
		dev_set_name(&lun->dev, "%s", name);
		lun->name = dev_name(&lun->dev);
		rc = device_register(&lun->dev);
		if (rc) {
			pr_info("failed to register LUN%d: %d\n", id, rc);
			/* drop the device_register() reference */
			put_device(&lun->dev);
			goto error_sysfs;
		}
	}
	common->luns[id] = lun;
	if (cfg->filename) {
		rc = fsg_lun_open(lun, cfg->filename);
		if (rc)
			goto error_lun;
		p = "(error)";
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (pathbuf) {
			p = file_path(lun->filp, pathbuf, PATH_MAX);
			if (IS_ERR(p))
				p = "(error)";
		}
	}
	pr_info("LUN: %s%s%sfile: %s\n",
	      lun->removable ? "removable " : "",
	      lun->ro ? "read only " : "",
	      lun->cdrom ? "CD-ROM " : "",
	      p);
	kfree(pathbuf);
	return 0;
error_lun:
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	common->luns[id] = NULL;
error_sysfs:
	kfree(lun);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_lun);
/*
 * Replace all LUNs of @common with @cfg->nluns new ones named "lun0",
 * "lun1", ...  On failure, LUNs created so far are torn down again.
 */
int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
{
	char buf[8]; /* enough for 100000000 different numbers, decimal */
	int i, rc;

	fsg_common_remove_luns(common);
	for (i = 0; i < cfg->nluns; ++i) {
		snprintf(buf, sizeof(buf), "lun%d", i);
		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
		if (rc)
			goto fail;
	}
	pr_info("Number of LUNs=%d\n", cfg->nluns);
	return 0;
fail:
	/* only the first i LUNs were created */
	_fsg_common_remove_luns(common, i);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_luns);
/*
 * Build the SCSI INQUIRY string "vendor(8) product(16) bcdDevice(4 hex)".
 * NULL @vn/@pn select defaults; the product default depends on whether
 * the first LUN is a CD-ROM.
 */
void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
				   const char *pn)
{
	int i;

	/* Prepare inquiryString */
	i = get_default_bcdDevice();
	snprintf(common->inquiry_string, sizeof(common->inquiry_string),
		 "%-8s%-16s%04x", vn ?: "Linux",
		 /* Assume product name dependent on the first LUN */
		 pn ?: ((*common->luns)->cdrom
		     ? "File-CD Gadget"
		     : "File-Stor Gadget"),
		 i);
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
/*
 * Tear down @common: stop the worker thread, close/unregister/free every
 * LUN, free the buffer heads, and free @common itself if we allocated it.
 */
static void fsg_common_release(struct fsg_common *common)
{
	int i;

	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}
	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
		struct fsg_lun *lun = common->luns[i];
		if (!lun)
			continue;
		fsg_lun_close(lun);
		if (device_is_registered(&lun->dev))
			device_unregister(&lun->dev);
		kfree(lun);
	}
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	if (common->free_storage_on_release)
		kfree(common);
}
/*-------------------------------------------------------------------------*/
/*
 * Bind the mass-storage function to a configuration: verify at least one
 * LUN exists, start the worker thread if needed, claim an interface and
 * two bulk endpoints, and assign descriptors for all speeds.
 * On failure the worker thread (if started) is terminated again.
 */
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct fsg_common *common = fsg->common;
	struct usb_gadget *gadget = c->cdev->gadget;
	int i;
	struct usb_ep *ep;
	unsigned max_burst;
	int ret;
	struct fsg_opts *opts;

	/* Don't allow to bind if we don't have at least one LUN */
	ret = _fsg_common_get_max_lun(common);
	if (ret < 0) {
		pr_err("There should be at least one LUN.\n");
		return -EINVAL;
	}
	opts = fsg_opts_from_func_inst(f->fi);
	if (!opts->no_configfs) {
		/* configfs-based gadgets set up cdev and strings here */
		ret = fsg_common_set_cdev(fsg->common, c->cdev,
					  fsg->common->can_stall);
		if (ret)
			return ret;
		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
	}
	if (!common->thread_task) {
		common->state = FSG_STATE_NORMAL;
		common->thread_task =
			kthread_create(fsg_main_thread, common, "file-storage");
		if (IS_ERR(common->thread_task)) {
			ret = PTR_ERR(common->thread_task);
			common->thread_task = NULL;
			common->state = FSG_STATE_TERMINATED;
			return ret;
		}
		DBG(common, "I/O thread pid: %d\n",
		    task_pid_nr(common->thread_task));
		wake_up_process(common->thread_task);
	}
	fsg->gadget = gadget;
	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		goto fail;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;
	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_in = ep;
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_out = ep;
	/* Assume endpoint addresses are the same for both speeds */
	fsg_hs_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_hs_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	/* Calculate bMaxBurst, we know packet size is 1024 */
	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
	fsg_ss_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
	fsg_ss_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
			fsg_ss_function, fsg_ss_function);
	if (ret)
		goto autoconf_fail;
	return 0;
autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	i = -ENOTSUPP;
fail:
	/* terminate the thread */
	if (fsg->common->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg->common, FSG_STATE_EXIT);
		wait_for_completion(&fsg->common->thread_notifier);
	}
	return i;
}
/****************************** ALLOCATE FUNCTION *************************/
/*
 * Unbind the function: if this fsg is currently active, force a config
 * change and wait for the worker thread to let go of it, then release
 * the descriptors.
 */
static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct fsg_common *common = fsg->common;

	DBG(fsg, "unbind\n");
	if (fsg->common->fsg == fsg) {
		__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
		/* FIXME: make interruptible or killable somehow? */
		wait_event(common->fsg_wait, common->fsg != fsg);
	}
	usb_free_all_descriptors(&fsg->function);
}
/* configfs item -> per-LUN options */
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_lun_opts, group);
}
/* configfs item -> function-instance options */
static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_opts,
			    func_inst.group);
}
/* configfs release for a LUN item: just free the options wrapper */
static void fsg_lun_attr_release(struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;

	lun_opts = to_fsg_lun_opts(item);
	kfree(lun_opts);
}
static struct configfs_item_operations fsg_lun_item_ops = {
	.release		= fsg_lun_attr_release,
};
/* configfs "file" attribute: show/change the LUN's backing file */
static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
}
static ssize_t fsg_lun_opts_file_store(struct config_item *item,
				       const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
}
CONFIGFS_ATTR(fsg_lun_opts_, file);
/* configfs "ro" attribute: read-only flag */
static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
{
	return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
}
static ssize_t fsg_lun_opts_ro_store(struct config_item *item,
				       const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
}
CONFIGFS_ATTR(fsg_lun_opts_, ro);
/* configfs "removable" attribute */
static ssize_t fsg_lun_opts_removable_show(struct config_item *item,
					   char *page)
{
	return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
}
static ssize_t fsg_lun_opts_removable_store(struct config_item *item,
				       const char *page, size_t len)
{
	return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
}
CONFIGFS_ATTR(fsg_lun_opts_, removable);
/* configfs "cdrom" attribute: emulate a CD-ROM drive */
static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
{
	return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
}
static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item,
				       const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
			       len);
}
CONFIGFS_ATTR(fsg_lun_opts_, cdrom);
/* configfs "nofua" attribute: ignore SCSI FUA (force unit access) bit */
static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
{
	return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
}
static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
					const char *page, size_t len)
{
	return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
}
CONFIGFS_ATTR(fsg_lun_opts_, nofua);
/* configfs "inquiry_string" attribute: per-LUN INQUIRY override */
static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
						char *page)
{
	return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
}
static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
						 const char *page, size_t len)
{
	return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
}
CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
/* configfs "forced_eject" attribute: write-only medium eject trigger */
static ssize_t fsg_lun_opts_forced_eject_store(struct config_item *item,
					       const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_forced_eject(opts->lun, &fsg_opts->common->filesem,
				      page, len);
}
CONFIGFS_ATTR_WO(fsg_lun_opts_, forced_eject);
static struct configfs_attribute *fsg_lun_attrs[] = {
	&fsg_lun_opts_attr_file,
	&fsg_lun_opts_attr_ro,
	&fsg_lun_opts_attr_removable,
	&fsg_lun_opts_attr_cdrom,
	&fsg_lun_opts_attr_nofua,
	&fsg_lun_opts_attr_inquiry_string,
	&fsg_lun_opts_attr_forced_eject,
	NULL,
};
static const struct config_item_type fsg_lun_type = {
	.ct_item_ops	= &fsg_lun_item_ops,
	.ct_attrs	= fsg_lun_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * configfs mkdir handler: create a new LUN from a directory named
 * "lun.<NUMBER>".  Fails if the function is already in use (refcnt) or
 * the LUN slot is occupied.
 */
static struct config_group *fsg_lun_make(struct config_group *group,
					 const char *name)
{
	struct fsg_lun_opts *opts;
	struct fsg_opts *fsg_opts;
	struct fsg_lun_config config;
	char *num_str;
	u8 num;
	int ret;

	num_str = strchr(name, '.');
	if (!num_str) {
		pr_err("Unable to locate . in LUN.NUMBER\n");
		return ERR_PTR(-EINVAL);
	}
	num_str++;
	ret = kstrtou8(num_str, 0, &num);
	if (ret)
		return ERR_PTR(ret);
	fsg_opts = to_fsg_opts(&group->cg_item);
	if (num >= FSG_MAX_LUNS)
		return ERR_PTR(-ERANGE);
	/* harden the user-controlled array index against speculation */
	num = array_index_nospec(num, FSG_MAX_LUNS);
	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
		ret = -EBUSY;
		goto out;
	}
	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts) {
		ret = -ENOMEM;
		goto out;
	}
	memset(&config, 0, sizeof(config));
	config.removable = true;
	ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
				    (const char **)&group->cg_item.ci_name);
	if (ret) {
		kfree(opts);
		goto out;
	}
	opts->lun = fsg_opts->common->luns[num];
	opts->lun_id = num;
	mutex_unlock(&fsg_opts->lock);
	config_group_init_type_name(&opts->group, name, &fsg_lun_type);
	return &opts->group;
out:
	mutex_unlock(&fsg_opts->lock);
	return ERR_PTR(ret);
}
/*
 * configfs rmdir handler: remove a LUN.  If the function is currently
 * linked into a gadget, that gadget is unregistered first.
 */
static void fsg_lun_drop(struct config_group *group, struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;
	struct fsg_opts *fsg_opts;

	lun_opts = to_fsg_lun_opts(item);
	fsg_opts = to_fsg_opts(&group->cg_item);
	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt) {
		struct config_item *gadget;

		gadget = group->cg_item.ci_parent->ci_parent;
		unregister_gadget_item(gadget);
	}
	fsg_common_remove_lun(lun_opts->lun);
	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
	lun_opts->lun_id = 0;
	mutex_unlock(&fsg_opts->lock);
	config_item_put(item);
}
/* configfs release for the function item: drop the instance reference */
static void fsg_attr_release(struct config_item *item)
{
	struct fsg_opts *opts = to_fsg_opts(item);

	usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations fsg_item_ops = {
	.release	= fsg_attr_release,
};
/* configfs "stall" attribute: whether bulk-ep halts may be used */
static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->can_stall);
	mutex_unlock(&opts->lock);
	return result;
}
static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
				    size_t len)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int ret;
	bool stall;

	mutex_lock(&opts->lock);
	/* cannot change while the function is bound */
	if (opts->refcnt) {
		mutex_unlock(&opts->lock);
		return -EBUSY;
	}
	ret = kstrtobool(page, &stall);
	if (!ret) {
		opts->common->can_stall = stall;
		ret = len;
	}
	mutex_unlock(&opts->lock);
	return ret;
}
CONFIGFS_ATTR(fsg_opts_, stall);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
/* configfs "num_buffers" attribute: size of the buffer-head pool */
static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
	mutex_unlock(&opts->lock);
	return result;
}
static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int ret;
	u8 num;

	mutex_lock(&opts->lock);
	/* cannot resize while the function is bound */
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtou8(page, 0, &num);
	if (ret)
		goto end;
	ret = fsg_common_set_num_buffers(opts->common, num);
	if (ret)
		goto end;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}
CONFIGFS_ATTR(fsg_opts_, num_buffers);
#endif
static struct configfs_attribute *fsg_attrs[] = {
	&fsg_opts_attr_stall,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	&fsg_opts_attr_num_buffers,
#endif
	NULL,
};
static struct configfs_group_operations fsg_group_ops = {
	.make_group	= fsg_lun_make,
	.drop_item	= fsg_lun_drop,
};
static const struct config_item_type fsg_func_type = {
	.ct_item_ops	= &fsg_item_ops,
	.ct_group_ops	= &fsg_group_ops,
	.ct_attrs	= fsg_attrs,
	.ct_owner	= THIS_MODULE,
};
/* Free a function instance: release the shared state, then the options. */
static void fsg_free_inst(struct usb_function_instance *fi)
{
	struct fsg_opts *opts;

	opts = fsg_opts_from_func_inst(fi);
	fsg_common_release(opts->common);
	kfree(opts);
}
/*
 * Allocate a function instance: set up the shared fsg_common, its buffer
 * pool, a default removable LUN ("lun.0"), and the configfs groups.
 */
static struct usb_function_instance *fsg_alloc_inst(void)
{
	struct fsg_opts *opts;
	struct fsg_lun_config config;
	int rc;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = fsg_free_inst;
	/* opts->common is NULL here, so fsg_common_setup() allocates it */
	opts->common = fsg_common_setup(opts->common);
	if (IS_ERR(opts->common)) {
		rc = PTR_ERR(opts->common);
		goto release_opts;
	}
	rc = fsg_common_set_num_buffers(opts->common,
					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
	if (rc)
		goto release_common;
	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
	memset(&config, 0, sizeof(config));
	config.removable = true;
	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
			(const char **)&opts->func_inst.group.cg_item.ci_name);
	if (rc)
		goto release_buffers;
	opts->lun0.lun = opts->common->luns[0];
	opts->lun0.lun_id = 0;
	config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
	configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);
	return &opts->func_inst;
release_buffers:
	fsg_common_free_buffers(opts->common);
release_common:
	kfree(opts->common);
release_opts:
	kfree(opts);
	return ERR_PTR(rc);
}
/* Free a bound function: drop the instance refcount and free the fsg_dev. */
static void fsg_free(struct usb_function *f)
{
	struct fsg_dev *fsg;
	struct fsg_opts *opts;

	fsg = container_of(f, struct fsg_dev, function);
	opts = container_of(f->fi, struct fsg_opts, func_inst);
	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);
	kfree(fsg);
}
/* Allocate a usb_function backed by the instance's shared fsg_common. */
static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
{
	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
	struct fsg_common *common = opts->common;
	struct fsg_dev *fsg;

	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
	if (unlikely(!fsg))
		return ERR_PTR(-ENOMEM);
	mutex_lock(&opts->lock);
	opts->refcnt++;
	mutex_unlock(&opts->lock);
	fsg->function.name	= FSG_DRIVER_DESC;
	fsg->function.bind	= fsg_bind;
	fsg->function.unbind	= fsg_unbind;
	fsg->function.setup	= fsg_setup;
	fsg->function.set_alt	= fsg_set_alt;
	fsg->function.disable	= fsg_disable;
	fsg->function.free_func	= fsg_free;
	fsg->common               = common;
	return &fsg->function;
}
DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
/************************* Module parameters *************************/
/*
 * Translate legacy module parameters into a struct fsg_config.
 * The LUN count defaults to the number of "file" parameters (or 1),
 * clamped to FSG_MAX_LUNS.
 */
void fsg_config_from_params(struct fsg_config *cfg,
			    const struct fsg_module_parameters *params,
			    unsigned int fsg_num_buffers)
{
	struct fsg_lun_config *lun;
	unsigned i;

	/* Configure LUNs */
	cfg->nluns =
		min(params->luns ?: (params->file_count ?: 1u),
		    (unsigned)FSG_MAX_LUNS);
	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
		lun->ro = !!params->ro[i];
		lun->cdrom = !!params->cdrom[i];
		lun->removable = !!params->removable[i];
		/* empty filename strings count as "no file" */
		lun->filename =
			params->file_count > i && params->file[i][0]
			? params->file[i]
			: NULL;
	}
	/* Let MSF use defaults */
	cfg->vendor_name = NULL;
	cfg->product_name = NULL;
	cfg->ops = NULL;
	cfg->private_data = NULL;
	/* Finalise */
	cfg->can_stall = params->stall;
	cfg->fsg_num_buffers = fsg_num_buffers;
}
EXPORT_SYMBOL_GPL(fsg_config_from_params);
/* end of drivers/usb/gadget/function/f_mass_storage.c */
// SPDX-License-Identifier: GPL-2.0
/*
* f_phonet.c -- USB CDC Phonet function
*
* Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
*
* Author: Rémi Denis-Courmont
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_phonet.h>
#include <linux/if_arp.h>
#include <linux/usb/ch9.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include "u_phonet.h"
#include "u_ether.h"
/* Phonet media type for the USB bearer */
#define PN_MEDIA_USB	0x1B
/* bulk endpoint max packet size; RX buffers are whole pages */
#define MAXPACKET	512
#if (PAGE_SIZE % MAXPACKET)
#error MAXPACKET must divide PAGE_SIZE!
#endif
/*-------------------------------------------------------------------------*/
/* Per-netdev private data: points at the active function (or NULL). */
struct phonet_port {
	struct f_phonet			*usb;
	spinlock_t			lock;
};
/* Function state: endpoints, netdev, and the RX request pool. */
struct f_phonet {
	struct usb_function		function;
	struct {
		struct sk_buff		*skb;	/* partially reassembled RX packet */
		spinlock_t		lock;
	} rx;
	struct net_device		*dev;
	struct usb_ep			*in_ep, *out_ep;
	struct usb_request		*in_req;
	struct usb_request		*out_reqv[];	/* phonet_rxq_size entries */
};
/* number of outstanding RX USB requests */
static int phonet_rxq_size = 17;
static inline struct f_phonet *func_to_pn(struct usb_function *f)
{
	return container_of(f, struct f_phonet, function);
}
/*-------------------------------------------------------------------------*/
/* vendor-specific CDC subclass/descriptor type used by Phonet */
#define USB_CDC_SUBCLASS_PHONET	0xfe
#define USB_CDC_PHONET_TYPE	0xab
static struct usb_interface_descriptor
pn_control_intf_desc = {
	.bLength =		sizeof pn_control_intf_desc,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber =	DYNAMIC, */
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_PHONET,
};
static const struct usb_cdc_header_desc
pn_header_desc = {
	.bLength =		sizeof pn_header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
	.bcdCDC =		cpu_to_le16(0x0110),
};
static const struct usb_cdc_header_desc
pn_phonet_desc = {
	.bLength =		sizeof pn_phonet_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_PHONET_TYPE,
	.bcdCDC =		cpu_to_le16(0x1505), /* ??? */
};
static struct usb_cdc_union_desc
pn_union_desc = {
	.bLength =		sizeof pn_union_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC, */
	/* .bSlaveInterface0 =	DYNAMIC, */
};
/* data interface, altsetting 0: inactive, no endpoints */
static struct usb_interface_descriptor
pn_data_nop_intf_desc = {
	.bLength =		sizeof pn_data_nop_intf_desc,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber =	DYNAMIC, */
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
};
/* data interface, altsetting 1: active, two bulk endpoints */
static struct usb_interface_descriptor
pn_data_intf_desc = {
	.bLength =		sizeof pn_data_intf_desc,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber =	DYNAMIC, */
	.bAlternateSetting =	1,
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
};
static struct usb_endpoint_descriptor
pn_fs_sink_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor
pn_hs_sink_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(MAXPACKET),
};
static struct usb_endpoint_descriptor
pn_fs_source_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor
pn_hs_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
/* full-speed descriptor set */
static struct usb_descriptor_header *fs_pn_function[] = {
	(struct usb_descriptor_header *) &pn_control_intf_desc,
	(struct usb_descriptor_header *) &pn_header_desc,
	(struct usb_descriptor_header *) &pn_phonet_desc,
	(struct usb_descriptor_header *) &pn_union_desc,
	(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
	(struct usb_descriptor_header *) &pn_data_intf_desc,
	(struct usb_descriptor_header *) &pn_fs_sink_desc,
	(struct usb_descriptor_header *) &pn_fs_source_desc,
	NULL,
};
/* high-speed descriptor set */
static struct usb_descriptor_header *hs_pn_function[] = {
	(struct usb_descriptor_header *) &pn_control_intf_desc,
	(struct usb_descriptor_header *) &pn_header_desc,
	(struct usb_descriptor_header *) &pn_phonet_desc,
	(struct usb_descriptor_header *) &pn_union_desc,
	(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
	(struct usb_descriptor_header *) &pn_data_intf_desc,
	(struct usb_descriptor_header *) &pn_hs_sink_desc,
	(struct usb_descriptor_header *) &pn_hs_source_desc,
	NULL,
};
/*-------------------------------------------------------------------------*/
/* netdev open: allow transmissions */
static int pn_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}
/* netdev stop: halt transmissions */
static int pn_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/*
 * IN (TX) request completion: update stats, free the skb and re-enable
 * the TX queue (pn_net_xmit stops it after each queued packet).
 */
static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_phonet *fp = ep->driver_data;
	struct net_device *dev = fp->dev;
	struct sk_buff *skb = req->context;

	switch (req->status) {
	case 0:
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		break;
	case -ESHUTDOWN: /* disconnected */
	case -ECONNRESET: /* disabled */
		dev->stats.tx_aborted_errors++;
		fallthrough;
	default:
		dev->stats.tx_errors++;
	}
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
}
/*
 * Transmit one Phonet skb over the single IN request.  The queue is
 * stopped until pn_tx_complete() fires; non-Phonet or undeliverable
 * packets are dropped (always returns NETDEV_TX_OK).
 */
static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct phonet_port *port = netdev_priv(dev);
	struct f_phonet *fp;
	struct usb_request *req;
	unsigned long flags;

	if (skb->protocol != htons(ETH_P_PHONET))
		goto out;
	spin_lock_irqsave(&port->lock, flags);
	fp = port->usb;
	if (unlikely(!fp)) /* race with carrier loss */
		goto out_unlock;
	req = fp->in_req;
	req->buf = skb->data;
	req->length = skb->len;
	req->complete = pn_tx_complete;
	req->zero = 1;		/* short-terminate exact-multiple transfers */
	req->context = skb;
	if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC)))
		goto out_unlock;
	netif_stop_queue(dev);
	skb = NULL;		/* ownership passed to the completion handler */
out_unlock:
	spin_unlock_irqrestore(&port->lock, flags);
out:
	if (unlikely(skb)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
	}
	return NETDEV_TX_OK;
}
static const struct net_device_ops pn_netdev_ops = {
	.ndo_open	= pn_net_open,
	.ndo_stop	= pn_net_close,
	.ndo_start_xmit	= pn_net_xmit,
};
/* netdev constructor: configure a point-to-point Phonet device */
static void pn_net_setup(struct net_device *dev)
{
	const u8 addr = PN_MEDIA_USB;

	dev->features		= 0;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= PHONET_DEV_MTU;
	dev->min_mtu		= PHONET_MIN_MTU;
	dev->max_mtu		= PHONET_MAX_MTU;
	dev->hard_header_len	= 1;
	dev->addr_len		= 1;
	dev_addr_set(dev, &addr);
	/* only one in-flight TX request, so keep the queue minimal */
	dev->tx_queue_len	= 1;
	dev->netdev_ops		= &pn_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}
/*-------------------------------------------------------------------------*/
/*
* Queue buffer for data from the host
*/
/*
 * (Re)submit one OUT request with a freshly allocated page as buffer.
 * The page reference is owned by the request until completion.
 */
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
	struct page *page;
	int err;

	page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
	if (!page)
		return -ENOMEM;
	req->buf = page_address(page);
	req->length = PAGE_SIZE;
	req->context = page;
	err = usb_ep_queue(fp->out_ep, req, gfp_flags);
	if (unlikely(err))
		put_page(page);
	return err;
}
/*
 * OUT (RX) request completion.  Each page-sized transfer is one fragment
 * of a Phonet packet; a short transfer (actual < length) marks the last
 * fragment.  Fragments are attached to fp->rx.skb as page frags and the
 * completed skb is handed to the network stack.  The request is
 * resubmitted unless the endpoint is going away.
 */
static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_phonet *fp = ep->driver_data;
	struct net_device *dev = fp->dev;
	struct page *page = req->context;
	struct sk_buff *skb;
	unsigned long flags;
	int status = req->status;

	switch (status) {
	case 0:
		spin_lock_irqsave(&fp->rx.lock, flags);
		skb = fp->rx.skb;
		if (!skb)
			/* start a new packet; 12 bytes of headroom */
			skb = fp->rx.skb = netdev_alloc_skb(dev, 12);
		if (req->actual < req->length) /* Last fragment */
			fp->rx.skb = NULL;
		spin_unlock_irqrestore(&fp->rx.lock, flags);

		if (unlikely(!skb))
			break;

		if (skb->len == 0) { /* First fragment */
			skb->protocol = htons(ETH_P_PHONET);
			skb_reset_mac_header(skb);
			/* Can't use pskb_pull() on page in IRQ */
			skb_put_data(skb, page_address(page), 1);
		}

		/* frag takes over the page reference */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				skb->len <= 1, req->actual, PAGE_SIZE);
		page = NULL;

		if (req->actual < req->length) { /* Last fragment */
			skb->dev = dev;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;

			netif_rx(skb);
		}
		break;

	/* Do not resubmit in these cases: */
	case -ESHUTDOWN: /* disconnect */
	case -ECONNABORTED: /* hw reset */
	case -ECONNRESET: /* dequeued (unlink or netif down) */
		req = NULL;
		break;

	/* Do resubmit in these cases: */
	case -EOVERFLOW: /* request buffer overflow */
		dev->stats.rx_over_errors++;
		fallthrough;
	default:
		dev->stats.rx_errors++;
		break;
	}

	if (page)
		put_page(page);
	if (req)
		pn_rx_submit(fp, req, GFP_ATOMIC);
}
/*-------------------------------------------------------------------------*/
/*
 * Deactivate the data path: drop carrier, detach from the port, disable
 * both endpoints and discard any half-assembled RX packet.
 * Caller holds port->lock.
 */
static void __pn_reset(struct usb_function *f)
{
	struct f_phonet *fp = func_to_pn(f);
	struct net_device *dev = fp->dev;
	struct phonet_port *port = netdev_priv(dev);

	netif_carrier_off(dev);
	port->usb = NULL;

	usb_ep_disable(fp->out_ep);
	usb_ep_disable(fp->in_ep);
	if (fp->rx.skb) {
		dev_kfree_skb_irq(fp->rx.skb);
		fp->rx.skb = NULL;
	}
}
/*
 * SET_INTERFACE handler.  The control interface has a single altsetting;
 * the data interface toggles between inactive (0) and active (1).
 * Activating configures and enables both endpoints and primes the RX
 * request queue.
 */
static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_phonet *fp = func_to_pn(f);
	struct usb_gadget *gadget = fp->function.config->cdev->gadget;

	if (intf == pn_control_intf_desc.bInterfaceNumber)
		/* control interface, no altsetting */
		return (alt > 0) ? -EINVAL : 0;

	if (intf == pn_data_intf_desc.bInterfaceNumber) {
		struct net_device *dev = fp->dev;
		struct phonet_port *port = netdev_priv(dev);

		/* data intf (0: inactive, 1: active) */
		if (alt > 1)
			return -EINVAL;
		spin_lock(&port->lock);

		if (fp->in_ep->enabled)
			__pn_reset(f);

		if (alt == 1) {
			int i;

			if (config_ep_by_speed(gadget, f, fp->in_ep) ||
			    config_ep_by_speed(gadget, f, fp->out_ep)) {
				fp->in_ep->desc = NULL;
				fp->out_ep->desc = NULL;
				spin_unlock(&port->lock);
				return -EINVAL;
			}
			usb_ep_enable(fp->out_ep);
			usb_ep_enable(fp->in_ep);

			port->usb = fp;
			fp->out_ep->driver_data = fp;
			fp->in_ep->driver_data = fp;

			netif_carrier_on(dev);
			/* prime all RX requests */
			for (i = 0; i < phonet_rxq_size; i++)
				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
		}
		spin_unlock(&port->lock);
		return 0;
	}

	return -EINVAL;
}
/* GET_INTERFACE handler: data interface altsetting is 1 iff active. */
static int pn_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_phonet *fp = func_to_pn(f);

	if (intf == pn_control_intf_desc.bInterfaceNumber)
		return 0;

	if (intf == pn_data_intf_desc.bInterfaceNumber) {
		struct phonet_port *port = netdev_priv(fp->dev);
		u8 alt;

		spin_lock(&port->lock);
		alt = port->usb != NULL;
		spin_unlock(&port->lock);
		return alt;
	}

	return -EINVAL;
}
/* USB disconnect: tear down the data path under the port lock. */
static void pn_disconnect(struct usb_function *f)
{
	struct f_phonet *fp = func_to_pn(f);
	struct phonet_port *port = netdev_priv(fp->dev);
	unsigned long flags;

	/* remain disabled until set_alt */
	spin_lock_irqsave(&port->lock, flags);
	__pn_reset(f);
	spin_unlock_irqrestore(&port->lock, flags);
}
/*-------------------------------------------------------------------------*/
/*
 * Bind the Phonet function: register the netdev on first bind, reserve
 * two interface IDs and two bulk endpoints, assign FS/HS descriptors,
 * and allocate the RX request pool plus the single TX request.
 */
static int pn_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct usb_gadget *gadget = cdev->gadget;
	struct f_phonet *fp = func_to_pn(f);
	struct usb_ep *ep;
	int status, i;

	struct f_phonet_opts *phonet_opts;

	phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst);

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to phonet_opts->bound access
	 */
	if (!phonet_opts->bound) {
		gphonet_set_gadget(phonet_opts->net, gadget);
		status = gphonet_register_netdev(phonet_opts->net);
		if (status)
			return status;
		phonet_opts->bound = true;
	}

	/* Reserve interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto err;
	pn_control_intf_desc.bInterfaceNumber = status;
	pn_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto err;
	pn_data_nop_intf_desc.bInterfaceNumber = status;
	pn_data_intf_desc.bInterfaceNumber = status;
	pn_union_desc.bSlaveInterface0 = status;

	/* Reserve endpoints */
	status = -ENODEV;
	ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc);
	if (!ep)
		goto err;
	fp->out_ep = ep;

	ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc);
	if (!ep)
		goto err;
	fp->in_ep = ep;

	/* HS descriptors reuse the FS endpoint addresses */
	pn_hs_sink_desc.bEndpointAddress = pn_fs_sink_desc.bEndpointAddress;
	pn_hs_source_desc.bEndpointAddress = pn_fs_source_desc.bEndpointAddress;

	/* Do not try to bind Phonet twice... */
	status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function,
			NULL, NULL);
	if (status)
		goto err;

	/* Incoming USB requests */
	status = -ENOMEM;
	for (i = 0; i < phonet_rxq_size; i++) {
		struct usb_request *req;

		req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
		if (!req)
			goto err_req;

		req->complete = pn_rx_complete;
		fp->out_reqv[i] = req;
	}

	/* Outgoing USB requests */
	fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
	if (!fp->in_req)
		goto err_req;

	INFO(cdev, "USB CDC Phonet function\n");
	INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
	     fp->out_ep->name, fp->in_ep->name);
	return 0;

err_req:
	/* free the RX requests allocated so far (out_reqv is zeroed) */
	for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
		usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
	usb_free_all_descriptors(f);
err:
	ERROR(cdev, "USB CDC Phonet: cannot autoconfigure\n");
	return status;
}
/* Map a configfs item back to its enclosing f_phonet_opts instance. */
static inline struct f_phonet_opts *to_f_phonet_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_phonet_opts,
func_inst.group);
}
/* configfs item release: drop the reference held on the function instance. */
static void phonet_attr_release(struct config_item *item)
{
struct f_phonet_opts *opts = to_f_phonet_opts(item);
usb_put_function_instance(&opts->func_inst);
}
/* configfs plumbing for the "phonet" function directory. */
static struct configfs_item_operations phonet_item_ops = {
.release = phonet_attr_release,
};
/* Read-only "ifname" attribute: reports the backing net_device name. */
static ssize_t f_phonet_ifname_show(struct config_item *item, char *page)
{
return gether_get_ifname(to_f_phonet_opts(item)->net, page, PAGE_SIZE);
}
CONFIGFS_ATTR_RO(f_phonet_, ifname);
static struct configfs_attribute *phonet_attrs[] = {
&f_phonet_attr_ifname,
NULL,
};
static const struct config_item_type phonet_func_type = {
.ct_item_ops = &phonet_item_ops,
.ct_attrs = phonet_attrs,
.ct_owner = THIS_MODULE,
};
/*
 * Destroy a function instance: tear down the net_device and free the opts.
 * If pn_bind() ever registered the netdev (opts->bound), unregister it;
 * otherwise it was never registered and only needs free_netdev().
 */
static void phonet_free_inst(struct usb_function_instance *f)
{
struct f_phonet_opts *opts;
opts = container_of(f, struct f_phonet_opts, func_inst);
if (opts->bound)
gphonet_cleanup(opts->net);
else
free_netdev(opts->net);
kfree(opts);
}
/*
 * Allocate a new phonet function instance: opts structure plus the
 * default upnlink%d net_device, and hook it into configfs.
 * Returns ERR_PTR() on allocation failure.
 */
static struct usb_function_instance *phonet_alloc_inst(void)
{
struct f_phonet_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = phonet_free_inst;
opts->net = gphonet_setup_default();
if (IS_ERR(opts->net)) {
/* Save the ERR_PTR before kfree(opts) invalidates opts->net. */
struct net_device *net = opts->net;
kfree(opts);
return ERR_CAST(net);
}
config_group_init_type_name(&opts->func_inst.group, "",
&phonet_func_type);
return &opts->func_inst;
}
/* free_func callback: release the f_phonet allocated in phonet_alloc(). */
static void phonet_free(struct usb_function *f)
{
	kfree(func_to_pn(f));
}
/*
 * Undo pn_bind(): free the TX request, all RX requests and the
 * descriptor copies.  The function is already disconnected here,
 * so no request is in flight.
 */
static void pn_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_phonet *fp = func_to_pn(f);
int i;
/* We are already disconnected */
if (fp->in_req)
usb_ep_free_request(fp->in_ep, fp->in_req);
for (i = 0; i < phonet_rxq_size; i++)
if (fp->out_reqv[i])
usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
usb_free_all_descriptors(f);
}
/*
 * Allocate an f_phonet function object (with a flexible array of
 * phonet_rxq_size RX request slots) and wire up the usb_function ops.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct usb_function *phonet_alloc(struct usb_function_instance *fi)
{
struct f_phonet *fp;
struct f_phonet_opts *opts;
fp = kzalloc(struct_size(fp, out_reqv, phonet_rxq_size), GFP_KERNEL);
if (!fp)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_phonet_opts, func_inst);
fp->dev = opts->net;
fp->function.name = "phonet";
fp->function.bind = pn_bind;
fp->function.unbind = pn_unbind;
fp->function.set_alt = pn_set_alt;
fp->function.get_alt = pn_get_alt;
fp->function.disable = pn_disconnect;
fp->function.free_func = phonet_free;
spin_lock_init(&fp->rx.lock);
return &fp->function;
}
/*
 * Create the default "upnlink%d" Phonet net_device with a phonet_port
 * as its private data.  Carrier starts off; it is raised when the USB
 * interface is activated.  Returns ERR_PTR(-ENOMEM) on failure.
 */
struct net_device *gphonet_setup_default(void)
{
struct net_device *dev;
struct phonet_port *port;
/* Create net device */
dev = alloc_netdev(sizeof(*port), "upnlink%d", NET_NAME_UNKNOWN,
pn_net_setup);
if (!dev)
return ERR_PTR(-ENOMEM);
port = netdev_priv(dev);
spin_lock_init(&port->lock);
netif_carrier_off(dev);
return dev;
}
/* Parent the net_device under the gadget's device for sysfs topology. */
void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g)
{
SET_NETDEV_DEV(net, &g->dev);
}
/*
 * Register the Phonet net_device with the network stack.
 *
 * On failure the device is freed here and the error returned.
 * NOTE(review): the caller's opts->bound stays false on failure, and
 * phonet_free_inst() then calls free_netdev() on the same pointer —
 * looks like a potential double free; confirm against caller lifecycle.
 */
int gphonet_register_netdev(struct net_device *net)
{
int status;
status = register_netdev(net);
if (status)
free_netdev(net);
return status;
}
/* Unregister a previously registered Phonet net_device (also frees it). */
void gphonet_cleanup(struct net_device *dev)
{
unregister_netdev(dev);
}
/* Register "phonet" with the composite framework and module metadata. */
DECLARE_USB_FUNCTION_INIT(phonet, phonet_alloc_inst, phonet_alloc);
MODULE_AUTHOR("Rémi Denis-Courmont");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_phonet.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_hid.c -- USB HID function driver
*
* Copyright (C) 2010 Fabien Chouteau <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hid.h>
#include <linux/idr.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/usb/g_hid.h>
#include "u_f.h"
#include "u_hid.h"
/* Maximum number of simultaneous HID gadget chardev instances. */
#define HIDG_MINORS 4
/* Chardev region allocated by ghid_setup(); minors handed out via hidg_ida. */
static int major, minors;
static const struct class hidg_class = {
.name = "hidg",
};
static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
/*-------------------------------------------------------------------------*/
/* HID gadget struct */
/* One completed OUT request queued for a reader, with its read offset. */
struct f_hidg_req_list {
struct usb_request *req;
unsigned int pos; /* bytes of req already copied to userspace */
struct list_head list;
};
/* Per-function-instance state for the HID gadget. */
struct f_hidg {
/* configuration */
unsigned char bInterfaceSubClass;
unsigned char bInterfaceProtocol;
unsigned char protocol; /* HID_*_PROTOCOL, set via SET_PROTOCOL */
unsigned char idle; /* idle rate, set via SET_IDLE */
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
/*
 * use_out_ep - if true, the OUT Endpoint (interrupt out method)
 * will be used to receive reports from the host
 * using functions with the "intout" suffix.
 * Otherwise, the OUT Endpoint will not be configured
 * and the SETUP/SET_REPORT method ("ssreport" suffix)
 * will be used to receive reports.
 */
bool use_out_ep;
/* recv report */
spinlock_t read_spinlock;
wait_queue_head_t read_queue;
/* recv report - interrupt out only (use_out_ep == 1) */
struct list_head completed_out_req;
unsigned int qlen;
/* recv report - setup set_report only (use_out_ep == 0) */
char *set_report_buf;
unsigned int set_report_length;
/* send report */
spinlock_t write_spinlock;
bool write_pending;
wait_queue_head_t write_queue;
struct usb_request *req;
struct device dev;
struct cdev cdev;
struct usb_function func;
struct usb_ep *in_ep;
struct usb_ep *out_ep;
};
/* Map a usb_function back to its enclosing f_hidg. */
static inline struct f_hidg *func_to_hidg(struct usb_function *f)
{
return container_of(f, struct f_hidg, func);
}
/* device release callback: final free once the last reference is dropped. */
static void hidg_release(struct device *dev)
{
struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
kfree(hidg->set_report_buf);
kfree(hidg);
}
/*-------------------------------------------------------------------------*/
/* Static descriptors */
static struct usb_interface_descriptor hidg_interface_desc = {
.bLength = sizeof hidg_interface_desc,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 0,
/* .bNumEndpoints = DYNAMIC (depends on use_out_ep) */
.bInterfaceClass = USB_CLASS_HID,
/* .bInterfaceSubClass = DYNAMIC */
/* .bInterfaceProtocol = DYNAMIC */
/* .iInterface = DYNAMIC */
};
static struct hid_descriptor hidg_desc = {
.bLength = sizeof hidg_desc,
.bDescriptorType = HID_DT_HID,
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
/*.desc[0].bDescriptorType = DYNAMIC */
/*.desc[0].wDescriptorLenght = DYNAMIC */
};
/* Super-Speed Support */
static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
.bLength = sizeof(hidg_ss_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
/* .wBytesPerInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
.bLength = sizeof(hidg_ss_out_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
/* .wBytesPerInterval = DYNAMIC */
};
/* SS descriptor set with an interrupt OUT endpoint (use_out_ep == 1). */
static struct usb_descriptor_header *hidg_ss_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
(struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
(struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
NULL,
};
/* SS descriptor set without OUT endpoint (SET_REPORT mode). */
static struct usb_descriptor_header *hidg_ss_descriptors_ssreport[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
NULL,
};
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
(struct usb_descriptor_header *)&hidg_hs_out_ep_desc,
NULL,
};
static struct usb_descriptor_header *hidg_hs_descriptors_ssreport[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
NULL,
};
/* Full-Speed Support */
static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 10, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 10, /* FIXME: Add this field in the
 * HID gadget configuration?
 * (struct hidg_func_descriptor)
 */
};
static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
(struct usb_descriptor_header *)&hidg_fs_out_ep_desc,
NULL,
};
static struct usb_descriptor_header *hidg_fs_descriptors_ssreport[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
NULL,
};
/*-------------------------------------------------------------------------*/
/* Strings */
#define CT_FUNC_HID_IDX 0
static struct usb_string ct_func_string_defs[] = {
[CT_FUNC_HID_IDX].s = "HID Interface",
{}, /* end of list */
};
static struct usb_gadget_strings ct_func_string_table = {
.language = 0x0409, /* en-US */
.strings = ct_func_string_defs,
};
static struct usb_gadget_strings *ct_func_strings[] = {
&ct_func_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
/* Char Device */
/*
 * read() path when reports arrive on the interrupt OUT endpoint.
 * Blocks (unless O_NONBLOCK) until at least one completed OUT request
 * is queued, copies as much as fits to userspace, and requeues the
 * request once fully consumed.  Returns bytes copied or negative errno.
 */
static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
size_t count, loff_t *ptr)
{
struct f_hidg *hidg = file->private_data;
struct f_hidg_req_list *list;
struct usb_request *req;
unsigned long flags;
int ret;
if (!count)
return 0;
spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
/* wait for at least one buffer to complete */
while (!READ_COND_INTOUT) {
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND_INTOUT))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->read_spinlock, flags);
}
/* pick the first one */
list = list_first_entry(&hidg->completed_out_req,
struct f_hidg_req_list, list);
/*
 * Remove this from list to protect it from beign free()
 * while host disables our function
 */
list_del(&list->list);
req = list->req;
count = min_t(unsigned int, count, req->actual - list->pos);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
/* copy to user outside spinlock */
count -= copy_to_user(buffer, req->buf + list->pos, count);
list->pos += count;
/*
 * if this request is completely handled and transfered to
 * userspace, remove its entry from the list and requeue it
 * again. Otherwise, we will revisit it again upon the next
 * call, taking into account its current read position.
 */
if (list->pos == req->actual) {
kfree(list);
req->length = hidg->report_length;
ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
if (ret < 0) {
free_ep_req(hidg->out_ep, req);
return ret;
}
} else {
/* partial read: put the entry back for the next read() call */
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add(&list->list, &hidg->completed_out_req);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
}
return count;
}
/* Data available when a SET_REPORT buffer has been stashed by ep0. */
#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
/*
 * read() path when reports arrive via control SET_REPORT requests.
 * Blocks (unless O_NONBLOCK) until hidg_ssreport_complete() has stored
 * a buffer, then hands it to userspace and frees it.
 */
static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
size_t count, loff_t *ptr)
{
struct f_hidg *hidg = file->private_data;
char *tmp_buf = NULL;
unsigned long flags;
if (!count)
return 0;
spin_lock_irqsave(&hidg->read_spinlock, flags);
while (!READ_COND_SSREPORT) {
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND_SSREPORT))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->read_spinlock, flags);
}
count = min_t(unsigned int, count, hidg->set_report_length);
/* Take ownership of the buffer under the lock, copy outside of it. */
tmp_buf = hidg->set_report_buf;
hidg->set_report_buf = NULL;
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (tmp_buf != NULL) {
count -= copy_to_user(buffer, tmp_buf, count);
kfree(tmp_buf);
} else {
count = -ENOMEM;
}
wake_up(&hidg->read_queue);
return count;
}
/*
 * read() entry point: dispatch to the receive path selected at
 * instantiation time (interrupt OUT endpoint vs. control SET_REPORT).
 */
static ssize_t f_hidg_read(struct file *file, char __user *buffer,
			   size_t count, loff_t *ptr)
{
	struct f_hidg *hidg = file->private_data;

	return hidg->use_out_ep ?
		f_hidg_intout_read(file, buffer, count, ptr) :
		f_hidg_ssreport_read(file, buffer, count, ptr);
}
/*
 * Completion callback for the IN (send-report) request: clear
 * write_pending and wake any writer waiting in f_hidg_write().
 */
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
unsigned long flags;
if (req->status != 0) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
/*
 * write() entry point: send one report (truncated to report_length)
 * on the interrupt IN endpoint.  Only one report may be in flight at
 * a time; write_pending serializes writers.  Returns bytes queued or
 * a negative errno (-ESHUTDOWN once the function is disabled).
 */
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
struct usb_request *req;
unsigned long flags;
ssize_t status = -ENOMEM;
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->req) {
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return -ESHUTDOWN;
}
#define WRITE_COND (!hidg->write_pending)
try_again:
/* write queue */
while (!WRITE_COND) {
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible_exclusive(
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->write_spinlock, flags);
}
/* claim the single in-flight slot before dropping the lock */
hidg->write_pending = 1;
req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (!req) {
ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
status = -ESHUTDOWN;
goto release_write_pending;
}
status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
status = -EINVAL;
goto release_write_pending;
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
/* when our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, req);
/*
 * TODO
 * Should we fail with error here?
 */
goto try_again;
}
req->status = 0;
req->zero = 0;
req->length = count;
req->complete = f_hidg_req_complete;
req->context = hidg;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (!hidg->in_ep->enabled) {
ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
status = -ESHUTDOWN;
goto release_write_pending;
}
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0)
goto release_write_pending;
else
status = count;
return status;
release_write_pending:
/* give the slot back and let the next writer proceed */
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
return status;
}
/*
 * poll() entry point: writable when no report is in flight, readable
 * when data is pending on whichever receive path is configured.
 */
static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
__poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
if (WRITE_COND)
ret |= EPOLLOUT | EPOLLWRNORM;
if (hidg->use_out_ep) {
if (READ_COND_INTOUT)
ret |= EPOLLIN | EPOLLRDNORM;
} else {
if (READ_COND_SSREPORT)
ret |= EPOLLIN | EPOLLRDNORM;
}
return ret;
}
/* condition macros are file-local helpers; keep their scope tight */
#undef WRITE_COND
#undef READ_COND_SSREPORT
#undef READ_COND_INTOUT
/* release() entry point: drop the per-file back-pointer; nothing to free. */
static int f_hidg_release(struct inode *inode, struct file *fd)
{
fd->private_data = NULL;
return 0;
}
/* open() entry point: resolve the f_hidg from the embedded cdev. */
static int f_hidg_open(struct inode *inode, struct file *fd)
{
struct f_hidg *hidg =
container_of(inode->i_cdev, struct f_hidg, cdev);
fd->private_data = hidg;
return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_function */
/* Thin wrapper around alloc_ep_req() for a fixed-length report buffer. */
static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
return alloc_ep_req(ep, length);
}
/*
 * Completion callback for interrupt OUT requests: on success queue the
 * request on completed_out_req for readers; on error or disconnect free
 * the request.
 */
static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
struct usb_composite_dev *cdev = hidg->func.config->cdev;
struct f_hidg_req_list *req_list;
unsigned long flags;
switch (req->status) {
case 0:
req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
if (!req_list) {
ERROR(cdev, "Unable to allocate mem for req_list\n");
goto free_req;
}
req_list->req = req;
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add_tail(&req_list->list, &hidg->completed_out_req);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
break;
default:
ERROR(cdev, "Set report failed %d\n", req->status);
fallthrough;
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
free_req:
free_ep_req(ep, req);
return;
}
}
/*
 * Completion callback for the ep0 data stage of SET_REPORT: stash the
 * received payload in set_report_buf (replacing any unread one) and
 * wake readers.
 */
static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)req->context;
struct usb_composite_dev *cdev = hidg->func.config->cdev;
char *new_buf = NULL;
unsigned long flags;
if (req->status != 0 || req->buf == NULL || req->actual == 0) {
ERROR(cdev,
"%s FAILED: status=%d, buf=%p, actual=%d\n",
__func__, req->status, req->buf, req->actual);
return;
}
spin_lock_irqsave(&hidg->read_spinlock, flags);
/* GFP_ATOMIC: we are in a completion (possibly IRQ) context */
new_buf = krealloc(hidg->set_report_buf, req->actual, GFP_ATOMIC);
if (new_buf == NULL) {
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
return;
}
hidg->set_report_buf = new_buf;
hidg->set_report_length = req->actual;
memcpy(hidg->set_report_buf, req->buf, req->actual);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
}
/*
 * Handle HID class-specific and GET_DESCRIPTOR control requests on ep0.
 * Responds via cdev->req or returns -EOPNOTSUPP to stall unsupported
 * requests.  (The `break`s after each `goto` are unreachable and kept
 * only as defensive switch-case terminators.)
 */
static int hidg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct f_hidg *hidg = func_to_hidg(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int status = 0;
__u16 value, length;
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
VDBG(cdev,
"%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
__func__, ctrl->bRequestType, ctrl->bRequest, value);
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_REPORT):
VDBG(cdev, "get_report\n");
/* send an empty report */
length = min_t(unsigned, length, hidg->report_length);
memset(req->buf, 0x0, length);
goto respond;
break;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
VDBG(cdev, "get_protocol\n");
length = min_t(unsigned int, length, 1);
((u8 *) req->buf)[0] = hidg->protocol;
goto respond;
break;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_IDLE):
VDBG(cdev, "get_idle\n");
length = min_t(unsigned int, length, 1);
((u8 *) req->buf)[0] = hidg->idle;
goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_REPORT):
VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
/* in intout mode reports arrive on the OUT ep, so stall here */
if (hidg->use_out_ep)
goto stall;
req->complete = hidg_ssreport_complete;
req->context = hidg;
goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_PROTOCOL):
VDBG(cdev, "set_protocol\n");
if (value > HID_REPORT_PROTOCOL)
goto stall;
length = 0;
/*
 * We assume that programs implementing the Boot protocol
 * are also compatible with the Report Protocol
 */
if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
hidg->protocol = value;
goto respond;
}
goto stall;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_IDLE):
VDBG(cdev, "set_idle\n");
length = 0;
hidg->idle = value >> 8;
goto respond;
break;
case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
case HID_DT_HID:
{
/* respond from a copy so concurrent binds can't see edits */
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
hidg_desc_copy.desc[0].wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
hidg_desc_copy.bLength);
memcpy(req->buf, &hidg_desc_copy, length);
goto respond;
break;
}
case HID_DT_REPORT:
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
length = min_t(unsigned short, length,
hidg->report_desc_length);
memcpy(req->buf, hidg->report_desc, length);
goto respond;
break;
default:
VDBG(cdev, "Unknown descriptor request 0x%x\n",
value >> 8);
goto stall;
break;
}
break;
default:
VDBG(cdev, "Unknown request 0x%x\n",
ctrl->bRequest);
goto stall;
break;
}
stall:
return -EOPNOTSUPP;
respond:
req->zero = 0;
req->length = length;
status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (status < 0)
ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value);
return status;
}
/*
 * disable callback: shut down both endpoints, drop any completed OUT
 * requests still queued for readers, and reclaim the IN request (unless
 * it is currently in flight, in which case its completion will run and
 * the request was already freed here via the write_pending handshake).
 */
static void hidg_disable(struct usb_function *f)
{
struct f_hidg *hidg = func_to_hidg(f);
struct f_hidg_req_list *list, *next;
unsigned long flags;
usb_ep_disable(hidg->in_ep);
if (hidg->out_ep) {
usb_ep_disable(hidg->out_ep);
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
free_ep_req(hidg->out_ep, list->req);
list_del(&list->list);
kfree(list);
}
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
free_ep_req(hidg->in_ep, hidg->req);
hidg->write_pending = 1;
}
/* signals f_hidg_write() that the function is gone */
hidg->req = NULL;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
/*
 * set_alt callback: (re)enable the IN endpoint and allocate its single
 * send request; in intout mode also enable the OUT endpoint and prime
 * it with qlen receive requests.  On any failure everything enabled or
 * allocated so far is torn down.  Returns 0 or a negative errno.
 */
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
struct usb_request *req_in = NULL;
unsigned long flags;
int i, status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
if (hidg->in_ep != NULL) {
/* restart endpoint */
usb_ep_disable(hidg->in_ep);
status = config_ep_by_speed(f->config->cdev->gadget, f,
hidg->in_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
goto fail;
}
status = usb_ep_enable(hidg->in_ep);
if (status < 0) {
ERROR(cdev, "Enable IN endpoint FAILED!\n");
goto fail;
}
hidg->in_ep->driver_data = hidg;
req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
if (!req_in) {
status = -ENOMEM;
goto disable_ep_in;
}
}
if (hidg->use_out_ep && hidg->out_ep != NULL) {
/* restart endpoint */
usb_ep_disable(hidg->out_ep);
status = config_ep_by_speed(f->config->cdev->gadget, f,
hidg->out_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
goto free_req_in;
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto free_req_in;
}
hidg->out_ep->driver_data = hidg;
/*
 * allocate a bunch of read buffers and queue them all at once.
 */
for (i = 0; i < hidg->qlen && status == 0; i++) {
struct usb_request *req =
hidg_alloc_ep_req(hidg->out_ep,
hidg->report_length);
if (req) {
req->complete = hidg_intout_complete;
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
if (status) {
ERROR(cdev, "%s queue req --> %d\n",
hidg->out_ep->name, status);
free_ep_req(hidg->out_ep, req);
}
} else {
status = -ENOMEM;
goto disable_out_ep;
}
}
}
if (hidg->in_ep != NULL) {
/* publish the send request and unblock any waiting writer */
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->req = req_in;
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
return 0;
disable_out_ep:
if (hidg->out_ep)
usb_ep_disable(hidg->out_ep);
free_req_in:
if (req_in)
free_ep_req(hidg->in_ep, req_in);
disable_ep_in:
if (hidg->in_ep)
usb_ep_disable(hidg->in_ep);
fail:
return status;
}
/* file_operations for the /dev/hidgN character device. */
static const struct file_operations f_hidg_fops = {
.owner = THIS_MODULE,
.open = f_hidg_open,
.release = f_hidg_release,
.write = f_hidg_write,
.read = f_hidg_read,
.poll = f_hidg_poll,
.llseek = noop_llseek,
};
/*
 * bind callback: attach strings, reserve the interface ID and
 * endpoint(s), fill in the dynamic descriptor fields, assign the
 * descriptor sets matching the receive mode, initialize the
 * read/write state and register the character device.
 */
static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_ep *ep;
struct f_hidg *hidg = func_to_hidg(f);
struct usb_string *us;
int status;
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(c->cdev, ct_func_strings,
ARRAY_SIZE(ct_func_string_defs));
if (IS_ERR(us))
return PTR_ERR(us);
hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id;
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
hidg_interface_desc.bInterfaceNumber = status;
/* allocate instance-specific endpoints */
status = -ENODEV;
ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
if (!ep)
goto fail;
hidg->in_ep = ep;
hidg->out_ep = NULL;
if (hidg->use_out_ep) {
ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
if (!ep)
goto fail;
hidg->out_ep = ep;
}
/* used only if use_out_ep == 1 */
hidg->set_report_buf = NULL;
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
hidg_interface_desc.bNumEndpoints = hidg->use_out_ep ? 2 : 1;
hidg->protocol = HID_REPORT_PROTOCOL;
hidg->idle = 1;
hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_in_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_out_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
/*
 * We can use hidg_desc struct here but we should not relay
 * that its content won't change after returning from this function.
 */
hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
hidg_desc.desc[0].wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
/* HS/SS descriptors reuse the endpoint addresses chosen for FS */
hidg_hs_in_ep_desc.bEndpointAddress =
hidg_fs_in_ep_desc.bEndpointAddress;
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
hidg_ss_in_ep_desc.bEndpointAddress =
hidg_fs_in_ep_desc.bEndpointAddress;
hidg_ss_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
if (hidg->use_out_ep)
status = usb_assign_descriptors(f,
hidg_fs_descriptors_intout,
hidg_hs_descriptors_intout,
hidg_ss_descriptors_intout,
hidg_ss_descriptors_intout);
else
status = usb_assign_descriptors(f,
hidg_fs_descriptors_ssreport,
hidg_hs_descriptors_ssreport,
hidg_ss_descriptors_ssreport,
hidg_ss_descriptors_ssreport);
if (status)
goto fail;
spin_lock_init(&hidg->write_spinlock);
/* no IN request exists until set_alt(); block writers until then */
hidg->write_pending = 1;
hidg->req = NULL;
spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
/* create char device */
cdev_init(&hidg->cdev, &f_hidg_fops);
status = cdev_device_add(&hidg->cdev, &hidg->dev);
if (status)
goto fail_free_descs;
return 0;
fail_free_descs:
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
if (hidg->req != NULL)
free_ep_req(hidg->in_ep, hidg->req);
return status;
}
/*
 * Allocate a free chardev minor for a new HID function instance.
 *
 * Returns a minor in [0, HIDG_MINORS) or -ENODEV when all minors are
 * taken.  Callers serialize via hidg_ida_lock.
 */
static inline int hidg_get_minor(void)
{
	int ret;

	/* ida_simple_get() is deprecated; use ida_alloc() directly. */
	ret = ida_alloc(&hidg_ida, GFP_KERNEL);
	if (ret >= HIDG_MINORS) {
		ida_free(&hidg_ida, ret);
		ret = -ENODEV;
	}

	return ret;
}
/* Map a configfs item back to its enclosing f_hid_opts instance. */
static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_hid_opts,
func_inst.group);
}
/* configfs item release: drop the reference held on the function instance. */
static void hid_attr_release(struct config_item *item)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations hidg_item_ops = {
.release = hid_attr_release,
};
/*
 * F_HID_OPT(name, prec, limit) - generate show/store configfs attribute
 * handlers for an unsigned integer option `opts->name` of width `prec`
 * bits, rejecting values above `limit` and any write while the instance
 * is in use (opts->refcnt != 0).  No comments inside the macro body:
 * a // comment would swallow the trailing line-continuation backslash.
 */
#define F_HID_OPT(name, prec, limit) \
static ssize_t f_hid_opts_##name##_show(struct config_item *item, char *page)\
{ \
struct f_hid_opts *opts = to_f_hid_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = sprintf(page, "%d\n", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_hid_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_hid_opts *opts = to_f_hid_opts(item); \
int ret; \
u##prec num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = kstrtou##prec(page, 0, &num); \
if (ret) \
goto end; \
\
if (num > limit) { \
ret = -EINVAL; \
goto end; \
} \
opts->name = num; \
ret = len; \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_hid_opts_, name)
/* Instantiate the four integer options exposed via configfs. */
F_HID_OPT(subclass, 8, 255);
F_HID_OPT(protocol, 8, 255);
F_HID_OPT(no_out_endpoint, 8, 1);
F_HID_OPT(report_length, 16, 65535);
/* configfs show: emit the raw report descriptor bytes (<= PAGE_SIZE,
 * enforced by the store side). */
static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
int result;
mutex_lock(&opts->lock);
result = opts->report_desc_length;
memcpy(page, opts->report_desc, opts->report_desc_length);
mutex_unlock(&opts->lock);
return result;
}
/*
 * configfs store: replace the report descriptor with a copy of the
 * written bytes.  Rejected while the instance is in use (-EBUSY) or
 * when larger than one page (-ENOSPC).
 */
static ssize_t f_hid_opts_report_desc_store(struct config_item *item,
const char *page, size_t len)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
int ret = -EBUSY;
char *d;
mutex_lock(&opts->lock);
if (opts->refcnt)
goto end;
if (len > PAGE_SIZE) {
ret = -ENOSPC;
goto end;
}
d = kmemdup(page, len, GFP_KERNEL);
if (!d) {
ret = -ENOMEM;
goto end;
}
/* kfree(NULL) is a no-op for the initial, unset descriptor */
kfree(opts->report_desc);
opts->report_desc = d;
opts->report_desc_length = len;
opts->report_desc_alloc = true;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_hid_opts_, report_desc);
/* Read-only "dev" attribute: the major:minor of this instance's chardev. */
static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
return sprintf(page, "%d:%d\n", major, opts->minor);
}
CONFIGFS_ATTR_RO(f_hid_opts_, dev);
/* All configfs attributes of a HID function instance directory. */
static struct configfs_attribute *hid_attrs[] = {
	&f_hid_opts_attr_subclass,
	&f_hid_opts_attr_protocol,
	&f_hid_opts_attr_no_out_endpoint,
	&f_hid_opts_attr_report_length,
	&f_hid_opts_attr_report_desc,
	&f_hid_opts_attr_dev,
	NULL,
};

/* configfs item type tying the attributes to the release handler. */
static const struct config_item_type hid_func_type = {
	.ct_item_ops	= &hidg_item_ops,
	.ct_attrs	= hid_attrs,
	.ct_owner	= THIS_MODULE,
};
/* Return @minor to the pool of free hidg minor numbers. */
static inline void hidg_put_minor(int minor)
{
	/* ida_free() is the modern API; ida_simple_remove() is a
	 * deprecated wrapper around it with identical behavior. */
	ida_free(&hidg_ida, minor);
}
/*
 * Free a function instance: return the minor, tear down the chrdev
 * region and class when the last instance disappears, and free the
 * report descriptor if this module allocated it.
 */
static void hidg_free_inst(struct usb_function_instance *f)
{
	struct f_hid_opts *opts;

	opts = container_of(f, struct f_hid_opts, func_inst);

	mutex_lock(&hidg_ida_lock);

	hidg_put_minor(opts->minor);
	/* Last minor returned: undo ghid_setup(). */
	if (ida_is_empty(&hidg_ida))
		ghid_cleanup();

	mutex_unlock(&hidg_ida_lock);

	if (opts->report_desc_alloc)
		kfree(opts->report_desc);

	kfree(opts);
}
/*
 * Allocate a new function instance.  The first instance registers the
 * device class and chrdev region via ghid_setup(); error paths unwind
 * so that the region is released again once no minors are in use.
 * Returns the instance or an ERR_PTR.
 */
static struct usb_function_instance *hidg_alloc_inst(void)
{
	struct f_hid_opts *opts;
	struct usb_function_instance *ret;
	int status = 0;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = hidg_free_inst;
	ret = &opts->func_inst;

	mutex_lock(&hidg_ida_lock);

	/* First instance: register class + chrdev region. */
	if (ida_is_empty(&hidg_ida)) {
		status = ghid_setup(NULL, HIDG_MINORS);
		if (status) {
			ret = ERR_PTR(status);
			kfree(opts);
			goto unlock;
		}
	}

	opts->minor = hidg_get_minor();
	if (opts->minor < 0) {
		ret = ERR_PTR(opts->minor);
		kfree(opts);
		/* If we were the only (failed) instance, undo setup. */
		if (ida_is_empty(&hidg_ida))
			ghid_cleanup();
		goto unlock;
	}
	config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type);

unlock:
	mutex_unlock(&hidg_ida_lock);
	return ret;
}
/*
 * Free a function object: drop the embedded device reference (the
 * release callback frees the struct) and decrement the instance
 * refcount so its attributes become writable again.
 */
static void hidg_free(struct usb_function *f)
{
	struct f_hidg *hidg;
	struct f_hid_opts *opts;

	hidg = func_to_hidg(f);
	opts = container_of(f->fi, struct f_hid_opts, func_inst);

	put_device(&hidg->dev);

	mutex_lock(&opts->lock);
	--opts->refcnt;
	mutex_unlock(&opts->lock);
}
/* Unbind from the configuration: remove the cdev/device pair and
 * release the descriptors allocated at bind time. */
static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_hidg *hidg = func_to_hidg(f);

	cdev_device_del(&hidg->cdev, &hidg->dev);

	usb_free_all_descriptors(f);
}
/*
 * Allocate a function object from an instance: snapshot the configfs
 * options into a new f_hidg under opts->lock, take a refcount on the
 * instance, and wire up the usb_function callbacks.
 * Returns the function or an ERR_PTR.
 */
static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
{
	struct f_hidg *hidg;
	struct f_hid_opts *opts;
	int ret;

	/* allocate and initialize one new instance */
	hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
	if (!hidg)
		return ERR_PTR(-ENOMEM);

	opts = container_of(fi, struct f_hid_opts, func_inst);

	mutex_lock(&opts->lock);

	device_initialize(&hidg->dev);
	hidg->dev.release = hidg_release;
	hidg->dev.class = &hidg_class;
	hidg->dev.devt = MKDEV(major, opts->minor);
	ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
	if (ret)
		goto err_unlock;

	hidg->bInterfaceSubClass = opts->subclass;
	hidg->bInterfaceProtocol = opts->protocol;
	hidg->report_length = opts->report_length;
	hidg->report_desc_length = opts->report_desc_length;
	if (opts->report_desc) {
		/* Private copy, freed automatically with hidg->dev. */
		hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
						 opts->report_desc_length,
						 GFP_KERNEL);
		if (!hidg->report_desc) {
			ret = -ENOMEM;
			goto err_put_device;
		}
	}
	hidg->use_out_ep = !opts->no_out_endpoint;

	/* Block further attribute changes while the function exists. */
	++opts->refcnt;
	mutex_unlock(&opts->lock);

	hidg->func.name    = "hid";
	hidg->func.bind    = hidg_bind;
	hidg->func.unbind  = hidg_unbind;
	hidg->func.set_alt = hidg_set_alt;
	hidg->func.disable = hidg_disable;
	hidg->func.setup   = hidg_setup;
	hidg->func.free_func = hidg_free;

	/* this could be made configurable at some point */
	hidg->qlen = 4;

	return &hidg->func;

err_put_device:
	put_device(&hidg->dev);
err_unlock:
	mutex_unlock(&opts->lock);
	return ERR_PTR(ret);
}
DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabien Chouteau");
/*
 * Register the hidg device class and reserve @count char device
 * minors.  @g is unused; kept for symmetry with other gadget setup
 * helpers.  Records the allocated major/minors in file-scope globals
 * for ghid_cleanup().  Returns 0 or a negative errno.
 */
int ghid_setup(struct usb_gadget *g, int count)
{
	int status;
	dev_t dev;

	status = class_register(&hidg_class);
	if (status)
		return status;

	status = alloc_chrdev_region(&dev, 0, count, "hidg");
	if (status) {
		class_unregister(&hidg_class);
		return status;
	}

	major = MAJOR(dev);
	minors = count;

	return 0;
}
/* Undo ghid_setup(): release the chrdev region (if any was reserved)
 * and unregister the device class.  Safe to call when major == 0. */
void ghid_cleanup(void)
{
	if (major) {
		unregister_chrdev_region(MKDEV(major, 0), minors);
		major = minors = 0;
	}

	class_unregister(&hidg_class);
}
| linux-master | drivers/usb/gadget/function/f_hid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* uvc_configfs.c
*
* Configfs support for the uvc function.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <[email protected]>
*/
#include "uvc_configfs.h"
#include <linux/sort.h>
#include <linux/usb/video.h>
/* -----------------------------------------------------------------------------
* Global Utility Structures and Macros
*/
/*
 * UVC_ATTR() - declare a read/write configfs attribute wired to
 * prefix##cname##_show / prefix##cname##_store handlers; the visible
 * file name is the UVC descriptor field name @aname.
 */
#define UVC_ATTR(prefix, cname, aname) \
static struct configfs_attribute prefix##attr_##cname = { \
	.ca_name	= __stringify(aname),			\
	.ca_mode	= S_IRUGO | S_IWUGO,			\
	.ca_owner	= THIS_MODULE,				\
	.show		= prefix##cname##_show,			\
	.store		= prefix##cname##_store,		\
}

/* Read-only variant: no store handler, mode 0444. */
#define UVC_ATTR_RO(prefix, cname, aname) \
static struct configfs_attribute prefix##attr_##cname = { \
	.ca_name	= __stringify(aname),			\
	.ca_mode	= S_IRUGO,				\
	.ca_owner	= THIS_MODULE,				\
	.show		= prefix##cname##_show,			\
}

/* Single bytes need no byte swapping; these keep the le##bits##_to_cpu
 * pattern usable for 8-bit descriptor fields. */
#define le8_to_cpu(x)	(x)
#define cpu_to_le8(x)	(x)
/* Three-way comparison of two u32 values, for use with sort(). */
static int uvcg_config_compare_u32(const void *l, const void *r)
{
	u32 a = *(const u32 *)l;
	u32 b = *(const u32 *)r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
/* Item callback for __uvcg_iter_item_entries(): count tokens into the
 * int pointed to by @priv; @buf and @size are unused. */
static inline int __uvcg_count_item_entries(char *buf, void *priv, unsigned int size)
{
	++*((int *)priv);
	return 0;
}
/*
 * Item callback for __uvcg_iter_item_entries(): parse one numeric
 * token and append its low @size bytes at the output cursor *priv,
 * advancing the cursor.  Values not representable in @size bytes are
 * rejected with -ERANGE.
 */
static inline int __uvcg_fill_item_entries(char *buf, void *priv, unsigned int size)
{
	unsigned int num;
	u8 **values;
	int ret;

	ret = kstrtouint(buf, 0, &num);
	if (ret)
		return ret;

	if (num != (num & GENMASK((size * 8) - 1, 0)))
		return -ERANGE;

	values = priv;
	/* NOTE(review): copies the first @size bytes of a host-endian
	 * unsigned int, which selects the low-order bytes only on
	 * little-endian machines - confirm big-endian is out of scope. */
	memcpy(*values, &num, size);
	*values += size;

	return 0;
}
/*
 * Split @page (@len bytes of NUL/newline-separated numeric tokens)
 * and invoke fun(token, priv, size) for each one.  @size is the byte
 * width of the expected values and bounds the token length.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the token-length bound must be the allocated bufsize, not
 * sizeof(buf) - buf is a char *, so sizeof(buf) is the pointer size
 * (typically 8), which wrongly rejected tokens longer than 7
 * characters even though bufsize bytes were allocated for them.
 */
static int __uvcg_iter_item_entries(const char *page, size_t len,
				    int (*fun)(char *, void *, unsigned int),
				    void *priv, unsigned int size)
{
	/* sign, base 2 representation, newline, terminator */
	unsigned int bufsize = 1 + size * 8 + 1 + 1;
	const char *pg = page;
	int i, ret = 0;
	char *buf;

	if (!fun)
		return -EINVAL;

	buf = kzalloc(bufsize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (pg - page < len) {
		/* Copy one token, bounded by the allocated buffer. */
		i = 0;
		while (i < bufsize && (pg - page < len) &&
		       *pg != '\0' && *pg != '\n')
			buf[i++] = *pg++;
		if (i == bufsize) {
			/* Token too long to be a valid value. */
			ret = -EINVAL;
			goto out_free_buf;
		}
		/* Skip separators between tokens. */
		while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
			++pg;
		buf[i] = '\0';

		ret = fun(buf, priv, size);
		if (ret)
			goto out_free_buf;
	}

out_free_buf:
	kfree(buf);
	return ret;
}
/*
 * uvcg_config_group_type - static description of a configfs group:
 * its item type, directory name, and either a NULL-terminated table
 * of child group types or a custom create_children() hook.
 */
struct uvcg_config_group_type {
	struct config_item_type type;
	const char *name;
	const struct uvcg_config_group_type **children;
	int (*create_children)(struct config_group *group);
};

/* Groups made by uvcg_config_create_group() are plain kzalloc'd
 * config_groups; just free them on release. */
static void uvcg_config_item_release(struct config_item *item)
{
	struct config_group *group = to_config_group(item);

	kfree(group);
}

static struct configfs_item_operations uvcg_config_item_ops = {
	.release	= uvcg_config_item_release,
};
static int uvcg_config_create_group(struct config_group *parent,
				    const struct uvcg_config_group_type *type);

/*
 * Populate @group's children: either delegate to the type's
 * create_children() hook or recursively create each entry of the
 * children table.  Returns 0 or a negative errno.
 */
static int uvcg_config_create_children(struct config_group *group,
				       const struct uvcg_config_group_type *type)
{
	const struct uvcg_config_group_type **child;
	int ret;

	if (type->create_children)
		return type->create_children(group);

	for (child = type->children; child && *child; ++child) {
		ret = uvcg_config_create_group(group, *child);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * Allocate a default group of the given @type under @parent and
 * recursively create its children.  The group is freed via
 * uvcg_config_item_release() when its item is dropped.
 */
static int uvcg_config_create_group(struct config_group *parent,
				    const struct uvcg_config_group_type *type)
{
	struct config_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	config_group_init_type_name(group, type->name, &type->type);
	configfs_add_default_group(group, parent);

	return uvcg_config_create_children(group, type);
}
/* Recursively unlink and drop all default children of @group (the
 * final config_item_put triggers the release callbacks). */
static void uvcg_config_remove_children(struct config_group *group)
{
	struct config_group *child, *n;

	list_for_each_entry_safe(child, n, &group->default_groups, group_entry) {
		list_del(&child->group_entry);
		uvcg_config_remove_children(child);
		config_item_put(&child->cg_item);
	}
}
/* -----------------------------------------------------------------------------
* control/header/<NAME>
* control/header
*/
/*
 * UVCG_CTRL_HDR_ATTR() - generate show/store handlers for a numeric
 * field of the class-specific VC interface header descriptor.
 * @bits selects the field width and endianness helpers, @limit the
 * maximum accepted value.  Stores fail with -EBUSY once the header is
 * linked into a streaming class or the function is bound.
 */
#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit)			\
static ssize_t uvcg_control_header_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct uvcg_control_header *ch = to_uvcg_control_header(item);	\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = ch->item.ci_parent->ci_parent->ci_parent;		\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
static ssize_t								\
uvcg_control_header_##cname##_store(struct config_item *item,		\
				    const char *page, size_t len)	\
{									\
	struct uvcg_control_header *ch = to_uvcg_control_header(item);	\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
	int ret;							\
	u##bits num;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = ch->item.ci_parent->ci_parent->ci_parent;		\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	if (ch->linked || opts->refcnt) {				\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou##bits(page, 0, &num);				\
	if (ret)							\
		goto end;						\
									\
	if (num > limit) {						\
		ret = -EINVAL;						\
		goto end;						\
	}								\
	ch->desc.aname = cpu_to_le##bits(num);				\
	ret = len;							\
end:									\
	mutex_unlock(&opts->lock);					\
	mutex_unlock(su_mutex);						\
	return ret;							\
}									\
									\
UVC_ATTR(uvcg_control_header_, cname, aname)

UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, 16, 0xffff);

UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, 32, 0x7fffffff);

#undef UVCG_CTRL_HDR_ATTR
/* Attributes of a control/header/<name> directory. */
static struct configfs_attribute *uvcg_control_header_attrs[] = {
	&uvcg_control_header_attr_bcd_uvc,
	&uvcg_control_header_attr_dw_clock_frequency,
	NULL,
};

static const struct config_item_type uvcg_control_header_type = {
	.ct_item_ops	= &uvcg_config_item_ops,
	.ct_attrs	= uvcg_control_header_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * mkdir handler for control/header: allocate a VC interface header
 * descriptor pre-filled with UVC 1.1 / 48 MHz defaults.
 */
static struct config_item *uvcg_control_header_make(struct config_group *group,
						    const char *name)
{
	struct uvcg_control_header *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	h->desc.bLength			= UVC_DT_HEADER_SIZE(1);
	h->desc.bDescriptorType		= USB_DT_CS_INTERFACE;
	h->desc.bDescriptorSubType	= UVC_VC_HEADER;
	h->desc.bcdUVC			= cpu_to_le16(0x0110);
	h->desc.dwClockFrequency	= cpu_to_le32(48000000);

	config_item_init_type_name(&h->item, name, &uvcg_control_header_type);

	return &h->item;
}

static struct configfs_group_operations uvcg_control_header_grp_ops = {
	.make_item		= uvcg_control_header_make,
};

/* control/header group: user-creatable header items. */
static const struct uvcg_config_group_type uvcg_control_header_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_group_ops	= &uvcg_control_header_grp_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "header",
};
/* -----------------------------------------------------------------------------
* control/processing/default
*/
/*
 * UVCG_DEFAULT_PROCESSING_ATTR() - generate a read-only show handler
 * for a numeric field of the (single, shared) processing unit
 * descriptor in f_uvc_opts.
 */
#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, bits)		\
static ssize_t uvcg_default_processing_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct config_group *group = to_config_group(item);		\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	struct uvc_processing_unit_descriptor *pd;			\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;	\
	opts = to_f_uvc_opts(opts_item);				\
	pd = &opts->uvc_processing;					\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname));	\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_default_processing_, cname, aname)

UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, 8);
UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, 8);
UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, 16);
UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, 8);

#undef UVCG_DEFAULT_PROCESSING_ATTR
/*
 * Store bmControls of the processing unit descriptor.  Input is a
 * list of numeric byte values; at most bControlSize entries are
 * accepted.  Fails with -EBUSY while the function is bound.
 */
static ssize_t uvcg_default_processing_bm_controls_store(
	struct config_item *item, const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_processing_unit_descriptor *pd;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	u8 *bm_controls, *tmp;
	unsigned int i;
	int ret, n = 0;

	mutex_lock(su_mutex);

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);
	pd = &opts->uvc_processing;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto unlock;
	}

	/* First pass: count entries to validate the size. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
				       sizeof(u8));
	if (ret)
		goto unlock;

	if (n > pd->bControlSize) {
		ret = -EINVAL;
		goto unlock;
	}

	tmp = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
	if (!bm_controls) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Second pass: parse into the scratch buffer, then commit. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &tmp,
				       sizeof(u8));
	if (ret)
		goto free_mem;

	for (i = 0; i < n; i++)
		pd->bmControls[i] = bm_controls[i];

	ret = len;

free_mem:
	kfree(bm_controls);
unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
/* Show bmControls of the processing unit descriptor, one byte value
 * per line. */
static ssize_t uvcg_default_processing_bm_controls_show(
	struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_processing_unit_descriptor *pd;
	int result, i;
	char *pg = page;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);
	pd = &opts->uvc_processing;

	mutex_lock(&opts->lock);
	for (result = 0, i = 0; i < pd->bControlSize; ++i) {
		result += sprintf(pg, "%u\n", pd->bmControls[i]);
		pg = page + result;
	}
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return result;
}

UVC_ATTR(uvcg_default_processing_, bm_controls, bmControls);
/* Attributes of control/processing/default. */
static struct configfs_attribute *uvcg_default_processing_attrs[] = {
	&uvcg_default_processing_attr_b_unit_id,
	&uvcg_default_processing_attr_b_source_id,
	&uvcg_default_processing_attr_w_max_multiplier,
	&uvcg_default_processing_attr_bm_controls,
	&uvcg_default_processing_attr_i_processing,
	NULL,
};

static const struct uvcg_config_group_type uvcg_default_processing_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_attrs	= uvcg_default_processing_attrs,
		.ct_owner	= THIS_MODULE,
	},
	.name = "default",
};

/* -----------------------------------------------------------------------------
 * control/processing
 */

static const struct uvcg_config_group_type uvcg_processing_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "processing",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_default_processing_type,
		NULL,
	},
};
/* -----------------------------------------------------------------------------
* control/terminal/camera/default
*/
/*
 * UVCG_DEFAULT_CAMERA_ATTR() - generate a read-only show handler for
 * a numeric field of the camera terminal descriptor in f_uvc_opts.
 * Note the extra ci_parent step compared to the processing unit: the
 * camera directory sits one level deeper (control/terminal/camera).
 */
#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, bits)			\
static ssize_t uvcg_default_camera_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct config_group *group = to_config_group(item);		\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	struct uvc_camera_terminal_descriptor *cd;			\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->	\
			ci_parent;					\
	opts = to_f_uvc_opts(opts_item);				\
	cd = &opts->uvc_camera_terminal;				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname));	\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
									\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_default_camera_, cname, aname)

UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, 8);
UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, 16);
UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, 8);
UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, 8);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin,
			 16);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax,
			 16);
UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength,
			 16);

#undef UVCG_DEFAULT_CAMERA_ATTR
/*
 * Store bmControls of the camera terminal descriptor.  Same two-pass
 * parse-and-commit scheme as the processing unit variant; at most
 * bControlSize byte entries are accepted, -EBUSY while bound.
 */
static ssize_t uvcg_default_camera_bm_controls_store(
	struct config_item *item, const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_camera_terminal_descriptor *cd;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	u8 *bm_controls, *tmp;
	unsigned int i;
	int ret, n = 0;

	mutex_lock(su_mutex);

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->
			ci_parent;
	opts = to_f_uvc_opts(opts_item);
	cd = &opts->uvc_camera_terminal;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto unlock;
	}

	/* First pass: count entries to validate the size. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
				       sizeof(u8));
	if (ret)
		goto unlock;

	if (n > cd->bControlSize) {
		ret = -EINVAL;
		goto unlock;
	}

	tmp = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
	if (!bm_controls) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Second pass: parse into scratch space, then commit. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &tmp,
				       sizeof(u8));
	if (ret)
		goto free_mem;

	for (i = 0; i < n; i++)
		cd->bmControls[i] = bm_controls[i];
	ret = len;

free_mem:
	kfree(bm_controls);
unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
/* Show bmControls of the camera terminal descriptor, one byte value
 * per line. */
static ssize_t uvcg_default_camera_bm_controls_show(
	struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_camera_terminal_descriptor *cd;
	int result, i;
	char *pg = page;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->
			ci_parent;
	opts = to_f_uvc_opts(opts_item);
	cd = &opts->uvc_camera_terminal;

	mutex_lock(&opts->lock);
	for (result = 0, i = 0; i < cd->bControlSize; ++i) {
		result += sprintf(pg, "%u\n", cd->bmControls[i]);
		pg = page + result;
	}
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);

	return result;
}

UVC_ATTR(uvcg_default_camera_, bm_controls, bmControls);
/* Attributes of control/terminal/camera/default. */
static struct configfs_attribute *uvcg_default_camera_attrs[] = {
	&uvcg_default_camera_attr_b_terminal_id,
	&uvcg_default_camera_attr_w_terminal_type,
	&uvcg_default_camera_attr_b_assoc_terminal,
	&uvcg_default_camera_attr_i_terminal,
	&uvcg_default_camera_attr_w_objective_focal_length_min,
	&uvcg_default_camera_attr_w_objective_focal_length_max,
	&uvcg_default_camera_attr_w_ocular_focal_length,
	&uvcg_default_camera_attr_bm_controls,
	NULL,
};

static const struct uvcg_config_group_type uvcg_default_camera_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_attrs	= uvcg_default_camera_attrs,
		.ct_owner	= THIS_MODULE,
	},
	.name = "default",
};

/* -----------------------------------------------------------------------------
 * control/terminal/camera
 */

static const struct uvcg_config_group_type uvcg_camera_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "camera",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_default_camera_type,
		NULL,
	},
};
/* -----------------------------------------------------------------------------
* control/terminal/output/default
*/
/*
 * UVCG_DEFAULT_OUTPUT_ATTR() - generate a read-only show handler for
 * a numeric field of the output terminal descriptor in f_uvc_opts.
 */
#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, bits)			\
static ssize_t uvcg_default_output_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct config_group *group = to_config_group(item);		\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	struct uvc_output_terminal_descriptor *cd;			\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = group->cg_item.ci_parent->ci_parent->		\
			ci_parent->ci_parent;				\
	opts = to_f_uvc_opts(opts_item);				\
	cd = &opts->uvc_output_terminal;				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname));	\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
									\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_default_output_, cname, aname)

UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);

#undef UVCG_DEFAULT_OUTPUT_ATTR
/* Show bSourceID of the default output terminal descriptor. */
static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item,
						    char *page)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_output_terminal_descriptor *cd;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	int ret;

	/* Hold the subsystem mutex while walking up the configfs tree. */
	mutex_lock(su_mutex);

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);
	cd = &opts->uvc_output_terminal;

	mutex_lock(&opts->lock);
	ret = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID));
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return ret;
}
/* Store bSourceID of the default output terminal descriptor. */
static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item,
						     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvc_output_terminal_descriptor *cd;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	u8 num;
	int err;

	/* Parse the value before taking any locks. */
	err = kstrtou8(page, 0, &num);
	if (err)
		return err;

	/* Hold the subsystem mutex while walking up the configfs tree. */
	mutex_lock(su_mutex);

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);
	cd = &opts->uvc_output_terminal;

	mutex_lock(&opts->lock);
	cd->bSourceID = num;
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return len;
}
UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID);

/* Attributes of control/terminal/output/default. */
static struct configfs_attribute *uvcg_default_output_attrs[] = {
	&uvcg_default_output_attr_b_terminal_id,
	&uvcg_default_output_attr_w_terminal_type,
	&uvcg_default_output_attr_b_assoc_terminal,
	&uvcg_default_output_attr_b_source_id,
	&uvcg_default_output_attr_i_terminal,
	NULL,
};

static const struct uvcg_config_group_type uvcg_default_output_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_attrs	= uvcg_default_output_attrs,
		.ct_owner	= THIS_MODULE,
	},
	.name = "default",
};

/* -----------------------------------------------------------------------------
 * control/terminal/output
 */

static const struct uvcg_config_group_type uvcg_output_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "output",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_default_output_type,
		NULL,
	},
};

/* -----------------------------------------------------------------------------
 * control/terminal
 */

static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "terminal",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_camera_grp_type,
		&uvcg_output_grp_type,
		NULL,
	},
};
/* -----------------------------------------------------------------------------
* control/extensions
*/
/*
 * UVCG_EXTENSION_ATTR() - generate a show handler for a numeric field
 * of an extension unit descriptor.  The trailing variadic argument is
 * either empty (read/write attribute; a matching _store must exist)
 * or _RO (read-only attribute).
 */
#define UVCG_EXTENSION_ATTR(cname, aname, ro...)			\
static ssize_t uvcg_extension_##cname##_show(struct config_item *item,	\
					     char *page)		\
{									\
	struct config_group *group = to_config_group(item->ci_parent);	\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	struct uvcg_extension *xu = to_uvcg_extension(item);		\
	struct config_item *opts_item;					\
	struct f_uvc_opts *opts;					\
	int ret;							\
									\
	mutex_lock(su_mutex);						\
									\
	opts_item = item->ci_parent->ci_parent->ci_parent;		\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	ret = sprintf(page, "%u\n", xu->desc.aname);			\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
									\
	return ret;							\
}									\
UVC_ATTR##ro(uvcg_extension_, cname, aname)

UVCG_EXTENSION_ATTR(b_length, bLength, _RO);
UVCG_EXTENSION_ATTR(b_unit_id, bUnitID, _RO);
UVCG_EXTENSION_ATTR(i_extension, iExtension, _RO);
/* Store bNumControls of an extension unit descriptor. */
static ssize_t uvcg_extension_b_num_controls_store(struct config_item *item,
						   const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	int ret;
	u8 num;

	ret = kstrtou8(page, 0, &num);
	if (ret)
		return ret;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	xu->desc.bNumControls = num;
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return len;
}
UVCG_EXTENSION_ATTR(b_num_controls, bNumControls);
/*
 * In addition to storing bNrInPins, this function needs to realloc the
 * memory for the baSourceID array and additionally expand bLength.
 */
static ssize_t uvcg_extension_b_nr_in_pins_store(struct config_item *item,
						 const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	void *tmp_buf;
	int ret;
	u8 num;

	ret = kstrtou8(page, 0, &num);
	if (ret)
		return ret;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	/* No size change: nothing to reallocate. */
	if (num == xu->desc.bNrInPins) {
		ret = len;
		goto unlock;
	}

	/* NOTE(review): relies on krealloc_array() honoring __GFP_ZERO
	 * for the grown tail - confirm this holds on the target kernel
	 * version, as krealloc historically did not zero new space. */
	tmp_buf = krealloc_array(xu->desc.baSourceID, num, sizeof(u8),
				 GFP_KERNEL | __GFP_ZERO);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	xu->desc.baSourceID = tmp_buf;
	xu->desc.bNrInPins = num;
	/* bLength depends on both variable-length arrays. */
	xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
						      xu->desc.bControlSize);

	ret = len;

unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVCG_EXTENSION_ATTR(b_nr_in_pins, bNrInPins);
/*
 * In addition to storing bControlSize, this function needs to realloc
 * the memory for the bmControls array and additionally expand bLength.
 */
static ssize_t uvcg_extension_b_control_size_store(struct config_item *item,
						   const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	void *tmp_buf;
	int ret;
	u8 num;

	ret = kstrtou8(page, 0, &num);
	if (ret)
		return ret;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	/* No size change: nothing to reallocate. */
	if (num == xu->desc.bControlSize) {
		ret = len;
		goto unlock;
	}

	tmp_buf = krealloc_array(xu->desc.bmControls, num, sizeof(u8),
				 GFP_KERNEL | __GFP_ZERO);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	xu->desc.bmControls = tmp_buf;
	xu->desc.bControlSize = num;
	/* bLength depends on both variable-length arrays. */
	xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
						      xu->desc.bControlSize);

	ret = len;

unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVCG_EXTENSION_ATTR(b_control_size, bControlSize);
/* Show the 16-byte extension unit GUID as raw bytes. */
static ssize_t uvcg_extension_guid_extension_code_show(struct config_item *item,
						       char *page)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	memcpy(page, xu->desc.guidExtensionCode, sizeof(xu->desc.guidExtensionCode));
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return sizeof(xu->desc.guidExtensionCode);
}

/* Store the extension unit GUID; writes shorter than 16 bytes update
 * only the leading bytes, longer input is truncated. */
static ssize_t uvcg_extension_guid_extension_code_store(struct config_item *item,
							const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	int ret;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	memcpy(xu->desc.guidExtensionCode, page,
	       min(sizeof(xu->desc.guidExtensionCode), len));
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	ret = sizeof(xu->desc.guidExtensionCode);

	return ret;
}

UVC_ATTR(uvcg_extension_, guid_extension_code, guidExtensionCode);
/* Show baSourceID of an extension unit, one entry per line. */
static ssize_t uvcg_extension_ba_source_id_show(struct config_item *item,
						char *page)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	char *pg = page;
	int ret, i;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	for (ret = 0, i = 0; i < xu->desc.bNrInPins; ++i) {
		ret += sprintf(pg, "%u\n", xu->desc.baSourceID[i]);
		pg = page + ret;
	}
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return ret;
}

/*
 * Replace baSourceID with the written list of byte values; bNrInPins
 * and bLength are updated to match the new count.
 * NOTE(review): unlike the processing/camera bmControls stores there
 * is no opts->refcnt busy check here - confirm whether stores while
 * the function is bound are intentionally allowed.
 */
static ssize_t uvcg_extension_ba_source_id_store(struct config_item *item,
						 const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	u8 *source_ids, *iter;
	int ret, n = 0;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
				       sizeof(u8));
	if (ret)
		goto unlock;

	iter = source_ids = kcalloc(n, sizeof(u8), GFP_KERNEL);
	if (!source_ids) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
				       sizeof(u8));
	if (ret) {
		kfree(source_ids);
		goto unlock;
	}

	kfree(xu->desc.baSourceID);
	xu->desc.baSourceID = source_ids;
	xu->desc.bNrInPins = n;
	xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
						      xu->desc.bControlSize);

	ret = len;

unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVC_ATTR(uvcg_extension_, ba_source_id, baSourceID);
/* Show bmControls of an extension unit, one hex byte per line. */
static ssize_t uvcg_extension_bm_controls_show(struct config_item *item,
					       char *page)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	char *pg = page;
	int ret, i;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	for (ret = 0, i = 0; i < xu->desc.bControlSize; ++i) {
		ret += sprintf(pg, "0x%02x\n", xu->desc.bmControls[i]);
		pg = page + ret;
	}
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return ret;
}

/*
 * Replace bmControls with the written list of byte values;
 * bControlSize and bLength are updated to match the new count.
 */
static ssize_t uvcg_extension_bm_controls_store(struct config_item *item,
						const char *page, size_t len)
{
	struct config_group *group = to_config_group(item->ci_parent);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	u8 *bm_controls, *iter;
	int ret, n = 0;

	mutex_lock(su_mutex);

	opts_item = item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
				       sizeof(u8));
	if (ret)
		goto unlock;

	iter = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
	if (!bm_controls) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
				       sizeof(u8));
	if (ret) {
		kfree(bm_controls);
		goto unlock;
	}

	kfree(xu->desc.bmControls);
	xu->desc.bmControls = bm_controls;
	xu->desc.bControlSize = n;
	xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
						      xu->desc.bControlSize);

	ret = len;

unlock:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVC_ATTR(uvcg_extension_, bm_controls, bmControls);
/* Attributes exposed by each control/extensions/<NAME> configfs item. */
static struct configfs_attribute *uvcg_extension_attrs[] = {
	&uvcg_extension_attr_b_length,
	&uvcg_extension_attr_b_unit_id,
	&uvcg_extension_attr_b_num_controls,
	&uvcg_extension_attr_b_nr_in_pins,
	&uvcg_extension_attr_b_control_size,
	&uvcg_extension_attr_guid_extension_code,
	&uvcg_extension_attr_ba_source_id,
	&uvcg_extension_attr_bm_controls,
	&uvcg_extension_attr_i_extension,
	NULL,
};
/* Free the extension unit allocated in uvcg_extension_make(). */
static void uvcg_extension_release(struct config_item *item)
{
	kfree(to_uvcg_extension(item));
}
/*
 * Symlink handler for control/extensions/<NAME>: linking to a
 * strings/<langid>/<NAME> entry records that string's descriptor index as
 * the extension unit's iExtension.
 *
 * Fix: the original error path jumped to config_item_put(strings) even when
 * config_group_find_item() returned NULL, relying on config_item_put()
 * tolerating a NULL item. Bail out before the put when there is nothing to
 * release so the code does not depend on that behavior.
 */
static int uvcg_extension_allow_link(struct config_item *src, struct config_item *tgt)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(src);
	struct config_item *gadget_item;
	struct gadget_string *string;
	struct config_item *strings;
	int ret = 0;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	/* Validate that the target of the link is an entry in strings/<langid> */
	gadget_item = src->ci_parent->ci_parent->ci_parent->ci_parent->ci_parent;
	strings = config_group_find_item(to_config_group(gadget_item), "strings");
	if (!strings) {
		/* No "strings" group at all: nothing was referenced. */
		ret = -EINVAL;
		goto unlock;
	}

	if (tgt->ci_parent->ci_parent != strings) {
		ret = -EINVAL;
		goto put_strings;
	}

	string = to_gadget_string(tgt);
	xu->string_descriptor_index = string->usb_string.id;

put_strings:
	/* Drop the reference taken by config_group_find_item(). */
	config_item_put(strings);
unlock:
	mutex_unlock(su_mutex);

	return ret;
}
/*
 * Symlink removal handler: reset the extension unit's string descriptor
 * index back to 0 (no iExtension string).
 */
static void uvcg_extension_drop_link(struct config_item *src, struct config_item *tgt)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvcg_extension *xu = to_uvcg_extension(src);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = src->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	xu->string_descriptor_index = 0;
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);
}
/* Item operations and type for control/extensions/<NAME> items. */
static struct configfs_item_operations uvcg_extension_item_ops = {
	.release = uvcg_extension_release,
	.allow_link = uvcg_extension_allow_link,
	.drop_link = uvcg_extension_drop_link,
};
static const struct config_item_type uvcg_extension_type = {
	.ct_item_ops = &uvcg_extension_item_ops,
	.ct_attrs = uvcg_extension_attrs,
	.ct_owner = THIS_MODULE,
};
/*
 * rmdir handler for control/extensions/<NAME>: unlink the unit from the
 * opts list and free its variable-size descriptor arrays.
 *
 * NOTE(review): config_item_put() runs before list_del()/kfree() of xu's
 * members; this assumes the configfs core still holds its own reference so
 * uvcg_extension_release() (which frees xu) cannot run yet — confirm.
 */
static void uvcg_extension_drop(struct config_group *group, struct config_item *item)
{
	struct uvcg_extension *xu = container_of(item, struct uvcg_extension, item);
	struct config_item *opts_item;
	struct f_uvc_opts *opts;

	opts_item = group->cg_item.ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	config_item_put(item);
	list_del(&xu->list);
	kfree(xu->desc.baSourceID);
	kfree(xu->desc.bmControls);

	mutex_unlock(&opts->lock);
}
/*
 * mkdir handler for control/extensions: allocate a new extension unit with
 * an empty descriptor, assign it the next free unit ID, and queue it on the
 * opts->extension_units list.
 */
static struct config_item *uvcg_extension_make(struct config_group *group, const char *name)
{
	struct config_item *opts_item;
	struct uvcg_extension *xu;
	struct f_uvc_opts *opts;

	opts_item = group->cg_item.ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	xu = kzalloc(sizeof(*xu), GFP_KERNEL);
	if (!xu)
		return ERR_PTR(-ENOMEM);

	/* Descriptor size for zero input pins and zero control bytes. */
	xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(0, 0);
	xu->desc.bDescriptorType = USB_DT_CS_INTERFACE;
	xu->desc.bDescriptorSubType = UVC_VC_EXTENSION_UNIT;
	xu->desc.bNumControls = 0;
	xu->desc.bNrInPins = 0;
	xu->desc.baSourceID = NULL;
	xu->desc.bControlSize = 0;
	xu->desc.bmControls = NULL;

	mutex_lock(&opts->lock);

	/* Unit IDs are handed out sequentially; 0 is never used. */
	xu->desc.bUnitID = ++opts->last_unit_id;

	config_item_init_type_name(&xu->item, name, &uvcg_extension_type);
	list_add_tail(&xu->list, &opts->extension_units);

	mutex_unlock(&opts->lock);

	return &xu->item;
}
/* Group operations and type for the control/extensions directory. */
static struct configfs_group_operations uvcg_extensions_grp_ops = {
	.make_item = uvcg_extension_make,
	.drop_item = uvcg_extension_drop,
};
static const struct uvcg_config_group_type uvcg_extensions_grp_type = {
	.type = {
		.ct_item_ops = &uvcg_config_item_ops,
		.ct_group_ops = &uvcg_extensions_grp_ops,
		.ct_owner = THIS_MODULE,
	},
	.name = "extensions",
};
/* -----------------------------------------------------------------------------
 * control/class/{fs|ss}
 */

/* A speed-specific ("fs" or "ss") class group under control/class. */
struct uvcg_control_class_group {
	struct config_group group;
	const char *name; /* "fs" or "ss", selects the descriptor array */
};
/*
 * Map a control/class/{fs|ss} group item to the matching control class
 * descriptor array in the function options, or NULL for an unknown name.
 */
static inline struct uvc_descriptor_header
**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
	struct uvcg_control_class_group *cl =
		container_of(i, struct uvcg_control_class_group,
			     group.cg_item);

	if (strcmp(cl->name, "fs") == 0)
		return o->uvc_fs_control_cls;
	else if (strcmp(cl->name, "ss") == 0)
		return o->uvc_ss_control_cls;

	return NULL;
}
/*
 * Symlink handler for control/class/{fs|ss}: link a control header into the
 * speed-specific class descriptor array. Only one header may be linked, and
 * not while the function is bound (opts->refcnt).
 *
 * NOTE(review): on the !header path, config_item_put(header) is reached with
 * header == NULL; this assumes config_item_put() tolerates NULL — confirm
 * against the configfs version in use.
 */
static int uvcg_control_class_allow_link(struct config_item *src,
					 struct config_item *target)
{
	struct config_item *control, *header;
	struct f_uvc_opts *opts;
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvc_descriptor_header **class_array;
	struct uvcg_control_header *target_hdr;
	int ret = -EINVAL;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	/* The link target must live directly under control/header. */
	control = src->ci_parent->ci_parent;
	header = config_group_find_item(to_config_group(control), "header");
	if (!header || target->ci_parent != header)
		goto out;

	opts = to_f_uvc_opts(control->ci_parent);

	mutex_lock(&opts->lock);

	class_array = uvcg_get_ctl_class_arr(src, opts);
	if (!class_array)
		goto unlock;

	/* Refuse while bound or when a header is already linked. */
	if (opts->refcnt || class_array[0]) {
		ret = -EBUSY;
		goto unlock;
	}

	target_hdr = to_uvcg_control_header(target);
	++target_hdr->linked;
	class_array[0] = (struct uvc_descriptor_header *)&target_hdr->desc;
	ret = 0;

unlock:
	mutex_unlock(&opts->lock);
out:
	config_item_put(header);
	mutex_unlock(su_mutex);
	return ret;
}
/*
 * Symlink removal handler for control/class/{fs|ss}: unlink the control
 * header from the class array unless the function is currently bound.
 */
static void uvcg_control_class_drop_link(struct config_item *src,
					 struct config_item *target)
{
	struct config_item *control, *header;
	struct f_uvc_opts *opts;
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvc_descriptor_header **class_array;
	struct uvcg_control_header *target_hdr;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	control = src->ci_parent->ci_parent;
	header = config_group_find_item(to_config_group(control), "header");
	if (!header || target->ci_parent != header)
		goto out;

	opts = to_f_uvc_opts(control->ci_parent);

	mutex_lock(&opts->lock);

	class_array = uvcg_get_ctl_class_arr(src, opts);
	/* Skip the unlink while the function is bound. */
	if (!class_array || opts->refcnt)
		goto unlock;

	target_hdr = to_uvcg_control_header(target);
	--target_hdr->linked;
	class_array[0] = NULL;

unlock:
	mutex_unlock(&opts->lock);
out:
	config_item_put(header);
	mutex_unlock(su_mutex);
}
/* Item operations and type for control/class/{fs|ss} directories. */
static struct configfs_item_operations uvcg_control_class_item_ops = {
	.release = uvcg_config_item_release,
	.allow_link = uvcg_control_class_allow_link,
	.drop_link = uvcg_control_class_drop_link,
};
static const struct config_item_type uvcg_control_class_type = {
	.ct_item_ops = &uvcg_control_class_item_ops,
	.ct_owner = THIS_MODULE,
};
/* -----------------------------------------------------------------------------
 * control/class
 */

/*
 * Populate control/class with its default "fs" and "ss" children.
 *
 * NOTE(review): on a mid-loop allocation failure, groups already added are
 * presumably released by the caller's default-group teardown — confirm.
 */
static int uvcg_control_class_create_children(struct config_group *parent)
{
	static const char * const names[] = { "fs", "ss" };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		struct uvcg_control_class_group *group;

		group = kzalloc(sizeof(*group), GFP_KERNEL);
		if (!group)
			return -ENOMEM;

		group->name = names[i];

		config_group_init_type_name(&group->group, group->name,
					    &uvcg_control_class_type);
		configfs_add_default_group(&group->group, parent);
	}

	return 0;
}
/* Type for the control/class directory itself. */
static const struct uvcg_config_group_type uvcg_control_class_grp_type = {
	.type = {
		.ct_item_ops = &uvcg_config_item_ops,
		.ct_owner = THIS_MODULE,
	},
	.name = "class",
	.create_children = uvcg_control_class_create_children,
};
/* -----------------------------------------------------------------------------
 * control
 */

/* Read-only control/bInterfaceNumber: the VideoControl interface number. */
static ssize_t uvcg_default_control_b_interface_number_show(
	struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	int result = 0;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = item->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	result += sprintf(page, "%u\n", opts->control_interface);
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return result;
}
UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
/* Show control/enable_interrupt_ep: whether the interrupt EP is enabled. */
static ssize_t uvcg_default_control_enable_interrupt_ep_show(
	struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	int result = 0;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = item->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	result += sprintf(page, "%u\n", opts->enable_interrupt_ep);
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return result;
}
/*
 * Store control/enable_interrupt_ep: any value kstrtou8() accepts is stored
 * as-is (non-zero enables the interrupt endpoint).
 */
static ssize_t uvcg_default_control_enable_interrupt_ep_store(
	struct config_item *item, const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	ssize_t ret;
	u8 num;

	/* Parse before taking any lock; bad input never touches state. */
	ret = kstrtou8(page, 0, &num);
	if (ret)
		return ret;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = item->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	opts->enable_interrupt_ep = num;
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return len;
}
UVC_ATTR(uvcg_default_control_, enable_interrupt_ep, enable_interrupt_ep);
/* Attributes and type for the top-level "control" directory. */
static struct configfs_attribute *uvcg_default_control_attrs[] = {
	&uvcg_default_control_attr_b_interface_number,
	&uvcg_default_control_attr_enable_interrupt_ep,
	NULL,
};
static const struct uvcg_config_group_type uvcg_control_grp_type = {
	.type = {
		.ct_item_ops = &uvcg_config_item_ops,
		.ct_attrs = uvcg_default_control_attrs,
		.ct_owner = THIS_MODULE,
	},
	.name = "control",
	/* Default children created under control/. */
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_control_header_grp_type,
		&uvcg_processing_grp_type,
		&uvcg_terminal_grp_type,
		&uvcg_control_class_grp_type,
		&uvcg_extensions_grp_type,
		NULL,
	},
};
/* -----------------------------------------------------------------------------
 * streaming/uncompressed
 * streaming/mjpeg
 */

/* Directory names of the format groups under streaming/. */
static const char * const uvcg_format_names[] = {
	"uncompressed",
	"mjpeg",
};
/*
 * Look up the built-in streaming/color_matching/default descriptor under
 * the given streaming group, or NULL if either directory is missing.
 * The temporary references from config_group_find_item() are dropped
 * before returning; the returned pointer carries no reference of its own.
 */
static struct uvcg_color_matching *
uvcg_format_get_default_color_match(struct config_item *streaming)
{
	struct config_item *color_matching_item, *cm_default;
	struct uvcg_color_matching *color_match;

	color_matching_item = config_group_find_item(to_config_group(streaming),
						     "color_matching");
	if (!color_matching_item)
		return NULL;

	cm_default = config_group_find_item(to_config_group(color_matching_item),
					    "default");
	config_item_put(color_matching_item);
	if (!cm_default)
		return NULL;

	color_match = to_uvcg_color_matching(to_config_group(cm_default));
	config_item_put(cm_default);

	return color_match;
}
/*
 * Symlink handler for a format node: switch the format's color matching
 * descriptor from the default to the linked streaming/color_matching entry.
 *
 * NOTE(review): on the !color_matching path, config_item_put() runs with a
 * NULL pointer; assumes config_item_put() tolerates NULL — confirm.
 */
static int uvcg_format_allow_link(struct config_item *src, struct config_item *tgt)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvcg_color_matching *color_matching_desc;
	struct config_item *streaming, *color_matching;
	struct uvcg_format *fmt;
	int ret = 0;

	mutex_lock(su_mutex);

	/* The target must live directly under streaming/color_matching. */
	streaming = src->ci_parent->ci_parent;
	color_matching = config_group_find_item(to_config_group(streaming), "color_matching");
	if (!color_matching || color_matching != tgt->ci_parent) {
		ret = -EINVAL;
		goto out_put_cm;
	}

	fmt = to_uvcg_format(src);

	/*
	 * There's always a color matching descriptor associated with the format
	 * but without a symlink it should only ever be the default one. If it's
	 * not the default, there's already a symlink and we should bail out.
	 */
	color_matching_desc = uvcg_format_get_default_color_match(streaming);
	if (fmt->color_matching != color_matching_desc) {
		ret = -EBUSY;
		goto out_put_cm;
	}

	/* Move the refcount from the default to the linked descriptor. */
	color_matching_desc->refcnt--;

	color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
	fmt->color_matching = color_matching_desc;
	color_matching_desc->refcnt++;

out_put_cm:
	config_item_put(color_matching);
	mutex_unlock(su_mutex);

	return ret;
}
/*
 * Symlink removal handler for a format node: drop the linked color matching
 * descriptor and fall back to the default one.
 *
 * NOTE(review): uvcg_format_get_default_color_match() can return NULL if
 * the default entry is missing; the unconditional refcnt++ below assumes it
 * always exists at this point — confirm.
 */
static void uvcg_format_drop_link(struct config_item *src, struct config_item *tgt)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvcg_color_matching *color_matching_desc;
	struct config_item *streaming;
	struct uvcg_format *fmt;

	mutex_lock(su_mutex);

	color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
	color_matching_desc->refcnt--;

	streaming = src->ci_parent->ci_parent;
	color_matching_desc = uvcg_format_get_default_color_match(streaming);

	fmt = to_uvcg_format(src);
	fmt->color_matching = color_matching_desc;
	color_matching_desc->refcnt++;

	mutex_unlock(su_mutex);
}
/* Shared item operations for format nodes (uncompressed and MJPEG). */
static struct configfs_item_operations uvcg_format_item_operations = {
	.release = uvcg_config_item_release,
	.allow_link = uvcg_format_allow_link,
	.drop_link = uvcg_format_drop_link,
};
/*
 * Shared show helper for a format's bmaControls: prints a "0x" prefix
 * followed by each control byte in "%x\n" form.
 */
static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
{
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &f->group.cg_subsys->su_mutex;
	int result, i;
	char *pg = page;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = f->group.cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	result = sprintf(pg, "0x");
	pg += result;
	for (i = 0; i < UVCG_STREAMING_CONTROL_SIZE; ++i) {
		result += sprintf(pg, "%x\n", f->bmaControls[i]);
		pg = page + result;
	}
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return result;
}
/*
 * Shared store helper for a format's bmaControls: expects "0x"/"0X" followed
 * by hex digits; only the first byte is decoded into bmaControls[0].
 * Rejected with -EBUSY while the format is linked or the function is bound.
 */
static ssize_t uvcg_format_bma_controls_store(struct uvcg_format *ch,
					      const char *page, size_t len)
{
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &ch->group.cg_subsys->su_mutex;
	int ret = -EINVAL;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = ch->group.cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	if (ch->linked || opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}

	/* Require at least "0x" plus two hex digits. */
	if (len < 4 || *page != '0' ||
	    (*(page + 1) != 'x' && *(page + 1) != 'X'))
		goto end;
	ret = hex2bin(ch->bmaControls, page + 2, 1);
	if (ret < 0)
		goto end;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
/* -----------------------------------------------------------------------------
* streaming/header/<NAME>
* streaming/header
*/
static void uvcg_format_set_indices(struct config_group *fmt);
/*
 * Symlink handler for streaming/header/<NAME>: add a format node (direct
 * child of streaming/uncompressed or streaming/mjpeg) to this header's
 * format list and renumber the format's frame indices.
 */
static int uvcg_streaming_header_allow_link(struct config_item *src,
					    struct config_item *target)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	struct uvcg_streaming_header *src_hdr;
	struct uvcg_format *target_fmt = NULL;
	struct uvcg_format_ptr *format_ptr;
	int i, ret = -EINVAL;

	src_hdr = to_uvcg_streaming_header(src);
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = src->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);

	/* A header already in use by a bound function cannot gain formats. */
	if (src_hdr->linked) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Linking is only allowed to direct children of the format nodes
	 * (streaming/uncompressed or streaming/mjpeg nodes). First check that
	 * the grand-parent of the target matches the grand-parent of the source
	 * (the streaming node), and then verify that the target parent is a
	 * format node.
	 */
	if (src->ci_parent->ci_parent != target->ci_parent->ci_parent)
		goto out;

	for (i = 0; i < ARRAY_SIZE(uvcg_format_names); ++i) {
		if (!strcmp(target->ci_parent->ci_name, uvcg_format_names[i]))
			break;
	}

	if (i == ARRAY_SIZE(uvcg_format_names))
		goto out;

	target_fmt = container_of(to_config_group(target), struct uvcg_format,
				  group);

	/* Renumber bFrameIndex over the format's current frame children. */
	uvcg_format_set_indices(to_config_group(target));

	format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL);
	if (!format_ptr) {
		ret = -ENOMEM;
		goto out;
	}
	ret = 0;
	format_ptr->fmt = target_fmt;
	list_add_tail(&format_ptr->entry, &src_hdr->formats);
	++src_hdr->num_fmt;
	++target_fmt->linked;

out:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
/*
 * Symlink removal handler for streaming/header/<NAME>: remove the format
 * from this header's list and drop the format's link count.
 *
 * NOTE(review): --target_fmt->linked runs even if no matching entry was
 * found in the list; assumes configfs only invokes drop_link for links
 * previously accepted by allow_link — confirm.
 */
static void uvcg_streaming_header_drop_link(struct config_item *src,
					    struct config_item *target)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct config_item *opts_item;
	struct f_uvc_opts *opts;
	struct uvcg_streaming_header *src_hdr;
	struct uvcg_format *target_fmt = NULL;
	struct uvcg_format_ptr *format_ptr, *tmp;

	src_hdr = to_uvcg_streaming_header(src);
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = src->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	target_fmt = container_of(to_config_group(target), struct uvcg_format,
				  group);

	list_for_each_entry_safe(format_ptr, tmp, &src_hdr->formats, entry)
		if (format_ptr->fmt == target_fmt) {
			list_del(&format_ptr->entry);
			kfree(format_ptr);
			--src_hdr->num_fmt;
			break;
		}

	--target_fmt->linked;

	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
}
/* Item operations for streaming/header/<NAME> items. */
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
	.release = uvcg_config_item_release,
	.allow_link = uvcg_streaming_header_allow_link,
	.drop_link = uvcg_streaming_header_drop_link,
};
/*
 * Generator for read-only streaming header attributes: the show handler
 * prints sh->desc.<aname> as unsigned decimal under su_mutex + opts->lock.
 * NOTE(review): bits == 8 expands to le8_to_cpu(), which is presumably a
 * pass-through helper defined earlier in this file — confirm.
 */
#define UVCG_STREAMING_HEADER_ATTR(cname, aname, bits)			\
static ssize_t uvcg_streaming_header_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &sh->item.ci_group->cg_subsys->su_mutex;\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = sh->item.ci_parent->ci_parent->ci_parent;		\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_streaming_header_, cname, aname)

/* Instantiate the read-only byte-wide header attributes. */
UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, 8);
UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, 8);
UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, 8);
UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, 8);
UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, 8);

#undef UVCG_STREAMING_HEADER_ATTR
/* Attributes and type for streaming/header/<NAME> items. */
static struct configfs_attribute *uvcg_streaming_header_attrs[] = {
	&uvcg_streaming_header_attr_bm_info,
	&uvcg_streaming_header_attr_b_terminal_link,
	&uvcg_streaming_header_attr_b_still_capture_method,
	&uvcg_streaming_header_attr_b_trigger_support,
	&uvcg_streaming_header_attr_b_trigger_usage,
	NULL,
};
static const struct config_item_type uvcg_streaming_header_type = {
	.ct_item_ops = &uvcg_streaming_header_item_ops,
	.ct_attrs = uvcg_streaming_header_attrs,
	.ct_owner = THIS_MODULE,
};
/*
 * mkdir handler for streaming/header: allocate an input header descriptor
 * with an empty format list.
 */
static struct config_item
*uvcg_streaming_header_make(struct config_group *group, const char *name)
{
	struct uvcg_streaming_header *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&h->formats);
	h->desc.bDescriptorType = USB_DT_CS_INTERFACE;
	h->desc.bDescriptorSubType = UVC_VS_INPUT_HEADER;
	/* NOTE(review): 3 is presumably the default output terminal ID — confirm. */
	h->desc.bTerminalLink = 3;
	h->desc.bControlSize = UVCG_STREAMING_CONTROL_SIZE;

	config_item_init_type_name(&h->item, name, &uvcg_streaming_header_type);

	return &h->item;
}
/* Group operations and type for the streaming/header directory. */
static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
	.make_item = uvcg_streaming_header_make,
};
static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
	.type = {
		.ct_item_ops = &uvcg_config_item_ops,
		.ct_group_ops = &uvcg_streaming_header_grp_ops,
		.ct_owner = THIS_MODULE,
	},
	.name = "header",
};
/* -----------------------------------------------------------------------------
 * streaming/<mode>/<format>/<NAME>
 */

/*
 * Generator for read/write frame attributes. The show handler prints
 * f->frame.<cname>; the store handler parses via kstrtou<bits>() and
 * rejects writes with -EBUSY while the parent format is linked or the
 * function is bound.
 */
#define UVCG_FRAME_ATTR(cname, aname, bits)				\
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{									\
	struct uvcg_frame *f = to_uvcg_frame(item);			\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", f->frame.cname);			\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
static ssize_t  uvcg_frame_##cname##_store(struct config_item *item,	\
					   const char *page, size_t len)\
{									\
	struct uvcg_frame *f = to_uvcg_frame(item);			\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct uvcg_format *fmt;					\
	struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
	typeof(f->frame.cname) num;					\
	int ret;							\
									\
	ret = kstrtou##bits(page, 0, &num);				\
	if (ret)							\
		return ret;						\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \
	opts = to_f_uvc_opts(opts_item);				\
	fmt = to_uvcg_format(f->item.ci_parent);			\
									\
	mutex_lock(&opts->lock);					\
	if (fmt->linked || opts->refcnt) {				\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	f->frame.cname = num;						\
	ret = len;							\
end:									\
	mutex_unlock(&opts->lock);					\
	mutex_unlock(su_mutex);						\
	return ret;							\
}									\
									\
UVC_ATTR(uvcg_frame_, cname, aname);
/*
 * Read-only bFrameIndex. Indices are only assigned when the parent format
 * is linked into a header (uvcg_format_set_indices()), so reading an
 * unlinked frame's index returns -EBUSY.
 */
static ssize_t uvcg_frame_b_frame_index_show(struct config_item *item,
					     char *page)
{
	struct uvcg_frame *f = to_uvcg_frame(item);
	struct uvcg_format *fmt;
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct config_item *fmt_item;
	struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;
	int result;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	fmt_item = f->item.ci_parent;
	fmt = to_uvcg_format(fmt_item);

	if (!fmt->linked) {
		result = -EBUSY;
		goto out;
	}

	opts_item = fmt_item->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	result = sprintf(page, "%u\n", f->frame.b_frame_index);
	mutex_unlock(&opts->lock);

out:
	mutex_unlock(su_mutex);
	return result;
}
UVC_ATTR_RO(uvcg_frame_, b_frame_index, bFrameIndex);
/* Instantiate the read/write frame attributes. */
UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, 8);
UVCG_FRAME_ATTR(w_width, wWidth, 16);
UVCG_FRAME_ATTR(w_height, wHeight, 16);
UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32);
UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32);
UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32);
UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32);

#undef UVCG_FRAME_ATTR
/* Print the frame's interval table, one decimal value per line. */
static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
						 char *page)
{
	struct uvcg_frame *frm = to_uvcg_frame(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &frm->item.ci_group->cg_subsys->su_mutex;
	int result, i;
	char *pg = page;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = frm->item.ci_parent->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) {
		result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]);
		pg = page + result;
	}
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);
	return result;
}
/*
 * Replace the frame's interval table from a list of u32 values. The new
 * table is sorted ascending, as required for dwFrameInterval entries.
 * Rejected with -EBUSY while the format is linked or the function is bound.
 */
static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
						  const char *page, size_t len)
{
	struct uvcg_frame *ch = to_uvcg_frame(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct uvcg_format *fmt;
	struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;
	int ret = 0, n = 0;
	u32 *frm_intrv, *tmp;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = ch->item.ci_parent->ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);
	fmt = to_uvcg_format(ch->item.ci_parent);

	mutex_lock(&opts->lock);
	if (fmt->linked || opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}

	/* First pass: count the entries. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n, sizeof(u32));
	if (ret)
		goto end;

	tmp = frm_intrv = kcalloc(n, sizeof(u32), GFP_KERNEL);
	if (!frm_intrv) {
		ret = -ENOMEM;
		goto end;
	}

	/* Second pass: fill the new table via 'tmp'. */
	ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &tmp, sizeof(u32));
	if (ret) {
		kfree(frm_intrv);
		goto end;
	}

	kfree(ch->dw_frame_interval);
	ch->dw_frame_interval = frm_intrv;
	ch->frame.b_frame_interval_type = n;
	sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval),
	     uvcg_config_compare_u32, NULL);
	ret = len;

end:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval);
/* Attributes and type for streaming/<mode>/<format>/<NAME> frame items. */
static struct configfs_attribute *uvcg_frame_attrs[] = {
	&uvcg_frame_attr_b_frame_index,
	&uvcg_frame_attr_bm_capabilities,
	&uvcg_frame_attr_w_width,
	&uvcg_frame_attr_w_height,
	&uvcg_frame_attr_dw_min_bit_rate,
	&uvcg_frame_attr_dw_max_bit_rate,
	&uvcg_frame_attr_dw_max_video_frame_buffer_size,
	&uvcg_frame_attr_dw_default_frame_interval,
	&uvcg_frame_attr_dw_frame_interval,
	NULL,
};
static const struct config_item_type uvcg_frame_type = {
	.ct_item_ops = &uvcg_config_item_ops,
	.ct_attrs = uvcg_frame_attrs,
	.ct_owner = THIS_MODULE,
};
/*
 * mkdir handler for a format node: allocate a frame descriptor with
 * 640x360 defaults, pick the frame subtype from the parent format's type,
 * and register the frame on the format's list.
 */
static struct config_item *uvcg_frame_make(struct config_group *group,
					   const char *name)
{
	struct uvcg_frame *h;
	struct uvcg_format *fmt;
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct uvcg_frame_ptr *frame_ptr;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	h->frame.b_descriptor_type = USB_DT_CS_INTERFACE;
	h->frame.b_frame_index = 1;
	/* Defaults: 640x360; bit rates and buffer size sized for that mode. */
	h->frame.w_width = 640;
	h->frame.w_height = 360;
	h->frame.dw_min_bit_rate = 18432000;
	h->frame.dw_max_bit_rate = 55296000;
	h->frame.dw_max_video_frame_buffer_size = 460800;
	h->frame.dw_default_frame_interval = 666666;

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	fmt = to_uvcg_format(&group->cg_item);
	if (fmt->type == UVCG_UNCOMPRESSED) {
		h->frame.b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED;
		h->fmt_type = UVCG_UNCOMPRESSED;
	} else if (fmt->type == UVCG_MJPEG) {
		h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG;
		h->fmt_type = UVCG_MJPEG;
	} else {
		/* Unknown parent format type: refuse the mkdir. */
		mutex_unlock(&opts->lock);
		kfree(h);
		return ERR_PTR(-EINVAL);
	}

	frame_ptr = kzalloc(sizeof(*frame_ptr), GFP_KERNEL);
	if (!frame_ptr) {
		mutex_unlock(&opts->lock);
		kfree(h);
		return ERR_PTR(-ENOMEM);
	}

	frame_ptr->frm = h;
	list_add_tail(&frame_ptr->entry, &fmt->frames);
	++fmt->num_frames;
	mutex_unlock(&opts->lock);

	config_item_init_type_name(&h->item, name, &uvcg_frame_type);

	return &h->item;
}
/*
 * rmdir handler for a frame: unregister it from the parent format's frame
 * list, then drop the configfs reference (releasing the item).
 */
static void uvcg_frame_drop(struct config_group *group, struct config_item *item)
{
	struct uvcg_format *fmt;
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct uvcg_frame *target_frm = NULL;
	struct uvcg_frame_ptr *frame_ptr, *tmp;

	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	target_frm = container_of(item, struct uvcg_frame, item);
	fmt = to_uvcg_format(&group->cg_item);

	list_for_each_entry_safe(frame_ptr, tmp, &fmt->frames, entry)
		if (frame_ptr->frm == target_frm) {
			list_del(&frame_ptr->entry);
			kfree(frame_ptr);
			--fmt->num_frames;
			break;
		}
	mutex_unlock(&opts->lock);

	config_item_put(item);
}
/*
 * Renumber the bFrameIndex of every frame child of a format group,
 * starting from 1, in configfs child order. Non-frame children (e.g. the
 * color_matching symlink) are skipped by the type check.
 */
static void uvcg_format_set_indices(struct config_group *fmt)
{
	struct config_item *ci;
	unsigned int i = 1;

	list_for_each_entry(ci, &fmt->cg_children, ci_entry) {
		struct uvcg_frame *frm;

		if (ci->ci_type != &uvcg_frame_type)
			continue;

		frm = to_uvcg_frame(ci);
		frm->frame.b_frame_index = i++;
	}
}
/* -----------------------------------------------------------------------------
 * streaming/uncompressed/<NAME>
 */

/* mkdir/rmdir inside an uncompressed format creates/drops frame items. */
static struct configfs_group_operations uvcg_uncompressed_group_ops = {
	.make_item = uvcg_frame_make,
	.drop_item = uvcg_frame_drop,
};
/* Read guidFormat: copies the raw 16-byte GUID into the page buffer. */
static ssize_t uvcg_uncompressed_guid_format_show(struct config_item *item,
						  char *page)
{
	struct uvcg_uncompressed *ch = to_uvcg_uncompressed(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat));
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return sizeof(ch->desc.guidFormat);
}
/*
 * Write guidFormat: copies up to 16 raw bytes into the GUID. A shorter
 * write updates only the leading bytes, leaving the tail unchanged; the
 * return value is always the full GUID size on success. Rejected with
 * -EBUSY while the format is linked or the function is bound.
 */
static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
						   const char *page, size_t len)
{
	struct uvcg_uncompressed *ch = to_uvcg_uncompressed(item);
	struct f_uvc_opts *opts;
	struct config_item *opts_item;
	struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
	int ret;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
	opts = to_f_uvc_opts(opts_item);

	mutex_lock(&opts->lock);
	if (ch->fmt.linked || opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}

	memcpy(ch->desc.guidFormat, page,
	       min(sizeof(ch->desc.guidFormat), len));
	ret = sizeof(ch->desc.guidFormat);

end:
	mutex_unlock(&opts->lock);
	mutex_unlock(su_mutex);
	return ret;
}
UVC_ATTR(uvcg_uncompressed_, guid_format, guidFormat);
/*
 * Generator for read-only uncompressed-format attributes: prints
 * u->desc.<aname> as unsigned decimal under su_mutex + opts->lock.
 */
#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, bits)			\
static ssize_t uvcg_uncompressed_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct uvcg_uncompressed *u = to_uvcg_uncompressed(item);	\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_uncompressed_, cname, aname);
/*
 * Generator for read/write uncompressed-format attributes. The store
 * handler parses a u8, rejects 0 (UVC index values are 1-based) and
 * returns -EBUSY while the format is linked or the function is bound.
 */
#define UVCG_UNCOMPRESSED_ATTR(cname, aname, bits)			\
static ssize_t uvcg_uncompressed_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct uvcg_uncompressed *u = to_uvcg_uncompressed(item);	\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
static ssize_t								\
uvcg_uncompressed_##cname##_store(struct config_item *item,		\
				  const char *page, size_t len)		\
{									\
	struct uvcg_uncompressed *u = to_uvcg_uncompressed(item);	\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int ret;							\
	u8 num;								\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	if (u->fmt.linked || opts->refcnt) {				\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou8(page, 0, &num);					\
	if (ret)							\
		goto end;						\
									\
	/* index values in uvc are never 0 */				\
	if (!num) {							\
		ret = -EINVAL;						\
		goto end;						\
	}								\
									\
	u->desc.aname = num;						\
	ret = len;							\
end:									\
	mutex_unlock(&opts->lock);					\
	mutex_unlock(su_mutex);						\
	return ret;							\
}									\
									\
UVC_ATTR(uvcg_uncompressed_, cname, aname);
/* Instantiate the uncompressed-format attributes. */
UVCG_UNCOMPRESSED_ATTR_RO(b_format_index, bFormatIndex, 8);
UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8);
UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
UVCG_UNCOMPRESSED_ATTR_RO(bm_interlace_flags, bmInterlaceFlags, 8);

#undef UVCG_UNCOMPRESSED_ATTR
#undef UVCG_UNCOMPRESSED_ATTR_RO
static inline ssize_t
uvcg_uncompressed_bma_controls_show(struct config_item *item, char *page)
{
struct uvcg_uncompressed *unc = to_uvcg_uncompressed(item);
return uvcg_format_bma_controls_show(&unc->fmt, page);
}
/*
 * "bmaControls" store handler for an uncompressed format: delegate to the
 * shared per-format helper, operating on the embedded struct uvcg_format.
 */
static inline ssize_t
uvcg_uncompressed_bma_controls_store(struct config_item *item,
				     const char *page, size_t len)
{
	return uvcg_format_bma_controls_store(&to_uvcg_uncompressed(item)->fmt,
					      page, len);
}

UVC_ATTR(uvcg_uncompressed_, bma_controls, bmaControls);
/* Attributes exposed in each streaming/uncompressed/<name> directory. */
static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
	&uvcg_uncompressed_attr_b_format_index,
	&uvcg_uncompressed_attr_guid_format,
	&uvcg_uncompressed_attr_b_bits_per_pixel,
	&uvcg_uncompressed_attr_b_default_frame_index,
	&uvcg_uncompressed_attr_b_aspect_ratio_x,
	&uvcg_uncompressed_attr_b_aspect_ratio_y,
	&uvcg_uncompressed_attr_bm_interlace_flags,
	&uvcg_uncompressed_attr_bma_controls,
	NULL,
};

/* Item type of one uncompressed format; its children are frame items. */
static const struct config_item_type uvcg_uncompressed_type = {
	.ct_item_ops	= &uvcg_format_item_operations,
	.ct_group_ops	= &uvcg_uncompressed_group_ops,
	.ct_attrs	= uvcg_uncompressed_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * Create a streaming/uncompressed/<name> group: allocate a new uncompressed
 * format, attach the default color-matching descriptor and preset the
 * class-specific descriptor with YUY2 defaults.
 */
static struct config_group *uvcg_uncompressed_make(struct config_group *group,
						   const char *name)
{
	/* GUID identifying the YUY2 pixel format. */
	static char guid[] = {
		'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00,
		0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
	};
	struct uvcg_uncompressed *unc;
	struct uvcg_color_matching *color_match;

	/* Every format references the default color-matching descriptor. */
	color_match = uvcg_format_get_default_color_match(group->cg_item.ci_parent);
	if (!color_match)
		return ERR_PTR(-EINVAL);

	unc = kzalloc(sizeof(*unc), GFP_KERNEL);
	if (!unc)
		return ERR_PTR(-ENOMEM);

	unc->desc.bLength		= UVC_DT_FORMAT_UNCOMPRESSED_SIZE;
	unc->desc.bDescriptorType	= USB_DT_CS_INTERFACE;
	unc->desc.bDescriptorSubType	= UVC_VS_FORMAT_UNCOMPRESSED;
	memcpy(unc->desc.guidFormat, guid, sizeof(guid));
	unc->desc.bBitsPerPixel		= 16;
	unc->desc.bDefaultFrameIndex	= 1;
	unc->desc.bAspectRatioX		= 0;
	unc->desc.bAspectRatioY		= 0;
	unc->desc.bmInterlaceFlags	= 0;
	unc->desc.bCopyProtect		= 0;

	INIT_LIST_HEAD(&unc->fmt.frames);
	unc->fmt.type = UVCG_UNCOMPRESSED;
	unc->fmt.color_matching = color_match;
	color_match->refcnt++;

	config_group_init_type_name(&unc->fmt.group, name,
				    &uvcg_uncompressed_type);

	return &unc->fmt.group;
}
/* streaming/uncompressed: mkdir here creates a new uncompressed format. */
static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
	.make_group		= uvcg_uncompressed_make,
};

static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_group_ops	= &uvcg_uncompressed_grp_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "uncompressed",
};
/* -----------------------------------------------------------------------------
* streaming/mjpeg/<NAME>
*/
/* mkdir/rmdir inside an MJPEG format directory creates/removes frames. */
static struct configfs_group_operations uvcg_mjpeg_group_ops = {
	.make_item		= uvcg_frame_make,
	.drop_item		= uvcg_frame_drop,
};

/*
 * UVCG_MJPEG_ATTR_RO() - generate a read-only configfs attribute for field
 * @aname of the MJPEG format descriptor; same locking scheme as the
 * uncompressed variant (su_mutex for the configfs walk, opts->lock for the
 * field access).
 */
#define UVCG_MJPEG_ATTR_RO(cname, aname, bits)				\
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{									\
	struct uvcg_mjpeg *u = to_uvcg_mjpeg(item);			\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
UVC_ATTR_RO(uvcg_mjpeg_, cname, aname)

/*
 * UVCG_MJPEG_ATTR() - read/write variant.  Writes are rejected while the
 * format is linked into a header or the function is bound, and 0 is
 * rejected because UVC index values start at 1.
 */
#define UVCG_MJPEG_ATTR(cname, aname, bits)				\
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{									\
	struct uvcg_mjpeg *u = to_uvcg_mjpeg(item);			\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
static ssize_t								\
uvcg_mjpeg_##cname##_store(struct config_item *item,			\
			   const char *page, size_t len)		\
{									\
	struct uvcg_mjpeg *u = to_uvcg_mjpeg(item);			\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
	int ret;							\
	u8 num;								\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	if (u->fmt.linked || opts->refcnt) {				\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou8(page, 0, &num);					\
	if (ret)							\
		goto end;						\
									\
	/* index values in uvc are never 0 */				\
	if (!num) {							\
		ret = -EINVAL;						\
		goto end;						\
	}								\
									\
	u->desc.aname = num;						\
	ret = len;							\
end:									\
	mutex_unlock(&opts->lock);					\
	mutex_unlock(su_mutex);						\
	return ret;							\
}									\
									\
UVC_ATTR(uvcg_mjpeg_, cname, aname)

/* Per-field attributes; only bDefaultFrameIndex is writable. */
UVCG_MJPEG_ATTR_RO(b_format_index, bFormatIndex, 8);
UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8);
UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
UVCG_MJPEG_ATTR_RO(bm_interlace_flags, bmInterlaceFlags, 8);

#undef UVCG_MJPEG_ATTR
#undef UVCG_MJPEG_ATTR_RO
static inline ssize_t
uvcg_mjpeg_bma_controls_show(struct config_item *item, char *page)
{
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item);
return uvcg_format_bma_controls_show(&u->fmt, page);
}
/*
 * "bmaControls" store handler for an MJPEG format: delegate to the shared
 * per-format helper, operating on the embedded struct uvcg_format.
 */
static inline ssize_t
uvcg_mjpeg_bma_controls_store(struct config_item *item,
			      const char *page, size_t len)
{
	return uvcg_format_bma_controls_store(&to_uvcg_mjpeg(item)->fmt,
					      page, len);
}

UVC_ATTR(uvcg_mjpeg_, bma_controls, bmaControls);
/* Attributes exposed in each streaming/mjpeg/<name> directory. */
static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
	&uvcg_mjpeg_attr_b_format_index,
	&uvcg_mjpeg_attr_b_default_frame_index,
	&uvcg_mjpeg_attr_bm_flags,
	&uvcg_mjpeg_attr_b_aspect_ratio_x,
	&uvcg_mjpeg_attr_b_aspect_ratio_y,
	&uvcg_mjpeg_attr_bm_interlace_flags,
	&uvcg_mjpeg_attr_bma_controls,
	NULL,
};

/* Item type of one MJPEG format; its children are frame items. */
static const struct config_item_type uvcg_mjpeg_type = {
	.ct_item_ops	= &uvcg_format_item_operations,
	.ct_group_ops	= &uvcg_mjpeg_group_ops,
	.ct_attrs	= uvcg_mjpeg_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * Create a streaming/mjpeg/<name> group: allocate a new MJPEG format,
 * attach the default color-matching descriptor and preset the
 * class-specific descriptor header fields.
 */
static struct config_group *uvcg_mjpeg_make(struct config_group *group,
					    const char *name)
{
	struct uvcg_mjpeg *mjpeg;
	struct uvcg_color_matching *color_match;

	/* Every format references the default color-matching descriptor. */
	color_match = uvcg_format_get_default_color_match(group->cg_item.ci_parent);
	if (!color_match)
		return ERR_PTR(-EINVAL);

	mjpeg = kzalloc(sizeof(*mjpeg), GFP_KERNEL);
	if (!mjpeg)
		return ERR_PTR(-ENOMEM);

	mjpeg->desc.bLength		= UVC_DT_FORMAT_MJPEG_SIZE;
	mjpeg->desc.bDescriptorType	= USB_DT_CS_INTERFACE;
	mjpeg->desc.bDescriptorSubType	= UVC_VS_FORMAT_MJPEG;
	mjpeg->desc.bDefaultFrameIndex	= 1;
	mjpeg->desc.bAspectRatioX	= 0;
	mjpeg->desc.bAspectRatioY	= 0;
	mjpeg->desc.bmInterlaceFlags	= 0;
	mjpeg->desc.bCopyProtect	= 0;

	INIT_LIST_HEAD(&mjpeg->fmt.frames);
	mjpeg->fmt.type = UVCG_MJPEG;
	mjpeg->fmt.color_matching = color_match;
	color_match->refcnt++;

	config_group_init_type_name(&mjpeg->fmt.group, name, &uvcg_mjpeg_type);

	return &mjpeg->fmt.group;
}
/* streaming/mjpeg: mkdir here creates a new MJPEG format. */
static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
	.make_group		= uvcg_mjpeg_make,
};

static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_group_ops	= &uvcg_mjpeg_grp_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "mjpeg",
};
/* -----------------------------------------------------------------------------
* streaming/color_matching/default
*/
/*
 * UVCG_COLOR_MATCHING_ATTR() - generate show/store handlers for field
 * @aname of a color-matching descriptor.  The store handler parses the
 * value before taking any lock, then rejects the write while the
 * descriptor is referenced by a format (refcnt, checked under su_mutex);
 * the actual field update happens under opts->lock.
 */
#define UVCG_COLOR_MATCHING_ATTR(cname, aname, bits)			\
static ssize_t uvcg_color_matching_##cname##_show(			\
	struct config_item *item, char *page)				\
{									\
	struct config_group *group = to_config_group(item);		\
	struct uvcg_color_matching *color_match =			\
		to_uvcg_color_matching(group);				\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	int result;							\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;	\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n",					\
			 le##bits##_to_cpu(color_match->desc.aname));	\
	mutex_unlock(&opts->lock);					\
									\
	mutex_unlock(su_mutex);						\
	return result;							\
}									\
									\
static ssize_t uvcg_color_matching_##cname##_store(			\
	struct config_item *item, const char *page, size_t len)		\
{									\
	struct config_group *group = to_config_group(item);		\
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;		\
	struct uvcg_color_matching *color_match =			\
		to_uvcg_color_matching(group);				\
	struct f_uvc_opts *opts;					\
	struct config_item *opts_item;					\
	int ret;							\
	u##bits num;							\
									\
	ret = kstrtou##bits(page, 0, &num);				\
	if (ret)							\
		return ret;						\
									\
	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
									\
	if (color_match->refcnt) {					\
		ret = -EBUSY;						\
		goto unlock_su;						\
	}								\
									\
	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;	\
	opts = to_f_uvc_opts(opts_item);				\
									\
	mutex_lock(&opts->lock);					\
									\
	color_match->desc.aname = num;					\
	ret = len;							\
									\
	mutex_unlock(&opts->lock);					\
unlock_su:								\
	mutex_unlock(su_mutex);						\
									\
	return ret;							\
}									\
UVC_ATTR(uvcg_color_matching_, cname, aname)

/* Writable color-space description fields of the descriptor. */
UVCG_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
UVCG_COLOR_MATCHING_ATTR(b_transfer_characteristics, bTransferCharacteristics, 8);
UVCG_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);

#undef UVCG_COLOR_MATCHING_ATTR

static struct configfs_attribute *uvcg_color_matching_attrs[] = {
	&uvcg_color_matching_attr_b_color_primaries,
	&uvcg_color_matching_attr_b_transfer_characteristics,
	&uvcg_color_matching_attr_b_matrix_coefficients,
	NULL,
};

/* Free the descriptor when its configfs item is released. */
static void uvcg_color_matching_release(struct config_item *item)
{
	struct uvcg_color_matching *color_match =
		to_uvcg_color_matching(to_config_group(item));

	kfree(color_match);
}

static struct configfs_item_operations uvcg_color_matching_item_ops = {
	.release	= uvcg_color_matching_release,
};

static const struct config_item_type uvcg_color_matching_type = {
	.ct_item_ops	= &uvcg_color_matching_item_ops,
	.ct_attrs	= uvcg_color_matching_attrs,
	.ct_owner	= THIS_MODULE,
};
/* -----------------------------------------------------------------------------
* streaming/color_matching
*/
/*
 * Create a streaming/color_matching/<name> group: allocate a fresh
 * color-matching descriptor with only the standard header fields set;
 * the color-space fields stay zero until written via configfs.
 */
static struct config_group *uvcg_color_matching_make(struct config_group *group,
						     const char *name)
{
	struct uvcg_color_matching *cm = kzalloc(sizeof(*cm), GFP_KERNEL);

	if (!cm)
		return ERR_PTR(-ENOMEM);

	cm->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
	cm->desc.bDescriptorType = USB_DT_CS_INTERFACE;
	cm->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;

	config_group_init_type_name(&cm->group, name,
				    &uvcg_color_matching_type);

	return &cm->group;
}
/* streaming/color_matching: mkdir here creates extra descriptors. */
static struct configfs_group_operations uvcg_color_matching_grp_group_ops = {
	.make_group	= uvcg_color_matching_make,
};
/*
 * Populate streaming/color_matching with its built-in "default" child,
 * preset to the BT.709/sRGB values recommended by the UVC specification.
 */
static int uvcg_color_matching_create_children(struct config_group *parent)
{
	struct uvcg_color_matching *cm = kzalloc(sizeof(*cm), GFP_KERNEL);

	if (!cm)
		return -ENOMEM;

	cm->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
	cm->desc.bDescriptorType = USB_DT_CS_INTERFACE;
	cm->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;
	cm->desc.bColorPrimaries = UVC_COLOR_PRIMARIES_BT_709_SRGB;
	cm->desc.bTransferCharacteristics = UVC_TRANSFER_CHARACTERISTICS_BT_709;
	cm->desc.bMatrixCoefficients = UVC_MATRIX_COEFFICIENTS_SMPTE_170M;

	config_group_init_type_name(&cm->group, "default",
				    &uvcg_color_matching_type);
	configfs_add_default_group(&cm->group, parent);

	return 0;
}
/* Group type for streaming/color_matching; "default" is created eagerly. */
static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_group_ops	= &uvcg_color_matching_grp_group_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "color_matching",
	.create_children = uvcg_color_matching_create_children,
};
/* -----------------------------------------------------------------------------
* streaming/class/{fs|hs|ss}
*/
/*
 * One child of streaming/class ("fs", "hs" or "ss"); @name selects which
 * per-speed descriptor array in f_uvc_opts the link handlers operate on.
 */
struct uvcg_streaming_class_group {
	struct config_group group;
	const char *name;	/* "fs", "hs" or "ss" */
};
/*
 * Map a streaming/class/{fs|hs|ss} item to the matching per-speed
 * streaming descriptor array inside f_uvc_opts, or NULL for an unknown
 * group name.
 */
static inline struct uvc_descriptor_header
***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
	struct uvcg_streaming_class_group *grp =
		container_of(i, struct uvcg_streaming_class_group,
			     group.cg_item);

	if (strcmp(grp->name, "fs") == 0)
		return &o->uvc_fs_streaming_cls;
	else if (strcmp(grp->name, "hs") == 0)
		return &o->uvc_hs_streaming_cls;
	else if (strcmp(grp->name, "ss") == 0)
		return &o->uvc_ss_streaming_cls;

	return NULL;
}
/* Descriptor level currently being processed by __uvcg_iter_strm_cls(). */
enum uvcg_strm_type {
	UVCG_HEADER = 0,
	UVCG_FORMAT,
	UVCG_FRAME,
	UVCG_COLOR_MATCHING,
};

/*
 * Iterate over a hierarchy of streaming descriptors' config items.
 * The items are created by the user with configfs.
 *
 * It "processes" the header @h, then for each format that follows the
 * header "processes" the format itself, then every frame inside the
 * format, and finally the format's color-matching descriptor.
 *
 * As a "processing" function the @fun is used; it receives the current
 * object, @priv2, @priv3, a per-level running index and the level type.
 *
 * __uvcg_iter_strm_cls() is used in two contexts: first, to calculate
 * the amount of memory needed for an array of streaming descriptors
 * and second, to actually fill the array.
 *
 * @h: streaming header pointer
 * @priv2: an "inout" parameter (the caller might want to see the changes to it)
 * @priv3: an "inout" parameter (the caller might want to see the changes to it)
 * @fun: callback function for processing each level of the hierarchy
 */
static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h,
	void *priv2, void *priv3,
	int (*fun)(void *, void *, void *, int, enum uvcg_strm_type type))
{
	struct uvcg_format_ptr *f;
	struct config_group *grp;
	struct config_item *item;
	struct uvcg_frame *frm;
	int ret, i, j;

	if (!fun)
		return -EINVAL;

	/* i counts formats, j counts frames across all formats */
	i = j = 0;
	ret = fun(h, priv2, priv3, 0, UVCG_HEADER);
	if (ret)
		return ret;
	list_for_each_entry(f, &h->formats, entry) {
		ret = fun(f->fmt, priv2, priv3, i++, UVCG_FORMAT);
		if (ret)
			return ret;
		grp = &f->fmt->group;
		/* frame items are direct configfs children of the format */
		list_for_each_entry(item, &grp->cg_children, ci_entry) {
			frm = to_uvcg_frame(item);
			ret = fun(frm, priv2, priv3, j++, UVCG_FRAME);
			if (ret)
				return ret;
		}
		ret = fun(f->fmt->color_matching, priv2, priv3, 0,
			  UVCG_COLOR_MATCHING);
		if (ret)
			return ret;
	}
	return ret;
}
/*
 * Count how many bytes are needed for an array of streaming descriptors.
 *
 * @priv1: pointer to a header, format, frame or color-matching descriptor
 * @priv2: inout parameter, accumulated size of the array in bytes
 * @priv3: inout parameter, accumulated number of the array elements
 * @n: unused, this function's prototype must match @fun in __uvcg_iter_strm_cls
 */
static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
			   enum uvcg_strm_type type)
{
	size_t *size = priv2;
	size_t *count = priv3;

	switch (type) {
	case UVCG_HEADER: {
		struct uvcg_streaming_header *h = priv1;

		*size += sizeof(h->desc);
		/* bmaControls */
		*size += h->num_fmt * UVCG_STREAMING_CONTROL_SIZE;
	}
	break;
	case UVCG_FORMAT: {
		struct uvcg_format *fmt = priv1;

		if (fmt->type == UVCG_UNCOMPRESSED) {
			struct uvcg_uncompressed *u =
				container_of(fmt, struct uvcg_uncompressed,
					     fmt);

			*size += sizeof(u->desc);
		} else if (fmt->type == UVCG_MJPEG) {
			struct uvcg_mjpeg *m =
				container_of(fmt, struct uvcg_mjpeg, fmt);

			*size += sizeof(m->desc);
		} else {
			return -EINVAL;
		}
	}
	break;
	case UVCG_FRAME: {
		struct uvcg_frame *frm = priv1;
		int sz = sizeof(frm->dw_frame_interval);

		/* fixed part plus one dwFrameInterval per interval entry */
		*size += sizeof(frm->frame);
		*size += frm->frame.b_frame_interval_type * sz;
	}
	break;
	case UVCG_COLOR_MATCHING: {
		struct uvcg_color_matching *color_match = priv1;

		*size += sizeof(color_match->desc);
	}
	break;
	}

	/* every processed descriptor becomes one array element */
	++*count;
	return 0;
}
/*
 * Fill an array of streaming descriptors.
 *
 * @priv1: pointer to a header, format, frame or color-matching descriptor
 * @priv2: inout parameter, pointer into a block of memory (advanced as
 *         descriptors are serialized into it)
 * @priv3: inout parameter, pointer to a 2-dimensional array (each element
 *         is made to point at the corresponding serialized descriptor)
 * @n: zero-based format index, used to assign bFormatIndex
 */
static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
			    enum uvcg_strm_type type)
{
	void **dest = priv2;
	struct uvc_descriptor_header ***array = priv3;
	size_t sz;

	/* record where this descriptor starts, then advance the array */
	**array = *dest;
	++*array;

	switch (type) {
	case UVCG_HEADER: {
		struct uvc_input_header_descriptor *ihdr = *dest;
		struct uvcg_streaming_header *h = priv1;
		struct uvcg_format_ptr *f;

		memcpy(*dest, &h->desc, sizeof(h->desc));
		*dest += sizeof(h->desc);
		sz = UVCG_STREAMING_CONTROL_SIZE;
		/* append one bmaControls entry per linked format */
		list_for_each_entry(f, &h->formats, entry) {
			memcpy(*dest, f->fmt->bmaControls, sz);
			*dest += sz;
		}
		/* patch the serialized copy with the final length/count */
		ihdr->bLength = sizeof(h->desc) + h->num_fmt * sz;
		ihdr->bNumFormats = h->num_fmt;
	}
	break;
	case UVCG_FORMAT: {
		struct uvcg_format *fmt = priv1;

		if (fmt->type == UVCG_UNCOMPRESSED) {
			struct uvcg_uncompressed *u =
				container_of(fmt, struct uvcg_uncompressed,
					     fmt);

			/* UVC format indices are 1-based */
			u->desc.bFormatIndex = n + 1;
			u->desc.bNumFrameDescriptors = fmt->num_frames;
			memcpy(*dest, &u->desc, sizeof(u->desc));
			*dest += sizeof(u->desc);
		} else if (fmt->type == UVCG_MJPEG) {
			struct uvcg_mjpeg *m =
				container_of(fmt, struct uvcg_mjpeg, fmt);

			m->desc.bFormatIndex = n + 1;
			m->desc.bNumFrameDescriptors = fmt->num_frames;
			memcpy(*dest, &m->desc, sizeof(m->desc));
			*dest += sizeof(m->desc);
		} else {
			return -EINVAL;
		}
	}
	break;
	case UVCG_FRAME: {
		struct uvcg_frame *frm = priv1;
		struct uvc_descriptor_header *h = *dest;

		sz = sizeof(frm->frame);
		memcpy(*dest, &frm->frame, sz);
		*dest += sz;
		/* variable-length tail: the dwFrameInterval array */
		sz = frm->frame.b_frame_interval_type *
			sizeof(*frm->dw_frame_interval);
		memcpy(*dest, frm->dw_frame_interval, sz);
		*dest += sz;
		/* bLength depends on the number of interval entries */
		if (frm->fmt_type == UVCG_UNCOMPRESSED)
			h->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(
				frm->frame.b_frame_interval_type);
		else if (frm->fmt_type == UVCG_MJPEG)
			h->bLength = UVC_DT_FRAME_MJPEG_SIZE(
				frm->frame.b_frame_interval_type);
	}
	break;
	case UVCG_COLOR_MATCHING: {
		struct uvcg_color_matching *color_match = priv1;

		memcpy(*dest, &color_match->desc, sizeof(color_match->desc));
		*dest += sizeof(color_match->desc);
	}
	break;
	}

	return 0;
}
/*
 * Symlinking a streaming header into streaming/class/{fs|hs|ss} serializes
 * the whole header/format/frame hierarchy into one contiguous descriptor
 * array for that speed.  Two passes over the hierarchy: one to size the
 * buffer (__uvcg_cnt_strm), one to fill it (__uvcg_fill_strm).
 */
static int uvcg_streaming_class_allow_link(struct config_item *src,
					   struct config_item *target)
{
	struct config_item *streaming, *header;
	struct f_uvc_opts *opts;
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvc_descriptor_header ***class_array, **cl_arr;
	struct uvcg_streaming_header *target_hdr;
	void *data, *data_save;
	size_t size = 0, count = 0;
	int ret = -EINVAL;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	/* only links to items under streaming/header are valid targets */
	streaming = src->ci_parent->ci_parent;
	header = config_group_find_item(to_config_group(streaming), "header");
	if (!header || target->ci_parent != header)
		goto out;

	opts = to_f_uvc_opts(streaming->ci_parent);

	mutex_lock(&opts->lock);
	/* refuse if this speed is already linked or the function is bound */
	class_array = __uvcg_get_stream_class_arr(src, opts);
	if (!class_array || *class_array || opts->refcnt) {
		ret = -EBUSY;
		goto unlock;
	}

	target_hdr = to_uvcg_streaming_header(target);
	/* pass 1: compute total size and element count */
	ret = __uvcg_iter_strm_cls(target_hdr, &size, &count, __uvcg_cnt_strm);
	if (ret)
		goto unlock;

	count += 1; /* NULL */
	*class_array = kcalloc(count, sizeof(void *), GFP_KERNEL);
	if (!*class_array) {
		ret = -ENOMEM;
		goto unlock;
	}

	data = data_save = kzalloc(size, GFP_KERNEL);
	if (!data) {
		kfree(*class_array);
		*class_array = NULL;
		ret = -ENOMEM;
		goto unlock;
	}
	cl_arr = *class_array;
	/* pass 2: serialize descriptors and fill the pointer array */
	ret = __uvcg_iter_strm_cls(target_hdr, &data, &cl_arr,
				   __uvcg_fill_strm);
	if (ret) {
		kfree(*class_array);
		*class_array = NULL;
		/*
		 * __uvcg_fill_strm() called from __uvcg_iter_strm_cls()
		 * might have advanced the "data", so use a backup copy
		 */
		kfree(data_save);
		goto unlock;
	}

	/* pin the header so its contents can't change while linked */
	++target_hdr->linked;
	ret = 0;

unlock:
	mutex_unlock(&opts->lock);
out:
	config_item_put(header);
	mutex_unlock(su_mutex);
	return ret;
}

/*
 * Removing the symlink frees the serialized descriptor array for that
 * speed and unpins the header; refused while the function is bound.
 */
static void uvcg_streaming_class_drop_link(struct config_item *src,
					  struct config_item *target)
{
	struct config_item *streaming, *header;
	struct f_uvc_opts *opts;
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct uvc_descriptor_header ***class_array;
	struct uvcg_streaming_header *target_hdr;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	streaming = src->ci_parent->ci_parent;
	header = config_group_find_item(to_config_group(streaming), "header");
	if (!header || target->ci_parent != header)
		goto out;

	opts = to_f_uvc_opts(streaming->ci_parent);

	mutex_lock(&opts->lock);
	class_array = __uvcg_get_stream_class_arr(src, opts);
	if (!class_array || !*class_array)
		goto unlock;

	if (opts->refcnt)
		goto unlock;

	target_hdr = to_uvcg_streaming_header(target);
	--target_hdr->linked;
	/* first element points at the start of the serialized data block */
	kfree(**class_array);
	kfree(*class_array);
	*class_array = NULL;

unlock:
	mutex_unlock(&opts->lock);
out:
	config_item_put(header);
	mutex_unlock(su_mutex);
}

static struct configfs_item_operations uvcg_streaming_class_item_ops = {
	.release	= uvcg_config_item_release,
	.allow_link	= uvcg_streaming_class_allow_link,
	.drop_link	= uvcg_streaming_class_drop_link,
};

static const struct config_item_type uvcg_streaming_class_type = {
	.ct_item_ops	= &uvcg_streaming_class_item_ops,
	.ct_owner	= THIS_MODULE,
};
/* -----------------------------------------------------------------------------
* streaming/class
*/
/*
 * Populate streaming/class with the fixed per-speed children "fs", "hs"
 * and "ss".  Each child remembers its name so the link handlers can find
 * the matching descriptor array later.
 */
static int uvcg_streaming_class_create_children(struct config_group *parent)
{
	static const char * const names[] = { "fs", "hs", "ss" };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		struct uvcg_streaming_class_group *child;

		child = kzalloc(sizeof(*child), GFP_KERNEL);
		if (!child)
			return -ENOMEM;

		child->name = names[i];
		config_group_init_type_name(&child->group, child->name,
					    &uvcg_streaming_class_type);
		configfs_add_default_group(&child->group, parent);
	}

	return 0;
}
/* Group type for streaming/class; per-speed children are created eagerly. */
static const struct uvcg_config_group_type uvcg_streaming_class_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_owner	= THIS_MODULE,
	},
	.name = "class",
	.create_children = uvcg_streaming_class_create_children,
};
/* -----------------------------------------------------------------------------
* streaming
*/
/*
 * streaming/bInterfaceNumber (read-only): interface number assigned to
 * the VideoStreaming interface, read from the owning f_uvc_opts.
 */
static ssize_t uvcg_default_streaming_b_interface_number_show(
	struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
	struct f_uvc_opts *opts;
	int result;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	opts = to_f_uvc_opts(item->ci_parent);

	mutex_lock(&opts->lock);
	result = sprintf(page, "%u\n", opts->streaming_interface);
	mutex_unlock(&opts->lock);

	mutex_unlock(su_mutex);

	return result;
}

UVC_ATTR_RO(uvcg_default_streaming_, b_interface_number, bInterfaceNumber);
static struct configfs_attribute *uvcg_default_streaming_attrs[] = {
	&uvcg_default_streaming_attr_b_interface_number,
	NULL,
};

/* The streaming/ directory and its fixed set of default children. */
static const struct uvcg_config_group_type uvcg_streaming_grp_type = {
	.type = {
		.ct_item_ops	= &uvcg_config_item_ops,
		.ct_attrs	= uvcg_default_streaming_attrs,
		.ct_owner	= THIS_MODULE,
	},
	.name = "streaming",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_streaming_header_grp_type,
		&uvcg_uncompressed_grp_type,
		&uvcg_mjpeg_grp_type,
		&uvcg_color_matching_grp_type,
		&uvcg_streaming_class_grp_type,
		NULL,
	},
};
/* -----------------------------------------------------------------------------
* UVC function
*/
/*
 * Release the UVC function's root configfs item: tear down all default
 * child groups, then drop the reference on the function instance.
 */
static void uvc_func_item_release(struct config_item *item)
{
	struct f_uvc_opts *opts = to_f_uvc_opts(item);

	uvcg_config_remove_children(to_config_group(item));
	usb_put_function_instance(&opts->func_inst);
}
/*
 * Symlinking a gadget string item (strings/<langid>/<name>) into the
 * function directory assigns that string's id to one of the UVC string
 * descriptor indices (IAD, VS0 or VS1), selected by the target's name.
 */
static int uvc_func_allow_link(struct config_item *src, struct config_item *tgt)
{
	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
	struct gadget_string *string;
	struct config_item *strings;
	struct f_uvc_opts *opts;
	int ret = 0;

	mutex_lock(su_mutex); /* for navigating configfs hierarchy */

	/* Validate that the target is an entry in strings/<langid> */
	strings = config_group_find_item(to_config_group(src->ci_parent->ci_parent),
					 "strings");
	if (!strings || tgt->ci_parent->ci_parent != strings) {
		ret = -EINVAL;
		goto put_strings;
	}

	string = to_gadget_string(tgt);

	opts = to_f_uvc_opts(src);
	mutex_lock(&opts->lock);

	/* the link name selects which descriptor index is assigned */
	if (!strcmp(tgt->ci_name, "iad_desc"))
		opts->iad_index = string->usb_string.id;
	else if (!strcmp(tgt->ci_name, "vs0_desc"))
		opts->vs0_index = string->usb_string.id;
	else if (!strcmp(tgt->ci_name, "vs1_desc"))
		opts->vs1_index = string->usb_string.id;
	else
		ret = -EINVAL;

	mutex_unlock(&opts->lock);

put_strings:
	config_item_put(strings);
	mutex_unlock(su_mutex);

	return ret;
}
/*
 * Undo uvc_func_allow_link(): reset whichever string descriptor index the
 * dropped symlink had assigned.
 */
static void uvc_func_drop_link(struct config_item *src, struct config_item *tgt)
{
	struct f_uvc_opts *opts = to_f_uvc_opts(src);

	mutex_lock(&opts->lock);

	if (strcmp(tgt->ci_name, "iad_desc") == 0)
		opts->iad_index = 0;
	else if (strcmp(tgt->ci_name, "vs0_desc") == 0)
		opts->vs0_index = 0;
	else if (strcmp(tgt->ci_name, "vs1_desc") == 0)
		opts->vs1_index = 0;

	mutex_unlock(&opts->lock);
}
static struct configfs_item_operations uvc_func_item_ops = {
	.release	= uvc_func_item_release,
	.allow_link	= uvc_func_allow_link,
	.drop_link	= uvc_func_drop_link,
};

/*
 * UVCG_OPTS_ATTR() - generate show/store handlers for unsigned integer
 * option @cname of f_uvc_opts, bounded above by @limit.  Writes are
 * rejected while the function is bound (opts->refcnt).
 */
#define UVCG_OPTS_ATTR(cname, aname, limit)				\
static ssize_t f_uvc_opts_##cname##_show(				\
	struct config_item *item, char *page)				\
{									\
	struct f_uvc_opts *opts = to_f_uvc_opts(item);			\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", opts->cname);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t								\
f_uvc_opts_##cname##_store(struct config_item *item,			\
			   const char *page, size_t len)		\
{									\
	struct f_uvc_opts *opts = to_f_uvc_opts(item);			\
	unsigned int num;						\
	int ret;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtouint(page, 0, &num);				\
	if (ret)							\
		goto end;						\
									\
	if (num > limit) {						\
		ret = -EINVAL;						\
		goto end;						\
	}								\
	opts->cname = num;						\
	ret = len;							\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
UVC_ATTR(f_uvc_opts_, cname, cname)

/* Isoc parameters: interval in frames, packet size, SS burst length. */
UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16);
UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072);
UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);

#undef UVCG_OPTS_ATTR

/*
 * UVCG_OPTS_STRING_ATTR() - generate show/store handlers for fixed-size
 * string option @aname of f_uvc_opts.  store copies at most
 * sizeof(opts->aname) bytes (truncating silently) and reports the number
 * of bytes stored.
 *
 * NOTE(review): the show handler bounds snprintf() by the size of the
 * stored buffer rather than the conventional PAGE_SIZE; this works because
 * the string cannot exceed its buffer, but confirm it is intentional.
 */
#define UVCG_OPTS_STRING_ATTR(cname, aname)				\
static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
					 char *page)			\
{									\
	struct f_uvc_opts *opts = to_f_uvc_opts(item);			\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
					 const char *page, size_t len)	\
{									\
	struct f_uvc_opts *opts = to_f_uvc_opts(item);			\
	int size = min(sizeof(opts->aname), len + 1);			\
	int ret = 0;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = strscpy(opts->aname, page, size);				\
	if (ret == -E2BIG)						\
		ret = size - 1;						\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
UVC_ATTR(f_uvc_opts_string_, cname, aname)

UVCG_OPTS_STRING_ATTR(function_name, function_name);

#undef UVCG_OPTS_STRING_ATTR

/* Attributes exposed directly in the function instance directory. */
static struct configfs_attribute *uvc_attrs[] = {
	&f_uvc_opts_attr_streaming_interval,
	&f_uvc_opts_attr_streaming_maxpacket,
	&f_uvc_opts_attr_streaming_maxburst,
	&f_uvc_opts_string_attr_function_name,
	NULL,
};

/* Root group type of the UVC function instance ("control", "streaming"). */
static const struct uvcg_config_group_type uvc_func_type = {
	.type = {
		.ct_item_ops	= &uvc_func_item_ops,
		.ct_attrs	= uvc_attrs,
		.ct_owner	= THIS_MODULE,
	},
	.name = "",
	.children = (const struct uvcg_config_group_type*[]) {
		&uvcg_control_grp_type,
		&uvcg_streaming_grp_type,
		NULL,
	},
};
/*
 * uvcg_attach_configfs() - build the configfs tree for a UVC function
 * instance.
 *
 * Initializes the instance's root group and creates its default children.
 * On failure the group reference is dropped so the partially built tree
 * is released.  Returns 0 on success or a negative errno.
 */
int uvcg_attach_configfs(struct f_uvc_opts *opts)
{
	int ret;

	config_group_init_type_name(&opts->func_inst.group,
				    uvc_func_type.name,
				    &uvc_func_type.type);

	ret = uvcg_config_create_children(&opts->func_inst.group,
					  &uvc_func_type);
	if (ret < 0)
		config_group_put(&opts->func_inst.group);

	return ret;
}
/* ===== end of drivers/usb/gadget/function/uvc_configfs.c (linux-master); drivers/usb/gadget/function/f_eem.c follows ===== */
// SPDX-License-Identifier: GPL-2.0+
/*
* f_eem.c -- USB CDC Ethernet (EEM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 EF Johnson Technologies
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_eem.h"
/* Length of the EEM packet header prepended to each Ethernet frame. */
#define EEM_HLEN 2

/*
 * This function is a "CDC Ethernet Emulation Model" (CDC EEM)
 * Ethernet link.
 */

/* Per-instance state: the generic ethernet port plus our interface id. */
struct f_eem {
	struct gether			port;
	u8				ctrl_id;	/* data interface id */
};

/* Context attached to an IN request so completion can find skb and ep. */
struct in_context {
	struct sk_buff	*skb;
	struct usb_ep	*ep;
};

/* Resolve the containing f_eem from the embedded usb_function. */
static inline struct f_eem *func_to_eem(struct usb_function *f)
{
	return container_of(f, struct f_eem, port.func);
}
/*-------------------------------------------------------------------------*/

/*
 * interface descriptor: a single data interface carrying both EEM command
 * and data packets over one bulk IN / bulk OUT endpoint pair.
 */
static struct usb_interface_descriptor eem_intf = {
	.bLength =		sizeof eem_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */

	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_EEM,
	.bInterfaceProtocol =	USB_CDC_PROTO_EEM,
	/* .iInterface = DYNAMIC */
};

/* full speed support: wMaxPacketSize is left for the UDC to fill in */

static struct usb_endpoint_descriptor eem_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor eem_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eem_fs_function[] = {
	/* CDC EEM control descriptors */
	(struct usb_descriptor_header *) &eem_intf,
	(struct usb_descriptor_header *) &eem_fs_in_desc,
	(struct usb_descriptor_header *) &eem_fs_out_desc,
	NULL,
};

/* high speed support: 512-byte bulk packets */

static struct usb_endpoint_descriptor eem_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor eem_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *eem_hs_function[] = {
	/* CDC EEM control descriptors */
	(struct usb_descriptor_header *) &eem_intf,
	(struct usb_descriptor_header *) &eem_hs_in_desc,
	(struct usb_descriptor_header *) &eem_hs_out_desc,
	NULL,
};

/* super speed support: 1024-byte bulk packets plus companion descriptors */

static struct usb_endpoint_descriptor eem_ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor eem_ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

/* shared by both SS bulk endpoints */
static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc = {
	.bLength =		sizeof eem_ss_bulk_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *eem_ss_function[] = {
	/* CDC EEM control descriptors */
	(struct usb_descriptor_header *) &eem_intf,
	(struct usb_descriptor_header *) &eem_ss_in_desc,
	(struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
	(struct usb_descriptor_header *) &eem_ss_out_desc,
	(struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
	NULL,
};

/* string descriptors: */

static struct usb_string eem_string_defs[] = {
	[0].s = "CDC Ethernet Emulation Model (EEM)",
	{  } /* end of list */
};

static struct usb_gadget_strings eem_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		eem_string_defs,
};

static struct usb_gadget_strings *eem_strings[] = {
	&eem_string_table,
	NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * eem_setup - class-specific ep0 request handler.
 *
 * EEM defines no class-specific control requests, so anything routed
 * here is logged and rejected; returning -EOPNOTSUPP makes the
 * composite core stall ep0.
 */
static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	u16 idx = le16_to_cpu(ctrl->wIndex);
	u16 val = le16_to_cpu(ctrl->wValue);
	u16 len = le16_to_cpu(ctrl->wLength);

	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
	    ctrl->bRequestType, ctrl->bRequest, val, idx, len);

	/* device either stalls (value < 0) or reports success */
	return -EOPNOTSUPP;
}
/*
 * eem_set_alt - activate (or reset) the single EEM data interface.
 *
 * EEM has exactly one interface with exactly one altsetting, so any
 * other (intf, alt) pair is rejected.  On activation the link is torn
 * down first, endpoints are (re)configured for the current speed if
 * needed, and the ethernet link is brought up via gether_connect().
 */
static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_eem *eem = func_to_eem(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct net_device *net;

	/* we know alt == 0, so this is an activation or a reset */
	if (alt != 0 || intf != eem->ctrl_id)
		return -EINVAL;

	DBG(cdev, "reset eem\n");
	gether_disconnect(&eem->port);

	if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) {
		DBG(cdev, "init eem\n");
		if (config_ep_by_speed(cdev->gadget, f, eem->port.in_ep) ||
		    config_ep_by_speed(cdev->gadget, f, eem->port.out_ep)) {
			eem->port.in_ep->desc = NULL;
			eem->port.out_ep->desc = NULL;
			return -EINVAL;
		}
	}

	/* zlps should not occur because zero-length EEM packets
	 * will be inserted in those cases where they would occur
	 */
	eem->port.is_zlp_ok = 1;
	eem->port.cdc_filter = DEFAULT_FILTER;
	DBG(cdev, "activate eem\n");
	net = gether_connect(&eem->port);
	if (IS_ERR(net))
		return PTR_ERR(net);

	return 0;
}
/*
 * eem_disable - host dropped the configuration; tear the link down
 * if it is currently up (in_ep enabled implies gether_connect ran).
 */
static void eem_disable(struct usb_function *f)
{
	struct f_eem *eem = func_to_eem(f);

	DBG(f->config->cdev, "eem deactivated\n");

	if (eem->port.in_ep->enabled)
		gether_disconnect(&eem->port);
}
/*-------------------------------------------------------------------------*/
/* EEM function driver setup/binding.
 *
 * Registers the backing netdev on first bind, attaches strings,
 * allocates the interface ID and both bulk endpoints, and assigns the
 * FS/HS/SS descriptor sets.  Returns 0 or a negative errno.
 */
static int eem_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_eem *eem = func_to_eem(f);
	struct usb_string *us;
	int status;
	struct usb_ep *ep;
	struct f_eem_opts *eem_opts;

	eem_opts = container_of(f->fi, struct f_eem_opts, func_inst);

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to eem_opts->bound access
	 */
	if (!eem_opts->bound) {
		/* first bind: attach the gadget and register the netdev */
		mutex_lock(&eem_opts->lock);
		gether_set_gadget(eem_opts->net, cdev->gadget);
		status = gether_register_netdev(eem_opts->net);
		mutex_unlock(&eem_opts->lock);
		if (status)
			return status;
		eem_opts->bound = true;
	}

	us = usb_gstrings_attach(cdev, eem_strings,
				 ARRAY_SIZE(eem_string_defs));
	if (IS_ERR(us))
		return PTR_ERR(us);
	eem_intf.iInterface = us[0].id;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	eem->ctrl_id = status;
	eem_intf.bInterfaceNumber = status;

	/* from here on, any autoconfig failure reports -ENODEV */
	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
	if (!ep)
		goto fail;
	eem->port.in_ep = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
	if (!ep)
		goto fail;
	eem->port.out_ep = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	eem_hs_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress;
	eem_hs_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;

	eem_ss_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress;
	eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
					eem_ss_function, eem_ss_function);
	if (status)
		goto fail;

	DBG(cdev, "CDC Ethernet (EEM): IN/%s OUT/%s\n",
	    eem->port.in_ep->name, eem->port.out_ep->name);
	return 0;

fail:
	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}
/*
 * eem_cmd_complete - completion handler for a queued EEM echo response;
 * releases everything allocated when the response was queued: the
 * cloned skb, the bounce buffer, the request, and the context.
 */
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct in_context *ctx = req->context;
	struct usb_ep *in_ep = ctx->ep;

	kfree(req->buf);
	usb_ep_free_request(in_ep, req);
	dev_kfree_skb_any(ctx->skb);
	kfree(ctx);
}
/*
 * Add the EEM header and ethernet checksum.
 * We currently do not attempt to put multiple ethernet frames
 * into a single USB transfer.
 *
 * Prepends the 2-byte EEM data-packet header, appends the 4-byte
 * sentinel "CRC" (0xdeadbeef), and, when the framed length would be an
 * exact multiple of wMaxPacketSize, appends a 2-byte zero-length EEM
 * packet instead of relying on a ZLP.  The skb is reallocated only if
 * head/tailroom is insufficient or it is cloned.  Consumes @skb;
 * returns the framed skb, or NULL on allocation failure.
 */
static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
	struct sk_buff *skb2 = NULL;
	struct usb_ep *in = port->in_ep;
	int headroom, tailroom, padlen = 0;
	u16 len;

	if (!skb)
		return NULL;

	len = skb->len;
	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	/* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
	 * stick two bytes of zero-length EEM packet on the end.
	 */
	if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
		padlen += 2;

	/* reuse the skb in place when it has room and is not shared */
	if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
	    (headroom >= EEM_HLEN) && !skb_cloned(skb))
		goto done;

	skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
	dev_kfree_skb_any(skb);
	skb = skb2;
	if (!skb)
		return skb;

done:
	/* use the "no CRC" option */
	put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));

	/* EEM packet header format:
	 * b0..13:	length of ethernet frame
	 * b14:		bmCRC (0 == sentinel CRC)
	 * b15:		bmType (0 == data)
	 */
	len = skb->len;
	put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));

	/* add a zero-length EEM packet, if needed */
	if (padlen)
		put_unaligned_le16(0, skb_put(skb, 2));

	return skb;
}
/*
* Remove the EEM header. Note that there can be many EEM packets in a single
* USB transfer, so we need to break them out and handle them independently.
*/
static int eem_unwrap(struct gether *port,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct usb_composite_dev *cdev = port->func.config->cdev;
int status = 0;
do {
struct sk_buff *skb2;
u16 header;
u16 len = 0;
if (skb->len < EEM_HLEN) {
status = -EINVAL;
DBG(cdev, "invalid EEM header\n");
goto error;
}
/* remove the EEM header */
header = get_unaligned_le16(skb->data);
skb_pull(skb, EEM_HLEN);
/* EEM packet header format:
* b0..14: EEM type dependent (data or command)
* b15: bmType (0 == data, 1 == command)
*/
if (header & BIT(15)) {
struct usb_request *req;
struct in_context *ctx;
struct usb_ep *ep;
u16 bmEEMCmd;
/* EEM command packet format:
* b0..10: bmEEMCmdParam
* b11..13: bmEEMCmd
* b14: reserved (must be zero)
* b15: bmType (1 == command)
*/
if (header & BIT(14))
continue;
bmEEMCmd = (header >> 11) & 0x7;
switch (bmEEMCmd) {
case 0: /* echo */
len = header & 0x7FF;
if (skb->len < len) {
status = -EOVERFLOW;
goto error;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "EEM echo response error\n");
goto next;
}
skb_trim(skb2, len);
put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
ep = port->in_ep;
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (!req) {
dev_kfree_skb_any(skb2);
goto next;
}
req->buf = kmalloc(skb2->len, GFP_KERNEL);
if (!req->buf) {
usb_ep_free_request(ep, req);
dev_kfree_skb_any(skb2);
goto next;
}
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
kfree(req->buf);
usb_ep_free_request(ep, req);
dev_kfree_skb_any(skb2);
goto next;
}
ctx->skb = skb2;
ctx->ep = ep;
skb_copy_bits(skb2, 0, req->buf, skb2->len);
req->length = skb2->len;
req->complete = eem_cmd_complete;
req->zero = 1;
req->context = ctx;
if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
DBG(cdev, "echo response queue fail\n");
break;
case 1: /* echo response */
case 2: /* suspend hint */
case 3: /* response hint */
case 4: /* response complete hint */
case 5: /* tickle */
default: /* reserved */
continue;
}
} else {
u32 crc, crc2;
struct sk_buff *skb3;
/* check for zero-length EEM packet */
if (header == 0)
continue;
/* EEM data packet format:
* b0..13: length of ethernet frame
* b14: bmCRC (0 == sentinel, 1 == calculated)
* b15: bmType (0 == data)
*/
len = header & 0x3FFF;
if ((skb->len < len)
|| (len < (ETH_HLEN + ETH_FCS_LEN))) {
status = -EINVAL;
goto error;
}
/* validate CRC */
if (header & BIT(14)) {
crc = get_unaligned_le32(skb->data + len
- ETH_FCS_LEN);
crc2 = ~crc32_le(~0,
skb->data, len - ETH_FCS_LEN);
} else {
crc = get_unaligned_be32(skb->data + len
- ETH_FCS_LEN);
crc2 = 0xdeadbeef;
}
if (crc != crc2) {
DBG(cdev, "invalid EEM CRC\n");
goto next;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "unable to unframe EEM packet\n");
goto next;
}
skb_trim(skb2, len - ETH_FCS_LEN);
skb3 = skb_copy_expand(skb2,
NET_IP_ALIGN,
0,
GFP_ATOMIC);
if (unlikely(!skb3)) {
dev_kfree_skb_any(skb2);
goto next;
}
dev_kfree_skb_any(skb2);
skb_queue_tail(list, skb3);
}
next:
skb_pull(skb, len);
} while (skb->len);
error:
dev_kfree_skb_any(skb);
return status;
}
/* Map a configfs item back to its containing f_eem_opts. */
static inline struct f_eem_opts *to_f_eem_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_eem_opts,
			    func_inst.group);
}
/* f_eem_item_ops: generated by the u_ether configfs helpers */
USB_ETHERNET_CONFIGFS_ITEM(eem);

/* f_eem_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(eem);

/* f_eem_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(eem);

/* f_eem_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(eem);

/* f_eem_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(eem);

/* configfs attributes exposed under the function's directory */
static struct configfs_attribute *eem_attrs[] = {
	&eem_opts_attr_dev_addr,
	&eem_opts_attr_host_addr,
	&eem_opts_attr_qmult,
	&eem_opts_attr_ifname,
	NULL,
};

static const struct config_item_type eem_func_type = {
	.ct_item_ops = &eem_item_ops,
	.ct_attrs = eem_attrs,
	.ct_owner = THIS_MODULE,
};
/*
 * eem_free_inst - release a function instance.
 *
 * A bound instance owns a registered netdev, so it needs the full
 * gether teardown; an unbound one just frees the netdev.
 */
static void eem_free_inst(struct usb_function_instance *f)
{
	struct f_eem_opts *eem_opts = container_of(f, struct f_eem_opts,
						   func_inst);

	if (eem_opts->bound)
		gether_cleanup(netdev_priv(eem_opts->net));
	else
		free_netdev(eem_opts->net);

	kfree(eem_opts);
}
/*
 * eem_alloc_inst - allocate one function instance together with its
 * default-configured backing net_device.  Returns ERR_PTR on failure.
 */
static struct usb_function_instance *eem_alloc_inst(void)
{
	struct f_eem_opts *opts = kzalloc(sizeof(*opts), GFP_KERNEL);

	if (!opts)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = eem_free_inst;

	opts->net = gether_setup_default();
	if (IS_ERR(opts->net)) {
		/* capture the error before opts is freed */
		struct usb_function_instance *err = ERR_CAST(opts->net);

		kfree(opts);
		return err;
	}

	config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);

	return &opts->func_inst;
}
/*
 * eem_free - release a usb_function and drop its reference on the
 * shared options (refcnt is protected by opts->lock).
 */
static void eem_free(struct usb_function *f)
{
	struct f_eem *eem = func_to_eem(f);
	struct f_eem_opts *opts = container_of(f->fi, struct f_eem_opts,
					       func_inst);

	kfree(eem);

	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);
}
/* Undo eem_bind(): free the FS/HS/SS descriptor copies. */
static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
{
	DBG(c->cdev, "eem unbind\n");

	usb_free_all_descriptors(f);
}
/*
 * eem_alloc - allocate one usb_function for this instance and wire up
 * the gether port (wrap/unwrap framing) and the function callbacks.
 */
static struct usb_function *eem_alloc(struct usb_function_instance *fi)
{
	struct f_eem *eem;
	struct f_eem_opts *opts;

	/* allocate and initialize one new instance */
	eem = kzalloc(sizeof(*eem), GFP_KERNEL);
	if (!eem)
		return ERR_PTR(-ENOMEM);

	opts = container_of(fi, struct f_eem_opts, func_inst);

	/* pin the shared netdev while this function exists */
	mutex_lock(&opts->lock);
	opts->refcnt++;
	eem->port.ioport = netdev_priv(opts->net);
	mutex_unlock(&opts->lock);

	eem->port.cdc_filter = DEFAULT_FILTER;
	eem->port.wrap = eem_wrap;
	eem->port.unwrap = eem_unwrap;
	eem->port.header_len = EEM_HLEN;

	/* descriptors are per-instance copies */
	eem->port.func.name = "cdc_eem";
	eem->port.func.bind = eem_bind;
	eem->port.func.unbind = eem_unbind;
	eem->port.func.set_alt = eem_set_alt;
	eem->port.func.setup = eem_setup;
	eem->port.func.disable = eem_disable;
	eem->port.func.free_func = eem_free;

	return &eem->port.func;
}
/* register the "eem" function with the gadget framework */
DECLARE_USB_FUNCTION_INIT(eem, eem_alloc_inst, eem_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/function/f_eem.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_ncm.c -- USB CDC Network (NCM) link function driver
*
* Copyright (C) 2010 Nokia Corporation
* Contact: Yauheni Kaliuta <[email protected]>
*
* The driver borrows from f_ecm.c which is:
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_ncm.h"
#include "configfs.h"
/*
* This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
* NCM is intended to be used with high-speed network attachments.
*
* Note that NCM requires the use of "alternate settings" for its data
* interface. This means that the set_alt() method has real work to do,
* and also means that a get_alt() method is required.
*/
/* to trigger crc/non-crc ndp signature */
#define	NCM_NDP_HDR_CRC		0x01000000

/* pending interrupt-endpoint notification, driven as a tiny state
 * machine: SPEED is sent first, then CONNECT, then nothing */
enum ncm_notify_state {
	NCM_NOTIFY_NONE,		/* don't notify */
	NCM_NOTIFY_CONNECT,		/* issue CONNECT next */
	NCM_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
};
/* Per-function state for one NCM link. */
struct f_ncm {
	struct gether			port;		/* ethernet link glue */
	u8				ctrl_id, data_id; /* interface numbers */

	char				ethaddr[14];	/* host MAC, hex string */

	struct usb_ep			*notify;	/* interrupt endpoint */
	struct usb_request		*notify_req;
	u8				notify_state;	/* enum ncm_notify_state */
	atomic_t			notify_count;	/* notifications in flight */
	bool				is_open;	/* netdev carrier state */

	const struct ndp_parser_opts	*parser_opts;	/* NTB16 or NTB32 */
	bool				is_crc;		/* host selected CRC mode */
	u32				ndp_sign;	/* NDP signature incl. CRC bit */

	/*
	 * for notification, it is accessed from both
	 * callback and ethernet open/close
	 */
	spinlock_t			lock;

	struct net_device		*netdev;

	/* For multi-frame NDP TX */
	struct sk_buff			*skb_tx_data;
	struct sk_buff			*skb_tx_ndp;
	u16				ndp_dgram_count;
	struct hrtimer			task_timer;	/* flush partial NTBs */
};
/* Map a usb_function back to its containing f_ncm. */
static inline struct f_ncm *func_to_ncm(struct usb_function *f)
{
	return container_of(f, struct f_ncm, port.func);
}
/*-------------------------------------------------------------------------*/
/*
* We cannot group frames so use just the minimal size which ok to put
* one max-size ethernet frame.
* If the host can group frames, allow it to do that, 16K is selected,
* because it's used by default by the current linux host driver
*/
#define NTB_DEFAULT_IN_SIZE	16384
#define NTB_OUT_SIZE		16384

/* Allocation for storing the NDP, 32 should suffice for a
 * 16k packet. This allows a maximum of 32 * 507 Byte packets to
 * be transmitted in a single 16kB skb, though when sending full size
 * packets this limit will be plenty.
 * Smaller packets are not likely to be trying to maximize the
 * throughput and will be mstly sending smaller infrequent frames.
 */
#define TX_MAX_NUM_DPE		32

/* Delay for the transmit to wait before sending an unfilled NTB frame. */
#define TX_TIMEOUT_NSECS	300000

#define FORMATS_SUPPORTED	(USB_CDC_NCM_NTB16_SUPPORTED |	\
				 USB_CDC_NCM_NTB32_SUPPORTED)

/* NTB parameters reported via GET_NTB_PARAMETERS (NCM spec 6.2.1) */
static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
	.wLength = cpu_to_le16(sizeof(ntb_parameters)),
	.bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED),
	.dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE),
	.wNdpInDivisor = cpu_to_le16(4),
	.wNdpInPayloadRemainder = cpu_to_le16(0),
	.wNdpInAlignment = cpu_to_le16(4),

	.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE),
	.wNdpOutDivisor = cpu_to_le16(4),
	.wNdpOutPayloadRemainder = cpu_to_le16(0),
	.wNdpOutAlignment = cpu_to_le16(4),
};

/*
 * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
 * packet, to simplify cancellation; and a big transfer interval, to
 * waste less bandwidth.
 */
#define NCM_STATUS_INTERVAL_MS		32
#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */
/* IAD grouping the control + data interfaces into one NCM function */
static struct usb_interface_assoc_descriptor ncm_iad_desc = {
	.bLength = sizeof ncm_iad_desc,
	.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,

	/* .bFirstInterface = DYNAMIC, */
	.bInterfaceCount = 2, /* control + data */
	.bFunctionClass = USB_CLASS_COMM,
	.bFunctionSubClass = USB_CDC_SUBCLASS_NCM,
	.bFunctionProtocol = USB_CDC_PROTO_NONE,
	/* .iFunction = DYNAMIC */
};

/* interface descriptor: */
static struct usb_interface_descriptor ncm_control_intf = {
	.bLength = sizeof ncm_control_intf,
	.bDescriptorType = USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints = 1,
	.bInterfaceClass = USB_CLASS_COMM,
	.bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
	.bInterfaceProtocol = USB_CDC_PROTO_NONE,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc ncm_header_desc = {
	.bLength = sizeof ncm_header_desc,
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubType = USB_CDC_HEADER_TYPE,

	.bcdCDC = cpu_to_le16(0x0110),
};

static struct usb_cdc_union_desc ncm_union_desc = {
	.bLength = sizeof(ncm_union_desc),
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubType = USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 = DYNAMIC */
	/* .bSlaveInterface0 = DYNAMIC */
};

static struct usb_cdc_ether_desc ecm_desc = {
	.bLength = sizeof ecm_desc,
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,

	/* this descriptor actually adds value, surprise! */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
	.wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters = cpu_to_le16(0),
	.bNumberPowerFilters = 0,
};

#define NCAPS	(USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)

static struct usb_cdc_ncm_desc ncm_desc = {
	.bLength = sizeof ncm_desc,
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubType = USB_CDC_NCM_TYPE,

	.bcdNcmVersion = cpu_to_le16(0x0100),
	/* can process SetEthernetPacketFilter */
	.bmNetworkCapabilities = NCAPS,
};

/* the default data interface has no endpoints ... */

static struct usb_interface_descriptor ncm_data_nop_intf = {
	.bLength = sizeof ncm_data_nop_intf,
	.bDescriptorType = USB_DT_INTERFACE,

	.bInterfaceNumber = 1,
	.bAlternateSetting = 0,
	.bNumEndpoints = 0,
	.bInterfaceClass = USB_CLASS_CDC_DATA,
	.bInterfaceSubClass = 0,
	.bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* ... but the "real" data interface has two bulk endpoints */

static struct usb_interface_descriptor ncm_data_intf = {
	.bLength = sizeof ncm_data_intf,
	.bDescriptorType = USB_DT_INTERFACE,

	.bInterfaceNumber = 1,
	.bAlternateSetting = 1,
	.bNumEndpoints = 2,
	.bInterfaceClass = USB_CLASS_CDC_DATA,
	.bInterfaceSubClass = 0,
	.bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_ncm_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
	.bInterval = NCM_STATUS_INTERVAL_MS,
};

static struct usb_endpoint_descriptor fs_ncm_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_ncm_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *ncm_fs_function[] = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &fs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &fs_ncm_in_desc,
	(struct usb_descriptor_header *) &fs_ncm_out_desc,
	NULL,
};
/* high speed support: */

static struct usb_endpoint_descriptor hs_ncm_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
	/* HS intervals are 2^(n-1) microframes, hence the conversion */
	.bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
};

static struct usb_endpoint_descriptor hs_ncm_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_ncm_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_descriptor_header *ncm_hs_function[] = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &hs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &hs_ncm_in_desc,
	(struct usb_descriptor_header *) &hs_ncm_out_desc,
	NULL,
};
/* super speed support: */

static struct usb_endpoint_descriptor ss_ncm_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
	/* SS uses the same 2^(n-1) interval encoding as HS */
	.bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS)
};

static struct usb_ss_ep_comp_descriptor ss_ncm_notify_comp_desc = {
	.bLength = sizeof(ss_ncm_notify_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
	.wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
};

static struct usb_endpoint_descriptor ss_ncm_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor ss_ncm_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

/* companion descriptor shared by both SS bulk endpoints */
static struct usb_ss_ep_comp_descriptor ss_ncm_bulk_comp_desc = {
	.bLength = sizeof(ss_ncm_bulk_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	.bMaxBurst = 15,
	/* .bmAttributes = 0, */
};

static struct usb_descriptor_header *ncm_ss_function[] = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &ss_ncm_notify_desc,
	(struct usb_descriptor_header *) &ss_ncm_notify_comp_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &ss_ncm_in_desc,
	(struct usb_descriptor_header *) &ss_ncm_bulk_comp_desc,
	(struct usb_descriptor_header *) &ss_ncm_out_desc,
	(struct usb_descriptor_header *) &ss_ncm_bulk_comp_desc,
	NULL,
};
/* string descriptors: */

#define STRING_CTRL_IDX	0
#define STRING_MAC_IDX	1
#define STRING_DATA_IDX	2
#define STRING_IAD_IDX	3

static struct usb_string ncm_string_defs[] = {
	[STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
	[STRING_MAC_IDX].s = "",	/* filled with the host MAC at bind */
	[STRING_DATA_IDX].s = "CDC Network Data",
	[STRING_IAD_IDX].s = "CDC NCM",
	{  } /* end of list */
};

static struct usb_gadget_strings ncm_string_table = {
	.language = 0x0409,	/* en-us */
	.strings = ncm_string_defs,
};

static struct usb_gadget_strings *ncm_strings[] = {
	&ncm_string_table,
	NULL,
};
/*
* Here are options for NCM Datagram Pointer table (NDP) parser.
* There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
* in NDP16 offsets and sizes fields are 1 16bit word wide,
* in NDP32 -- 2 16bit words wide. Also signatures are different.
* To make the parser code the same, put the differences in the structure,
* and switch pointers to the structures when the format is changed.
*/
/* Format-dependent constants for walking an NTB: signatures, header
 * sizes, and field widths (in u16 units) for NTB16 vs NTB32. */
struct ndp_parser_opts {
	u32		nth_sign;	/* NTH signature */
	u32		ndp_sign;	/* NDP signature (no-CRC variant) */
	unsigned	nth_size;	/* NTH header bytes */
	unsigned	ndp_size;	/* NDP header bytes */
	unsigned	dpe_size;	/* one datagram pointer entry, bytes */
	unsigned	ndplen_align;	/* required wLength alignment */
	/* sizes in u16 units */
	unsigned	dgram_item_len; /* index or length */
	unsigned	block_length;
	unsigned	ndp_index;
	unsigned	reserved1;
	unsigned	reserved2;
	unsigned	next_ndp_index;
};
/* NTB16: 16-bit offsets/lengths (NCM spec ch. 3) */
static const struct ndp_parser_opts ndp16_opts = {
	.nth_sign = USB_CDC_NCM_NTH16_SIGN,
	.ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,
	.nth_size = sizeof(struct usb_cdc_ncm_nth16),
	.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),
	.dpe_size = sizeof(struct usb_cdc_ncm_dpe16),
	.ndplen_align = 4,
	.dgram_item_len = 1,
	.block_length = 1,
	.ndp_index = 1,
	.reserved1 = 0,
	.reserved2 = 0,
	.next_ndp_index = 1,
};

/* NTB32: 32-bit offsets/lengths (field widths counted in u16 words) */
static const struct ndp_parser_opts ndp32_opts = {
	.nth_sign = USB_CDC_NCM_NTH32_SIGN,
	.ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,
	.nth_size = sizeof(struct usb_cdc_ncm_nth32),
	.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),
	.dpe_size = sizeof(struct usb_cdc_ncm_dpe32),
	.ndplen_align = 8,
	.dgram_item_len = 2,
	.block_length = 2,
	.ndp_index = 2,
	.reserved1 = 1,
	.reserved2 = 2,
	.next_ndp_index = 2,
};
/*
 * put_ncm - store an NTB field of @size u16 words (1 == le16, 2 == le32)
 * at *@p as little-endian and advance the cursor past it.
 */
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
	if (size == 1)
		put_unaligned_le16((u16)val, *p);
	else if (size == 2)
		put_unaligned_le32((u32)val, *p);
	else
		BUG();

	*p += size;
}
/*
 * get_ncm - read an NTB field of @size u16 words (1 == le16, 2 == le32)
 * from *@p, advance the cursor past it, and return the value.
 */
static inline unsigned get_ncm(__le16 **p, unsigned size)
{
	unsigned val;

	if (size == 1)
		val = get_unaligned_le16(*p);
	else if (size == 2)
		val = get_unaligned_le32(*p);
	else
		BUG();

	*p += size;
	return val;
}
/*-------------------------------------------------------------------------*/
/* Restore protocol defaults: NTB16, no CRC, default filter and
 * fixed-size NTB lengths.  Used at init and on interface reset. */
static inline void ncm_reset_values(struct f_ncm *ncm)
{
	ncm->parser_opts = &ndp16_opts;
	ncm->is_crc = false;
	ncm->ndp_sign = ncm->parser_opts->ndp_sign;
	ncm->port.cdc_filter = DEFAULT_FILTER;

	/* doesn't make sense for ncm, fixed size used */
	ncm->port.header_len = 0;

	ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
}
/*
 * Context: ncm->lock held
 *
 * Queue the next pending notification (per ncm->notify_state) on the
 * interrupt endpoint.  At most one notification is in flight at a time
 * (tracked by notify_count); the completion handler re-enters here to
 * drain the state machine (SPEED -> CONNECT -> NONE).
 */
static void ncm_do_notify(struct f_ncm *ncm)
{
	struct usb_request		*req = ncm->notify_req;
	struct usb_cdc_notification	*event;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	__le32				*data;
	int				status;

	/* notification already in flight? */
	if (atomic_read(&ncm->notify_count))
		return;

	event = req->buf;
	switch (ncm->notify_state) {
	case NCM_NOTIFY_NONE:
		return;

	case NCM_NOTIFY_CONNECT:
		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
		if (ncm->is_open)
			event->wValue = cpu_to_le16(1);
		else
			event->wValue = cpu_to_le16(0);
		event->wLength = 0;
		req->length = sizeof *event;

		DBG(cdev, "notify connect %s\n",
				ncm->is_open ? "true" : "false");
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;

	case NCM_NOTIFY_SPEED:
		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
		event->wValue = cpu_to_le16(0);
		event->wLength = cpu_to_le16(8);
		req->length = NCM_STATUS_BYTECOUNT;

		/* SPEED_CHANGE data is up/down speeds in bits/sec */
		data = req->buf + sizeof *event;
		data[0] = cpu_to_le32(gether_bitrate(cdev->gadget));
		data[1] = data[0];

		DBG(cdev, "notify speed %u\n", gether_bitrate(cdev->gadget));
		ncm->notify_state = NCM_NOTIFY_CONNECT;
		break;
	}
	event->bmRequestType = 0xA1;
	event->wIndex = cpu_to_le16(ncm->ctrl_id);

	atomic_inc(&ncm->notify_count);

	/*
	 * In double buffering if there is a space in FIFO,
	 * completion callback can be called right after the call,
	 * so unlocking
	 */
	spin_unlock(&ncm->lock);
	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
	spin_lock(&ncm->lock);
	if (status < 0) {
		/* queueing failed: undo the in-flight accounting */
		atomic_dec(&ncm->notify_count);
		DBG(cdev, "notify --> %d\n", status);
	}
}
/*
 * Context: ncm->lock held
 *
 * Kick the notification state machine from the start (SPEED first,
 * then CONNECT via the completion handler).
 */
static void ncm_notify(struct f_ncm *ncm)
{
	/*
	 * NOTE on most versions of Linux, host side cdc-ethernet
	 * won't listen for notifications until its netdevice opens.
	 * The first notification then sits in the FIFO for a long
	 * time, and the second one is queued.
	 *
	 * If ncm_notify() is called before the second (CONNECT)
	 * notification is sent, then it will reset to send the SPEED
	 * notificaion again (and again, and again), but it's not a problem
	 */
	ncm->notify_state = NCM_NOTIFY_SPEED;
	ncm_do_notify(ncm);
}
/* Interrupt-endpoint completion: account for the finished notification
 * and advance the state machine by re-entering ncm_do_notify(). */
static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_ncm			*ncm = req->context;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	struct usb_cdc_notification	*event = req->buf;

	spin_lock(&ncm->lock);
	switch (req->status) {
	case 0:
		VDBG(cdev, "Notification %02x sent\n",
		     event->bNotificationType);
		atomic_dec(&ncm->notify_count);
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* endpoint disabled/unplugged: drop all pending state */
		atomic_set(&ncm->notify_count, 0);
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;
	default:
		DBG(cdev, "event %02x --> %d\n",
			event->bNotificationType, req->status);
		atomic_dec(&ncm->notify_count);
		break;
	}
	ncm_do_notify(ncm);
	spin_unlock(&ncm->lock);
}
/*
 * ncm_ep0out_complete - data stage of SET_NTB_INPUT_SIZE.
 *
 * Validates the 4-byte size sent by the host and, if it lies within
 * [USB_CDC_NCM_NTB_MIN_IN_SIZE, dwNtbInMaxSize], adopts it as the
 * fixed IN NTB length; otherwise the endpoint is halted.
 */
static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* now for SET_NTB_INPUT_SIZE only */
	unsigned size;
	struct usb_function *f = req->context;
	struct f_ncm *ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	req->context = NULL;
	if (req->status || req->actual != req->length) {
		DBG(cdev, "Bad control-OUT transfer\n");
		goto invalid;
	}

	size = get_unaligned_le32(req->buf);
	if (size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
	    size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
		DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", size);
		goto invalid;
	}

	ncm->port.fixed_in_len = size;
	VDBG(cdev, "Set NTB INPUT SIZE %d\n", size);
	return;

invalid:
	usb_ep_set_halt(ep);
}
/*
 * Handle NCM class-specific control requests on ep0.
 *
 * The composite core forwards only class requests here; standard
 * requests and interface activation (set_alt) are handled elsewhere.
 * Returns the length of the data stage queued on ep0, or a negative
 * value to make the UDC stall the request.
 */
static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ncm *ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/*
	 * composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/*
		 * see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		DBG(cdev, "packet filter %02x\n", w_value);
		/*
		 * REVISIT locking of cdc_filter.  This assumes the UDC
		 * driver won't have a concurrent packet TX irq running on
		 * another CPU; or that if it does, this write is atomic...
		 */
		ncm->port.cdc_filter = w_value;
		value = 0;
		break;
	/*
	 * and optionally:
	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_PARAMETERS:
		/* return (a possibly truncated copy of) ntb_parameters */
		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		value = w_length > sizeof ntb_parameters ?
			sizeof ntb_parameters : w_length;
		memcpy(req->buf, &ntb_parameters, value);
		VDBG(cdev, "Host asked NTB parameters\n");
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_INPUT_SIZE:
		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
		value = 4;
		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
		     ncm->port.fixed_in_len);
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_INPUT_SIZE:
	{
		/* the 4-byte payload is validated in ncm_ep0out_complete() */
		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		req->complete = ncm_ep0out_complete;
		req->length = w_length;
		req->context = f;

		value = req->length;
		break;
	}

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_FORMAT:
	{
		uint16_t format;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* 0x0000 = NTB-16, 0x0001 = NTB-32 */
		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
		put_unaligned_le16(format, req->buf);
		value = 2;
		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_FORMAT:
	{
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->parser_opts = &ndp16_opts;
			DBG(cdev, "NCM16 selected\n");
			break;
		case 0x0001:
			ncm->parser_opts = &ndp32_opts;
			DBG(cdev, "NCM32 selected\n");
			break;
		default:
			goto invalid;
		}
		value = 0;
		break;
	}
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_CRC_MODE:
	{
		uint16_t is_crc;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
		put_unaligned_le16(is_crc, req->buf);
		value = 2;
		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_CRC_MODE:
	{
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->is_crc = false;
			DBG(cdev, "non-CRC mode selected\n");
			break;
		case 0x0001:
			ncm->is_crc = true;
			DBG(cdev, "CRC mode selected\n");
			break;
		default:
			goto invalid;
		}
		value = 0;
		break;
	}

	/* and disabled in ncm descriptor: */
	/* case USB_CDC_GET_NET_ADDRESS: */
	/* case USB_CDC_SET_NET_ADDRESS: */
	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}
	/*
	 * Recompute the NDP signature unconditionally: it depends on both
	 * the selected NTB format and the current CRC mode.
	 */
	ncm->ndp_sign = ncm->parser_opts->ndp_sign |
		(ncm->is_crc ? NCM_NDP_HDR_CRC : 0);

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
					ctrl->bRequestType, ctrl->bRequest,
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
/*
 * SET_INTERFACE handler.
 *
 * The control interface only supports altsetting 0 (re-arm the notify
 * endpoint).  The data interface supports altsetting 0 (idle) and 1
 * (active): switching always tears down any live connection first, and
 * altsetting 1 (re)configures the bulk endpoints and connects u_ether.
 * Returns 0 on success, -EINVAL on an unknown interface/altsetting.
 */
static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ncm *ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* Control interface has only altsetting 0 */
	if (intf == ncm->ctrl_id) {
		if (alt != 0)
			goto fail;

		DBG(cdev, "reset ncm control %d\n", intf);
		usb_ep_disable(ncm->notify);

		if (!(ncm->notify->desc)) {
			DBG(cdev, "init ncm ctrl %d\n", intf);
			if (config_ep_by_speed(cdev->gadget, f, ncm->notify))
				goto fail;
		}
		usb_ep_enable(ncm->notify);

	/* Data interface has two altsettings, 0 and 1 */
	} else if (intf == ncm->data_id) {
		if (alt > 1)
			goto fail;

		if (ncm->port.in_ep->enabled) {
			/* a previous altsetting was active: disconnect it */
			DBG(cdev, "reset ncm\n");
			ncm->netdev = NULL;
			gether_disconnect(&ncm->port);
			ncm_reset_values(ncm);
		}

		/*
		 * CDC Network only sends data in non-default altsettings.
		 * Changing altsettings resets filters, statistics, etc.
		 */
		if (alt == 1) {
			struct net_device *net;

			if (!ncm->port.in_ep->desc ||
			    !ncm->port.out_ep->desc) {
				DBG(cdev, "init ncm\n");
				if (config_ep_by_speed(cdev->gadget, f,
						       ncm->port.in_ep) ||
				    config_ep_by_speed(cdev->gadget, f,
						       ncm->port.out_ep)) {
					/* clear both so retry reconfigures */
					ncm->port.in_ep->desc = NULL;
					ncm->port.out_ep->desc = NULL;
					goto fail;
				}
			}

			/* TODO */
			/* Enable zlps by default for NCM conformance;
			 * override for musb_hdrc (avoids txdma ovhead)
			 */
			ncm->port.is_zlp_ok =
				gadget_is_zlp_supported(cdev->gadget);
			ncm->port.cdc_filter = DEFAULT_FILTER;
			DBG(cdev, "activate ncm\n");
			net = gether_connect(&ncm->port);
			if (IS_ERR(net))
				return PTR_ERR(net);
			ncm->netdev = net;
		}

		/* tell the host about the (new) link state */
		spin_lock(&ncm->lock);
		ncm_notify(ncm);
		spin_unlock(&ncm->lock);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}
/*
* Because the data interface supports multiple altsettings,
* this NCM function *MUST* implement a get_alt() method.
*/
static int ncm_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_ncm *ncm = func_to_ncm(f);

	/* the control interface never leaves altsetting 0 */
	if (intf == ncm->ctrl_id)
		return 0;

	/* data interface: altsetting 1 iff the bulk IN endpoint is live */
	if (ncm->port.in_ep->enabled)
		return 1;

	return 0;
}
/*
 * Finalize the NTB under construction and hand back one skb ready for
 * transmission: the accumulated datagram skb (skb_tx_data) with the
 * patched NTH fields, followed by the NDP (skb_tx_ndp) and a zeroed
 * terminating datagram entry.  Clears the per-function build state.
 *
 * Called with the pending skbs guaranteed non-NULL by the callers in
 * ncm_wrap_ntb().
 */
static struct sk_buff *package_for_tx(struct f_ncm *ncm)
{
	__le16		*ntb_iter;
	struct sk_buff	*skb2 = NULL;
	unsigned	ndp_pad;
	unsigned	ndp_index;
	unsigned	new_len;

	const struct ndp_parser_opts *opts = ncm->parser_opts;
	const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment);
	const int dgram_idx_len = 2 * 2 * opts->dgram_item_len;

	/* Stop the timer */
	hrtimer_try_to_cancel(&ncm->task_timer);

	/* pad so the NDP starts on the required alignment boundary */
	ndp_pad = ALIGN(ncm->skb_tx_data->len, ndp_align) -
			ncm->skb_tx_data->len;
	ndp_index = ncm->skb_tx_data->len + ndp_pad;
	new_len = ndp_index + dgram_idx_len + ncm->skb_tx_ndp->len;

	/* Set the final BlockLength and wNdpIndex */
	ntb_iter = (void *) ncm->skb_tx_data->data;
	/* Increment pointer to BlockLength */
	ntb_iter += 2 + 1 + 1;
	put_ncm(&ntb_iter, opts->block_length, new_len);
	put_ncm(&ntb_iter, opts->ndp_index, ndp_index);

	/* Set the final NDP wLength */
	new_len = opts->ndp_size +
			(ncm->ndp_dgram_count * dgram_idx_len);
	ncm->ndp_dgram_count = 0;
	/* Increment from start to wLength */
	ntb_iter = (void *) ncm->skb_tx_ndp->data;
	ntb_iter += 2;
	put_unaligned_le16(new_len, ntb_iter);

	/* Merge the skbs */
	swap(skb2, ncm->skb_tx_data);
	/*
	 * NOTE(review): skb2 starts out NULL, so after the swap
	 * ncm->skb_tx_data is NULL and this branch never runs; it is
	 * purely defensive.
	 */
	if (ncm->skb_tx_data) {
		dev_consume_skb_any(ncm->skb_tx_data);
		ncm->skb_tx_data = NULL;
	}

	/* Insert NDP alignment. */
	skb_put_zero(skb2, ndp_pad);

	/* Copy NTB across. */
	skb_put_data(skb2, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
	dev_consume_skb_any(ncm->skb_tx_ndp);
	ncm->skb_tx_ndp = NULL;

	/* Insert zero'd datagram. */
	skb_put_zero(skb2, dgram_idx_len);

	return skb2;
}
/*
 * u_ether ->wrap() hook: aggregate outgoing Ethernet frames into NTBs.
 *
 * With a non-NULL @skb the datagram is appended (with optional CRC) to
 * the NTB under construction; a finished NTB is returned when adding
 * the frame would overflow it, otherwise NULL while accumulation
 * continues.  A NULL @skb is the flush signal from ncm_tx_timeout() and
 * returns whatever NTB is pending.  Called from eth_start_xmit(), so
 * only GFP_ATOMIC allocations are allowed here.
 */
static struct sk_buff *ncm_wrap_ntb(struct gether *port,
				    struct sk_buff *skb)
{
	struct f_ncm	*ncm = func_to_ncm(&port->func);
	struct sk_buff	*skb2 = NULL;

	if (skb) {
		int		ncb_len = 0;
		__le16		*ntb_data;
		__le16		*ntb_ndp;
		int		dgram_pad;

		unsigned	max_size = ncm->port.fixed_in_len;
		const struct ndp_parser_opts *opts = ncm->parser_opts;
		const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment);
		const int div = le16_to_cpu(ntb_parameters.wNdpInDivisor);
		const int rem = le16_to_cpu(ntb_parameters.wNdpInPayloadRemainder);
		const int dgram_idx_len = 2 * 2 * opts->dgram_item_len;

		/* Add the CRC if required up front */
		if (ncm->is_crc) {
			uint32_t	crc;
			__le16		*crc_pos;

			crc = ~crc32_le(~0,
					skb->data,
					skb->len);
			crc_pos = skb_put(skb, sizeof(uint32_t));
			put_unaligned_le32(crc, crc_pos);
		}

		/* If the new skb is too big for the current NCM NTB then
		 * set the current stored skb to be sent now and clear it
		 * ready for new data.
		 * NOTE: Assume maximum align for speed of calculation.
		 */
		if (ncm->skb_tx_data
		    && (ncm->ndp_dgram_count >= TX_MAX_NUM_DPE
		    || (ncm->skb_tx_data->len +
		    div + rem + skb->len +
		    ncm->skb_tx_ndp->len + ndp_align + (2 * dgram_idx_len))
		    > max_size)) {
			skb2 = package_for_tx(ncm);
			if (!skb2)
				goto err;
		}

		if (!ncm->skb_tx_data) {
			/* start a fresh NTB: NTH + padded datagram area */
			ncb_len = opts->nth_size;
			dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len;
			ncb_len += dgram_pad;

			/* Create a new skb for the NTH and datagrams. */
			ncm->skb_tx_data = alloc_skb(max_size, GFP_ATOMIC);
			if (!ncm->skb_tx_data)
				goto err;

			ncm->skb_tx_data->dev = ncm->netdev;
			ntb_data = skb_put_zero(ncm->skb_tx_data, ncb_len);
			/* dwSignature */
			put_unaligned_le32(opts->nth_sign, ntb_data);
			ntb_data += 2;
			/* wHeaderLength */
			put_unaligned_le16(opts->nth_size, ntb_data++);

			/* Allocate an skb for storing the NDP,
			 * TX_MAX_NUM_DPE should easily suffice for a
			 * 16k packet.
			 */
			ncm->skb_tx_ndp = alloc_skb((int)(opts->ndp_size
						    + opts->dpe_size
						    * TX_MAX_NUM_DPE),
						    GFP_ATOMIC);
			if (!ncm->skb_tx_ndp)
				goto err;

			ncm->skb_tx_ndp->dev = ncm->netdev;
			ntb_ndp = skb_put(ncm->skb_tx_ndp, opts->ndp_size);
			/*
			 * NOTE(review): zeroes ncb_len bytes although
			 * opts->ndp_size bytes were reserved above; for
			 * NTB-32 that leaves part of the NDP header
			 * uninitialized until individual fields are
			 * written -- confirm against the NCM spec.
			 */
			memset(ntb_ndp, 0, ncb_len);
			/* dwSignature */
			put_unaligned_le32(ncm->ndp_sign, ntb_ndp);
			ntb_ndp += 2;

			/* There is always a zeroed entry */
			ncm->ndp_dgram_count = 1;

			/* Note: we skip opts->next_ndp_index */

			/* Start the timer. */
			hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
				      HRTIMER_MODE_REL_SOFT);
		}

		/* Add the datagram position entries */
		ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);

		ncb_len = ncm->skb_tx_data->len;
		dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len;
		ncb_len += dgram_pad;

		/* (d)wDatagramIndex */
		put_ncm(&ntb_ndp, opts->dgram_item_len, ncb_len);
		/* (d)wDatagramLength */
		put_ncm(&ntb_ndp, opts->dgram_item_len, skb->len);
		ncm->ndp_dgram_count++;

		/* Add the new data to the skb */
		skb_put_zero(ncm->skb_tx_data, dgram_pad);
		skb_put_data(ncm->skb_tx_data, skb->data, skb->len);
		dev_consume_skb_any(skb);
		skb = NULL;

	} else if (ncm->skb_tx_data) {
		/* If we get here ncm_wrap_ntb() was called with NULL skb,
		 * because eth_start_xmit() was called with NULL skb by
		 * ncm_tx_timeout() - hence, this is our signal to flush/send.
		 */
		skb2 = package_for_tx(ncm);
		if (!skb2)
			goto err;
	}

	return skb2;

err:
	ncm->netdev->stats.tx_dropped++;

	if (skb)
		dev_kfree_skb_any(skb);
	/*
	 * Clear the stored pointers after freeing: they persist across
	 * calls, so leaving them set would make the next invocation
	 * operate on freed skbs (use-after-free / double free).
	 */
	if (ncm->skb_tx_data) {
		dev_kfree_skb_any(ncm->skb_tx_data);
		ncm->skb_tx_data = NULL;
	}
	if (ncm->skb_tx_ndp) {
		dev_kfree_skb_any(ncm->skb_tx_ndp);
		ncm->skb_tx_ndp = NULL;
	}

	return NULL;
}
/*
* The transmit should only be run if no skb data has been sent
* for a certain duration.
*/
static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
	struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
	struct net_device *netdev = READ_ONCE(ncm->netdev);

	if (!netdev)
		return HRTIMER_NORESTART;

	/* XXX This allowance of a NULL skb argument to ndo_start_xmit
	 * XXX is not sane.  The gadget layer should be redesigned so
	 * XXX that the dev->wrap() invocations to build SKBs is transparent
	 * XXX and performed in some way outside of the ndo_start_xmit
	 * XXX interface.
	 *
	 * This will call directly into u_ether's eth_start_xmit(),
	 * which interprets the NULL skb as "flush the pending NTB now".
	 */
	netdev->netdev_ops->ndo_start_xmit(NULL, netdev);

	return HRTIMER_NORESTART;
}
/*
 * u_ether ->unwrap() hook: parse one received NTB from the host.
 *
 * Walks the chain of NDPs inside the block, validates every header
 * field and datagram index/length against the block length before use
 * (all offsets come from untrusted host data), optionally checks the
 * per-datagram CRC, and copies each Ethernet frame into its own skb on
 * @list.  Consumes @skb on success (returns 0); on any parse error the
 * whole NTB and any frames already queued on @list are dropped and
 * -EINVAL is returned.
 */
static int ncm_unwrap_ntb(struct gether *port,
			  struct sk_buff *skb,
			  struct sk_buff_head *list)
{
	struct f_ncm	*ncm = func_to_ncm(&port->func);
	__le16		*tmp = (void *) skb->data;
	unsigned	index, index2;
	int		ndp_index;
	unsigned	dg_len, dg_len2;
	unsigned	ndp_len;
	unsigned	block_len;
	struct sk_buff	*skb2;
	int		ret = -EINVAL;
	unsigned	ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	unsigned	frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
	const struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
	int		dgram_counter;

	/* dwSignature */
	if (get_unaligned_le32(tmp) != opts->nth_sign) {
		INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
			skb->len);
		print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
			       skb->data, 32, false);

		goto err;
	}
	tmp += 2;
	/* wHeaderLength */
	if (get_unaligned_le16(tmp++) != opts->nth_size) {
		INFO(port->func.config->cdev, "Wrong NTB headersize\n");
		goto err;
	}
	tmp++; /* skip wSequence */

	block_len = get_ncm(&tmp, opts->block_length);
	/* (d)wBlockLength */
	if (block_len > ntb_max) {
		INFO(port->func.config->cdev, "OUT size exceeded\n");
		goto err;
	}

	ndp_index = get_ncm(&tmp, opts->ndp_index);

	/* Run through all the NDP's in the NTB */
	do {
		/*
		 * NCM 3.2
		 * dwNdpIndex
		 *
		 * Must be 4-byte aligned and leave room for a full NDP
		 * header before the end of the block.
		 */
		if (((ndp_index % 4) != 0) ||
				(ndp_index < opts->nth_size) ||
				(ndp_index > (block_len -
					      opts->ndp_size))) {
			INFO(port->func.config->cdev, "Bad index: %#X\n",
			     ndp_index);
			goto err;
		}

		/*
		 * walk through NDP
		 * dwSignature
		 */
		tmp = (void *)(skb->data + ndp_index);
		if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
			INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
			goto err;
		}
		tmp += 2;

		ndp_len = get_unaligned_le16(tmp++);
		/*
		 * NCM 3.3.1
		 * wLength
		 * entry is 2 items
		 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
		 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
		 * Each entry is a dgram index and a dgram length.
		 */
		if ((ndp_len < opts->ndp_size
				+ 2 * 2 * (opts->dgram_item_len * 2)) ||
				(ndp_len % opts->ndplen_align != 0)) {
			INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
			     ndp_len);
			goto err;
		}
		tmp += opts->reserved1;
		/* Check for another NDP (d)wNextNdpIndex */
		ndp_index = get_ncm(&tmp, opts->next_ndp_index);
		tmp += opts->reserved2;

		ndp_len -= opts->ndp_size;
		index2 = get_ncm(&tmp, opts->dgram_item_len);
		dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
		dgram_counter = 0;

		do {
			index = index2;
			/* wDatagramIndex[0] */
			if ((index < opts->nth_size) ||
					(index > block_len - opts->dpe_size)) {
				INFO(port->func.config->cdev,
				     "Bad index: %#X\n", index);
				goto err;
			}

			dg_len = dg_len2;
			/*
			 * wDatagramLength[0]
			 * ethernet hdr + crc or larger than max frame size
			 */
			if ((dg_len < 14 + crc_len) ||
					(dg_len > frame_max)) {
				INFO(port->func.config->cdev,
				     "Bad dgram length: %#X\n", dg_len);
				goto err;
			}
			if (ncm->is_crc) {
				uint32_t crc, crc2;

				crc = get_unaligned_le32(skb->data +
							 index + dg_len -
							 crc_len);
				crc2 = ~crc32_le(~0,
						 skb->data + index,
						 dg_len - crc_len);
				if (crc != crc2) {
					INFO(port->func.config->cdev,
					     "Bad CRC\n");
					goto err;
				}
			}

			index2 = get_ncm(&tmp, opts->dgram_item_len);
			dg_len2 = get_ncm(&tmp, opts->dgram_item_len);

			/* wDatagramIndex[1] */
			if (index2 > block_len - opts->dpe_size) {
				INFO(port->func.config->cdev,
				     "Bad index: %#X\n", index2);
				goto err;
			}

			/*
			 * Copy the data into a new skb.
			 * This ensures the truesize is correct
			 */
			skb2 = netdev_alloc_skb_ip_align(ncm->netdev,
							 dg_len - crc_len);
			if (skb2 == NULL)
				goto err;
			skb_put_data(skb2, skb->data + index,
				     dg_len - crc_len);

			skb_queue_tail(list, skb2);

			ndp_len -= 2 * (opts->dgram_item_len * 2);

			dgram_counter++;

			/* a zero entry terminates the datagram list */
			if (index2 == 0 || dg_len2 == 0)
				break;
		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
	} while (ndp_index);	/* next_ndp_index == 0 ends the NDP chain */

	dev_consume_skb_any(skb);

	VDBG(port->func.config->cdev,
	     "Parsed NTB with %d frames\n", dgram_counter);
	return 0;
err:
	skb_queue_purge(list);
	dev_kfree_skb_any(skb);
	return ret;
}
/*
 * Deactivate the function (SET_CONFIGURATION 0, disconnect, etc.):
 * tear down the u_ether link if the data altsetting was active and
 * disable the notify endpoint, clearing its descriptor so set_alt()
 * reconfigures it by speed next time.
 */
static void ncm_disable(struct usb_function *f)
{
	struct f_ncm	*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "ncm deactivated\n");

	if (ncm->port.in_ep->enabled) {
		ncm->netdev = NULL;
		gether_disconnect(&ncm->port);
	}

	if (ncm->notify->enabled) {
		usb_ep_disable(ncm->notify);
		ncm->notify->desc = NULL;
	}
}
/*-------------------------------------------------------------------------*/
/*
* Callbacks let us notify the host about connect/disconnect when the
* net device is opened or closed.
*
* For testing, note that link states on this side include both opened
* and closed variants of:
*
* - disconnected/unconfigured
* - configured but inactive (data alt 0)
* - configured and active (data alt 1)
*
* Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
* SET_INTERFACE (altsetting). Remember also that "configured" doesn't
* imply the host is actually polling the notification endpoint, and
* likewise that "active" doesn't imply it's actually using the data
* endpoints for traffic.
*/
/*
 * u_ether link-up callback (netdevice opened): record the open state
 * and kick the notify state machine so the host learns the link came up.
 */
static void ncm_open(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = true;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}
/*
 * u_ether link-down callback (netdevice closed): record the closed
 * state and notify the host of the link loss.
 */
static void ncm_close(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = false;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}
/*-------------------------------------------------------------------------*/
/* ethernet function driver setup/binding */
/*
 * Bind the NCM function into a configuration: register the backing
 * netdev (once per instance), attach strings, allocate the two
 * interface IDs, autoconfigure the three endpoints, allocate the
 * notification request/buffer, patch the HS/SS descriptors to the FS
 * endpoint addresses, and set up the TX-aggregation hrtimer.
 * On failure everything allocated here is released before returning.
 */
static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_string	*us;
	int			status;
	struct usb_ep		*ep;
	struct f_ncm_opts	*ncm_opts;

	if (!can_support_ecm(cdev->gadget))
		return -EINVAL;

	ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);

	if (cdev->use_os_string) {
		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
					   GFP_KERNEL);
		if (!f->os_desc_table)
			return -ENOMEM;
		f->os_desc_n = 1;
		f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
	}

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to ncm_opts->bound access
	 */
	if (!ncm_opts->bound) {
		mutex_lock(&ncm_opts->lock);
		gether_set_gadget(ncm_opts->net, cdev->gadget);
		status = gether_register_netdev(ncm_opts->net);
		mutex_unlock(&ncm_opts->lock);
		if (status)
			goto fail;
		ncm_opts->bound = true;
	}

	us = usb_gstrings_attach(cdev, ncm_strings,
				 ARRAY_SIZE(ncm_string_defs));
	if (IS_ERR(us)) {
		status = PTR_ERR(us);
		goto fail;
	}
	/* patch assigned string IDs into the shared descriptors */
	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
	ecm_desc.iMACAddress = us[STRING_MAC_IDX].id;
	ncm_iad_desc.iFunction = us[STRING_IAD_IDX].id;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->ctrl_id = status;
	ncm_iad_desc.bFirstInterface = status;

	ncm_control_intf.bInterfaceNumber = status;
	ncm_union_desc.bMasterInterface0 = status;

	if (cdev->use_os_string)
		f->os_desc_table[0].if_id =
			ncm_iad_desc.bFirstInterface;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->data_id = status;

	ncm_data_nop_intf.bInterfaceNumber = status;
	ncm_data_intf.bInterfaceNumber = status;
	ncm_union_desc.bSlaveInterface0 = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
	if (!ep)
		goto fail;
	ncm->port.in_ep = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
	if (!ep)
		goto fail;
	ncm->port.out_ep = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
	if (!ep)
		goto fail;
	ncm->notify = ep;

	status = -ENOMEM;

	/* allocate notification request and buffer */
	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!ncm->notify_req)
		goto fail;
	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
	if (!ncm->notify_req->buf)
		goto fail;
	ncm->notify_req->context = ncm;
	ncm->notify_req->complete = ncm_notify_complete;

	/*
	 * support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	hs_ncm_in_desc.bEndpointAddress = fs_ncm_in_desc.bEndpointAddress;
	hs_ncm_out_desc.bEndpointAddress = fs_ncm_out_desc.bEndpointAddress;
	hs_ncm_notify_desc.bEndpointAddress =
		fs_ncm_notify_desc.bEndpointAddress;

	ss_ncm_in_desc.bEndpointAddress = fs_ncm_in_desc.bEndpointAddress;
	ss_ncm_out_desc.bEndpointAddress = fs_ncm_out_desc.bEndpointAddress;
	ss_ncm_notify_desc.bEndpointAddress =
		fs_ncm_notify_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
			ncm_ss_function, ncm_ss_function);
	if (status)
		goto fail;

	/*
	 * NOTE: all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	ncm->port.open = ncm_open;
	ncm->port.close = ncm_close;

	hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	ncm->task_timer.function = ncm_tx_timeout;

	DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
	    ncm->port.in_ep->name, ncm->port.out_ep->name,
	    ncm->notify->name);
	return 0;

fail:
	kfree(f->os_desc_table);
	f->os_desc_n = 0;

	if (ncm->notify_req) {
		kfree(ncm->notify_req->buf);
		usb_ep_free_request(ncm->notify, ncm->notify_req);
	}

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}
/* Map a configfs item back to its enclosing f_ncm_opts instance. */
static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_ncm_opts,
			    func_inst.group);
}
/* configfs boilerplate generated by u_ether's helper macros */

/* f_ncm_item_ops */
USB_ETHERNET_CONFIGFS_ITEM(ncm);

/* f_ncm_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);

/* f_ncm_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);

/* f_ncm_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);

/* f_ncm_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);

/* attributes exported under the function's configfs directory */
static struct configfs_attribute *ncm_attrs[] = {
	&ncm_opts_attr_dev_addr,
	&ncm_opts_attr_host_addr,
	&ncm_opts_attr_qmult,
	&ncm_opts_attr_ifname,
	NULL,
};

static const struct config_item_type ncm_func_type = {
	.ct_item_ops	= &ncm_item_ops,
	.ct_attrs	= ncm_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * Release a function instance.  The netdev teardown path depends on
 * whether ncm_bind() ever registered it (opts->bound).
 */
static void ncm_free_inst(struct usb_function_instance *f)
{
	struct f_ncm_opts *opts;

	opts = container_of(f, struct f_ncm_opts, func_inst);
	if (opts->bound)
		gether_cleanup(netdev_priv(opts->net));
	else
		free_netdev(opts->net);
	kfree(opts->ncm_interf_group);
	kfree(opts);
}
/*
 * Allocate a function instance: options struct, default u_ether
 * netdev, configfs group, and the OS-descriptor interface directory.
 * Returns ERR_PTR on failure with everything already allocated undone.
 */
static struct usb_function_instance *ncm_alloc_inst(void)
{
	struct f_ncm_opts *opts;
	struct usb_os_desc *descs[1];
	char *names[1];
	struct config_group *ncm_interf_group;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;

	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = ncm_free_inst;
	opts->net = gether_setup_default();
	if (IS_ERR(opts->net)) {
		struct net_device *net = opts->net;
		kfree(opts);
		return ERR_CAST(net);
	}
	INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);

	descs[0] = &opts->ncm_os_desc;
	names[0] = "ncm";

	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
	ncm_interf_group =
		usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
					       names, THIS_MODULE);
	if (IS_ERR(ncm_interf_group)) {
		/* frees opts->net and opts itself */
		ncm_free_inst(&opts->func_inst);
		return ERR_CAST(ncm_interf_group);
	}
	opts->ncm_interf_group = ncm_interf_group;

	return &opts->func_inst;
}
/* Release one bound function and drop its reference on the options. */
static void ncm_free(struct usb_function *f)
{
	struct f_ncm *ncm = func_to_ncm(f);
	struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
					       func_inst);

	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);

	kfree(ncm);
}
/*
 * Undo ncm_bind(): stop the TX timer, free OS-descriptor and USB
 * descriptor allocations, dequeue any in-flight notification, and free
 * the notify request and its buffer.
 */
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_ncm *ncm = func_to_ncm(f);

	DBG(c->cdev, "ncm unbind\n");

	hrtimer_cancel(&ncm->task_timer);

	kfree(f->os_desc_table);
	f->os_desc_n = 0;

	ncm_string_defs[0].id = 0;
	usb_free_all_descriptors(f);

	/* a notification may still be queued; unlink it before freeing */
	if (atomic_read(&ncm->notify_count)) {
		usb_ep_dequeue(ncm->notify, ncm->notify_req);
		atomic_set(&ncm->notify_count, 0);
	}

	kfree(ncm->notify_req->buf);
	usb_ep_free_request(ncm->notify, ncm->notify_req);
}
/*
 * Allocate one f_ncm function bound to the given instance: take a
 * refcount on the options, export the host MAC as a CDC string, and
 * wire up all the usb_function callbacks.
 */
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
{
	struct f_ncm		*ncm;
	struct f_ncm_opts	*opts;
	int status;

	/* allocate and initialize one new instance */
	ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
	if (!ncm)
		return ERR_PTR(-ENOMEM);

	opts = container_of(fi, struct f_ncm_opts, func_inst);
	mutex_lock(&opts->lock);
	opts->refcnt++;

	/* export host's Ethernet address in CDC format */
	status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
				      sizeof(ncm->ethaddr));
	if (status < 12) { /* strlen("01234567890a") */
		kfree(ncm);
		mutex_unlock(&opts->lock);
		return ERR_PTR(-EINVAL);
	}
	ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;

	spin_lock_init(&ncm->lock);
	ncm_reset_values(ncm);
	ncm->port.ioport = netdev_priv(opts->net);
	mutex_unlock(&opts->lock);
	ncm->port.is_fixed = true;
	ncm->port.supports_multi_frame = true;

	ncm->port.func.name = "cdc_network";
	/* descriptors are per-instance copies */
	ncm->port.func.bind = ncm_bind;
	ncm->port.func.unbind = ncm_unbind;
	ncm->port.func.set_alt = ncm_set_alt;
	ncm->port.func.get_alt = ncm_get_alt;
	ncm->port.func.setup = ncm_setup;
	ncm->port.func.disable = ncm_disable;
	ncm->port.func.free_func = ncm_free;

	ncm->port.wrap = ncm_wrap_ntb;
	ncm->port.unwrap = ncm_unwrap_ntb;

	return &ncm->port.func;
}
DECLARE_USB_FUNCTION_INIT(ncm, ncm_alloc_inst, ncm_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yauheni Kaliuta");
| linux-master | drivers/usb/gadget/function/f_ncm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_obex.c -- USB CDC OBEX function driver
*
* Copyright (C) 2008 Nokia Corporation
* Contact: Felipe Balbi <[email protected]>
*
* Based on f_acm.c by Al Borchers and David Brownell.
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include "u_serial.h"
/*
* This CDC OBEX function support just packages a TTY-ish byte stream.
* A user mode server will put it into "raw" mode and handle all the
* relevant protocol details ... this is just a kernel passthrough.
* When possible, we prevent gadget enumeration until that server is
* ready to handle the commands.
*/
/* Per-function state for one OBEX port. */
struct f_obex {
	struct gserial		port;		/* TTY-over-USB glue (u_serial) */
	u8			ctrl_id;	/* control interface number */
	u8			data_id;	/* data interface number */
	u8			cur_alt;	/* last applied altsetting */
	u8			port_num;	/* ttyGS port index */
};
/* Map a usb_function back to its enclosing f_obex. */
static inline struct f_obex *func_to_obex(struct usb_function *f)
{
	return container_of(f, struct f_obex, port.func);
}
/* Map a gserial port back to its enclosing f_obex. */
static inline struct f_obex *port_to_obex(struct gserial *p)
{
	return container_of(p, struct f_obex, port);
}
/*-------------------------------------------------------------------------*/
/* indices into obex_string_defs[] */
#define OBEX_CTRL_IDX	0
#define OBEX_DATA_IDX	1

static struct usb_string obex_string_defs[] = {
	[OBEX_CTRL_IDX].s	= "CDC Object Exchange (OBEX)",
	[OBEX_DATA_IDX].s	= "CDC OBEX Data",
	{  },	/* end of list */
};

static struct usb_gadget_strings obex_string_table = {
	.language		= 0x0409,	/* en-US */
	.strings		= obex_string_defs,
};

static struct usb_gadget_strings *obex_strings[] = {
	&obex_string_table,
	NULL,
};
/*-------------------------------------------------------------------------*/
/* interface/endpoint descriptor templates; interface numbers and
 * endpoint addresses are patched at bind time */

static struct usb_interface_descriptor obex_control_intf = {
	.bLength		= sizeof(obex_control_intf),
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,

	.bAlternateSetting	= 0,
	.bNumEndpoints		= 0,
	.bInterfaceClass	= USB_CLASS_COMM,
	.bInterfaceSubClass	= USB_CDC_SUBCLASS_OBEX,
};

/* data interface, altsetting 0: no endpoints (inactive) */
static struct usb_interface_descriptor obex_data_nop_intf = {
	.bLength		= sizeof(obex_data_nop_intf),
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 1,

	.bAlternateSetting	= 0,
	.bNumEndpoints		= 0,
	.bInterfaceClass	= USB_CLASS_CDC_DATA,
};

/* data interface, altsetting 1: two bulk endpoints (active) */
static struct usb_interface_descriptor obex_data_intf = {
	.bLength		= sizeof(obex_data_intf),
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 2,

	.bAlternateSetting	= 1,
	.bNumEndpoints		= 2,
	.bInterfaceClass	= USB_CLASS_CDC_DATA,
};

static struct usb_cdc_header_desc obex_cdc_header_desc = {
	.bLength		= sizeof(obex_cdc_header_desc),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= USB_CDC_HEADER_TYPE,
	.bcdCDC			= cpu_to_le16(0x0120),
};

static struct usb_cdc_union_desc obex_cdc_union_desc = {
	.bLength		= sizeof(obex_cdc_union_desc),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= USB_CDC_UNION_TYPE,
	.bMasterInterface0	= 1,
	.bSlaveInterface0	= 2,
};

static struct usb_cdc_obex_desc obex_desc = {
	.bLength		= sizeof(obex_desc),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= USB_CDC_OBEX_TYPE,
	.bcdVersion		= cpu_to_le16(0x0100),
};

/* High-Speed Support */

static struct usb_endpoint_descriptor obex_hs_ep_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,

	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(512),
};

static struct usb_endpoint_descriptor obex_hs_ep_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,

	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(512),
};

static struct usb_descriptor_header *hs_function[] = {
	(struct usb_descriptor_header *) &obex_control_intf,
	(struct usb_descriptor_header *) &obex_cdc_header_desc,
	(struct usb_descriptor_header *) &obex_desc,
	(struct usb_descriptor_header *) &obex_cdc_union_desc,

	(struct usb_descriptor_header *) &obex_data_nop_intf,
	(struct usb_descriptor_header *) &obex_data_intf,
	(struct usb_descriptor_header *) &obex_hs_ep_in_desc,
	(struct usb_descriptor_header *) &obex_hs_ep_out_desc,
	NULL,
};

/* Full-Speed Support */

static struct usb_endpoint_descriptor obex_fs_ep_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,

	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor obex_fs_ep_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,

	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_function[] = {
	(struct usb_descriptor_header *) &obex_control_intf,
	(struct usb_descriptor_header *) &obex_cdc_header_desc,
	(struct usb_descriptor_header *) &obex_desc,
	(struct usb_descriptor_header *) &obex_cdc_union_desc,

	(struct usb_descriptor_header *) &obex_data_nop_intf,
	(struct usb_descriptor_header *) &obex_data_intf,
	(struct usb_descriptor_header *) &obex_fs_ep_in_desc,
	(struct usb_descriptor_header *) &obex_fs_ep_out_desc,
	NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * SET_INTERFACE handler.  The control interface supports only
 * altsetting 0 (a no-op); the data interface supports 0 (idle) and 1
 * (connect the TTY port).  Switching always disconnects any live
 * connection and (re)configures the endpoints by speed on first use.
 */
static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_obex		*obex = func_to_obex(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	if (intf == obex->ctrl_id) {
		if (alt != 0)
			goto fail;
		/* NOP */
		dev_dbg(&cdev->gadget->dev,
			"reset obex ttyGS%d control\n", obex->port_num);

	} else if (intf == obex->data_id) {
		if (alt > 1)
			goto fail;

		if (obex->port.in->enabled) {
			dev_dbg(&cdev->gadget->dev,
				"reset obex ttyGS%d\n", obex->port_num);
			gserial_disconnect(&obex->port);
		}

		if (!obex->port.in->desc || !obex->port.out->desc) {
			dev_dbg(&cdev->gadget->dev,
				"init obex ttyGS%d\n", obex->port_num);
			if (config_ep_by_speed(cdev->gadget, f,
					       obex->port.in) ||
			    config_ep_by_speed(cdev->gadget, f,
					       obex->port.out)) {
				/* clear both so a retry reconfigures */
				obex->port.out->desc = NULL;
				obex->port.in->desc = NULL;
				goto fail;
			}
		}

		if (alt == 1) {
			dev_dbg(&cdev->gadget->dev,
				"activate obex ttyGS%d\n", obex->port_num);
			gserial_connect(&obex->port, obex->port_num);
		}

	} else
		goto fail;

	/* remember for obex_get_alt() */
	obex->cur_alt = alt;

	return 0;

fail:
	return -EINVAL;
}
static int obex_get_alt(struct usb_function *f, unsigned intf)
{
struct f_obex *obex = func_to_obex(f);
return obex->cur_alt;
}
/* Deactivate the function: drop the TTY connection (and its endpoints). */
static void obex_disable(struct usb_function *f)
{
	struct f_obex	*obex = func_to_obex(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	dev_dbg(&cdev->gadget->dev, "obex ttyGS%d disable\n", obex->port_num);
	gserial_disconnect(&obex->port);
}
/*-------------------------------------------------------------------------*/
/* gserial notification hook: activate the USB function, logging any
 * failure reported by the composite core.
 */
static void obex_connect(struct gserial *g)
{
	struct f_obex *obex = port_to_obex(g);
	int ret = usb_function_activate(&g->func);

	if (ret)
		dev_dbg(&g->func.config->cdev->gadget->dev,
			"obex ttyGS%d function activate --> %d\n",
			obex->port_num, ret);
}
/* gserial notification hook: deactivate the USB function, logging any
 * failure reported by the composite core.
 */
static void obex_disconnect(struct gserial *g)
{
	struct f_obex *obex = port_to_obex(g);
	int ret = usb_function_deactivate(&g->func);

	if (ret)
		dev_dbg(&g->func.config->cdev->gadget->dev,
			"obex ttyGS%d function deactivate --> %d\n",
			obex->port_num, ret);
}
/*-------------------------------------------------------------------------*/
/* Some controllers can't support CDC OBEX ... */
static inline bool can_support_obex(struct usb_configuration *c)
{
	/*
	 * Since the first interface is a NOP, multi-interface support
	 * is a non-issue on most controllers; altsetting support,
	 * however, is mandatory.  Everything else is *probably* fine.
	 */
	return gadget_is_altset_supported(c->cdev->gadget);
}
/*
 * Bind the OBEX function into a configuration: attach the string
 * table, allocate the two interface IDs (patching the CDC union
 * descriptor to match), autoconfigure the bulk endpoint pair, and
 * build the full-/high-speed descriptor sets.
 *
 * Returns 0 on success; -EINVAL if the UDC lacks altsetting support,
 * -ENODEV if endpoints can't be allocated, or another negative errno.
 */
static int obex_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_obex		*obex = func_to_obex(f);
	struct usb_string	*us;
	int			status;
	struct usb_ep		*ep;

	if (!can_support_obex(c))
		return -EINVAL;

	us = usb_gstrings_attach(cdev, obex_strings,
				 ARRAY_SIZE(obex_string_defs));
	if (IS_ERR(us))
		return PTR_ERR(us);
	obex_control_intf.iInterface = us[OBEX_CTRL_IDX].id;
	obex_data_nop_intf.iInterface = us[OBEX_DATA_IDX].id;
	obex_data_intf.iInterface = us[OBEX_DATA_IDX].id;

	/* allocate instance-specific interface IDs, and patch descriptors */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	obex->ctrl_id = status;
	obex_control_intf.bInterfaceNumber = status;
	obex_cdc_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	obex->data_id = status;
	obex_data_nop_intf.bInterfaceNumber = status;
	obex_data_intf.bInterfaceNumber = status;
	obex_cdc_union_desc.bSlaveInterface0 = status;

	/* allocate instance-specific endpoints */
	status = -ENODEV;
	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_in_desc);
	if (!ep)
		goto fail;
	obex->port.in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_out_desc);
	if (!ep)
		goto fail;
	obex->port.out = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	obex_hs_ep_in_desc.bEndpointAddress =
		obex_fs_ep_in_desc.bEndpointAddress;
	obex_hs_ep_out_desc.bEndpointAddress =
		obex_fs_ep_out_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, fs_function, hs_function, NULL,
					NULL);
	if (status)
		goto fail;

	dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: IN/%s OUT/%s\n",
		obex->port_num,
		obex->port.in->name, obex->port.out->name);

	return 0;

fail:
	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);

	return status;
}
/* Map a configfs item back to its enclosing f_serial_opts instance. */
static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_serial_opts,
			    func_inst.group);
}

/* configfs item release: drop the reference on the function instance. */
static void obex_attr_release(struct config_item *item)
{
	struct f_serial_opts *opts = to_f_serial_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations obex_item_ops = {
	.release	= obex_attr_release,
};

/* Read-only configfs attribute exposing the allocated ttyGS number. */
static ssize_t f_obex_port_num_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
}

CONFIGFS_ATTR_RO(f_obex_, port_num);

/* note: historical name -- these are the f_obex configfs attributes */
static struct configfs_attribute *acm_attrs[] = {
	&f_obex_attr_port_num,
	NULL,
};

static const struct config_item_type obex_func_type = {
	.ct_item_ops	= &obex_item_ops,
	.ct_attrs	= acm_attrs,
	.ct_owner	= THIS_MODULE,
};
/* Release a function instance: return the TTY line, then free opts. */
static void obex_free_inst(struct usb_function_instance *f)
{
	struct f_serial_opts *opts = container_of(f, struct f_serial_opts,
						  func_inst);

	gserial_free_line(opts->port_num);
	kfree(opts);
}
/*
 * Allocate one function instance and claim a ttyGS line for it.
 * Returns the instance or an ERR_PTR on allocation/line failure.
 */
static struct usb_function_instance *obex_alloc_inst(void)
{
	struct f_serial_opts *opts;
	int status;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.free_func_inst = obex_free_inst;

	status = gserial_alloc_line_no_console(&opts->port_num);
	if (status) {
		kfree(opts);
		return ERR_PTR(status);
	}

	config_group_init_type_name(&opts->func_inst.group, "",
				    &obex_func_type);

	return &opts->func_inst;
}
/* free_func: release the per-function state allocated in obex_alloc(). */
static void obex_free(struct usb_function *f)
{
	struct f_obex *obex;

	obex = func_to_obex(f);
	kfree(obex);
}

/* Undo obex_bind(): release the copied descriptor sets. */
static void obex_unbind(struct usb_configuration *c, struct usb_function *f)
{
	usb_free_all_descriptors(f);
}
/*
 * Allocate one f_obex and wire up its gserial callbacks and
 * usb_function operations.  The function starts deactivated
 * (bind_deactivated) until the TTY side connects.
 */
static struct usb_function *obex_alloc(struct usb_function_instance *fi)
{
	struct f_serial_opts *opts = container_of(fi, struct f_serial_opts,
						  func_inst);
	struct f_obex *obex;

	obex = kzalloc(sizeof(*obex), GFP_KERNEL);
	if (!obex)
		return ERR_PTR(-ENOMEM);

	obex->port_num = opts->port_num;

	obex->port.connect = obex_connect;
	obex->port.disconnect = obex_disconnect;

	obex->port.func.name = "obex";
	/* descriptors are per-instance copies */
	obex->port.func.bind = obex_bind;
	obex->port.func.unbind = obex_unbind;
	obex->port.func.set_alt = obex_set_alt;
	obex->port.func.get_alt = obex_get_alt;
	obex->port.func.disable = obex_disable;
	obex->port.func.free_func = obex_free;
	obex->port.func.bind_deactivated = true;

	return &obex->port.func;
}
DECLARE_USB_FUNCTION_INIT(obex, obex_alloc_inst, obex_alloc);
MODULE_AUTHOR("Felipe Balbi");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_obex.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_audio.c -- USB Audio class function driver
*
* Copyright (C) 2008 Bryan Wu <[email protected]>
* Copyright (C) 2008 Analog Devices, Inc
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include "u_uac1_legacy.h"
static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
/*
* DESCRIPTORS ... most are static, but strings and full
* configuration descriptors are built on demand.
*/
/*
 * We have two interfaces: AudioControl and AudioStreaming
 * TODO: only supports playback currently
 */
/* interface layout: one AudioControl + one AudioStreaming interface */
#define F_AUDIO_AC_INTERFACE	0
#define F_AUDIO_AS_INTERFACE	1
#define F_AUDIO_NUM_INTERFACES	1

/* B.3.1  Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
};

/*
 * The number of AudioStreaming and MIDIStreaming interfaces
 * in the Audio Interface Collection
 */
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);

#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
/* 1 input terminal, 1 output terminal and 1 feature unit */
#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
	+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2  Class-Specific AC Interface Descriptor */
static struct uac1_ac_header_descriptor_1 ac_header_desc = {
	.bLength =		UAC_DT_AC_HEADER_LENGTH,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_HEADER,
	.bcdADC =		cpu_to_le16(0x0100),
	.wTotalLength =		cpu_to_le16(UAC_DT_TOTAL_LENGTH),
	.bInCollection =	F_AUDIO_NUM_INTERFACES,
	.baInterfaceNr = {
		/* Interface number of the first AudioStream interface */
		[0] =		1,
	}
};

/*
 * Unit/terminal topology (see the bSourceID chain below):
 * IT(1) -> FU(2) -> OT(3) -- a USB streaming input terminal feeding a
 * mute/volume feature unit feeding a speaker output terminal.
 */
#define INPUT_TERMINAL_ID	1
static struct uac_input_terminal_descriptor input_terminal_desc = {
	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
	.bTerminalID =		INPUT_TERMINAL_ID,
	.wTerminalType =	UAC_TERMINAL_STREAMING,
	.bAssocTerminal =	0,
	.wChannelConfig =	0x3,	/* bits 0/1: left/right front */
};

DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);

#define FEATURE_UNIT_ID		2
static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
	.bUnitID		= FEATURE_UNIT_ID,
	.bSourceID		= INPUT_TERMINAL_ID,
	.bControlSize		= 2,
	.bmaControls[0]		= (UAC_FU_MUTE | UAC_FU_VOLUME),
};

/* host-visible mute control, backed by generic_{set,get}_cmd storage */
static struct usb_audio_control mute_control = {
	.list = LIST_HEAD_INIT(mute_control.list),
	.name = "Mute Control",
	.type = UAC_FU_MUTE,
	/* Todo: add real Mute control code */
	.set = generic_set_cmd,
	.get = generic_get_cmd,
};

/* host-visible volume control, backed by generic_{set,get}_cmd storage */
static struct usb_audio_control volume_control = {
	.list = LIST_HEAD_INIT(volume_control.list),
	.name = "Volume Control",
	.type = UAC_FU_VOLUME,
	/* Todo: add real Volume control code */
	.set = generic_set_cmd,
	.get = generic_get_cmd,
};

/* selector grouping the two controls under FEATURE_UNIT_ID; the lists
 * are populated in control_selector_init()
 */
static struct usb_audio_control_selector feature_unit = {
	.list = LIST_HEAD_INIT(feature_unit.list),
	.id = FEATURE_UNIT_ID,
	.name = "Mute & Volume Control",
	.type = UAC_FEATURE_UNIT,
	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
};

#define OUTPUT_TERMINAL_ID	3
static struct uac1_output_terminal_descriptor output_terminal_desc = {
	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
	.bTerminalID		= OUTPUT_TERMINAL_ID,
	.wTerminalType		= UAC_OUTPUT_TERMINAL_SPEAKER,
	.bAssocTerminal		= FEATURE_UNIT_ID,
	.bSourceID		= FEATURE_UNIT_ID,
};
/* B.4.1  Standard AS Interface Descriptor */
static struct usb_interface_descriptor as_interface_alt_0_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

/* altsetting 1 carries the single ISO OUT endpoint */
static struct usb_interface_descriptor as_interface_alt_1_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	1,
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

/* B.4.2  Class-Specific AS Interface Descriptor */
static struct uac1_as_header_descriptor as_header_desc = {
	.bLength =		UAC_DT_AS_HEADER_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_AS_GENERAL,
	.bTerminalLink =	INPUT_TERMINAL_ID,
	.bDelay =		1,
	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
};

DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);

/* 16-bit PCM, one discrete sample rate; channel count and the rate
 * itself are patched in by f_audio_build_desc() at bind time
 */
static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
	.bFormatType =		UAC_FORMAT_TYPE_I,
	.bSubframeSize =	2,
	.bBitResolution =	16,
	.bSamFreqType =		1,
};

/* Standard ISO OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor as_out_ep_desc  = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_SYNC_ADAPTIVE
				| USB_ENDPOINT_XFER_ISOC,
	.wMaxPacketSize	=	cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
	.bInterval =		4,
};

/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
	.bDescriptorType =	USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype =	UAC_EP_GENERAL,
	.bmAttributes = 	1,
	.bLockDelayUnits =	1,
	.wLockDelay =		cpu_to_le16(1),
};

/* complete descriptor set, used for both full and high speed */
static struct usb_descriptor_header *f_audio_desc[] = {
	(struct usb_descriptor_header *)&ac_interface_desc,
	(struct usb_descriptor_header *)&ac_header_desc,

	(struct usb_descriptor_header *)&input_terminal_desc,
	(struct usb_descriptor_header *)&output_terminal_desc,
	(struct usb_descriptor_header *)&feature_unit_desc,

	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
	(struct usb_descriptor_header *)&as_header_desc,

	(struct usb_descriptor_header *)&as_type_i_desc,

	(struct usb_descriptor_header *)&as_out_ep_desc,
	(struct usb_descriptor_header *)&as_iso_out_desc,
	NULL,
};

/* indices into strings_uac1[] below */
enum {
	STR_AC_IF,
	STR_INPUT_TERMINAL,
	STR_INPUT_TERMINAL_CH_NAMES,
	STR_FEAT_DESC_0,
	STR_OUTPUT_TERMINAL,
	STR_AS_IF_ALT0,
	STR_AS_IF_ALT1,
};

static struct usb_string strings_uac1[] = {
	[STR_AC_IF].s = "AC Interface",
	[STR_INPUT_TERMINAL].s = "Input terminal",
	[STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
	[STR_FEAT_DESC_0].s = "Volume control & mute",
	[STR_OUTPUT_TERMINAL].s = "Output terminal",
	[STR_AS_IF_ALT0].s = "AS Interface",
	[STR_AS_IF_ALT1].s = "AS Interface",
	{ },
};

static struct usb_gadget_strings str_uac1 = {
	.language = 0x0409,	/* en-us */
	.strings = strings_uac1,
};

static struct usb_gadget_strings *uac1_strings[] = {
	&str_uac1,
	NULL,
};
/*
* This function is an ALSA sound card following USB Audio Class Spec 1.0.
*/
/*-------------------------------------------------------------------------*/
/* One chunk of accumulated OUT audio data awaiting playback. */
struct f_audio_buf {
	u8 *buf;		/* backing storage */
	int actual;		/* bytes currently stored in buf */
	struct list_head list;	/* link in f_audio->play_queue */
};

/*
 * Allocate a buffer plus backing storage.  Uses GFP_ATOMIC because it
 * is called from USB request completion context.  Returns
 * ERR_PTR(-ENOMEM) on failure, never NULL.
 */
static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
{
	struct f_audio_buf *copy_buf;

	copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
	if (!copy_buf)
		return ERR_PTR(-ENOMEM);

	copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
	if (!copy_buf->buf) {
		kfree(copy_buf);
		return ERR_PTR(-ENOMEM);
	}

	return copy_buf;
}

/* Release a buffer and its backing storage. */
static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
{
	kfree(audio_buf->buf);
	kfree(audio_buf);
}
/*-------------------------------------------------------------------------*/
/* Per-function state for one legacy UAC1 audio instance. */
struct f_audio {
	struct gaudio			card;	/* ALSA-side glue */

	/* interface numbers and current altsettings, set at bind time */
	u8 ac_intf, ac_alt;
	u8 as_intf, as_alt;

	/* endpoints handle full and/or high speeds */
	struct usb_ep			*out_ep;

	spinlock_t			lock;	/* guards play_queue handoff */
	struct f_audio_buf *copy_buf;		/* buffer currently filling */
	struct work_struct playback_work;
	struct list_head play_queue;		/* full buffers for the worker */

	/* Control Set command */
	struct list_head cs;			/* control selector list */
	u8 set_cmd;				/* pending UAC command nibble */
	struct usb_audio_control *set_con;	/* target of ep0 data stage */
};

/* Recover the f_audio from its embedded usb_function. */
static inline struct f_audio *func_to_audio(struct usb_function *f)
{
	return container_of(f, struct f_audio, card.func);
}
/*-------------------------------------------------------------------------*/
/*
 * Worker: pop one completed buffer off play_queue and feed it to the
 * audio playback path.  Only the list handoff runs under audio->lock;
 * u_audio_playback() and the free run unlocked.
 */
static void f_audio_playback_work(struct work_struct *data)
{
	struct f_audio *audio = container_of(data, struct f_audio,
					     playback_work);
	struct f_audio_buf *play_buf;

	spin_lock_irq(&audio->lock);
	if (list_empty(&audio->play_queue)) {
		spin_unlock_irq(&audio->lock);
		return;
	}
	play_buf = list_first_entry(&audio->play_queue,
				    struct f_audio_buf, list);
	list_del(&play_buf->list);
	spin_unlock_irq(&audio->lock);

	u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
	f_audio_buffer_free(play_buf);
}
/*
 * ISO OUT completion: accumulate received audio data into the current
 * copy buffer, hand a full buffer off to the playback worker, and
 * requeue the request so the stream keeps flowing.
 *
 * Fix: the list_add_tail() argument had been corrupted to
 * "©_buf->list" (a mangled "&copy_buf"); restore the address-of
 * so the buffer is actually linked onto play_queue.
 *
 * Returns 0 on success, -EINVAL if no copy buffer is active, or
 * -ENOMEM if a replacement buffer cannot be allocated.
 */
static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_audio *audio = req->context;
	struct usb_composite_dev *cdev = audio->card.func.config->cdev;
	struct f_audio_buf *copy_buf = audio->copy_buf;
	struct f_uac1_legacy_opts *opts;
	int audio_buf_size;
	int err;

	opts = container_of(audio->card.func.fi, struct f_uac1_legacy_opts,
			    func_inst);
	audio_buf_size = opts->audio_buf_size;

	if (!copy_buf)
		return -EINVAL;

	/* Copy buffer is full, add it to the play_queue */
	if (audio_buf_size - copy_buf->actual < req->actual) {
		spin_lock_irq(&audio->lock);
		list_add_tail(&copy_buf->list, &audio->play_queue);
		spin_unlock_irq(&audio->lock);
		schedule_work(&audio->playback_work);
		copy_buf = f_audio_buffer_alloc(audio_buf_size);
		if (IS_ERR(copy_buf))
			return -ENOMEM;
	}

	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
	copy_buf->actual += req->actual;
	audio->copy_buf = copy_buf;

	/* resubmit this request immediately to keep the stream fed */
	err = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (err)
		ERROR(cdev, "%s queue req: %d\n", ep->name, err);

	return 0;
}
/*
 * Shared completion handler.  For the ISO OUT endpoint it forwards to
 * f_audio_out_ep_complete(); otherwise (ep0) it finishes a pending
 * class SET request by pushing the data-stage value into the control
 * selected earlier by audio_set_intf_req().
 */
static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_audio *audio = req->context;
	int status = req->status;
	u32 data = 0;
	struct usb_ep *out_ep = audio->out_ep;

	switch (status) {
	case 0:				/* normal completion? */
		if (ep == out_ep)
			f_audio_out_ep_complete(ep, req);
		else if (audio->set_con) {
			/* ep0 data stage of a class SET_* request */
			memcpy(&data, req->buf, req->length);
			audio->set_con->set(audio->set_con, audio->set_cmd,
					    le16_to_cpu(data));
			audio->set_con = NULL;
		}
		break;
	default:
		/* cancelled or errored transfers are simply dropped */
		break;
	}
}
/*
 * Class SET request addressed to the AudioControl interface: remember
 * which control (unit id from wIndex, control selector from wValue)
 * and which command the upcoming ep0 data stage applies to; the
 * actual store happens in f_audio_complete().
 *
 * Returns wLength, i.e. the size of the data stage to queue.
 */
static int audio_set_intf_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct f_audio		*audio = func_to_audio(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
	u16			len = le16_to_cpu(ctrl->wLength);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u8			con_sel = (w_value >> 8) & 0xFF;
	u8			cmd = (ctrl->bRequest & 0x0F);
	struct usb_audio_control_selector *cs;
	struct usb_audio_control *con;

	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
	    ctrl->bRequest, w_value, len, id);

	/* locate the addressed selector, then the control within it */
	list_for_each_entry(cs, &audio->cs, list) {
		if (cs->id == id) {
			list_for_each_entry(con, &cs->control, list) {
				if (con->type == con_sel) {
					audio->set_con = con;
					break;
				}
			}
			break;
		}
	}

	audio->set_cmd = cmd;
	req->context = audio;
	req->complete = f_audio_complete;

	return len;
}

/*
 * Class GET request addressed to the AudioControl interface: read the
 * value from the addressed control and place it in the ep0 request
 * buffer.  If no matching control is found the buffer carries the
 * -EOPNOTSUPP sentinel instead.
 *
 * Returns the number of response bytes to send (clamped to wLength).
 */
static int audio_get_intf_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct f_audio		*audio = func_to_audio(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
	u16			len = le16_to_cpu(ctrl->wLength);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u8			con_sel = (w_value >> 8) & 0xFF;
	u8			cmd = (ctrl->bRequest & 0x0F);
	struct usb_audio_control_selector *cs;
	struct usb_audio_control *con;

	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
	    ctrl->bRequest, w_value, len, id);

	list_for_each_entry(cs, &audio->cs, list) {
		if (cs->id == id) {
			list_for_each_entry(con, &cs->control, list) {
				if (con->type == con_sel && con->get) {
					value = con->get(con, cmd);
					break;
				}
			}
			break;
		}
	}

	req->context = audio;
	req->complete = f_audio_complete;
	len = min_t(size_t, sizeof(value), len);
	memcpy(req->buf, &value, len);

	return len;
}
/*
 * Class SET request addressed to an endpoint.  Only SET_CUR is
 * honoured (by accepting the full data stage); SET_MIN/MAX/RES/MEM
 * and anything else report -EOPNOTSUPP.
 */
static int audio_set_endpoint_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	u16			ep = le16_to_cpu(ctrl->wIndex);
	u16			len = le16_to_cpu(ctrl->wLength);
	u16			w_value = le16_to_cpu(ctrl->wValue);

	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
	    ctrl->bRequest, w_value, len, ep);

	return ctrl->bRequest == UAC_SET_CUR ? len : -EOPNOTSUPP;
}
/*
 * Class GET request addressed to an endpoint.  GET_CUR/MIN/MAX/RES
 * all report the requested length; GET_MEM and unknown requests
 * report -EOPNOTSUPP.
 */
static int audio_get_endpoint_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	u8			ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
	u16			len = le16_to_cpu(ctrl->wLength);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	int			value = -EOPNOTSUPP;

	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
	    ctrl->bRequest, w_value, len, ep);

	if (ctrl->bRequest == UAC_GET_CUR || ctrl->bRequest == UAC_GET_MIN ||
	    ctrl->bRequest == UAC_GET_MAX || ctrl->bRequest == UAC_GET_RES)
		value = len;

	return value;
}
/*
 * Dispatch class-specific ep0 requests to the interface/endpoint
 * handlers above, then queue the data/status stage on ep0.  Standard
 * requests (including interface activation via set_alt) are handled
 * by the composite core.
 *
 * Returns a negative value to stall ep0, otherwise >= 0.
 */
static int
f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything; interface
	 * activation uses set_alt().
	 */
	switch (ctrl->bRequestType) {
	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
		value = audio_set_intf_req(f, ctrl);
		break;

	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
		value = audio_get_intf_req(f, ctrl);
		break;

	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
		value = audio_set_endpoint_req(f, ctrl);
		break;

	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
		value = audio_get_endpoint_req(f, ctrl);
		break;

	default:
		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "audio response on err %d\n", value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_audio *audio = func_to_audio(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_ep *out_ep = audio->out_ep;
struct usb_request *req;
struct f_uac1_legacy_opts *opts;
int req_buf_size, req_count, audio_buf_size;
int i = 0, err = 0;
DBG(cdev, "intf %d, alt %d\n", intf, alt);
opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
req_buf_size = opts->req_buf_size;
req_count = opts->req_count;
audio_buf_size = opts->audio_buf_size;
/* No i/f has more than 2 alt settings */
if (alt > 1) {
ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
if (intf == audio->ac_intf) {
/* Control I/f has only 1 AltSetting - 0 */
if (alt) {
ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
return 0;
} else if (intf == audio->as_intf) {
if (alt == 1) {
err = config_ep_by_speed(cdev->gadget, f, out_ep);
if (err)
return err;
usb_ep_enable(out_ep);
audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
if (IS_ERR(audio->copy_buf))
return -ENOMEM;
/*
* allocate a bunch of read buffers
* and queue them all at once.
*/
for (i = 0; i < req_count && err == 0; i++) {
req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
if (req) {
req->buf = kzalloc(req_buf_size,
GFP_ATOMIC);
if (req->buf) {
req->length = req_buf_size;
req->context = audio;
req->complete =
f_audio_complete;
err = usb_ep_queue(out_ep,
req, GFP_ATOMIC);
if (err)
ERROR(cdev,
"%s queue req: %d\n",
out_ep->name, err);
} else
err = -ENOMEM;
} else
err = -ENOMEM;
}
} else {
struct f_audio_buf *copy_buf = audio->copy_buf;
if (copy_buf) {
list_add_tail(©_buf->list,
&audio->play_queue);
schedule_work(&audio->playback_work);
}
}
audio->as_alt = alt;
}
return err;
}
static int f_audio_get_alt(struct usb_function *f, unsigned intf)
{
struct f_audio *audio = func_to_audio(f);
struct usb_composite_dev *cdev = f->config->cdev;
if (intf == audio->ac_intf)
return audio->ac_alt;
else if (intf == audio->as_intf)
return audio->as_alt;
else
ERROR(cdev, "%s:%d Invalid Interface %d!\n",
__func__, __LINE__, intf);
return -EINVAL;
}
/* Nothing to tear down on disable for this function. */
static void f_audio_disable(struct usb_function *f)
{
}
/*-------------------------------------------------------------------------*/
/*
 * Patch the run-time channel count and sample rate from the audio
 * card into the class-specific descriptors before they are copied.
 */
static void f_audio_build_desc(struct f_audio *audio)
{
	struct gaudio *card = &audio->card;
	int rate;

	/* Set channel numbers */
	input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
	as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);

	/* Set sample rates: tSamFreq is a 24-bit field; copying the low
	 * three bytes of an int assumes little-endian -- TODO confirm.
	 */
	rate = u_audio_get_playback_rate(card);
	memcpy(as_type_i_desc.tSamFreq[0], &rate, 3);

	/* Todo: Set Sample bits and other parameters */
}
/* audio function driver setup/binding */
/*
 * Bind the audio function: set up the ALSA back end (once per
 * instance), attach the string table, allocate the two interface IDs
 * and the ISO OUT endpoint, and build the descriptor sets.
 *
 * On any failure after gaudio_setup() the ALSA devices are released
 * via gaudio_cleanup().
 */
static int
f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_audio		*audio = func_to_audio(f);
	struct usb_string	*us;
	int			status;
	struct usb_ep		*ep = NULL;
	struct f_uac1_legacy_opts	*audio_opts;

	audio_opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
	audio->card.gadget = c->cdev->gadget;
	/* set up ALSA audio devices */
	if (!audio_opts->bound) {
		status = gaudio_setup(&audio->card);
		if (status < 0)
			return status;
		audio_opts->bound = true;
	}
	us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
	if (IS_ERR(us))
		return PTR_ERR(us);
	ac_interface_desc.iInterface = us[STR_AC_IF].id;
	input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
	input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
	feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
	output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
	as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
	as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;

	/* patch channel count and sample rate into class descriptors */
	f_audio_build_desc(audio);

	/* allocate instance-specific interface IDs, and patch descriptors */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ac_interface_desc.bInterfaceNumber = status;
	audio->ac_intf = status;
	audio->ac_alt = 0;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	as_interface_alt_0_desc.bInterfaceNumber = status;
	as_interface_alt_1_desc.bInterfaceNumber = status;
	audio->as_intf = status;
	audio->as_alt = 0;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
	if (!ep)
		goto fail;
	audio->out_ep = ep;
	audio->out_ep->desc = &as_out_ep_desc;

	/* copy descriptors, and track endpoint copies */
	status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
					NULL);
	if (status)
		goto fail;
	return 0;

fail:
	gaudio_cleanup(&audio->card);
	return status;
}
/*-------------------------------------------------------------------------*/
/* Store a value in the control's per-command slot (UAC__CUR etc.). */
static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
{
	con->data[cmd] = value;

	return 0;
}

/* Fetch the stored value for the given command slot. */
static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
{
	return con->data[cmd];
}

/* Todo: add more control selector dynamically */
static int control_selector_init(struct f_audio *audio)
{
	INIT_LIST_HEAD(&audio->cs);
	list_add(&feature_unit.list, &audio->cs);

	INIT_LIST_HEAD(&feature_unit.control);
	list_add(&mute_control.list, &feature_unit.control);
	list_add(&volume_control.list, &feature_unit.control);

	/* preset volume range/resolution reported to the host */
	volume_control.data[UAC__CUR] = 0xffc0;
	volume_control.data[UAC__MIN] = 0xe3a0;
	volume_control.data[UAC__MAX] = 0xfff0;
	volume_control.data[UAC__RES] = 0x0030;

	return 0;
}

/* Map a configfs item back to its f_uac1_legacy_opts instance. */
static inline
struct f_uac1_legacy_opts *to_f_uac1_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_uac1_legacy_opts,
			    func_inst.group);
}

/* configfs item release: drop the reference on the function instance. */
static void f_uac1_attr_release(struct config_item *item)
{
	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations f_uac1_item_ops = {
	.release	= f_uac1_attr_release,
};
/*
 * Generate configfs show/store handlers for an unsigned integer
 * option.  Stores are refused with -EBUSY once the instance is in
 * use (opts->refcnt != 0); all access is serialized by opts->lock.
 */
#define UAC1_INT_ATTRIBUTE(name)					\
static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
					 char *page)			\
{									\
	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", opts->name);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_uac1_opts_##name##_store(struct config_item *item,	\
					  const char *page, size_t len)	\
{									\
	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
	int ret;							\
	u32 num;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou32(page, 0, &num);					\
	if (ret)							\
		goto end;						\
									\
	opts->name = num;						\
	ret = len;							\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
CONFIGFS_ATTR(f_uac1_opts_, name)

UAC1_INT_ATTRIBUTE(req_buf_size);
UAC1_INT_ATTRIBUTE(req_count);
UAC1_INT_ATTRIBUTE(audio_buf_size);
/*
 * Generate configfs show/store handlers for a string option (the
 * ALSA device node paths).  A store duplicates the incoming page and
 * sets the matching *_alloc flag so f_audio_free_inst() knows the
 * value must be kfree'd; like the integer attributes, changes are
 * refused with -EBUSY once the instance is in use.
 *
 * Fix: the kstrndup() result check was inverted ("if (tmp)").  As
 * written, every successful duplication bailed out with -ENOMEM and
 * leaked tmp, while a failed one went on to store NULL.  The check
 * must be "if (!tmp)".
 */
#define UAC1_STR_ATTRIBUTE(name)					\
static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
					 char *page)			\
{									\
	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%s\n", opts->name);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_uac1_opts_##name##_store(struct config_item *item,	\
					  const char *page, size_t len)	\
{									\
	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
	int ret = -EBUSY;						\
	char *tmp;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt)						\
		goto end;						\
									\
	tmp = kstrndup(page, len, GFP_KERNEL);				\
	if (!tmp) {							\
		ret = -ENOMEM;						\
		goto end;						\
	}								\
	if (opts->name##_alloc)						\
		kfree(opts->name);					\
	opts->name##_alloc = true;					\
	opts->name = tmp;						\
	ret = len;							\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
CONFIGFS_ATTR(f_uac1_opts_, name)

UAC1_STR_ATTRIBUTE(fn_play);
UAC1_STR_ATTRIBUTE(fn_cap);
UAC1_STR_ATTRIBUTE(fn_cntl);
/* all configfs attributes generated by the macros above */
static struct configfs_attribute *f_uac1_attrs[] = {
	&f_uac1_opts_attr_req_buf_size,
	&f_uac1_opts_attr_req_count,
	&f_uac1_opts_attr_audio_buf_size,
	&f_uac1_opts_attr_fn_play,
	&f_uac1_opts_attr_fn_cap,
	&f_uac1_opts_attr_fn_cntl,
	NULL,
};

static const struct config_item_type f_uac1_func_type = {
	.ct_item_ops	= &f_uac1_item_ops,
	.ct_attrs	= f_uac1_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * Release a function instance.  The fn_* strings are only freed when
 * the matching *_alloc flag says they were kmalloc'd by a configfs
 * store; the defaults point at static strings.
 */
static void f_audio_free_inst(struct usb_function_instance *f)
{
	struct f_uac1_legacy_opts *opts =
		container_of(f, struct f_uac1_legacy_opts, func_inst);

	if (opts->fn_play_alloc)
		kfree(opts->fn_play);
	if (opts->fn_cap_alloc)
		kfree(opts->fn_cap);
	if (opts->fn_cntl_alloc)
		kfree(opts->fn_cntl);
	kfree(opts);
}
/*
 * Allocate a function instance with default buffer tuning and ALSA
 * device node paths, and register it with configfs.
 */
static struct usb_function_instance *f_audio_alloc_inst(void)
{
	struct f_uac1_legacy_opts *opts = kzalloc(sizeof(*opts), GFP_KERNEL);

	if (!opts)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = f_audio_free_inst;

	/* default tuning and device nodes; overridable via configfs */
	opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
	opts->req_count = UAC1_REQ_COUNT;
	opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
	opts->fn_play = FILE_PCM_PLAYBACK;
	opts->fn_cap = FILE_PCM_CAPTURE;
	opts->fn_cntl = FILE_CONTROL;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &f_uac1_func_type);

	return &opts->func_inst;
}
/* free_func: release the ALSA back end and the per-function state,
 * then drop the instance refcount so configfs stores work again.
 */
static void f_audio_free(struct usb_function *f)
{
	struct f_audio *audio = func_to_audio(f);
	struct f_uac1_legacy_opts *opts;

	gaudio_cleanup(&audio->card);
	opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
	kfree(audio);
	mutex_lock(&opts->lock);
	--opts->refcnt;
	mutex_unlock(&opts->lock);
}

/* Undo f_audio_bind(): release the copied descriptor sets. */
static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
{
	usb_free_all_descriptors(f);
}
/*
 * Allocate one f_audio, pin the instance (refcnt) and wire up the
 * playback machinery and usb_function operations.
 */
static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
{
	struct f_uac1_legacy_opts *opts =
		container_of(fi, struct f_uac1_legacy_opts, func_inst);
	struct f_audio *audio;

	audio = kzalloc(sizeof(*audio), GFP_KERNEL);
	if (!audio)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&opts->lock);
	++opts->refcnt;
	mutex_unlock(&opts->lock);

	audio->card.func.name = "g_audio";

	spin_lock_init(&audio->lock);
	INIT_LIST_HEAD(&audio->play_queue);
	INIT_WORK(&audio->playback_work, f_audio_playback_work);
	control_selector_init(audio);

	audio->card.func.bind = f_audio_bind;
	audio->card.func.unbind = f_audio_unbind;
	audio->card.func.set_alt = f_audio_set_alt;
	audio->card.func.get_alt = f_audio_get_alt;
	audio->card.func.setup = f_audio_setup;
	audio->card.func.disable = f_audio_disable;
	audio->card.func.free_func = f_audio_free;

	return &audio->card.func;
}
DECLARE_USB_FUNCTION_INIT(uac1_legacy, f_audio_alloc_inst, f_audio_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bryan Wu");
| linux-master | drivers/usb/gadget/function/f_uac1_legacy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/string_helpers.h>
#include <linux/usb/composite.h>
#include "u_ether.h"
/*
* This component encapsulates the Ethernet link glue needed to provide
* one (!) network link through the USB gadget stack, normally "usb0".
*
* The control and data models are handled by the function driver which
* connects to this code; such as CDC Ethernet (ECM or EEM),
* "CDC Subset", or RNDIS. That includes all descriptor and endpoint
* management.
*
* Link level addressing is handled by this component using module
* parameters; if no such parameters are provided, random link level
* addresses are used. Each end of the link uses one address. The
* host end address is exported in various ways, and is often recorded
* in configuration databases.
*
* The driver which assembles each configuration using such a link is
* responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
* this single "physical" link to be used by multiple virtual links.)
*/
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
* frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
/* Per-link state for one Ethernet-over-USB network interface. */
struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;	/* active USB link, NULL when disconnected */
	struct net_device	*net;
	struct usb_gadget	*gadget;
	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;	/* free usb_request pools */
	atomic_t		tx_qlen;	/* number of tx requests in flight */
	struct sk_buff_head	rx_frames;	/* frames unwrapped but not yet passed up */
	unsigned		qmult;		/* queue-depth multiplier (see qlen()) */
	unsigned		header_len;	/* extra rx space for the framing header */
	/* optional per-protocol framing hooks (e.g. RNDIS, EEM) */
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);
	struct work_struct	work;		/* deferred rx refill (eth_work) */
	unsigned long		todo;		/* pending work bits, see WORK_* */
#define	WORK_RX_MEMORY		0
	bool			zlp;		/* gadget supports zero-length packets */
	bool			no_skb_reserve;	/* skip NET_IP_ALIGN reserve (quirk) */
	bool			ifname_set;	/* user chose the interface name */
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};
/*-------------------------------------------------------------------------*/
#define RX_EXTRA 20 /* bytes guarding against rx overflows */
#define DEFAULT_QLEN 2 /* double buffering by default */
/* use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
if (gadget->speed == USB_SPEED_HIGH || gadget->speed >= USB_SPEED_SUPER)
return qmult * DEFAULT_QLEN;
else
return DEFAULT_QLEN;
}
/*-------------------------------------------------------------------------*/
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
/* ethtool -i: report driver name/version and the backing UDC device. */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strscpy(p->driver, "g_ether", sizeof(p->driver));
	strscpy(p->version, UETH__VERSION, sizeof(p->version));
	/* "firmware" slot reused for the UDC controller name */
	strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
 * - WOL (by tracking suspends and issuing remote wakeup)
 * - msglevel (implies updated messaging)
 * - ... probably more ethtool ops
 */
/* ethtool operations exposed on the gadget-side network interface */
static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
/* Schedule deferred work for @flag unless the same flag is already pending. */
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;		/* already queued */

	if (schedule_work(&dev->work))
		DBG(dev, "kevent %d scheduled\n", flag);
	else
		ERROR(dev, "kevent %d may have been dropped\n", flag);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req);
/*
 * Allocate an skb sized for one USB OUT transfer and queue @req on the
 * OUT endpoint.  Returns 0 on success.  On failure the request is put
 * back on dev->rx_reqs and a negative errno is returned; -ENOMEM also
 * schedules a deferred retry via WORK_RX_MEMORY.
 */
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	/* the link can be torn down at any time: sample out_ep under lock */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out)
	{
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	/* some UDCs require the rx buffer to be a maxpacket multiple */
	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	/* NB: the enomem label is jumped into from the skb-alloc failure
	 * above; both paths defer a refill retry to the workqueue.
	 */
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		/* recycle the request for a later attempt */
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
/*
 * Completion handler for OUT (rx) requests: unwrap any protocol framing,
 * hand good frames to the network stack, and resubmit the request unless
 * the interface is down or the transfer was aborted.
 */
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			/* unwrap may touch link state; hold the lock and
			 * re-check that the link still exists
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		/* ownership passed to rx_frames (or freed above) */
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			/* drop frames outside valid ethernet length bounds,
			 * or everything queued once unwrap reported an error
			 */
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		/* park the request; rx_fill() will reuse it later */
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
/*
 * Make @list hold exactly @n usb_requests for @ep: allocate more when
 * short, free the surplus when over.  Returns 0 on success; -ENOMEM
 * only when nothing at all could be (or was already) allocated.
 */
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	/* i is the shortfall after the walk above */
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			/* partial success is still usable */
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras: @req is the first surplus entry */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
/* Preallocate @n tx and @n rx requests for @link's endpoints. */
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status >= 0)
		status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		DBG(dev, "can't alloc requests\n");
	spin_unlock(&dev->req_lock);
	return status;
}
/*
 * Drain the rx free-list, submitting one receive request per entry.
 * The lock is dropped across rx_submit() since it may sleep/allocate;
 * a failed submit defers a retry to the workqueue and stops early.
 */
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
/* Workqueue handler: refill the rx queue when memory pressure eased. */
static void eth_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo) &&
	    netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
/*
 * Completion handler for IN (tx) requests: account the result, free the
 * skb, recycle the request, and restart the tx queue if carrier is up.
 */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	/* NOTE(review): tx_packets is bumped on error paths too; looks
	 * intentional upstream but worth confirming against stats docs.
	 */
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
/* Nonzero when the host enabled promiscuous mode in its CDC filter. */
static inline int is_promisc(u16 cdc_filter)
{
	u16 masked = cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;

	return masked;
}
/* Issue a remote wakeup: function wakeup if suspended, else device-level. */
static int ether_wakeup_host(struct gether *port)
{
	struct usb_function *func = &port->func;
	struct usb_gadget *gadget = func->config->cdev->gadget;

	if (func->func_suspended)
		return usb_func_wakeup(func);

	return usb_gadget_wakeup(gadget);
}
/*
 * ndo_start_xmit: transmit one skb over the IN endpoint.  Applies CDC
 * packet filters, optionally wraps the frame in protocol framing, and
 * queues the transfer.  Returns NETDEV_TX_BUSY when the link is
 * suspended or no free request is available, NETDEV_TX_OK otherwise
 * (including drop paths, per netdev convention).
 */
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	/* sample link state under the lock; it may vanish at any time */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}

	if (dev->port_usb && dev->port_usb->is_suspend) {
		DBG(dev, "Port suspended. Triggering wakeup\n");
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->lock, flags);
		ether_wakeup_host(dev->port_usb);
		return NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		/* no link: silently drop, don't stall the stack */
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;	/* send one pad byte instead of a zlp */

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		/* return the request and restart the queue if needed */
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
/*-------------------------------------------------------------------------*/
/* Bring the data path up: prime rx requests, then open the tx queue. */
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	rx_fill(dev, gfp_flags);	/* fill the rx queue */

	atomic_set(&dev->tx_qlen, 0);	/* ...and open the tx floodgates */
	netif_wake_queue(dev->net);
}
/* ndo_open: start I/O if the USB link is up, and notify the function. */
static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	/* carrier on means gether_connect() already ran */
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
/*
 * ndo_stop: stop the tx queue, notify the function, and bounce the
 * endpoints to cancel any in-flight transfers.
 */
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		/* disabling clears ep->desc, so save/restore around it */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Parse a MAC address given as 12 hex digits, optionally separated by
 * ':' or '.'.  Falls back to a random address when @str is NULL or not
 * a valid unicast address.  Returns 0 when @str was accepted, 1 when a
 * random address was generated instead.
 *
 * NOTE(review): hex_to_bin() returns -1 for non-hex input; such bytes
 * end up invalid and are rejected by is_valid_ether_addr() below —
 * presumably the intended behavior, but there is no per-digit check.
 */
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			/* skip an optional separator before each byte */
			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr [i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
/*
 * Format @dev_addr as "xx:xx:xx:xx:xx:xx" into @str.  Requires room
 * for 17 characters plus NUL; returns 18 on success, -EINVAL if @len
 * is too small.
 */
static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	/* %pM is the kernel's colon-separated MAC format extension */
	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
/* net_device callbacks for the gadget-side ethernet interface */
static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/* device_type exposed through sysfs for this network interface */
static struct device_type gadget_type = {
	.name	= "gadget",
};
/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associated with these links
 * @dev_addr: text form of the device-side MAC, or NULL for a random one
 * @host_addr: text form of the host-side MAC, or NULL for a random one
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue-depth multiplier used at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;
	u8			addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr)) {
		net->addr_assign_type = NET_ADDR_RANDOM;
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	} else {
		net->addr_assign_type = NET_ADDR_SET;
	}
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	/* report the host MAC back so the function driver can export it */
	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
/*
 * gether_setup_name_default - create an unregistered net_device for a link
 * @netname: name template for the network device (for example, "usb")
 *
 * Like gether_setup_name(), but defers MAC configuration and netdev
 * registration to the configfs path (gether_register_netdev() and the
 * gether_set_*() helpers).  Returns the net_device or an ERR_PTR.
 */
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);

	/* by default we always have a random MAC address */
	net->addr_assign_type = NET_ADDR_RANDOM;

	eth_random_addr(dev->host_mac);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);
/*
 * gether_register_netdev - register a net_device made by
 * gether_setup_name_default()
 * @net: the device; its gadget must already be set (see gether_set_gadget)
 *
 * Applies the stored device MAC and registers with the network stack.
 * Returns 0 on success or a negative errno.
 */
int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;

	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
		INFO(dev, "MAC %pM\n", dev->dev_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);
/* Bind @net to gadget @g and parent it in the device tree. */
void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev = netdev_priv(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
/* Parse @dev_addr and record it as the device-side MAC. */
int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev = netdev_priv(net);
	u8 parsed[ETH_ALEN];

	if (get_ether_addr(dev_addr, parsed))
		return -EINVAL;

	memcpy(dev->dev_mac, parsed, ETH_ALEN);
	net->addr_assign_type = NET_ADDR_SET;
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);
/*
 * gether_get_dev_addr - format the device-side MAC into @dev_addr
 * @net: network device
 * @dev_addr: buffer receiving "xx:xx:xx:xx:xx:xx\n"
 * @len: size of @dev_addr in bytes
 *
 * Returns the number of characters written (a trailing newline is
 * appended when it fits), or a negative errno when @len is too small.
 */
int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	/* a negative errno must not be used as a buffer index below */
	if (ret < 0)
		return ret;
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);
/* Parse @host_addr and record it as the host-side MAC. */
int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev = netdev_priv(net);
	u8 parsed[ETH_ALEN];

	if (get_ether_addr(host_addr, parsed))
		return -EINVAL;

	memcpy(dev->host_mac, parsed, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);
/*
 * gether_get_host_addr - format the host-side MAC into @host_addr
 * @net: network device
 * @host_addr: buffer receiving "xx:xx:xx:xx:xx:xx\n"
 * @len: size of @host_addr in bytes
 *
 * Returns the number of characters written (a trailing newline is
 * appended when it fits), or a negative errno when @len is too small.
 */
int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	/* a negative errno must not be used as a buffer index below */
	if (ret < 0)
		return ret;
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);
/*
 * gether_get_host_addr_cdc - host MAC in CDC iMACAddress form
 * @net: network device
 * @host_addr: buffer receiving 12 uppercase hex digits, no separators
 * @len: size of @host_addr (needs 12 chars + NUL)
 *
 * Returns the string length, or -EINVAL when @len is too small.
 */
int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	/* %pm is the kernel's MAC format without colon separators */
	snprintf(host_addr, len, "%pm", dev->host_mac);

	string_upper(host_addr, host_addr);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
/* Copy the raw host-side MAC bytes into @host_mac. */
void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev = netdev_priv(net);

	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
/* Set the queue-depth multiplier used at high/super speed. */
void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev = netdev_priv(net);

	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);
/* Read back the current queue-depth multiplier. */
unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);

	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);
/*
 * gether_get_ifname - report the interface name (newline-terminated)
 *
 * When the user set an explicit name template it is reported verbatim;
 * otherwise the currently assigned netdev name is used.  rtnl protects
 * against a concurrent rename.
 */
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);
/*
 * gether_set_ifname - set the interface name template before registration
 * @net: network device (not yet registered)
 * @name: template such as "usb%d"; a trailing newline is tolerated
 * @len: length of @name
 *
 * Returns 0 on success, -E2BIG when the name is too long, -EINVAL when
 * it is not a valid netdev name or lacks exactly one "%d".
 */
int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	/* strip a trailing newline (e.g. from configfs echo) */
	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	/* tmp is NUL-terminated and shorter than net->name, so this is safe */
	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
/*
 * gether_suspend - handle USB suspend for the data link
 *
 * If transfers are in flight, request a remote wakeup instead of
 * marking the link suspended.
 *
 * NOTE(review): ether_wakeup_host() is passed dev->port_usb without a
 * NULL check; presumably suspend only happens while the link is
 * connected — worth confirming against the composite core's ordering.
 */
void gether_suspend(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (atomic_read(&dev->tx_qlen)) {
		/*
		 * There is a transfer in progress. So we trigger a remote
		 * wakeup to inform the host.
		 */
		ether_wakeup_host(dev->port_usb);
		return;
	}
	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = true;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_suspend);
/*
 * gether_resume - handle USB resume for the data link
 *
 * Restarts the tx queue if it was stopped while suspended and clears
 * the suspend flag under the link lock.
 */
void gether_resume(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (netif_queue_stopped(dev->net))
		netif_start_queue(dev->net);

	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = false;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_resume);
/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	/* eth_work must not run once the netdev is gone */
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		/* adopt the link's framing configuration */
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		/* NB: fail1/fail0 are jumped into from the enable paths
		 * above; fall-through here also runs the same teardown
		 */
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		/* drop the lock around the (possibly sleeping) free */
		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->is_suspend = false;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/function/u_ether.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_midi2.c -- USB MIDI 2.0 class function driver
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/ump.h>
#include <sound/ump_msg.h>
#include <sound/ump_convert.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi-v2.h>
#include "u_f.h"
#include "u_midi2.h"
struct f_midi2;
struct f_midi2_ep;
struct f_midi2_usb_ep;
/* Context for each USB request; stored in usb_request->context */
struct f_midi2_req_ctx {
	struct f_midi2_usb_ep *usb_ep;	/* belonging USB EP */
	unsigned int index;		/* array index: 0-31 */
	struct usb_request *req;	/* assigned request */
};
/* Resources for a USB Endpoint, shared by MIDI 1.0 and 2.0 altsettings */
struct f_midi2_usb_ep {
	struct f_midi2 *card;		/* belonging card */
	struct f_midi2_ep *ep;		/* belonging UMP EP (optional) */
	struct usb_ep *usb_ep;		/* assigned USB EP */
	void (*complete)(struct usb_ep *usb_ep, struct usb_request *req);
	unsigned long free_reqs;	/* bitmap for unused requests */
	unsigned int num_reqs;		/* number of allocated requests */
	struct f_midi2_req_ctx *reqs;	/* request context array */
};
/* Resources for UMP Function Block (and USB Group Terminal Block) */
struct f_midi2_block {
	struct f_midi2_block_info info;	/* FB info, copied from configfs */
	struct snd_ump_block *fb;	/* assigned FB */
	unsigned int gtb_id;		/* assigned GTB id (1-based) */
	unsigned int string_id;		/* assigned string id */
};
/* Temporary buffer for altset 0 MIDI 1.0 handling (one per cable) */
struct f_midi2_midi1_port {
	unsigned int pending;	/* pending bytes on the input buffer */
	u8 buf[32];		/* raw MIDI 1.0 byte input */
	u8 state;		/* running status */
	u8 data[2];		/* rendered USB MIDI 1.0 packet data */
};
/* MIDI 1.0 message parser states (running-status state machine) */
enum {
	STATE_INITIAL = 0,	/* pseudo state */
	STATE_1PARAM,
	STATE_2PARAM_1,
	STATE_2PARAM_2,
	STATE_SYSEX_0,
	STATE_SYSEX_1,
	STATE_SYSEX_2,
	STATE_REAL_TIME,
	STATE_FINISHED,		/* pseudo state */
};
/* Resources for UMP Endpoint (one ALSA UMP endpoint per USB MIDI 2.0 EP) */
struct f_midi2_ep {
	struct snd_ump_endpoint *ump;	/* assigned UMP EP */
	struct f_midi2 *card;		/* belonging MIDI 2.0 device */
	struct f_midi2_ep_info info;	/* UMP EP info, copied from configfs */
	unsigned int num_blks;		/* number of FBs */
	struct f_midi2_block blks[SNDRV_UMP_MAX_BLOCKS];	/* UMP FBs */
	struct f_midi2_usb_ep ep_in;	/* USB MIDI EP-in */
	struct f_midi2_usb_ep ep_out;	/* USB MIDI EP-out */
	u8 in_group_to_cable[SNDRV_UMP_MAX_GROUPS]; /* map to cable; 1-based! */
};
/* indices for USB strings: interface name first, then one per GTB */
enum {
	STR_IFACE = 0,
	STR_GTB1 = 1,
};

/* 1-based GTB id to string id */
#define gtb_to_str_id(id)	(STR_GTB1 + (id) - 1)
/* mapping from MIDI 1.0 cable number to a UMP (endpoint, block, group) */
struct midi1_cable_mapping {
	struct f_midi2_ep *ep;
	unsigned char block;
	unsigned char group;
};
/* operation mode, selected by the host via the chosen altsetting */
enum {
	MIDI_OP_MODE_UNSET,	/* no altset set yet */
	MIDI_OP_MODE_MIDI1,	/* MIDI 1.0 (altset 0) is used */
	MIDI_OP_MODE_MIDI2,	/* MIDI 2.0 (altset 1) is used */
};
/* Resources for MIDI 2.0 Device (the usb_function instance state) */
struct f_midi2 {
	struct usb_function func;
	struct usb_gadget *gadget;
	struct snd_card *card;

	/* MIDI 1.0 in/out USB EPs */
	struct f_midi2_usb_ep midi1_ep_in;
	struct f_midi2_usb_ep midi1_ep_out;

	/* number of MIDI 1.0 I/O cables */
	unsigned int num_midi1_in;
	unsigned int num_midi1_out;

	/* conversion for MIDI 1.0 EP-in */
	struct f_midi2_midi1_port midi1_port[MAX_CABLES];
	/* conversion for MIDI 1.0 EP-out */
	struct ump_cvt_to_ump midi1_ump_cvt;
	/* mapping between cables and UMP groups */
	struct midi1_cable_mapping in_cable_mapping[MAX_CABLES];
	struct midi1_cable_mapping out_cable_mapping[MAX_CABLES];

	int midi_if;			/* USB MIDI interface number */
	int operation_mode;		/* current operation mode */

	spinlock_t queue_lock;		/* guards the request queues */

	struct f_midi2_card_info info;	/* card info, copied from configfs */

	unsigned int num_eps;		/* number of UMP endpoints in use */
	struct f_midi2_ep midi2_eps[MAX_UMP_EPS];

	unsigned int total_blocks;	/* total number of blocks of all EPs */
	struct usb_string *string_defs;
	struct usb_string *strings;
};

#define func_to_midi2(f)	container_of(f, struct f_midi2, func)
/* Endpoint name string, with a default when configfs left it unset. */
static const char *ump_ep_name(const struct f_midi2_ep *ep)
{
	const char *name = ep->info.ep_name;

	return name ? name : "MIDI 2.0 Gadget";
}
/* Product ID string, with a default when configfs left it unset. */
static const char *ump_product_id(const struct f_midi2_ep *ep)
{
	const char *pid = ep->info.product_id;

	return pid ? pid : "Unique Product ID";
}
/* Function-block name string, with a default when configfs left it unset. */
static const char *ump_fb_name(const struct f_midi2_block_info *info)
{
	const char *name = info->name;

	return name ? name : "MIDI 2.0 Gadget I/O";
}
/*
 * USB Descriptor Definitions
 */
/* GTB header descriptor */
static struct usb_ms20_gr_trm_block_header_descriptor gtb_header_desc = {
	.bLength =		sizeof(gtb_header_desc),
	.bDescriptorType =	USB_DT_CS_GR_TRM_BLOCK,
	.bDescriptorSubtype =	USB_MS_GR_TRM_BLOCK_HEADER,
	.wTotalLength =		__cpu_to_le16(0x12), // to be filled
};

/* GTB descriptor template: most items are replaced dynamically */
static struct usb_ms20_gr_trm_block_descriptor gtb_desc = {
	.bLength =		sizeof(gtb_desc),
	.bDescriptorType =	USB_DT_CS_GR_TRM_BLOCK,
	.bDescriptorSubtype =	USB_MS_GR_TRM_BLOCK,
	.bGrpTrmBlkID =		0x01,
	.bGrpTrmBlkType =	USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL,
	.nGroupTrm =		0x00,
	.nNumGroupTrm =		1,
	.iBlockItem =		0,
	.bMIDIProtocol =	USB_MS_MIDI_PROTO_1_0_64,
	.wMaxInputBandwidth =	0,
	.wMaxOutputBandwidth =	0,
};

/* instantiate the variable-length descriptor types used below */
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(32);

#define EP_MAX_PACKET_INT	8

/* Audio Control Interface */
static struct usb_interface_descriptor midi2_audio_if_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bInterfaceNumber =	0, // to be filled
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
	.bInterfaceProtocol =	0,
	.iInterface =		0,
};

static struct uac1_ac_header_descriptor_1 midi2_audio_class_desc = {
	.bLength =		0x09,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	0x01,
	.bcdADC =		__cpu_to_le16(0x0100),
	.wTotalLength =		__cpu_to_le16(0x0009),
	.bInCollection =	0x01,
	.baInterfaceNr =	{ 0x01 }, // to be filled
};
/* MIDI 1.0 Streaming Interface (altset 0) */
static struct usb_interface_descriptor midi2_midi1_if_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bInterfaceNumber =	0, // to be filled
	.bAlternateSetting =	0,
	.bNumEndpoints =	2, // to be filled
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_MIDISTREAMING,
	.bInterfaceProtocol =	0,
	.iInterface =		0, // to be filled
};

static struct usb_ms_header_descriptor midi2_midi1_class_desc = {
	.bLength =		0x07,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	USB_MS_HEADER,
	.bcdMSC =		__cpu_to_le16(0x0100),
	.wTotalLength =		__cpu_to_le16(0x41), // to be calculated
};

/* MIDI 1.0 EP OUT */
static struct usb_endpoint_descriptor midi2_midi1_ep_out_desc = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT | 0, // set up dynamically
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_out_ss_comp_desc = {
	.bLength =		sizeof(midi2_midi1_ep_out_ss_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
};

static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_out_class_desc = {
	.bLength =		0x05, // to be filled
	.bDescriptorType =	USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype =	USB_MS_GENERAL,
	.bNumEmbMIDIJack =	1,
	.baAssocJackID =	{ 0x01 },
};

/* MIDI 1.0 EP IN */
static struct usb_endpoint_descriptor midi2_midi1_ep_in_desc = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN | 0, // set up dynamically
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_in_ss_comp_desc = {
	.bLength =		sizeof(midi2_midi1_ep_in_ss_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
};

static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_in_class_desc = {
	.bLength =		0x05, // to be filled
	.bDescriptorType =	USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype =	USB_MS_GENERAL,
	.bNumEmbMIDIJack =	1,
	.baAssocJackID =	{ 0x03 },
};

/* MIDI 2.0 Streaming Interface (altset 1) */
static struct usb_interface_descriptor midi2_midi2_if_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bInterfaceNumber =	0, // to be filled
	.bAlternateSetting =	1,
	.bNumEndpoints =	2, // to be filled
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_MIDISTREAMING,
	.bInterfaceProtocol =	0,
	.iInterface =		0, // to be filled
};

static struct usb_ms_header_descriptor midi2_midi2_class_desc = {
	.bLength =		0x07,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	USB_MS_HEADER,
	.bcdMSC =		__cpu_to_le16(0x0200),
	.wTotalLength =		__cpu_to_le16(0x07),
};
/* MIDI 2.0 EP OUT */
static struct usb_endpoint_descriptor midi2_midi2_ep_out_desc[MAX_UMP_EPS];
static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_out_ss_comp_desc = {
.bLength = sizeof(midi2_midi1_ep_out_ss_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_out_class_desc[MAX_UMP_EPS];
/* MIDI 2.0 EP IN */
static struct usb_endpoint_descriptor midi2_midi2_ep_in_desc[MAX_UMP_EPS];

static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_in_ss_comp_desc = {
	.bLength =		sizeof(midi2_midi2_ep_in_ss_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
};

static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_in_class_desc[MAX_UMP_EPS];

/* Arrays of descriptors to be created; each NULL-terminated list is one
 * unit appended to the configuration descriptor */
static void *midi2_audio_descs[] = {
	&midi2_audio_if_desc,
	&midi2_audio_class_desc,
	NULL
};

static void *midi2_midi1_descs[] = {
	&midi2_midi1_if_desc,
	&midi2_midi1_class_desc,
	NULL
};

static void *midi2_midi1_ep_out_descs[] = {
	&midi2_midi1_ep_out_desc,
	&midi2_midi1_ep_out_class_desc,
	NULL
};

static void *midi2_midi1_ep_in_descs[] = {
	&midi2_midi1_ep_in_desc,
	&midi2_midi1_ep_in_class_desc,
	NULL
};

/* SuperSpeed variants additionally carry the SS companion descriptor */
static void *midi2_midi1_ep_out_ss_descs[] = {
	&midi2_midi1_ep_out_desc,
	&midi2_midi1_ep_out_ss_comp_desc,
	&midi2_midi1_ep_out_class_desc,
	NULL
};

static void *midi2_midi1_ep_in_ss_descs[] = {
	&midi2_midi1_ep_in_desc,
	&midi2_midi1_ep_in_ss_comp_desc,
	&midi2_midi1_ep_in_class_desc,
	NULL
};

static void *midi2_midi2_descs[] = {
	&midi2_midi2_if_desc,
	&midi2_midi2_class_desc,
	NULL
};
/*
* USB request handling
*/
/* get an empty request for the given EP */
static struct usb_request *get_empty_request(struct f_midi2_usb_ep *usb_ep)
{
struct usb_request *req = NULL;
unsigned long flags;
int index;
spin_lock_irqsave(&usb_ep->card->queue_lock, flags);
if (!usb_ep->free_reqs)
goto unlock;
index = find_first_bit(&usb_ep->free_reqs, usb_ep->num_reqs);
if (index >= usb_ep->num_reqs)
goto unlock;
req = usb_ep->reqs[index].req;
if (!req)
goto unlock;
clear_bit(index, &usb_ep->free_reqs);
req->length = 0;
unlock:
spin_unlock_irqrestore(&usb_ep->card->queue_lock, flags);
return req;
}
/* put the empty request back */
static void put_empty_request(struct usb_request *req)
{
struct f_midi2_req_ctx *ctx = req->context;
unsigned long flags;
spin_lock_irqsave(&ctx->usb_ep->card->queue_lock, flags);
set_bit(ctx->index, &ctx->usb_ep->free_reqs);
spin_unlock_irqrestore(&ctx->usb_ep->card->queue_lock, flags);
}
/*
* UMP v1.1 Stream message handling
*/
/* queue a request to UMP EP; request is either queued or freed after this */
static int queue_request_ep_raw(struct usb_request *req)
{
	struct f_midi2_req_ctx *ctx = req->context;
	int ret;

	req->complete = ctx->usb_ep->complete;
	ret = usb_ep_queue(ctx->usb_ep->usb_ep, req, GFP_ATOMIC);
	if (!ret)
		return 0;

	/* on failure, hand the request back to the free pool */
	put_empty_request(req);
	return ret;
}
/* queue a request with endianness conversion */
static int queue_request_ep_in(struct usb_request *req)
{
/* UMP packets have to be converted to little-endian */
cpu_to_le32_array((u32 *)req->buf, req->length >> 2);
return queue_request_ep_raw(req);
}
/* reply a UMP packet via EP-in */
static int reply_ep_in(struct f_midi2_ep *ep, const void *buf, int len)
{
	struct usb_request *req = get_empty_request(&ep->ep_in);

	if (!req)
		return -ENOSPC;

	memcpy(req->buf, buf, len);
	req->length = len;
	return queue_request_ep_in(req);
}
/* reply a UMP stream EP info notification to the host */
static void reply_ump_stream_ep_info(struct f_midi2_ep *ep)
{
	struct snd_ump_stream_msg_ep_info rep = {
		.type = UMP_MSG_TYPE_STREAM,
		.status = UMP_STREAM_MSG_STATUS_EP_INFO,
		.ump_version_major = 0x01,
		.ump_version_minor = 0x01,
		.num_function_blocks = ep->num_blks,
		.static_function_block = !!ep->card->info.static_block,
		/* advertise both MIDI1 and MIDI2 protocol capabilities;
		 * the caps live in the upper byte of the 16-bit field */
		.protocol = (UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 |
			     UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) >> 8,
	};

	reply_ep_in(ep, &rep, sizeof(rep));
}
/* reply a UMP EP device info notification to the host */
static void reply_ump_stream_ep_device(struct f_midi2_ep *ep)
{
	/* note: the "devince" struct tag spelling comes from the UMP core
	 * header and must be kept as-is */
	struct snd_ump_stream_msg_devince_info rep = {
		.type = UMP_MSG_TYPE_STREAM,
		.status = UMP_STREAM_MSG_STATUS_DEVICE_INFO,
		.manufacture_id = ep->info.manufacturer,
		/* 16-bit family/model ids are split into lsb/msb bytes */
		.family_lsb = ep->info.family & 0xff,
		.family_msb = (ep->info.family >> 8) & 0xff,
		.model_lsb = ep->info.model & 0xff,
		.model_msb = (ep->info.model >> 8) & 0xff,
		.sw_revision = ep->info.sw_revision,
	};

	reply_ep_in(ep, &rep, sizeof(rep));
}
#define UMP_STREAM_PKT_BYTES	16	/* UMP stream packet size = 16 bytes */
#define UMP_STREAM_EP_STR_OFF	2	/* offset of name string for EP info */
#define UMP_STREAM_FB_STR_OFF	3	/* offset of name string for FB info */

/* Helper to reply a string, split over as many 16-byte UMP stream packets
 * as needed.  @type/@extra form the first word of each packet, @start_ofs
 * is the byte offset where the string payload begins in each packet.
 * Multi-packet replies are chained with START/CONTINUE/END format bits.
 */
static void reply_ump_stream_string(struct f_midi2_ep *ep, const u8 *name,
				    unsigned int type, unsigned int extra,
				    unsigned int start_ofs)
{
	struct f_midi2_usb_ep *usb_ep = &ep->ep_in;
	struct f_midi2 *midi2 = ep->card;
	struct usb_request *req;
	unsigned int pos;
	u32 *buf;

	if (!*name)
		return;
	req = get_empty_request(usb_ep);
	if (!req)
		return;

	buf = (u32 *)req->buf;
	pos = start_ofs;
	for (;;) {
		/* at the start of each packet, clear it and set the header */
		if (pos == start_ofs) {
			memset(buf, 0, UMP_STREAM_PKT_BYTES);
			buf[0] = ump_stream_compose(type, 0) | extra;
		}
		/* pack the next character big-endian-wise into the word */
		buf[pos / 4] |= *name++ << ((3 - (pos % 4)) * 8);
		if (!*name) {
			/* string exhausted: mark END only if packets were
			 * already emitted (single packet keeps format 0) */
			if (req->length)
				buf[0] |= UMP_STREAM_MSG_FORMAT_END << 26;
			req->length += UMP_STREAM_PKT_BYTES;
			break;
		}
		if (++pos == UMP_STREAM_PKT_BYTES) {
			/* packet full: first one is START, later CONTINUE */
			if (!req->length)
				buf[0] |= UMP_STREAM_MSG_FORMAT_START << 26;
			else
				buf[0] |= UMP_STREAM_MSG_FORMAT_CONTINUE << 26;
			req->length += UMP_STREAM_PKT_BYTES;
			/* stop when the request buffer cannot take another
			 * full packet; the string is truncated then */
			if (midi2->info.req_buf_size - req->length < UMP_STREAM_PKT_BYTES)
				break;
			buf += 4;
			pos = start_ofs;
		}
	}

	if (req->length)
		queue_request_ep_in(req);
	else
		put_empty_request(req);
}
/* Reply a UMP EP name string */
static void reply_ump_stream_ep_name(struct f_midi2_ep *ep)
{
	const char *name = ump_ep_name(ep);

	reply_ump_stream_string(ep, name, UMP_STREAM_MSG_STATUS_EP_NAME,
				0, UMP_STREAM_EP_STR_OFF);
}
/* Reply a UMP EP product ID string */
static void reply_ump_stream_ep_pid(struct f_midi2_ep *ep)
{
	const char *pid = ump_product_id(ep);

	reply_ump_stream_string(ep, pid, UMP_STREAM_MSG_STATUS_PRODUCT_ID,
				0, UMP_STREAM_EP_STR_OFF);
}
/* Reply a UMP EP stream config */
static void reply_ump_stream_ep_config(struct f_midi2_ep *ep)
{
struct snd_ump_stream_msg_stream_cfg rep = {
.type = UMP_MSG_TYPE_STREAM,
.status = UMP_STREAM_MSG_STATUS_STREAM_CFG,
};
if ((ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK) ==
SNDRV_UMP_EP_INFO_PROTO_MIDI2)
rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 >> 8;
else
rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 >> 8;
reply_ep_in(ep, &rep, sizeof(rep));
}
/* Reply a UMP FB info notification for the given block index */
static void reply_ump_stream_fb_info(struct f_midi2_ep *ep, int blk)
{
	struct f_midi2_block_info *b = &ep->blks[blk].info;
	struct snd_ump_stream_msg_fb_info rep = {
		.type = UMP_MSG_TYPE_STREAM,
		.status = UMP_STREAM_MSG_STATUS_FB_INFO,
		.active = !!b->active,
		.function_block_id = blk,
		.ui_hint = b->ui_hint,
		.midi_10 = b->is_midi1,
		.direction = b->direction,
		.first_group = b->first_group,
		.num_groups = b->num_groups,
		.midi_ci_version = b->midi_ci_version,
		.sysex8_streams = b->sysex8_streams,
	};

	reply_ep_in(ep, &rep, sizeof(rep));
}
/* Reply a FB name string; the block number goes into byte 1 of the header */
static void reply_ump_stream_fb_name(struct f_midi2_ep *ep, unsigned int blk)
{
	const char *name = ump_fb_name(&ep->blks[blk].info);

	reply_ump_stream_string(ep, name, UMP_STREAM_MSG_STATUS_FB_NAME,
				blk << 8, UMP_STREAM_FB_STR_OFF);
}
/* Process a UMP Stream message received from the host and send the
 * appropriate replies via EP-in */
static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
{
	struct f_midi2 *midi2 = ep->card;
	unsigned int format, status, blk;

	format = ump_stream_message_format(*data);
	status = ump_stream_message_status(*data);
	switch (status) {
	case UMP_STREAM_MSG_STATUS_EP_DISCOVERY:
		if (format)
			return; // invalid
		/* each requested item gets its own reply packet */
		if (data[1] & UMP_STREAM_MSG_REQUEST_EP_INFO)
			reply_ump_stream_ep_info(ep);
		if (data[1] & UMP_STREAM_MSG_REQUEST_DEVICE_INFO)
			reply_ump_stream_ep_device(ep);
		if (data[1] & UMP_STREAM_MSG_REQUEST_EP_NAME)
			reply_ump_stream_ep_name(ep);
		if (data[1] & UMP_STREAM_MSG_REQUEST_PRODUCT_ID)
			reply_ump_stream_ep_pid(ep);
		if (data[1] & UMP_STREAM_MSG_REQUEST_STREAM_CFG)
			reply_ump_stream_ep_config(ep);
		return;
	case UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST:
		/* host asks to switch the protocol; apply and confirm */
		if (*data & UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) {
			ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI2;
			DBG(midi2, "Switching Protocol to MIDI2\n");
		} else {
			ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI1;
			DBG(midi2, "Switching Protocol to MIDI1\n");
		}
		snd_ump_switch_protocol(ep->ump, ep->info.protocol);
		reply_ump_stream_ep_config(ep);
		return;
	case UMP_STREAM_MSG_STATUS_FB_DISCOVERY:
		if (format)
			return; // invalid
		blk = (*data >> 8) & 0xff;
		if (blk >= ep->num_blks)
			return;
		if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
			reply_ump_stream_fb_info(ep, blk);
		if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
			reply_ump_stream_fb_name(ep, blk);
		return;
	}
}
/* Process UMP messages included in a USB request: feed each 32-bit word
 * into the UMP core and handle any complete Stream messages locally */
static void process_ump(struct f_midi2_ep *ep, const struct usb_request *req)
{
	const u32 *data = (u32 *)req->buf;
	int len = req->actual >> 2;
	const u32 *in_buf = ep->ump->input_buf;

	for (; len > 0; len--, data++) {
		/* returns > 0 only when a full UMP packet is assembled */
		if (snd_ump_receive_ump_val(ep->ump, *data) <= 0)
			continue;
		if (ump_message_type(*in_buf) == UMP_MSG_TYPE_STREAM)
			process_ump_stream_msg(ep, in_buf);
	}
}
/*
 * MIDI 2.0 UMP USB request handling
 */

/* complete handler for UMP EP-out requests: pass the received data to the
 * ALSA UMP core and re-queue the request while in MIDI2 mode */
static void f_midi2_ep_out_complete(struct usb_ep *usb_ep,
				    struct usb_request *req)
{
	struct f_midi2_req_ctx *ctx = req->context;
	struct f_midi2_ep *ep = ctx->usb_ep->ep;
	struct f_midi2 *midi2 = ep->card;
	int status = req->status;

	if (status) {
		DBG(midi2, "%s complete error %d: %d/%d\n",
		    usb_ep->name, status, req->actual, req->length);
		goto error;
	}

	/* convert to UMP packet in native endianness */
	le32_to_cpu_array((u32 *)req->buf, req->actual >> 2);

	if (midi2->info.process_ump)
		process_ump(ep, req);

	/* round the length down to whole 32-bit words */
	snd_ump_receive(ep->ump, req->buf, req->actual & ~3);

	if (midi2->operation_mode != MIDI_OP_MODE_MIDI2)
		goto error;
	if (queue_request_ep_raw(req))
		goto error;
	return;

error:
	put_empty_request(req);
}
/* Transmit UMP packets received from user-space to the gadget: drain the
 * ALSA UMP transmit FIFO into as many EP-in requests as are available */
static void process_ump_transmit(struct f_midi2_ep *ep)
{
	struct f_midi2_usb_ep *usb_ep = &ep->ep_in;
	struct f_midi2 *midi2 = ep->card;
	struct usb_request *req;
	int len;

	if (!usb_ep->usb_ep->enabled)
		return;

	for (;;) {
		req = get_empty_request(usb_ep);
		if (!req)
			break;
		len = snd_ump_transmit(ep->ump, (u32 *)req->buf,
				       midi2->info.req_buf_size);
		if (len <= 0) {
			/* nothing more to send */
			put_empty_request(req);
			break;
		}

		req->length = len;
		/* on queue failure the request is already put back */
		if (queue_request_ep_in(req) < 0)
			break;
	}
}
/* Complete handler for UMP EP-in requests: recycle the request, then try
 * to push more pending data from the UMP core */
static void f_midi2_ep_in_complete(struct usb_ep *usb_ep,
				   struct usb_request *req)
{
	struct f_midi2_req_ctx *ctx = req->context;
	struct f_midi2_ep *ep = ctx->usb_ep->ep;
	struct f_midi2 *midi2 = ep->card;
	int status = req->status;

	put_empty_request(req);

	if (!status) {
		process_ump_transmit(ep);
		return;
	}

	DBG(midi2, "%s complete error %d: %d/%d\n",
	    usb_ep->name, status, req->actual, req->length);
}
/*
* MIDI1 (altset 0) USB request handling
*/
/* process one MIDI byte -- copied from f_midi.c
 *
 * Runs the per-cable USB-MIDI 1.0 event-packet state machine: each byte
 * advances port->state, and complete events are emitted as 4-byte USB MIDI
 * packets (p[0] = cable | CIN) appended to *req_p.
 *
 * fill the packet or request if needed
 * returns true if the request became empty (queued)
 */
static bool process_midi1_byte(struct f_midi2 *midi2, u8 cable, u8 b,
			       struct usb_request **req_p)
{
	struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];
	u8 p[4] = { cable << 4, 0, 0, 0 };
	int next_state = STATE_INITIAL;
	struct usb_request *req = *req_p;

	switch (b) {
	case 0xf8 ... 0xff:
		/* System Real-Time Messages */
		p[0] |= 0x0f;
		p[1] = b;
		/* real-time bytes may interleave; resume the old state after */
		next_state = port->state;
		port->state = STATE_REAL_TIME;
		break;

	case 0xf7:
		/* End of SysEx */
		switch (port->state) {
		case STATE_SYSEX_0:
			p[0] |= 0x05;
			p[1] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		case STATE_SYSEX_1:
			p[0] |= 0x06;
			p[1] = port->data[0];
			p[2] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		case STATE_SYSEX_2:
			p[0] |= 0x07;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		default:
			/* Ignore byte */
			next_state = port->state;
			port->state = STATE_INITIAL;
		}
		break;

	case 0xf0 ... 0xf6:
		/* System Common Messages */
		port->data[0] = port->data[1] = 0;
		port->state = STATE_INITIAL;
		switch (b) {
		case 0xf0:
			/* start of SysEx */
			port->data[0] = b;
			port->data[1] = 0;
			next_state = STATE_SYSEX_1;
			break;
		case 0xf1:
		case 0xf3:
			/* one-parameter messages (MTC, Song Select) */
			port->data[0] = b;
			next_state = STATE_1PARAM;
			break;
		case 0xf2:
			/* Song Position Pointer: two parameters */
			port->data[0] = b;
			next_state = STATE_2PARAM_1;
			break;
		case 0xf4:
		case 0xf5:
			/* undefined: drop */
			next_state = STATE_INITIAL;
			break;
		case 0xf6:
			/* Tune Request: single byte message */
			p[0] |= 0x05;
			p[1] = 0xf6;
			next_state = STATE_FINISHED;
			break;
		}
		break;

	case 0x80 ... 0xef:
		/*
		 * Channel Voice Messages, Channel Mode Messages
		 * and Control Change Messages.
		 */
		port->data[0] = b;
		port->data[1] = 0;
		port->state = STATE_INITIAL;
		/* program change / channel pressure take one parameter */
		if (b >= 0xc0 && b <= 0xdf)
			next_state = STATE_1PARAM;
		else
			next_state = STATE_2PARAM_1;
		break;

	case 0x00 ... 0x7f:
		/* Message parameters */
		switch (port->state) {
		case STATE_1PARAM:
			if (port->data[0] < 0xf0)
				p[0] |= port->data[0] >> 4;
			else
				p[0] |= 0x02;
			p[1] = port->data[0];
			p[2] = b;
			/* This is to allow Running State Messages */
			next_state = STATE_1PARAM;
			break;
		case STATE_2PARAM_1:
			port->data[1] = b;
			next_state = STATE_2PARAM_2;
			break;
		case STATE_2PARAM_2:
			if (port->data[0] < 0xf0)
				p[0] |= port->data[0] >> 4;
			else
				p[0] |= 0x03;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = b;
			/* This is to allow Running State Messages */
			next_state = STATE_2PARAM_1;
			break;
		case STATE_SYSEX_0:
			port->data[0] = b;
			next_state = STATE_SYSEX_1;
			break;
		case STATE_SYSEX_1:
			port->data[1] = b;
			next_state = STATE_SYSEX_2;
			break;
		case STATE_SYSEX_2:
			/* three SysEx bytes buffered: emit and continue */
			p[0] |= 0x04;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = b;
			next_state = STATE_SYSEX_0;
			break;
		}
		break;
	}

	/* States where we have to write into the USB request */
	if (next_state == STATE_FINISHED ||
	    port->state == STATE_SYSEX_2 ||
	    port->state == STATE_1PARAM ||
	    port->state == STATE_2PARAM_2 ||
	    port->state == STATE_REAL_TIME) {
		memcpy(req->buf + req->length, p, sizeof(p));
		req->length += sizeof(p);

		if (next_state == STATE_FINISHED) {
			next_state = STATE_INITIAL;
			port->data[0] = port->data[1] = 0;
		}

		/* no room left for another packet: flush the request */
		if (midi2->info.req_buf_size - req->length <= 4) {
			queue_request_ep_raw(req);
			*req_p = NULL;
			return true;
		}
	}

	port->state = next_state;
	return false;
}
/* process all pending MIDI bytes in the internal buffer;
 * returns true if the request gets empty
 * returns false if all have been processed
 */
static bool process_midi1_pending_buf(struct f_midi2 *midi2,
				      struct usb_request **req_p)
{
	unsigned int cable, c;

	for (cable = 0; cable < midi2->num_midi1_in; cable++) {
		struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];

		if (!port->pending)
			continue;
		for (c = 0; c < port->pending; c++) {
			if (process_midi1_byte(midi2, cable, port->buf[c],
					       req_p)) {
				/* request flushed mid-way: keep the bytes
				 * that were not yet consumed for the next
				 * round */
				port->pending -= c;
				if (port->pending)
					memmove(port->buf, port->buf + c,
						port->pending);
				return true;
			}
		}
		port->pending = 0;
	}

	return false;
}
/* stash MIDI bytes into the per-cable staging buffer; bytes that do not
 * fit are silently dropped */
static void fill_midi1_pending_buf(struct f_midi2 *midi2, u8 cable, u8 *buf,
				   unsigned int size)
{
	struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];
	unsigned int room = sizeof(port->buf) - port->pending;

	if (size > room)
		return;

	memcpy(port->buf + port->pending, buf, size);
	port->pending += size;
}
/* try to process data given from the associated UMP stream: pull UMP words
 * from the (single) UMP EP, convert them to MIDI 1.0 byte streams per
 * cable, and queue them onto the MIDI1 EP-in */
static void process_midi1_transmit(struct f_midi2 *midi2)
{
	struct f_midi2_usb_ep *usb_ep = &midi2->midi1_ep_in;
	/* MIDI1 altset is only bound to the first UMP EP */
	struct f_midi2_ep *ep = &midi2->midi2_eps[0];
	struct usb_request *req = NULL;
	/* 12 is the largest outcome (4 MIDI1 cmds) for a single UMP packet */
	unsigned char outbuf[12];
	unsigned char group, cable;
	int len, size;
	u32 ump;

	if (!usb_ep->usb_ep || !usb_ep->usb_ep->enabled)
		return;

	for (;;) {
		if (!req) {
			req = get_empty_request(usb_ep);
			if (!req)
				break;
		}

		/* first drain bytes left over from a previous flush */
		if (process_midi1_pending_buf(midi2, &req))
			continue;

		len = snd_ump_transmit(ep->ump, &ump, 4);
		if (len <= 0)
			break;
		if (snd_ump_receive_ump_val(ep->ump, ump) <= 0)
			continue;
		size = snd_ump_convert_from_ump(ep->ump->input_buf, outbuf,
						&group);
		if (size <= 0)
			continue;
		cable = ep->in_group_to_cable[group];
		if (!cable)
			continue; /* group not mapped to any MIDI1 cable */
		cable--;	/* to 0-base */
		fill_midi1_pending_buf(midi2, cable, outbuf, size);
	}

	if (req) {
		if (req->length)
			queue_request_ep_raw(req);
		else
			put_empty_request(req);
	}
}
/* complete handler for MIDI1 EP-in requests: recycle the request, then
 * continue draining pending MIDI1 output */
static void f_midi2_midi1_ep_in_complete(struct usb_ep *usb_ep,
					 struct usb_request *req)
{
	struct f_midi2_req_ctx *ctx = req->context;
	struct f_midi2 *midi2 = ctx->usb_ep->card;
	int status = req->status;

	put_empty_request(req);

	if (!status) {
		process_midi1_transmit(midi2);
		return;
	}

	DBG(midi2, "%s complete error %d: %d/%d\n",
	    usb_ep->name, status, req->actual, req->length);
}
/* complete handler for MIDI1 EP-out requests: decode the 4-byte USB MIDI
 * event packets, convert each to UMP and feed the mapped UMP EP */
static void f_midi2_midi1_ep_out_complete(struct usb_ep *usb_ep,
					  struct usb_request *req)
{
	struct f_midi2_req_ctx *ctx = req->context;
	struct f_midi2 *midi2 = ctx->usb_ep->card;
	struct f_midi2_ep *ep;
	struct ump_cvt_to_ump *cvt = &midi2->midi1_ump_cvt;
	/* number of payload bytes per Code Index Number (packet[0] & 0x0f) */
	static const u8 midi1_packet_bytes[16] = {
		0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
	};
	unsigned int group, cable, bytes, c, len;
	int status = req->status;
	const u8 *buf = req->buf;

	if (status) {
		DBG(midi2, "%s complete error %d: %d/%d\n",
		    usb_ep->name, status, req->actual, req->length);
		goto error;
	}

	len = req->actual >> 2;
	for (; len; len--, buf += 4) {
		cable = *buf >> 4;
		ep = midi2->out_cable_mapping[cable].ep;
		if (!ep)
			continue; /* cable not mapped to any UMP EP */
		group = midi2->out_cable_mapping[cable].group;
		bytes = midi1_packet_bytes[*buf & 0x0f];
		for (c = 0; c < bytes; c++) {
			snd_ump_convert_to_ump(cvt, group, ep->info.protocol,
					       buf[c + 1]);
			if (cvt->ump_bytes) {
				/* a complete UMP packet was assembled */
				snd_ump_receive(ep->ump, cvt->ump,
						cvt->ump_bytes);
				cvt->ump_bytes = 0;
			}
		}
	}

	if (midi2->operation_mode != MIDI_OP_MODE_MIDI1)
		goto error;
	if (queue_request_ep_raw(req))
		goto error;
	return;

error:
	put_empty_request(req);
}
/*
* Common EP handling helpers
*/
/* (Re)configure and enable a MIDI EP for the current connection speed;
 * a missing endpoint is silently skipped */
static int f_midi2_start_ep(struct f_midi2_usb_ep *usb_ep,
			    struct usb_function *fn)
{
	int ret;

	if (!usb_ep->usb_ep)
		return 0;

	usb_ep_disable(usb_ep->usb_ep);
	ret = config_ep_by_speed(usb_ep->card->gadget, fn, usb_ep->usb_ep);
	if (ret)
		return ret;
	return usb_ep_enable(usb_ep->usb_ep);
}
/* Dequeue all in-flight requests and mark them free again */
static void f_midi2_drop_reqs(struct f_midi2_usb_ep *usb_ep)
{
	int i;

	if (!usb_ep->usb_ep || !usb_ep->num_reqs)
		return;

	for (i = 0; i < usb_ep->num_reqs; i++) {
		struct usb_request *req = usb_ep->reqs[i].req;

		if (test_bit(i, &usb_ep->free_reqs) || !req)
			continue;
		usb_ep_dequeue(usb_ep->usb_ep, req);
		set_bit(i, &usb_ep->free_reqs);
	}
}
/* Allocate requests for the given EP */
static int f_midi2_alloc_ep_reqs(struct f_midi2_usb_ep *usb_ep)
{
struct f_midi2 *midi2 = usb_ep->card;
int i;
if (!usb_ep->usb_ep)
return 0;
if (!usb_ep->reqs)
return -EINVAL;
for (i = 0; i < midi2->info.num_reqs; i++) {
if (usb_ep->reqs[i].req)
continue;
usb_ep->reqs[i].req = alloc_ep_req(usb_ep->usb_ep,
midi2->info.req_buf_size);
if (!usb_ep->reqs[i].req)
return -ENOMEM;
usb_ep->reqs[i].req->context = &usb_ep->reqs[i];
}
return 0;
}
/* Release every allocated request of the EP's pool */
static void f_midi2_free_ep_reqs(struct f_midi2_usb_ep *usb_ep)
{
	struct f_midi2 *midi2 = usb_ep->card;
	int i;

	for (i = 0; i < midi2->info.num_reqs; i++) {
		struct usb_request *req = usb_ep->reqs[i].req;

		if (!req)
			continue;
		free_ep_req(usb_ep->usb_ep, req);
		usb_ep->reqs[i].req = NULL;
	}
}
/* Initialize EP: autoconfigure the gadget endpoint from @desc and set up
 * the request-context array (all slots marked free, no requests allocated
 * yet -- allocation happens at set_alt time) */
static int f_midi2_init_ep(struct f_midi2 *midi2, struct f_midi2_ep *ep,
			   struct f_midi2_usb_ep *usb_ep,
			   void *desc,
			   void (*complete)(struct usb_ep *usb_ep,
					    struct usb_request *req))
{
	int i;

	usb_ep->card = midi2;
	usb_ep->ep = ep;
	usb_ep->usb_ep = usb_ep_autoconfig(midi2->gadget, desc);
	if (!usb_ep->usb_ep)
		return -ENODEV;
	usb_ep->complete = complete;

	usb_ep->reqs = kcalloc(midi2->info.num_reqs, sizeof(*usb_ep->reqs),
			       GFP_KERNEL);
	if (!usb_ep->reqs)
		return -ENOMEM;
	for (i = 0; i < midi2->info.num_reqs; i++) {
		usb_ep->reqs[i].index = i;
		usb_ep->reqs[i].usb_ep = usb_ep;
		set_bit(i, &usb_ep->free_reqs);
		usb_ep->num_reqs++;
	}

	return 0;
}
/* Tear down an EP: drop in-flight requests, free the pool and its array */
static void f_midi2_free_ep(struct f_midi2_usb_ep *usb_ep)
{
	f_midi2_drop_reqs(usb_ep);
	f_midi2_free_ep_reqs(usb_ep);

	kfree(usb_ep->reqs);
	usb_ep->reqs = NULL;
	usb_ep->free_reqs = 0;
	usb_ep->num_reqs = 0;
}
/* Queue requests for EP-out at start: every free request is armed so the
 * host can send data immediately; queue failures leave the slot free */
static void f_midi2_queue_out_reqs(struct f_midi2_usb_ep *usb_ep)
{
	int i, err;

	if (!usb_ep->usb_ep)
		return;

	for (i = 0; i < usb_ep->num_reqs; i++) {
		if (!test_bit(i, &usb_ep->free_reqs) || !usb_ep->reqs[i].req)
			continue;
		usb_ep->reqs[i].req->complete = usb_ep->complete;
		err = usb_ep_queue(usb_ep->usb_ep, usb_ep->reqs[i].req,
				   GFP_ATOMIC);
		if (!err)
			clear_bit(i, &usb_ep->free_reqs);
	}
}
/*
* Gadget Function callbacks
*/
/* stop both IN and OUT EPs: dequeue everything first, then free buffers */
static void f_midi2_stop_eps(struct f_midi2_usb_ep *ep_in,
			     struct f_midi2_usb_ep *ep_out)
{
	struct f_midi2_usb_ep *eps[2] = { ep_in, ep_out };
	int i;

	for (i = 0; i < 2; i++)
		f_midi2_drop_reqs(eps[i]);
	for (i = 0; i < 2; i++)
		f_midi2_free_ep_reqs(eps[i]);
}
/* start/queue both IN and OUT EPs; stops at the first failure */
static int f_midi2_start_eps(struct f_midi2_usb_ep *ep_in,
			     struct f_midi2_usb_ep *ep_out,
			     struct usb_function *fn)
{
	int ret;

	ret = f_midi2_start_ep(ep_in, fn);
	if (!ret)
		ret = f_midi2_start_ep(ep_out, fn);
	if (!ret)
		ret = f_midi2_alloc_ep_reqs(ep_in);
	if (!ret)
		ret = f_midi2_alloc_ep_reqs(ep_out);
	if (ret)
		return ret;

	/* arm the OUT requests so the host can start sending */
	f_midi2_queue_out_reqs(ep_out);
	return 0;
}
/* gadget function set_alt callback: altset 0 selects MIDI 1.0 operation,
 * altset 1 selects MIDI 2.0; EPs of the previous mode are stopped and the
 * ones of the new mode are started */
static int f_midi2_set_alt(struct usb_function *fn, unsigned int intf,
			   unsigned int alt)
{
	struct f_midi2 *midi2 = func_to_midi2(fn);
	struct f_midi2_ep *ep;
	int i, op_mode, err;

	if (intf != midi2->midi_if || alt > 1)
		return 0;

	/* alt is 0 or 1 at this point, so the previous
	 * MIDI_OP_MODE_UNSET fallback branch was unreachable */
	op_mode = (alt == 1) ? MIDI_OP_MODE_MIDI2 : MIDI_OP_MODE_MIDI1;

	if (midi2->operation_mode == op_mode)
		return 0;

	midi2->operation_mode = op_mode;

	/* stop the EPs belonging to the mode we are leaving */
	if (op_mode != MIDI_OP_MODE_MIDI1)
		f_midi2_stop_eps(&midi2->midi1_ep_in, &midi2->midi1_ep_out);
	if (op_mode != MIDI_OP_MODE_MIDI2) {
		for (i = 0; i < midi2->num_eps; i++) {
			ep = &midi2->midi2_eps[i];
			f_midi2_stop_eps(&ep->ep_in, &ep->ep_out);
		}
	}

	/* start the EPs of the newly selected mode */
	if (op_mode == MIDI_OP_MODE_MIDI1)
		return f_midi2_start_eps(&midi2->midi1_ep_in,
					 &midi2->midi1_ep_out, fn);

	for (i = 0; i < midi2->num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		err = f_midi2_start_eps(&ep->ep_in, &ep->ep_out, fn);
		if (err)
			return err;
	}
	return 0;
}
/* gadget function get_alt callback */
static int f_midi2_get_alt(struct usb_function *fn, unsigned int intf)
{
struct f_midi2 *midi2 = func_to_midi2(fn);
if (intf == midi2->midi_if &&
midi2->operation_mode == MIDI_OP_MODE_MIDI2)
return 1;
return 0;
}
/* convert UMP direction to USB MIDI 2.0 GTB block type */
static unsigned int ump_to_usb_dir(unsigned int ump_dir)
{
	if (ump_dir == SNDRV_UMP_DIR_INPUT)
		return USB_MS_GR_TRM_BLOCK_TYPE_INPUT_ONLY;
	if (ump_dir == SNDRV_UMP_DIR_OUTPUT)
		return USB_MS_GR_TRM_BLOCK_TYPE_OUTPUT_ONLY;
	return USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL;
}
/* assign GTB descriptors (for the given request): build the full Group
 * Terminal Block descriptor set into req->buf, honoring the host's
 * requested max_len (a short read gets just the truncated header) */
static void assign_block_descriptors(struct f_midi2 *midi2,
				     struct usb_request *req,
				     int max_len)
{
	struct usb_ms20_gr_trm_block_header_descriptor header;
	struct usb_ms20_gr_trm_block_descriptor *desc;
	struct f_midi2_block_info *b;
	struct f_midi2_ep *ep;
	int i, blk, len;
	char *data;

	len = sizeof(gtb_header_desc) + sizeof(gtb_desc) * midi2->total_blocks;
	if (WARN_ON(len > midi2->info.req_buf_size))
		return;

	header = gtb_header_desc;
	header.wTotalLength = cpu_to_le16(len);
	if (max_len < len) {
		/* host asked for less than the whole set: header only */
		len = min_t(int, len, sizeof(header));
		memcpy(req->buf, &header, len);
		req->length = len;
		req->zero = len < max_len;
		return;
	}

	memcpy(req->buf, &header, sizeof(header));
	data = req->buf + sizeof(header);
	for (i = 0; i < midi2->num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		for (blk = 0; blk < ep->num_blks; blk++) {
			b = &ep->blks[blk].info;
			desc = (struct usb_ms20_gr_trm_block_descriptor *)data;

			/* start from the template, then patch per-block */
			*desc = gtb_desc;
			desc->bGrpTrmBlkID = ep->blks[blk].gtb_id;
			desc->bGrpTrmBlkType = ump_to_usb_dir(b->direction);
			desc->nGroupTrm = b->first_group;
			desc->nNumGroupTrm = b->num_groups;
			desc->iBlockItem = ep->blks[blk].string_id;

			if (ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
				desc->bMIDIProtocol = USB_MS_MIDI_PROTO_2_0;
			else
				desc->bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_128;

			if (b->is_midi1 == 2) {
				/* low-speed MIDI 1.0: 31.25 kbps bandwidth */
				desc->wMaxInputBandwidth = cpu_to_le16(1);
				desc->wMaxOutputBandwidth = cpu_to_le16(1);
			}

			data += sizeof(*desc);
		}
	}

	req->length = len;
	req->zero = len < max_len;
}
/* gadget function setup callback: handle GET_DESCRIPTOR control requests
 * for the class-specific Group Terminal Block descriptor */
static int f_midi2_setup(struct usb_function *fn,
			 const struct usb_ctrlrequest *ctrl)
{
	struct f_midi2 *midi2 = func_to_midi2(fn);
	struct usb_composite_dev *cdev = fn->config->cdev;
	struct usb_request *req = cdev->req;
	u16 value, length;

	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
	    ctrl->bRequest != USB_REQ_GET_DESCRIPTOR)
		return -EOPNOTSUPP;

	value = le16_to_cpu(ctrl->wValue);
	length = le16_to_cpu(ctrl->wLength);

	/* wValue high byte: descriptor type; low byte: altsetting index */
	if ((value >> 8) != USB_DT_CS_GR_TRM_BLOCK)
		return -EOPNOTSUPP;

	/* handle only altset 1 */
	if ((value & 0xff) != 1)
		return -EOPNOTSUPP;

	assign_block_descriptors(midi2, req, length);
	return usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
}
/* gadget function disable callback: back to "no altsetting selected" */
static void f_midi2_disable(struct usb_function *fn)
{
	struct f_midi2 *midi2 = func_to_midi2(fn);

	midi2->operation_mode = MIDI_OP_MODE_UNSET;
}
/*
 * ALSA UMP ops: most of them are NOPs, only trigger for write is needed
 */
static int f_midi2_ump_open(struct snd_ump_endpoint *ump, int dir)
{
	return 0;
}

static void f_midi2_ump_close(struct snd_ump_endpoint *ump, int dir)
{
}

/* output trigger: push whatever user-space queued, converted according to
 * the currently selected altsetting */
static void f_midi2_ump_trigger(struct snd_ump_endpoint *ump, int dir, int up)
{
	struct f_midi2_ep *ep = ump->private_data;
	struct f_midi2 *midi2 = ep->card;

	if (up && dir == SNDRV_RAWMIDI_STREAM_OUTPUT) {
		switch (midi2->operation_mode) {
		case MIDI_OP_MODE_MIDI1:
			process_midi1_transmit(midi2);
			break;
		case MIDI_OP_MODE_MIDI2:
			process_ump_transmit(ep);
			break;
		}
	}
}

static void f_midi2_ump_drain(struct snd_ump_endpoint *ump, int dir)
{
}

static const struct snd_ump_ops f_midi2_ump_ops = {
	.open = f_midi2_ump_open,
	.close = f_midi2_ump_close,
	.trigger = f_midi2_ump_trigger,
	.drain = f_midi2_ump_drain,
};
/*
 * "Operation Mode" control element: read-only view of the altsetting the
 * host selected, so user-space can tell MIDI1 from MIDI2 operation
 */
static int f_midi2_operation_mode_info(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = MIDI_OP_MODE_UNSET;
	uinfo->value.integer.max = MIDI_OP_MODE_MIDI2;
	return 0;
}

static int f_midi2_operation_mode_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	struct f_midi2 *midi2 = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = midi2->operation_mode;
	return 0;
}

static const struct snd_kcontrol_new operation_mode_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
	.name = "Operation Mode",
	/* VOLATILE: the value changes behind user-space's back on set_alt */
	.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info = f_midi2_operation_mode_info,
	.get = f_midi2_operation_mode_get,
};
/*
* ALSA UMP instance creation / deletion
*/
/* release the ALSA card (deferred until all its files are closed) */
static void f_midi2_free_card(struct f_midi2 *midi2)
{
	struct snd_card *card = midi2->card;

	if (!card)
		return;
	midi2->card = NULL;
	snd_card_free_when_closed(card);
}
/* use a reverse direction for the gadget host: our input is the host's
 * output and vice versa; unset and bidirectional stay as-is */
static int reverse_dir(int dir)
{
	if (dir && dir != SNDRV_UMP_DIR_BIDIRECTION)
		return (dir == SNDRV_UMP_DIR_OUTPUT) ?
			SNDRV_UMP_DIR_INPUT : SNDRV_UMP_DIR_OUTPUT;
	return dir;
}
/* create the ALSA card with one UMP endpoint per configured gadget EP,
 * fill the UMP endpoint / function-block info from the configfs data,
 * attach the legacy rawmidi interfaces and register the card.
 * Returns 0 or a negative error code; on error the card is freed.
 */
static int f_midi2_create_card(struct f_midi2 *midi2)
{
	struct snd_card *card;
	struct snd_ump_endpoint *ump;
	struct f_midi2_ep *ep;
	int i, id, blk, err;
	__be32 sw;
	err = snd_card_new(&midi2->gadget->dev, -1, NULL, THIS_MODULE, 0,
			   &card);
	if (err < 0)
		return err;
	midi2->card = card;
	strcpy(card->driver, "f_midi2");
	strcpy(card->shortname, "MIDI 2.0 Gadget");
	strcpy(card->longname, "MIDI 2.0 Gadget");
	/* id counts rawmidi devices (UMP EPs first, then legacy ifaces) */
	id = 0;
	for (i = 0; i < midi2->num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		err = snd_ump_endpoint_new(card, "MIDI 2.0 Gadget", id,
					   1, 1, &ump);
		if (err < 0)
			goto error;
		id++;
		ep->ump = ump;
		/* the gadget driver handles the stream messages itself */
		ump->no_process_stream = true;
		ump->private_data = ep;
		ump->ops = &f_midi2_ump_ops;
		if (midi2->info.static_block)
			ump->info.flags |= SNDRV_UMP_EP_INFO_STATIC_BLOCKS;
		/* configfs stores protocol bits 0-1; ALSA's UMP proto flags
		 * apparently sit one byte higher — hence the << 8
		 */
		ump->info.protocol_caps = (ep->info.protocol_caps & 3) << 8;
		ump->info.protocol = (ep->info.protocol & 3) << 8;
		ump->info.version = 0x0101;
		ump->info.family_id = ep->info.family;
		ump->info.model_id = ep->info.model;
		ump->info.manufacturer_id = ep->info.manufacturer & 0xffffff;
		/* sw_revision is kept as 4 big-endian bytes */
		sw = cpu_to_be32(ep->info.sw_revision);
		memcpy(ump->info.sw_revision, &sw, 4);
		strscpy(ump->info.name, ump_ep_name(ep),
			sizeof(ump->info.name));
		strscpy(ump->info.product_id, ump_product_id(ep),
			sizeof(ump->info.product_id));
		strscpy(ump->core.name, ump->info.name, sizeof(ump->core.name));
		for (blk = 0; blk < ep->num_blks; blk++) {
			const struct f_midi2_block_info *b = &ep->blks[blk].info;
			struct snd_ump_block *fb;
			/* directions are mirrored for the gadget host side */
			err = snd_ump_block_new(ump, blk,
						reverse_dir(b->direction),
						b->first_group, b->num_groups,
						&ep->blks[blk].fb);
			if (err < 0)
				goto error;
			fb = ep->blks[blk].fb;
			fb->info.active = !!b->active;
			fb->info.midi_ci_version = b->midi_ci_version;
			fb->info.ui_hint = reverse_dir(b->ui_hint);
			fb->info.sysex8_streams = b->sysex8_streams;
			fb->info.flags |= b->is_midi1;
			strscpy(fb->info.name, ump_fb_name(b),
				sizeof(fb->info.name));
		}
	}
	/* one legacy (MIDI 1.0 byte-stream) rawmidi per UMP endpoint */
	for (i = 0; i < midi2->num_eps; i++) {
		err = snd_ump_attach_legacy_rawmidi(midi2->midi2_eps[i].ump,
						    "Legacy MIDI", id);
		if (err < 0)
			goto error;
		id++;
	}
	err = snd_ctl_add(card, snd_ctl_new1(&operation_mode_ctl, midi2));
	if (err < 0)
		goto error;
	err = snd_card_register(card);
	if (err < 0)
		goto error;
	return 0;
error:
	f_midi2_free_card(midi2);
	return err;
}
/*
* Creation of USB descriptors
*/
struct f_midi2_usb_config {
struct usb_descriptor_header **list;
unsigned int size;
unsigned int alloc;
/* MIDI 1.0 jacks */
unsigned char jack_in, jack_out, jack_id;
struct usb_midi_in_jack_descriptor jack_ins[MAX_CABLES];
struct usb_midi_out_jack_descriptor_1 jack_outs[MAX_CABLES];
};
/* append a single descriptor to the config list, growing the array on
 * demand and keeping it NULL-terminated; returns 0 or -ENOMEM
 */
static int append_config(struct f_midi2_usb_config *config, void *d)
{
	/* +2: room for the new entry and the trailing NULL */
	if (config->size + 2 >= config->alloc) {
		unsigned int newsize = config->size + 16;
		void *newlist;

		newlist = krealloc(config->list, newsize * sizeof(void *),
				   GFP_KERNEL);
		if (!newlist)
			return -ENOMEM;
		config->list = newlist;
		config->alloc = newsize;
	}
	config->list[config->size++] = d;
	config->list[config->size] = NULL;
	return 0;
}
/* append a NULL-terminated array of descriptors to the config list;
 * stops and returns the error of the first failing append
 */
static int append_configs(struct f_midi2_usb_config *config, void **d)
{
	int err = 0;

	while (*d && !err)
		err = append_config(config, *d++);
	return err;
}
/* build and append a MIDI 1.0 IN jack descriptor (EMBEDDED or EXTERNAL);
 * returns the newly assigned jack ID, or a negative error code
 */
static int append_midi1_in_jack(struct f_midi2 *midi2,
				struct f_midi2_usb_config *config,
				struct midi1_cable_mapping *map,
				unsigned int type)
{
	struct usb_midi_in_jack_descriptor *jack =
		&config->jack_ins[config->jack_in++];
	/* jack IDs are allocated sequentially starting from 1 */
	int id = ++config->jack_id;
	int err;
	jack->bLength = 0x06;
	jack->bDescriptorType = USB_DT_CS_INTERFACE;
	jack->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
	jack->bJackType = type;
	jack->bJackID = id;
	/* use the corresponding block name as jack name */
	if (map->ep)
		jack->iJack = map->ep->blks[map->block].string_id;
	err = append_config(config, jack);
	if (err < 0)
		return err;
	return id;
}
/* build and append a MIDI 1.0 OUT jack descriptor wired to @source (an
 * IN jack ID); returns the new jack ID or a negative error code
 */
static int append_midi1_out_jack(struct f_midi2 *midi2,
				 struct f_midi2_usb_config *config,
				 struct midi1_cable_mapping *map,
				 unsigned int type, unsigned int source)
{
	struct usb_midi_out_jack_descriptor_1 *jack =
		&config->jack_outs[config->jack_out++];
	/* jack IDs are allocated sequentially starting from 1 */
	int id = ++config->jack_id;
	int err;
	jack->bLength = 0x09;
	jack->bDescriptorType = USB_DT_CS_INTERFACE;
	jack->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
	jack->bJackType = type;
	jack->bJackID = id;
	/* single input pin connected to the source jack */
	jack->bNrInputPins = 1;
	jack->pins[0].baSourceID = source;
	jack->pins[0].baSourcePin = 0x01;
	/* use the corresponding block name as jack name */
	if (map->ep)
		jack->iJack = map->ep->blks[map->block].string_id;
	err = append_config(config, jack);
	if (err < 0)
		return err;
	return id;
}
/* assemble the full descriptor list for the given @speed into @config:
 * audio-control interface, MIDI 1.0 interface (jacks + endpoints), then
 * the MIDI 2.0 interface with one EP pair per UMP endpoint.
 * Returns 0 or a negative error code.
 */
static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
				      struct f_midi2_usb_config *config,
				      int speed)
{
	void **midi1_in_eps, **midi1_out_eps;
	int i, jack, total;
	int err;
	/* pick wMaxPacketSize and the MIDI1 EP descriptor sets per speed */
	switch (speed) {
	default:
	case USB_SPEED_HIGH:
		midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512);
		midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512);
		for (i = 0; i < midi2->num_eps; i++)
			midi2_midi2_ep_out_desc[i].wMaxPacketSize =
				cpu_to_le16(512);
		fallthrough;
	case USB_SPEED_FULL:
		midi1_in_eps = midi2_midi1_ep_in_descs;
		midi1_out_eps = midi2_midi1_ep_out_descs;
		break;
	case USB_SPEED_SUPER:
		midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024);
		midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024);
		for (i = 0; i < midi2->num_eps; i++)
			midi2_midi2_ep_out_desc[i].wMaxPacketSize =
				cpu_to_le16(1024);
		midi1_in_eps = midi2_midi1_ep_in_ss_descs;
		midi1_out_eps = midi2_midi1_ep_out_ss_descs;
		break;
	}
	err = append_configs(config, midi2_audio_descs);
	if (err < 0)
		return err;
	if (midi2->num_midi1_in && midi2->num_midi1_out)
		midi2_midi1_if_desc.bNumEndpoints = 2;
	else
		midi2_midi1_if_desc.bNumEndpoints = 1;
	err = append_configs(config, midi2_midi1_descs);
	if (err < 0)
		return err;
	/* running total for the class-specific MS header wTotalLength */
	total = USB_DT_MS_HEADER_SIZE;
	if (midi2->num_midi1_out) {
		midi2_midi1_ep_out_class_desc.bLength =
			USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_out);
		total += midi2_midi1_ep_out_class_desc.bLength;
		midi2_midi1_ep_out_class_desc.bNumEmbMIDIJack =
			midi2->num_midi1_out;
		total += midi2->num_midi1_out *
			(USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
		/* per cable: embedded IN jack feeding an external OUT jack */
		for (i = 0; i < midi2->num_midi1_out; i++) {
			jack = append_midi1_in_jack(midi2, config,
						    &midi2->in_cable_mapping[i],
						    USB_MS_EMBEDDED);
			if (jack < 0)
				return jack;
			midi2_midi1_ep_out_class_desc.baAssocJackID[i] = jack;
			jack = append_midi1_out_jack(midi2, config,
						     &midi2->in_cable_mapping[i],
						     USB_MS_EXTERNAL, jack);
			if (jack < 0)
				return jack;
		}
	}
	if (midi2->num_midi1_in) {
		midi2_midi1_ep_in_class_desc.bLength =
			USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_in);
		total += midi2_midi1_ep_in_class_desc.bLength;
		midi2_midi1_ep_in_class_desc.bNumEmbMIDIJack =
			midi2->num_midi1_in;
		total += midi2->num_midi1_in *
			(USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
		/* per cable: external IN jack feeding an embedded OUT jack */
		for (i = 0; i < midi2->num_midi1_in; i++) {
			jack = append_midi1_in_jack(midi2, config,
						    &midi2->out_cable_mapping[i],
						    USB_MS_EXTERNAL);
			if (jack < 0)
				return jack;
			jack = append_midi1_out_jack(midi2, config,
						     &midi2->out_cable_mapping[i],
						     USB_MS_EMBEDDED, jack);
			if (jack < 0)
				return jack;
			midi2_midi1_ep_in_class_desc.baAssocJackID[i] = jack;
		}
	}
	midi2_midi1_class_desc.wTotalLength = cpu_to_le16(total);
	if (midi2->num_midi1_out) {
		err = append_configs(config, midi1_out_eps);
		if (err < 0)
			return err;
	}
	if (midi2->num_midi1_in) {
		err = append_configs(config, midi1_in_eps);
		if (err < 0)
			return err;
	}
	/* MIDI 2.0 interface: OUT + IN endpoint pair per UMP endpoint,
	 * with SS companion descriptors where required
	 */
	err = append_configs(config, midi2_midi2_descs);
	if (err < 0)
		return err;
	for (i = 0; i < midi2->num_eps; i++) {
		err = append_config(config, &midi2_midi2_ep_out_desc[i]);
		if (err < 0)
			return err;
		if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) {
			err = append_config(config, &midi2_midi2_ep_out_ss_comp_desc);
			if (err < 0)
				return err;
		}
		err = append_config(config, &midi2_midi2_ep_out_class_desc[i]);
		if (err < 0)
			return err;
		err = append_config(config, &midi2_midi2_ep_in_desc[i]);
		if (err < 0)
			return err;
		if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) {
			err = append_config(config, &midi2_midi2_ep_in_ss_comp_desc);
			if (err < 0)
				return err;
		}
		err = append_config(config, &midi2_midi2_ep_in_class_desc[i]);
		if (err < 0)
			return err;
	}
	return 0;
}
/* release the temporary descriptor list and reset the build state */
static void f_midi2_free_usb_configs(struct f_midi2_usb_config *config)
{
	kfree(config->list);
	memset(config, 0, sizeof(*config));
}
/* as we use the static descriptors for simplicity, serialize bind call */
static DEFINE_MUTEX(f_midi2_desc_mutex);
/* fill the MIDI 2.0 class-specific endpoint descriptor with the group
 * terminal block IDs of the given UMP endpoint
 */
static void fill_midi2_class_desc(struct f_midi2_ep *ep,
				  struct usb_ms20_endpoint_descriptor_32 *cdesc)
{
	int i;

	cdesc->bLength = USB_DT_MS20_ENDPOINT_SIZE(ep->num_blks);
	cdesc->bDescriptorType = USB_DT_CS_ENDPOINT;
	cdesc->bDescriptorSubtype = USB_MS_GENERAL_2_0;
	cdesc->bNumGrpTrmBlock = ep->num_blks;
	for (i = 0; i < ep->num_blks; i++)
		cdesc->baAssoGrpTrmBlkID[i] = ep->blks[i].gtb_id;
}
/* initialize MIDI2 EP-in: fill the interrupt-IN endpoint descriptor and
 * its class-specific companion, then claim the usb_ep
 */
static int f_midi2_init_midi2_ep_in(struct f_midi2 *midi2, int index)
{
	struct f_midi2_ep *ep = &midi2->midi2_eps[index];
	struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_in_desc[index];
	desc->bLength = USB_DT_ENDPOINT_SIZE;
	desc->bDescriptorType = USB_DT_ENDPOINT;
	desc->bEndpointAddress = USB_DIR_IN;
	desc->bmAttributes = USB_ENDPOINT_XFER_INT;
	desc->wMaxPacketSize = cpu_to_le16(EP_MAX_PACKET_INT);
	desc->bInterval = 1;
	fill_midi2_class_desc(ep, &midi2_midi2_ep_in_class_desc[index]);
	return f_midi2_init_ep(midi2, ep, &ep->ep_in, desc,
			       f_midi2_ep_in_complete);
}
/* initialize MIDI2 EP-out: fill the bulk-OUT endpoint descriptor and
 * its class-specific companion, then claim the usb_ep
 */
static int f_midi2_init_midi2_ep_out(struct f_midi2 *midi2, int index)
{
	struct f_midi2_ep *ep = &midi2->midi2_eps[index];
	struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_out_desc[index];
	desc->bLength = USB_DT_ENDPOINT_SIZE;
	desc->bDescriptorType = USB_DT_ENDPOINT;
	desc->bEndpointAddress = USB_DIR_OUT;
	desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
	fill_midi2_class_desc(ep, &midi2_midi2_ep_out_class_desc[index]);
	return f_midi2_init_ep(midi2, ep, &ep->ep_out, desc,
			       f_midi2_ep_out_complete);
}
/* gadget function bind callback: create the ALSA card, attach gadget
 * strings, allocate interfaces/endpoints and build the descriptor sets
 * for FS/HS/SS.  The shared static descriptors are protected by
 * f_midi2_desc_mutex for the whole build.
 */
static int f_midi2_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_midi2 *midi2 = func_to_midi2(f);
	struct f_midi2_ep *ep;
	struct f_midi2_usb_config config = {};
	struct usb_gadget_strings string_fn = {
		.language = 0x0409,	/* en-us */
		.strings = midi2->string_defs,
	};
	struct usb_gadget_strings *strings[] = {
		&string_fn,
		NULL,
	};
	int i, blk, status;
	midi2->gadget = cdev->gadget;
	/* mode is decided later when the host selects an altsetting */
	midi2->operation_mode = MIDI_OP_MODE_UNSET;
	status = f_midi2_create_card(midi2);
	if (status < 0)
		goto fail_register;
	/* maybe allocate device-global string ID */
	midi2->strings = usb_gstrings_attach(c->cdev, strings,
					     midi2->total_blocks + 1);
	if (IS_ERR(midi2->strings)) {
		status = PTR_ERR(midi2->strings);
		goto fail_string;
	}
	mutex_lock(&f_midi2_desc_mutex);
	midi2_midi1_if_desc.iInterface = midi2->strings[STR_IFACE].id;
	midi2_midi2_if_desc.iInterface = midi2->strings[STR_IFACE].id;
	/* propagate the allocated string IDs to each block */
	for (i = 0; i < midi2->num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		for (blk = 0; blk < ep->num_blks; blk++)
			ep->blks[blk].string_id =
				midi2->strings[gtb_to_str_id(ep->blks[blk].gtb_id)].id;
	}
	midi2_midi2_if_desc.bNumEndpoints = midi2->num_eps * 2;
	/* audio interface */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	midi2_audio_if_desc.bInterfaceNumber = status;
	/* MIDI streaming */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	midi2->midi_if = status;
	midi2_midi1_if_desc.bInterfaceNumber = status;
	midi2_midi2_if_desc.bInterfaceNumber = status;
	midi2_audio_class_desc.baInterfaceNr[0] = status;
	/* allocate instance-specific endpoints */
	if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_OUTPUT) {
		status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_in,
					 &midi2_midi1_ep_in_desc,
					 f_midi2_midi1_ep_in_complete);
		if (status)
			goto fail;
	}
	if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_INPUT) {
		status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_out,
					 &midi2_midi1_ep_out_desc,
					 f_midi2_midi1_ep_out_complete);
		if (status)
			goto fail;
	}
	for (i = 0; i < midi2->num_eps; i++) {
		status = f_midi2_init_midi2_ep_in(midi2, i);
		if (status)
			goto fail;
		status = f_midi2_init_midi2_ep_out(midi2, i);
		if (status)
			goto fail;
	}
	/* build and copy the descriptor set once per supported speed */
	status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_FULL);
	if (status < 0)
		goto fail;
	f->fs_descriptors = usb_copy_descriptors(config.list);
	if (!f->fs_descriptors) {
		status = -ENOMEM;
		goto fail;
	}
	f_midi2_free_usb_configs(&config);
	status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_HIGH);
	if (status < 0)
		goto fail;
	f->hs_descriptors = usb_copy_descriptors(config.list);
	if (!f->hs_descriptors) {
		status = -ENOMEM;
		goto fail;
	}
	f_midi2_free_usb_configs(&config);
	status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_SUPER);
	if (status < 0)
		goto fail;
	f->ss_descriptors = usb_copy_descriptors(config.list);
	if (!f->ss_descriptors) {
		status = -ENOMEM;
		goto fail;
	}
	f_midi2_free_usb_configs(&config);
	mutex_unlock(&f_midi2_desc_mutex);
	return 0;
fail:
	f_midi2_free_usb_configs(&config);
	mutex_unlock(&f_midi2_desc_mutex);
	usb_free_all_descriptors(f);
fail_string:
	f_midi2_free_card(midi2);
fail_register:
	ERROR(midi2, "%s: can't bind, err %d\n", f->name, status);
	return status;
}
/* gadget function unbind callback */
static void f_midi2_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_midi2 *midi2 = func_to_midi2(f);
int i;
f_midi2_free_card(midi2);
f_midi2_free_ep(&midi2->midi1_ep_in);
f_midi2_free_ep(&midi2->midi1_ep_out);
for (i = 0; i < midi2->num_eps; i++) {
f_midi2_free_ep(&midi2->midi2_eps[i].ep_in);
f_midi2_free_ep(&midi2->midi2_eps[i].ep_out);
}
usb_free_all_descriptors(f);
}
/*
* ConfigFS interface
*/
/* type conversion helpers */
static inline struct f_midi2_opts *to_f_midi2_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_midi2_opts,
func_inst.group);
}
static inline struct f_midi2_ep_opts *
to_f_midi2_ep_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_midi2_ep_opts,
group);
}
static inline struct f_midi2_block_opts *
to_f_midi2_block_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_midi2_block_opts,
group);
}
/* trim the string to be usable for EP and FB name strings:
 * truncate at the first newline, then strip trailing whitespace
 */
static void make_name_string(char *s)
{
	size_t len;
	char *p;

	p = strchr(s, '\n');
	if (p)
		*p = 0;

	/* The previous code started the scan at the NUL terminator, for
	 * which isspace() is always false, so trailing blanks were never
	 * removed.  Scan from the last character instead; the unsigned
	 * char cast keeps isspace() well-defined for negative chars.
	 */
	len = strlen(s);
	while (len > 0 && isspace((unsigned char)s[len - 1]))
		s[--len] = 0;
}
/* configfs helpers: generic show for an unsigned int option;
 * @format is the printf format used to render @val
 */
static ssize_t f_midi2_opts_uint_show(struct f_midi2_opts *opts,
				      u32 val, const char *format, char *page)
{
	int result;
	mutex_lock(&opts->lock);
	result = sprintf(page, format, val);
	mutex_unlock(&opts->lock);
	return result;
}
/* generic store for an unsigned int option with [minval, maxval] range
 * check; rejected with -EBUSY while the instance is bound (refcnt > 0)
 */
static ssize_t f_midi2_opts_uint_store(struct f_midi2_opts *opts,
				       u32 *valp, u32 minval, u32 maxval,
				       const char *page, size_t len)
{
	int ret;
	u32 val;
	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtou32(page, 0, &val);
	if (ret)
		goto end;
	if (val < minval || val > maxval) {
		ret = -EINVAL;
		goto end;
	}
	*valp = val;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}
/* generic store for a bool option; rejected with -EBUSY while bound */
static ssize_t f_midi2_opts_bool_store(struct f_midi2_opts *opts,
				       bool *valp, const char *page, size_t len)
{
	int ret;
	bool val;
	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtobool(page, &val);
	if (ret)
		goto end;
	*valp = val;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}
/* generic show for a string option; prints nothing if unset (NULL) */
static ssize_t f_midi2_opts_str_show(struct f_midi2_opts *opts,
				     const char *str, char *page)
{
	int result = 0;
	mutex_lock(&opts->lock);
	if (str)
		result = scnprintf(page, PAGE_SIZE, "%s\n", str);
	mutex_unlock(&opts->lock);
	return result;
}
/* generic store for a string option: duplicate at most @maxlen bytes,
 * trim via make_name_string() and replace the previous value; rejected
 * with -EBUSY while bound
 */
static ssize_t f_midi2_opts_str_store(struct f_midi2_opts *opts,
				      const char **strp, size_t maxlen,
				      const char *page, size_t len)
{
	char *c;
	int ret;
	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	c = kstrndup(page, min(len, maxlen), GFP_KERNEL);
	if (!c) {
		ret = -ENOMEM;
		goto end;
	}
	/* free the old string only after the new copy succeeded */
	kfree(*strp);
	make_name_string(c);
	*strp = c;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}
/*
* Definitions for UMP Block config
*/
/* define an uint option for block */
#define F_MIDI2_BLOCK_OPT(name, format, minval, maxval) \
static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\
char *page) \
{ \
struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \
format "\n", page); \
} \
\
static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\
const char *page, size_t len) \
{ \
struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
return f_midi2_opts_uint_store(opts->ep->opts, &opts->info.name,\
minval, maxval, page, len); \
} \
\
CONFIGFS_ATTR(f_midi2_block_opts_, name)
/* define a boolean option for block */
#define F_MIDI2_BLOCK_BOOL_OPT(name) \
static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\
char *page) \
{ \
struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \
"%u\n", page); \
} \
\
static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\
const char *page, size_t len) \
{ \
struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
return f_midi2_opts_bool_store(opts->ep->opts, &opts->info.name,\
page, len); \
} \
\
CONFIGFS_ATTR(f_midi2_block_opts_, name)
F_MIDI2_BLOCK_OPT(direction, "0x%x", 1, 3);
F_MIDI2_BLOCK_OPT(first_group, "0x%x", 0, 15);
F_MIDI2_BLOCK_OPT(num_groups, "0x%x", 1, 16);
F_MIDI2_BLOCK_OPT(midi1_first_group, "0x%x", 0, 15);
F_MIDI2_BLOCK_OPT(midi1_num_groups, "0x%x", 0, 16);
F_MIDI2_BLOCK_OPT(ui_hint, "0x%x", 0, 3);
F_MIDI2_BLOCK_OPT(midi_ci_version, "%u", 0, 1);
F_MIDI2_BLOCK_OPT(sysex8_streams, "%u", 0, 255);
F_MIDI2_BLOCK_OPT(is_midi1, "%u", 0, 2);
F_MIDI2_BLOCK_BOOL_OPT(active);
/* show/store for the per-block "name" string attribute (max 128 chars) */
static ssize_t f_midi2_block_opts_name_show(struct config_item *item,
					    char *page)
{
	struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
	return f_midi2_opts_str_show(opts->ep->opts, opts->info.name, page);
}
static ssize_t f_midi2_block_opts_name_store(struct config_item *item,
					     const char *page, size_t len)
{
	struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
	return f_midi2_opts_str_store(opts->ep->opts, &opts->info.name, 128,
				      page, len);
}
CONFIGFS_ATTR(f_midi2_block_opts_, name);
static struct configfs_attribute *f_midi2_block_attrs[] = {
&f_midi2_block_opts_attr_direction,
&f_midi2_block_opts_attr_first_group,
&f_midi2_block_opts_attr_num_groups,
&f_midi2_block_opts_attr_midi1_first_group,
&f_midi2_block_opts_attr_midi1_num_groups,
&f_midi2_block_opts_attr_ui_hint,
&f_midi2_block_opts_attr_midi_ci_version,
&f_midi2_block_opts_attr_sysex8_streams,
&f_midi2_block_opts_attr_is_midi1,
&f_midi2_block_opts_attr_active,
&f_midi2_block_opts_attr_name,
NULL,
};
/* configfs release for a block item: free the name string and the opts */
static void f_midi2_block_opts_release(struct config_item *item)
{
	struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
	kfree(opts->info.name);
	kfree(opts);
}
static struct configfs_item_operations f_midi2_block_item_ops = {
.release = f_midi2_block_opts_release,
};
static const struct config_item_type f_midi2_block_type = {
.ct_item_ops = &f_midi2_block_item_ops,
.ct_attrs = f_midi2_block_attrs,
.ct_owner = THIS_MODULE,
};
/* create a f_midi2_block_opts instance for the given block number;
 * fails with -EBUSY if the instance is bound or the slot is taken.
 * The new block is registered in ep_opts->blks[] under the opts lock.
 */
static int f_midi2_block_opts_create(struct f_midi2_ep_opts *ep_opts,
				     unsigned int blk,
				     struct f_midi2_block_opts **block_p)
{
	struct f_midi2_block_opts *block_opts;
	int ret = 0;
	mutex_lock(&ep_opts->opts->lock);
	if (ep_opts->opts->refcnt || ep_opts->blks[blk]) {
		ret = -EBUSY;
		goto out;
	}
	block_opts = kzalloc(sizeof(*block_opts), GFP_KERNEL);
	if (!block_opts) {
		ret = -ENOMEM;
		goto out;
	}
	block_opts->ep = ep_opts;
	block_opts->id = blk;
	/* set up the default values */
	block_opts->info.direction = SNDRV_UMP_DIR_BIDIRECTION;
	block_opts->info.first_group = 0;
	block_opts->info.num_groups = 1;
	block_opts->info.ui_hint = SNDRV_UMP_BLOCK_UI_HINT_BOTH;
	block_opts->info.active = 1;
	ep_opts->blks[blk] = block_opts;
	*block_p = block_opts;
out:
	mutex_unlock(&ep_opts->opts->lock);
	return ret;
}
/* make_group callback for a block: accept only "block.<N>" names with
 * N < SNDRV_UMP_MAX_BLOCKS, then allocate the block opts group
 */
static struct config_group *
f_midi2_opts_block_make(struct config_group *group, const char *name)
{
	struct f_midi2_ep_opts *ep_opts;
	struct f_midi2_block_opts *block_opts;
	unsigned int blk;
	int ret;
	if (strncmp(name, "block.", 6))
		return ERR_PTR(-EINVAL);
	ret = kstrtouint(name + 6, 10, &blk);
	if (ret)
		return ERR_PTR(ret);
	ep_opts = to_f_midi2_ep_opts(&group->cg_item);
	if (blk >= SNDRV_UMP_MAX_BLOCKS)
		return ERR_PTR(-EINVAL);
	/* quick unlocked check; _create re-checks under the lock */
	if (ep_opts->blks[blk])
		return ERR_PTR(-EBUSY);
	ret = f_midi2_block_opts_create(ep_opts, blk, &block_opts);
	if (ret)
		return ERR_PTR(ret);
	config_group_init_type_name(&block_opts->group, name,
				    &f_midi2_block_type);
	return &block_opts->group;
}
/* drop_item callback for a block: unregister the slot under the lock;
 * the item itself is freed via the release callback on the final put
 */
static void
f_midi2_opts_block_drop(struct config_group *group, struct config_item *item)
{
	struct f_midi2_block_opts *block_opts = to_f_midi2_block_opts(item);
	mutex_lock(&block_opts->ep->opts->lock);
	block_opts->ep->blks[block_opts->id] = NULL;
	mutex_unlock(&block_opts->ep->opts->lock);
	config_item_put(item);
}
/*
* Definitions for UMP Endpoint config
*/
/* define an uint option for EP */
#define F_MIDI2_EP_OPT(name, format, minval, maxval) \
static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
return f_midi2_opts_uint_show(opts->opts, opts->info.name, \
format "\n", page); \
} \
\
static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \
const char *page, size_t len)\
{ \
struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
return f_midi2_opts_uint_store(opts->opts, &opts->info.name, \
minval, maxval, page, len); \
} \
\
CONFIGFS_ATTR(f_midi2_ep_opts_, name)
/* define a string option for EP */
#define F_MIDI2_EP_STR_OPT(name, maxlen) \
static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
return f_midi2_opts_str_show(opts->opts, opts->info.name, page);\
} \
\
static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
return f_midi2_opts_str_store(opts->opts, &opts->info.name, maxlen,\
page, len); \
} \
\
CONFIGFS_ATTR(f_midi2_ep_opts_, name)
F_MIDI2_EP_OPT(protocol, "0x%x", 1, 2);
F_MIDI2_EP_OPT(protocol_caps, "0x%x", 1, 3);
F_MIDI2_EP_OPT(manufacturer, "0x%x", 0, 0xffffff);
F_MIDI2_EP_OPT(family, "0x%x", 0, 0xffff);
F_MIDI2_EP_OPT(model, "0x%x", 0, 0xffff);
F_MIDI2_EP_OPT(sw_revision, "0x%x", 0, 0xffffffff);
F_MIDI2_EP_STR_OPT(ep_name, 128);
F_MIDI2_EP_STR_OPT(product_id, 128);
static struct configfs_attribute *f_midi2_ep_attrs[] = {
&f_midi2_ep_opts_attr_protocol,
&f_midi2_ep_opts_attr_protocol_caps,
&f_midi2_ep_opts_attr_ep_name,
&f_midi2_ep_opts_attr_product_id,
&f_midi2_ep_opts_attr_manufacturer,
&f_midi2_ep_opts_attr_family,
&f_midi2_ep_opts_attr_model,
&f_midi2_ep_opts_attr_sw_revision,
NULL,
};
/* configfs release for an EP item: free owned strings and the opts */
static void f_midi2_ep_opts_release(struct config_item *item)
{
	struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item);
	kfree(opts->info.ep_name);
	kfree(opts->info.product_id);
	kfree(opts);
}
static struct configfs_item_operations f_midi2_ep_item_ops = {
.release = f_midi2_ep_opts_release,
};
static struct configfs_group_operations f_midi2_ep_group_ops = {
.make_group = f_midi2_opts_block_make,
.drop_item = f_midi2_opts_block_drop,
};
static const struct config_item_type f_midi2_ep_type = {
.ct_item_ops = &f_midi2_ep_item_ops,
.ct_group_ops = &f_midi2_ep_group_ops,
.ct_attrs = f_midi2_ep_attrs,
.ct_owner = THIS_MODULE,
};
/* create a f_midi2_ep_opts instance and register it in opts->eps[];
 * NOTE(review): unlike f_midi2_block_opts_create() this does not take
 * opts->lock — presumably safe because callers either own a fresh opts
 * (alloc_inst) or have checked the slot (ep_make); confirm
 */
static int f_midi2_ep_opts_create(struct f_midi2_opts *opts,
				  unsigned int index,
				  struct f_midi2_ep_opts **ep_p)
{
	struct f_midi2_ep_opts *ep_opts;
	ep_opts = kzalloc(sizeof(*ep_opts), GFP_KERNEL);
	if (!ep_opts)
		return -ENOMEM;
	ep_opts->opts = opts;
	ep_opts->index = index;
	/* set up the default values */
	ep_opts->info.protocol = 2;
	ep_opts->info.protocol_caps = 3;
	opts->eps[index] = ep_opts;
	*ep_p = ep_opts;
	return 0;
}
/* make_group callback for an EP: accept only "ep.<N>" names with
 * N < MAX_UMP_EPS, then allocate the EP opts group
 */
static struct config_group *
f_midi2_opts_ep_make(struct config_group *group, const char *name)
{
	struct f_midi2_opts *opts;
	struct f_midi2_ep_opts *ep_opts;
	unsigned int index;
	int ret;
	if (strncmp(name, "ep.", 3))
		return ERR_PTR(-EINVAL);
	ret = kstrtouint(name + 3, 10, &index);
	if (ret)
		return ERR_PTR(ret);
	opts = to_f_midi2_opts(&group->cg_item);
	if (index >= MAX_UMP_EPS)
		return ERR_PTR(-EINVAL);
	if (opts->eps[index])
		return ERR_PTR(-EBUSY);
	ret = f_midi2_ep_opts_create(opts, index, &ep_opts);
	if (ret)
		return ERR_PTR(ret);
	config_group_init_type_name(&ep_opts->group, name, &f_midi2_ep_type);
	return &ep_opts->group;
}
/* drop_item callback for an EP: unregister the slot under the lock;
 * the item is freed via the release callback on the final put
 */
static void
f_midi2_opts_ep_drop(struct config_group *group, struct config_item *item)
{
	struct f_midi2_ep_opts *ep_opts = to_f_midi2_ep_opts(item);
	mutex_lock(&ep_opts->opts->lock);
	ep_opts->opts->eps[ep_opts->index] = NULL;
	mutex_unlock(&ep_opts->opts->lock);
	config_item_put(item);
}
/*
* Definitions for card config
*/
/* define a bool option for card */
#define F_MIDI2_BOOL_OPT(name) \
static ssize_t f_midi2_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_midi2_opts *opts = to_f_midi2_opts(item); \
return f_midi2_opts_uint_show(opts, opts->info.name, \
"%u\n", page); \
} \
\
static ssize_t f_midi2_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_midi2_opts *opts = to_f_midi2_opts(item); \
return f_midi2_opts_bool_store(opts, &opts->info.name, \
page, len); \
} \
\
CONFIGFS_ATTR(f_midi2_opts_, name)
F_MIDI2_BOOL_OPT(process_ump);
F_MIDI2_BOOL_OPT(static_block);
/* show/store for the card-level "iface_name" string (max 128 chars) */
static ssize_t f_midi2_opts_iface_name_show(struct config_item *item,
					    char *page)
{
	struct f_midi2_opts *opts = to_f_midi2_opts(item);
	return f_midi2_opts_str_show(opts, opts->info.iface_name, page);
}
static ssize_t f_midi2_opts_iface_name_store(struct config_item *item,
					     const char *page, size_t len)
{
	struct f_midi2_opts *opts = to_f_midi2_opts(item);
	return f_midi2_opts_str_store(opts, &opts->info.iface_name, 128,
				      page, len);
}
CONFIGFS_ATTR(f_midi2_opts_, iface_name);
static struct configfs_attribute *f_midi2_attrs[] = {
&f_midi2_opts_attr_process_ump,
&f_midi2_opts_attr_static_block,
&f_midi2_opts_attr_iface_name,
NULL
};
/* configfs release for the function item: drop the instance reference;
 * the actual freeing happens in f_midi2_free_inst()
 */
static void f_midi2_opts_release(struct config_item *item)
{
	struct f_midi2_opts *opts = to_f_midi2_opts(item);
	usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations f_midi2_item_ops = {
.release = f_midi2_opts_release,
};
static struct configfs_group_operations f_midi2_group_ops = {
.make_group = f_midi2_opts_ep_make,
.drop_item = f_midi2_opts_ep_drop,
};
static const struct config_item_type f_midi2_func_type = {
.ct_item_ops = &f_midi2_item_ops,
.ct_group_ops = &f_midi2_group_ops,
.ct_attrs = f_midi2_attrs,
.ct_owner = THIS_MODULE,
};
/* free the function instance and the strings it owns */
static void f_midi2_free_inst(struct usb_function_instance *f)
{
	struct f_midi2_opts *opts;
	opts = container_of(f, struct f_midi2_opts, func_inst);
	kfree(opts->info.iface_name);
	kfree(opts);
}
/* gadget alloc_inst: create the configfs option tree with one default
 * EP ("ep.0") containing one default block ("block.0"), pre-populated
 * with sensible defaults
 */
static struct usb_function_instance *f_midi2_alloc_inst(void)
{
	struct f_midi2_opts *opts;
	struct f_midi2_ep_opts *ep_opts;
	struct f_midi2_block_opts *block_opts;
	int ret;
	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = f_midi2_free_inst;
	opts->info.process_ump = true;
	opts->info.static_block = true;
	opts->info.num_reqs = 32;
	opts->info.req_buf_size = 512;
	/* create the default ep */
	ret = f_midi2_ep_opts_create(opts, 0, &ep_opts);
	if (ret) {
		kfree(opts);
		return ERR_PTR(ret);
	}
	/* create the default block */
	ret = f_midi2_block_opts_create(ep_opts, 0, &block_opts);
	if (ret) {
		kfree(ep_opts);
		kfree(opts);
		return ERR_PTR(ret);
	}
	/* set up the default MIDI1 (that is mandatory) */
	block_opts->info.midi1_num_groups = 1;
	config_group_init_type_name(&opts->func_inst.group, "",
				    &f_midi2_func_type);
	config_group_init_type_name(&ep_opts->group, "ep.0",
				    &f_midi2_ep_type);
	configfs_add_default_group(&ep_opts->group, &opts->func_inst.group);
	config_group_init_type_name(&block_opts->group, "block.0",
				    &f_midi2_block_type);
	configfs_add_default_group(&block_opts->group, &ep_opts->group);
	return &opts->func_inst;
}
/* drop the opts refcount taken in f_midi2_alloc() and free the function
 * data; the string array is owned by midi2
 */
static void do_f_midi2_free(struct f_midi2 *midi2, struct f_midi2_opts *opts)
{
	mutex_lock(&opts->lock);
	--opts->refcnt;
	mutex_unlock(&opts->lock);
	kfree(midi2->string_defs);
	kfree(midi2);
}
/* gadget free_func callback: thin wrapper around do_f_midi2_free() */
static void f_midi2_free(struct usb_function *f)
{
	do_f_midi2_free(func_to_midi2(f),
			container_of(f->fi, struct f_midi2_opts, func_inst));
}
/* verify the parameters set up via configfs;
 * return the number of EPs or a negative error
 */
static int verify_parameters(struct f_midi2_opts *opts)
{
	int i, j, num_eps, num_blks;
	struct f_midi2_ep_info *ep;
	struct f_midi2_block_info *bp;
	/* EPs must be contiguous from index 0; count them */
	for (num_eps = 0; num_eps < MAX_UMP_EPS && opts->eps[num_eps];
	     num_eps++)
		;
	if (!num_eps) {
		pr_err("f_midi2: No EP is defined\n");
		return -EINVAL;
	}
	num_blks = 0;
	for (i = 0; i < num_eps; i++) {
		ep = &opts->eps[i]->info;
		/* the chosen protocol must be within the advertised caps */
		if (!(ep->protocol_caps & ep->protocol)) {
			pr_err("f_midi2: Invalid protocol 0x%x (caps 0x%x) for EP %d\n",
			       ep->protocol, ep->protocol_caps, i);
			return -EINVAL;
		}
		for (j = 0; j < SNDRV_UMP_MAX_BLOCKS && opts->eps[i]->blks[j];
		     j++, num_blks++) {
			bp = &opts->eps[i]->blks[j]->info;
			/* block groups must fit in the UMP group space */
			if (bp->first_group + bp->num_groups > SNDRV_UMP_MAX_GROUPS) {
				pr_err("f_midi2: Invalid group definitions for block %d:%d\n",
				       i, j);
				return -EINVAL;
			}
			/* MIDI1 groups must be a subset of the block groups */
			if (bp->midi1_num_groups) {
				if (bp->midi1_first_group < bp->first_group ||
				    bp->midi1_first_group + bp->midi1_num_groups >
				    bp->first_group + bp->num_groups) {
					pr_err("f_midi2: Invalid MIDI1 group definitions for block %d:%d\n",
					       i, j);
					return -EINVAL;
				}
			}
		}
	}
	if (!num_blks) {
		pr_err("f_midi2: No block is defined\n");
		return -EINVAL;
	}
	return num_eps;
}
/* fill mapping between MIDI 1.0 cables and UMP EP/group for one block:
 * each MIDI1-capable group becomes one cable in each applicable
 * direction, capped at MAX_CABLES per direction
 */
static void fill_midi1_cable_mapping(struct f_midi2 *midi2,
				     struct f_midi2_ep *ep,
				     int blk)
{
	const struct f_midi2_block_info *binfo = &ep->blks[blk].info;
	struct midi1_cable_mapping *map;
	int i, group;
	if (!binfo->midi1_num_groups)
		return;
	/* host -> gadget cables (everything but pure output blocks) */
	if (binfo->direction != SNDRV_UMP_DIR_OUTPUT) {
		group = binfo->midi1_first_group;
		map = midi2->in_cable_mapping + midi2->num_midi1_in;
		for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
			if (midi2->num_midi1_in >= MAX_CABLES)
				break;
			map->ep = ep;
			map->block = blk;
			map->group = group;
			midi2->num_midi1_in++;
			/* store 1-based cable number */
			ep->in_group_to_cable[group] = midi2->num_midi1_in;
		}
	}
	/* gadget -> host cables (everything but pure input blocks) */
	if (binfo->direction != SNDRV_UMP_DIR_INPUT) {
		group = binfo->midi1_first_group;
		map = midi2->out_cable_mapping + midi2->num_midi1_out;
		for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
			if (midi2->num_midi1_out >= MAX_CABLES)
				break;
			map->ep = ep;
			map->block = blk;
			map->group = group;
			midi2->num_midi1_out++;
		}
	}
}
/* gadget alloc callback: validate the configfs setup, snapshot it into a
 * new f_midi2 instance, build the string table and the MIDI1 cable maps
 */
static struct usb_function *f_midi2_alloc(struct usb_function_instance *fi)
{
	struct f_midi2 *midi2;
	struct f_midi2_opts *opts;
	struct f_midi2_ep *ep;
	struct f_midi2_block *bp;
	int i, num_eps, blk;
	midi2 = kzalloc(sizeof(*midi2), GFP_KERNEL);
	if (!midi2)
		return ERR_PTR(-ENOMEM);
	opts = container_of(fi, struct f_midi2_opts, func_inst);
	mutex_lock(&opts->lock);
	num_eps = verify_parameters(opts);
	if (num_eps < 0) {
		mutex_unlock(&opts->lock);
		kfree(midi2);
		return ERR_PTR(num_eps);
	}
	/* hold a reference so the configfs options stay frozen */
	++opts->refcnt;
	mutex_unlock(&opts->lock);
	spin_lock_init(&midi2->queue_lock);
	midi2->func.name = "midi2_func";
	midi2->func.bind = f_midi2_bind;
	midi2->func.unbind = f_midi2_unbind;
	midi2->func.get_alt = f_midi2_get_alt;
	midi2->func.set_alt = f_midi2_set_alt;
	midi2->func.setup = f_midi2_setup;
	midi2->func.disable = f_midi2_disable;
	midi2->func.free_func = f_midi2_free;
	midi2->info = opts->info;
	midi2->num_eps = num_eps;
	/* copy EP/block info; gtb_id is a 1-based global block index */
	for (i = 0; i < num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		ep->info = opts->eps[i]->info;
		ep->card = midi2;
		for (blk = 0; blk < SNDRV_UMP_MAX_BLOCKS &&
			     opts->eps[i]->blks[blk]; blk++) {
			bp = &ep->blks[blk];
			ep->num_blks++;
			bp->info = opts->eps[i]->blks[blk]->info;
			bp->gtb_id = ++midi2->total_blocks;
		}
	}
	/* one string per block plus the interface string */
	midi2->string_defs = kcalloc(midi2->total_blocks + 1,
				     sizeof(*midi2->string_defs), GFP_KERNEL);
	if (!midi2->string_defs) {
		do_f_midi2_free(midi2, opts);
		return ERR_PTR(-ENOMEM);
	}
	if (opts->info.iface_name && *opts->info.iface_name)
		midi2->string_defs[STR_IFACE].s = opts->info.iface_name;
	else
		midi2->string_defs[STR_IFACE].s = ump_ep_name(&midi2->midi2_eps[0]);
	for (i = 0; i < midi2->num_eps; i++) {
		ep = &midi2->midi2_eps[i];
		for (blk = 0; blk < ep->num_blks; blk++) {
			bp = &ep->blks[blk];
			midi2->string_defs[gtb_to_str_id(bp->gtb_id)].s =
				ump_fb_name(&bp->info);
			fill_midi1_cable_mapping(midi2, ep, blk);
		}
	}
	/* at least one MIDI1 cable is mandatory */
	if (!midi2->num_midi1_in && !midi2->num_midi1_out) {
		pr_err("f_midi2: MIDI1 definition is missing\n");
		do_f_midi2_free(midi2, opts);
		return ERR_PTR(-EINVAL);
	}
	return &midi2->func;
}
DECLARE_USB_FUNCTION_INIT(midi2, f_midi2_alloc_inst, f_midi2_alloc);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_midi2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_rndis.c -- RNDIS link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz ([email protected])
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/atomic.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_rndis.h"
#include "rndis.h"
#include "configfs.h"
/*
* This function is an RNDIS Ethernet port -- a Microsoft protocol that's
* been promoted instead of the standard CDC Ethernet. The published RNDIS
* spec is ambiguous, incomplete, and needlessly complex. Variants such as
* ActiveSync have even worse status in terms of specification.
*
* In short: it's a protocol controlled by (and for) Microsoft, not for an
* Open ecosystem or markets. Linux supports it *only* because Microsoft
* doesn't support the CDC Ethernet standard.
*
* The RNDIS data transfer model is complex, with multiple Ethernet packets
* per USB message, and out of band data. The control model is built around
* what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
* (modem, not Ethernet) veneer, with those ACM descriptors being entirely
* useless (they're ignored). RNDIS expects to be the only function in its
* configuration, so it's no real help if you need composite devices; and
* it expects to be the first configuration too.
*
* There is a single technical advantage of RNDIS over CDC Ethernet, if you
* discount the fluff that its RPC can be made to deliver: it doesn't need
* a NOP altsetting for the data interface. That lets it work on some of the
* "so smart it's stupid" hardware which takes over configuration changes
* from the software, and adds restrictions like "no altsettings".
*
* Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
* have all sorts of contrary-to-specification oddities that can prevent
* them from working sanely. Since bugfixes (or accurate specs, letting
* Linux work around those bugs) are unlikely to ever come from MSFT, you
* may want to avoid using RNDIS on purely operational grounds.
*
* Omissions from the RNDIS 1.0 specification include:
*
* - Power management ... references data that's scattered around lots
* of other documentation, which is incorrect/incomplete there too.
*
* - There are various undocumented protocol requirements, like the need
* to send garbage in some control-OUT messages.
*
* - MS-Windows drivers sometimes emit undocumented requests.
*/
/* per-instance state for one RNDIS function */
struct f_rndis {
	struct gether			port;		/* ethernet link */
	u8				ctrl_id, data_id; /* interface numbers */
	u8				ethaddr[ETH_ALEN]; /* host-side MAC */
	u32				vendorID;
	const char			*manufacturer;
	struct rndis_params		*params;	/* RNDIS core state */

	struct usb_ep			*notify;	/* interrupt-IN status ep */
	struct usb_request		*notify_req;
	atomic_t			notify_count;	/* pending notifications */
};
/* map a generic usb_function back to its containing f_rndis */
static inline struct f_rndis *func_to_rndis(struct usb_function *f)
{
	return container_of(f, struct f_rndis, port.func);
}
/*-------------------------------------------------------------------------*/

/*
 */

#define RNDIS_STATUS_INTERVAL_MS	32
#define STATUS_BYTECOUNT		8	/* 8 bytes data */


/* interface descriptor: */

/* RNDIS masquerades as a CDC ACM control interface with vendor protocol */
static struct usb_interface_descriptor rndis_control_intf = {
	.bLength =		sizeof rndis_control_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	/* status endpoint is optional; this could be patched later */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
	.bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc header_desc = {
	.bLength =		sizeof header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
	.bLength =		sizeof call_mgmt_descriptor,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,

	.bmCapabilities =	0x00,
	.bDataInterface =	0x01,
};

static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
	.bLength =		sizeof rndis_acm_descriptor,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ACM_TYPE,

	.bmCapabilities =	0x00,
};

/* ties the control interface (master) to the data interface (slave) */
static struct usb_cdc_union_desc rndis_union_desc = {
	.bLength =		sizeof(rndis_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

/* the data interface has two bulk endpoints */

static struct usb_interface_descriptor rndis_data_intf = {
	.bLength =		sizeof rndis_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};

/* groups the control + data interfaces into one function for the host */
static struct usb_interface_assoc_descriptor
rndis_iad_descriptor = {
	.bLength =		sizeof rndis_iad_descriptor,
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

	.bFirstInterface =	0, /* XXX, hardcoded */
	.bInterfaceCount = 	2,	// control + data
	.bFunctionClass =	USB_CLASS_COMM,
	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
	/* .iFunction = DYNAMIC */
};
/* full speed support: */

/* at full speed, bInterval is expressed directly in milliseconds */
static struct usb_endpoint_descriptor fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	.bInterval =		RNDIS_STATUS_INTERVAL_MS,
};

static struct usb_endpoint_descriptor fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eth_fs_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &fs_notify_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &fs_in_desc,
	(struct usb_descriptor_header *) &fs_out_desc,
	NULL,
};
/* high speed support: */

/* at high speed, bInterval uses the 2^(n-1) microframe encoding, hence
 * the ms -> HS conversion macro
 */
static struct usb_endpoint_descriptor hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	.bInterval =		USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS)
};

static struct usb_endpoint_descriptor hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *eth_hs_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &hs_notify_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &hs_in_desc,
	(struct usb_descriptor_header *) &hs_out_desc,
	NULL,
};
/* super speed support: */

static struct usb_endpoint_descriptor ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	/* SS interrupt endpoints use the same bInterval encoding as HS */
	.bInterval =		USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS)
};

static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
	.bLength =		sizeof ss_intr_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
	.wBytesPerInterval =	cpu_to_le16(STATUS_BYTECOUNT),
};

static struct usb_endpoint_descriptor ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

/* shared by both bulk endpoints; each gets its own slot in the table */
static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
	.bLength =		sizeof ss_bulk_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *eth_ss_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &ss_notify_desc,
	(struct usb_descriptor_header *) &ss_intr_comp_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &ss_in_desc,
	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
	(struct usb_descriptor_header *) &ss_out_desc,
	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
	NULL,
};
/* string descriptors: */

/* ids ([0].id etc.) are assigned dynamically by usb_gstrings_attach() */
static struct usb_string rndis_string_defs[] = {
	[0].s = "RNDIS Communications Control",
	[1].s = "RNDIS Ethernet Data",
	[2].s = "RNDIS",
	{  } /* end of list */
};

static struct usb_gadget_strings rndis_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		rndis_string_defs,
};

static struct usb_gadget_strings *rndis_strings[] = {
	&rndis_string_table,
	NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * Wrap an outgoing Ethernet frame in an RNDIS packet header.
 *
 * A new skb with headroom for the RNDIS header is allocated, the
 * original skb is consumed, and the wrapped copy is returned.
 * Returns NULL when @skb is NULL or the reallocation fails (the
 * frame is then dropped).
 */
static struct sk_buff *rndis_add_header(struct gether *port,
					struct sk_buff *skb)
{
	struct sk_buff *skb2;

	if (!skb)
		return NULL;

	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
	if (!skb2) {
		/* allocation failed: drop the frame explicitly instead of
		 * relying on rndis_add_hdr() tolerating a NULL skb
		 */
		dev_kfree_skb(skb);
		return NULL;
	}
	rndis_add_hdr(skb2);

	dev_kfree_skb(skb);
	return skb2;
}
/*
 * Called by the RNDIS core when a response is ready for the host.
 * Queues a RESPONSE_AVAILABLE notification on the interrupt endpoint,
 * unless one is already in flight; uses GFP_ATOMIC since it may run
 * in atomic context.
 */
static void rndis_response_available(void *_rndis)
{
	struct f_rndis			*rndis = _rndis;
	struct usb_request		*req = rndis->notify_req;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	__le32				*data = req->buf;
	int				status;

	/* only the first caller queues; later callers just bump the count
	 * and rndis_response_complete() re-queues until it reaches zero
	 */
	if (atomic_inc_return(&rndis->notify_count) != 1)
		return;

	/* Send RNDIS RESPONSE_AVAILABLE notification; a
	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
	 *
	 * This is the only notification defined by RNDIS.
	 */
	data[0] = cpu_to_le32(1);
	data[1] = cpu_to_le32(0);

	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
	if (status) {
		atomic_dec(&rndis->notify_count);
		DBG(cdev, "notify/0 --> %d\n", status);
	}
}
/*
 * Completion handler shared by the notification request (interrupt ep)
 * and ep0 response transfers.  Re-queues the notification while more
 * RESPONSE_AVAILABLE events are pending; resets the pending count when
 * the connection goes away.
 */
static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rndis			*rndis = req->context;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	int				status = req->status;

	/* after TX:
	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
	 */
	switch (status) {
	case -ECONNRESET:		/* request dequeued */
	case -ESHUTDOWN:		/* disconnect from host */
		/* connection gone */
		atomic_set(&rndis->notify_count, 0);
		break;
	default:
		DBG(cdev, "RNDIS %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		/* deliberate: errors on the notify ep still re-queue below */
		fallthrough;
	case 0:
		if (ep != rndis->notify)
			break;

		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&rndis->notify_count))
			break;
		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&rndis->notify_count);
			DBG(cdev, "notify/1 --> %d\n", status);
		}
		break;
	}
}
/* ep0 OUT data stage done: hand the encapsulated command to the parser */
static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rndis *rndis = req->context;
	int rc;

	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
	rc = rndis_msg_parser(rndis->params, (u8 *) req->buf);
	if (rc < 0)
		pr_err("RNDIS command error %d, %d/%d\n",
			rc, req->actual, req->length);
}
/*
 * Class-specific ep0 request handler.  Only the two CDC encapsulated
 * command/response requests are handled here; everything else (standard
 * requests, interface activation) is dealt with by the composite core
 * and set_alt().  Returns the data-stage length, or a negative errno
 * (which makes the core stall ep0).
 */
static int
rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	/* RNDIS uses the CDC command encapsulation mechanism to implement
	 * an RPC scheme, with much getting/setting of attributes by OID.
	 */
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		if (w_value || w_index != rndis->ctrl_id)
			goto invalid;
		/* read the request; process it later */
		value = w_length;
		req->complete = rndis_command_complete;
		req->context = rndis;
		/* later, rndis_response_available() sends a notification */
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value || w_index != rndis->ctrl_id)
			goto invalid;
		else {
			u8 *buf;
			u32 n;

			/* return the result */
			buf = rndis_get_next_response(rndis->params, &n);
			if (buf) {
				memcpy(req->buf, buf, n);
				req->complete = rndis_response_complete;
				req->context = rndis;
				rndis_free_response(rndis->params, buf);
				value = n;
			}
			/* else stalls ... spec says to avoid that */
		}
		break;

	default:
invalid:
		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		/* short-write ZLP only when we return less than requested */
		req->zero = (value < w_length);
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "rndis response on err %d\n", value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
/*
 * set_alt handler.  RNDIS has no real altsettings (alt is always 0);
 * intf selects either the control interface (re-enable the interrupt
 * notify endpoint) or the data interface (reconnect the ethernet link).
 * Returns 0 on success, -EINVAL on unknown interface or ep config
 * failure.
 */
static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* we know alt == 0 */

	if (intf == rndis->ctrl_id) {
		VDBG(cdev, "reset rndis control %d\n", intf);
		usb_ep_disable(rndis->notify);

		if (!rndis->notify->desc) {
			VDBG(cdev, "init rndis ctrl %d\n", intf);
			/* pick the speed-matched descriptor on first use */
			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
				goto fail;
		}
		usb_ep_enable(rndis->notify);

	} else if (intf == rndis->data_id) {
		struct net_device	*net;

		if (rndis->port.in_ep->enabled) {
			DBG(cdev, "reset rndis\n");
			gether_disconnect(&rndis->port);
		}

		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
			DBG(cdev, "init rndis\n");
			if (config_ep_by_speed(cdev->gadget, f,
					       rndis->port.in_ep) ||
			    config_ep_by_speed(cdev->gadget, f,
					       rndis->port.out_ep)) {
				rndis->port.in_ep->desc = NULL;
				rndis->port.out_ep->desc = NULL;
				goto fail;
			}
		}

		/* Avoid ZLPs; they can be troublesome. */
		rndis->port.is_zlp_ok = false;

		/* RNDIS should be in the "RNDIS uninitialized" state,
		 * either never activated or after rndis_uninit().
		 *
		 * We don't want data to flow here until a nonzero packet
		 * filter is set, at which point it enters "RNDIS data
		 * initialized" state ... but we do want the endpoints
		 * to be activated.  It's a strange little state.
		 *
		 * REVISIT the RNDIS gadget code has done this wrong for a
		 * very long time.  We need another call to the link layer
		 * code -- gether_updown(...bool) maybe -- to do it right.
		 */
		rndis->port.cdc_filter = 0;
		DBG(cdev, "RNDIS RX/TX early activation ... \n");
		net = gether_connect(&rndis->port);
		if (IS_ERR(net))
			return PTR_ERR(net);

		rndis_set_param_dev(rndis->params, net,
				&rndis->port.cdc_filter);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}
/* host deconfigured us (or we are being unbound): quiesce the link */
static void rndis_disable(struct usb_function *f)
{
	struct f_rndis *rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	if (rndis->notify->enabled) {
		DBG(cdev, "rndis deactivated\n");

		rndis_uninit(rndis->params);
		gether_disconnect(&rndis->port);

		usb_ep_disable(rndis->notify);
		rndis->notify->desc = NULL;
	}
}
/*-------------------------------------------------------------------------*/
/*
* This isn't quite the same mechanism as CDC Ethernet, since the
* notification scheme passes less data, but the same set of link
* states must be tested. A key difference is that altsettings are
* not used to tell whether the link should send packets or not.
*/
static void rndis_open(struct gether *geth)
{
struct f_rndis *rndis = func_to_rndis(&geth->func);
struct usb_composite_dev *cdev = geth->func.config->cdev;
DBG(cdev, "%s\n", __func__);
rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
gether_bitrate(cdev->gadget) / 100);
rndis_signal_connect(rndis->params);
}
/* link-layer "carrier off" hook from u_ether */
static void rndis_close(struct gether *geth)
{
	struct rndis_params *params = func_to_rndis(&geth->func)->params;

	DBG(geth->func.config->cdev, "%s\n", __func__);

	/* zero speed == link down */
	rndis_set_param_medium(params, RNDIS_MEDIUM_802_3, 0);
	rndis_signal_disconnect(params);
}
/*-------------------------------------------------------------------------*/
/* Some controllers can't support RNDIS ... */
/* placeholder for blacklisting unsuitable controllers; currently all
 * controllers are accepted
 */
static inline bool can_support_rndis(struct usb_configuration *c)
{
	/* everything else is *presumably* fine */
	return true;
}
/* ethernet function driver setup/binding */

/*
 * Bind the function to a configuration: register the backing netdev
 * (once per instance), attach strings, claim interface numbers and
 * endpoints, allocate the notification request, and build the
 * speed-specific descriptor sets.  On failure everything acquired
 * here is released again.
 */
static int
rndis_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_string	*us;
	int			status;
	struct usb_ep		*ep;

	struct f_rndis_opts *rndis_opts;

	if (!can_support_rndis(c))
		return -EINVAL;

	rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);

	if (cdev->use_os_string) {
		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
					   GFP_KERNEL);
		if (!f->os_desc_table)
			return -ENOMEM;
		f->os_desc_n = 1;
		f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
	}

	/* IAD class/subclass/protocol are configurable via configfs */
	rndis_iad_descriptor.bFunctionClass = rndis_opts->class;
	rndis_iad_descriptor.bFunctionSubClass = rndis_opts->subclass;
	rndis_iad_descriptor.bFunctionProtocol = rndis_opts->protocol;

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to rndis_opts->bound access
	 */
	if (!rndis_opts->bound) {
		gether_set_gadget(rndis_opts->net, cdev->gadget);
		status = gether_register_netdev(rndis_opts->net);
		if (status)
			goto fail;
		rndis_opts->bound = true;
	}

	us = usb_gstrings_attach(cdev, rndis_strings,
				 ARRAY_SIZE(rndis_string_defs));
	if (IS_ERR(us)) {
		status = PTR_ERR(us);
		goto fail;
	}
	rndis_control_intf.iInterface = us[0].id;
	rndis_data_intf.iInterface = us[1].id;
	rndis_iad_descriptor.iFunction = us[2].id;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	rndis->ctrl_id = status;
	rndis_iad_descriptor.bFirstInterface = status;

	rndis_control_intf.bInterfaceNumber = status;
	rndis_union_desc.bMasterInterface0 = status;

	if (cdev->use_os_string)
		f->os_desc_table[0].if_id =
			rndis_iad_descriptor.bFirstInterface;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	rndis->data_id = status;

	rndis_data_intf.bInterfaceNumber = status;
	rndis_union_desc.bSlaveInterface0 = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
	if (!ep)
		goto fail;
	rndis->port.in_ep = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
	if (!ep)
		goto fail;
	rndis->port.out_ep = ep;

	/* NOTE:  a status/notification endpoint is, strictly speaking,
	 * optional.  We don't treat it that way though!  It's simpler,
	 * and some newer profiles don't treat it as optional.
	 */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
	if (!ep)
		goto fail;
	rndis->notify = ep;

	status = -ENOMEM;

	/* allocate notification request and buffer */
	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!rndis->notify_req)
		goto fail;
	rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
	if (!rndis->notify_req->buf)
		goto fail;
	rndis->notify_req->length = STATUS_BYTECOUNT;
	rndis->notify_req->context = rndis;
	rndis->notify_req->complete = rndis_response_complete;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	hs_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress;
	hs_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress;
	hs_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;

	ss_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress;
	ss_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress;
	ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
			eth_ss_function, eth_ss_function);
	if (status)
		goto fail;

	rndis->port.open = rndis_open;
	rndis->port.close = rndis_close;

	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
	rndis_set_host_mac(rndis->params, rndis->ethaddr);

	if (rndis->manufacturer && rndis->vendorID &&
			rndis_set_param_vendor(rndis->params, rndis->vendorID,
					       rndis->manufacturer)) {
		status = -EINVAL;
		goto fail_free_descs;
	}

	/* NOTE:  all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	DBG(cdev, "RNDIS: IN/%s OUT/%s NOTIFY/%s\n",
			rndis->port.in_ep->name, rndis->port.out_ep->name,
			rndis->notify->name);
	return 0;

fail_free_descs:
	usb_free_all_descriptors(f);
fail:
	kfree(f->os_desc_table);
	f->os_desc_n = 0;

	if (rndis->notify_req) {
		kfree(rndis->notify_req->buf);
		usb_ep_free_request(rndis->notify, rndis->notify_req);
	}

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}
/*
 * Hand an externally-owned net_device to this function instance,
 * replacing (and releasing) the one allocated at instance creation.
 */
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
{
	struct f_rndis_opts *opts = container_of(f, struct f_rndis_opts,
						 func_inst);

	/* drop the netdev we created ourselves ... */
	if (!opts->bound)
		free_netdev(opts->net);
	else
		gether_cleanup(netdev_priv(opts->net));
	/* ... and adopt the borrowed one; it must not be freed by us */
	opts->bound = true;
	opts->borrowed_net = true;
	opts->net = net;
}
EXPORT_SYMBOL_GPL(rndis_borrow_net);
/* map a configfs item back to its containing f_rndis_opts */
static inline struct f_rndis_opts *to_f_rndis_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_rndis_opts,
			    func_inst.group);
}
/* configfs plumbing: the macros below expand to the item ops and the
 * show/store attribute handlers exposed in the function's directory
 */

/* f_rndis_item_ops */
USB_ETHERNET_CONFIGFS_ITEM(rndis);

/* f_rndis_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(rndis);

/* f_rndis_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(rndis);

/* f_rndis_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(rndis);

/* f_rndis_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);

/* f_rndis_opts_class */
USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, class);

/* f_rndis_opts_subclass */
USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, subclass);

/* f_rndis_opts_protocol */
USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, protocol);

static struct configfs_attribute *rndis_attrs[] = {
	&rndis_opts_attr_dev_addr,
	&rndis_opts_attr_host_addr,
	&rndis_opts_attr_qmult,
	&rndis_opts_attr_ifname,
	&rndis_opts_attr_class,
	&rndis_opts_attr_subclass,
	&rndis_opts_attr_protocol,
	NULL,
};

static const struct config_item_type rndis_func_type = {
	.ct_item_ops	= &rndis_item_ops,
	.ct_attrs	= rndis_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * free_func_inst callback: release the netdev (unless it was borrowed
 * via rndis_borrow_net(), in which case its owner frees it) and the
 * options structure itself.
 */
static void rndis_free_inst(struct usb_function_instance *f)
{
	struct f_rndis_opts *opts;

	opts = container_of(f, struct f_rndis_opts, func_inst);
	if (!opts->borrowed_net) {
		/* registered netdevs need the full gether teardown */
		if (opts->bound)
			gether_cleanup(netdev_priv(opts->net));
		else
			free_netdev(opts->net);
	}

	kfree(opts->rndis_interf_group);	/* single VLA chunk */
	kfree(opts);
}
/*
 * Allocate one function instance: options struct, default netdev and
 * the "os_desc/interface.rndis" configfs subtree.  Returns the embedded
 * usb_function_instance or an ERR_PTR.
 */
static struct usb_function_instance *rndis_alloc_inst(void)
{
	struct f_rndis_opts *opts;
	struct usb_os_desc *descs[1];
	char *names[1];
	struct config_group *rndis_interf_group;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	opts->rndis_os_desc.ext_compat_id = opts->rndis_ext_compat_id;

	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = rndis_free_inst;
	opts->net = gether_setup_default();
	if (IS_ERR(opts->net)) {
		struct net_device *net = opts->net;
		kfree(opts);
		return ERR_CAST(net);
	}
	INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);

	/* seed configfs-tweakable IAD fields from the static defaults */
	opts->class = rndis_iad_descriptor.bFunctionClass;
	opts->subclass = rndis_iad_descriptor.bFunctionSubClass;
	opts->protocol = rndis_iad_descriptor.bFunctionProtocol;

	descs[0] = &opts->rndis_os_desc;
	names[0] = "rndis";
	config_group_init_type_name(&opts->func_inst.group, "",
				    &rndis_func_type);
	rndis_interf_group =
		usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
					       names, THIS_MODULE);
	if (IS_ERR(rndis_interf_group)) {
		rndis_free_inst(&opts->func_inst);
		return ERR_CAST(rndis_interf_group);
	}
	opts->rndis_interf_group = rndis_interf_group;

	return &opts->func_inst;
}
/* free_func callback: tear down the RNDIS core state and drop our
 * reference on the options instance
 */
static void rndis_free(struct usb_function *f)
{
	struct f_rndis *rndis = func_to_rndis(f);
	struct f_rndis_opts *opts = container_of(f->fi, struct f_rndis_opts,
						 func_inst);

	rndis_deregister(rndis->params);
	kfree(rndis);

	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);
}
/*
 * unbind callback: release everything rndis_bind() acquired -- the
 * OS descriptor table, the assigned descriptors and the notification
 * request with its buffer.
 */
static void rndis_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rndis		*rndis = func_to_rndis(f);

	kfree(f->os_desc_table);
	f->os_desc_n = 0;
	usb_free_all_descriptors(f);

	kfree(rndis->notify_req->buf);
	usb_ep_free_request(rndis->notify, rndis->notify_req);
}
/*
 * Gadget alloc callback: create one f_rndis, wire up the usb_function
 * callbacks and the u_ether framing hooks, and register with the RNDIS
 * core.  Returns the embedded usb_function or an ERR_PTR.
 */
static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
{
	struct f_rndis	*rndis;
	struct f_rndis_opts *opts;
	struct rndis_params *params;

	/* allocate and initialize one new instance */
	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
	if (!rndis)
		return ERR_PTR(-ENOMEM);

	opts = container_of(fi, struct f_rndis_opts, func_inst);
	mutex_lock(&opts->lock);
	opts->refcnt++;

	gether_get_host_addr_u8(opts->net, rndis->ethaddr);
	rndis->vendorID = opts->vendor_id;
	rndis->manufacturer = opts->manufacturer;

	rndis->port.ioport = netdev_priv(opts->net);
	mutex_unlock(&opts->lock);
	/* RNDIS activates when the host changes this filter */
	rndis->port.cdc_filter = 0;

	/* RNDIS has special (and complex) framing */
	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
	rndis->port.wrap = rndis_add_header;
	rndis->port.unwrap = rndis_rm_hdr;

	rndis->port.func.name = "rndis";
	/* descriptors are per-instance copies */
	rndis->port.func.bind = rndis_bind;
	rndis->port.func.unbind = rndis_unbind;
	rndis->port.func.set_alt = rndis_set_alt;
	rndis->port.func.setup = rndis_setup;
	rndis->port.func.disable = rndis_disable;
	rndis->port.func.free_func = rndis_free;

	params = rndis_register(rndis_response_available, rndis);
	if (IS_ERR(params)) {
		kfree(rndis);
		return ERR_CAST(params);
	}
	rndis->params = params;

	return &rndis->port.func;
}
DECLARE_USB_FUNCTION_INIT(rndis, rndis_alloc_inst, rndis_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/function/f_rndis.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* storage_common.c -- Common definitions for mass storage functionality
*
* Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz ([email protected])
*/
/*
* This file requires the following identifiers used in USB strings to
* be defined (each of type pointer to char):
* - fsg_string_interface -- name of the interface
*/
/*
* When USB_GADGET_DEBUG_FILES is defined the module param num_buffers
* sets the number of pipeline buffers (length of the fsg_buffhd array).
* The valid range of num_buffers is: num >= 2 && num <= 4.
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kstrtox.h>
#include <linux/usb/composite.h>
#include "storage_common.h"
/* There is only one interface. */

struct usb_interface_descriptor fsg_intf_desc = {
	.bLength =		sizeof fsg_intf_desc,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bNumEndpoints =	2,		/* Adjusted during fsg_bind() */
	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
	.bInterfaceSubClass =	USB_SC_SCSI,	/* Adjusted during fsg_bind() */
	.bInterfaceProtocol =	USB_PR_BULK,	/* Adjusted during fsg_bind() */
	.iInterface =		FSG_STRING_INTERFACE,
};
EXPORT_SYMBOL_GPL(fsg_intf_desc);
/*
 * Three full-speed endpoint descriptors: bulk-in, bulk-out, and
 * interrupt-in.
 */
struct usb_endpoint_descriptor fsg_fs_bulk_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	/* wMaxPacketSize set by autoconfiguration */
};
EXPORT_SYMBOL_GPL(fsg_fs_bulk_in_desc);

struct usb_endpoint_descriptor fsg_fs_bulk_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	/* wMaxPacketSize set by autoconfiguration */
};
EXPORT_SYMBOL_GPL(fsg_fs_bulk_out_desc);

/* full-speed descriptor set consumed by usb_assign_descriptors() callers */
struct usb_descriptor_header *fsg_fs_function[] = {
	(struct usb_descriptor_header *) &fsg_intf_desc,
	(struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
	(struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
	NULL,
};
EXPORT_SYMBOL_GPL(fsg_fs_function);
/*
 * USB 2.0 devices need to expose both high speed and full speed
 * descriptors, unless they only run at full speed.
 *
 * That means alternate endpoint descriptors (bigger packets).
 */
struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512), /* HS bulk max packet size */
};
EXPORT_SYMBOL_GPL(fsg_hs_bulk_in_desc);
struct usb_endpoint_descriptor fsg_hs_bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
	.bInterval = 1, /* NAK every 1 uframe */
};
EXPORT_SYMBOL_GPL(fsg_hs_bulk_out_desc);
/* High-speed descriptor list handed to the composite framework. */
struct usb_descriptor_header *fsg_hs_function[] = {
	(struct usb_descriptor_header *) &fsg_intf_desc,
	(struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
	(struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
	NULL,
};
EXPORT_SYMBOL_GPL(fsg_hs_function);
struct usb_endpoint_descriptor fsg_ss_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
EXPORT_SYMBOL_GPL(fsg_ss_bulk_in_desc);
struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
.bLength = sizeof(fsg_ss_bulk_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/*.bMaxBurst = DYNAMIC, */
};
EXPORT_SYMBOL_GPL(fsg_ss_bulk_in_comp_desc);
struct usb_endpoint_descriptor fsg_ss_bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
EXPORT_SYMBOL_GPL(fsg_ss_bulk_out_desc);
struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
.bLength = sizeof(fsg_ss_bulk_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/*.bMaxBurst = DYNAMIC, */
};
EXPORT_SYMBOL_GPL(fsg_ss_bulk_out_comp_desc);
struct usb_descriptor_header *fsg_ss_function[] = {
(struct usb_descriptor_header *) &fsg_intf_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_out_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc,
NULL,
};
EXPORT_SYMBOL_GPL(fsg_ss_function);
/*-------------------------------------------------------------------------*/
/*
 * If the next two routines are called while the gadget is registered,
 * the caller must own fsg->filesem for writing.
 */

/* Drop the reference to the LUN's backing file, if one is attached. */
void fsg_lun_close(struct fsg_lun *curlun)
{
	struct file *file = curlun->filp;

	if (!file)
		return;

	LDBG(curlun, "close backing file\n");
	curlun->filp = NULL;
	fput(file);
}
EXPORT_SYMBOL_GPL(fsg_lun_close);
/*
 * Attach the file or block device at @filename as @curlun's backing store.
 * Opens read/write when permitted, falling back to read-only; validates
 * the file type and size, derives the logical block size, and on success
 * replaces any previously attached file.  Returns 0 or a negative errno.
 */
int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
{
	int ro;
	struct file *filp = NULL;
	int rc = -EINVAL;
	struct inode *inode = NULL;
	loff_t size;
	loff_t num_sectors;
	loff_t min_sectors;
	unsigned int blkbits;
	unsigned int blksize;
	/* R/W if we can, R/O if we must */
	ro = curlun->initially_ro;
	if (!ro) {
		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
		/* Retry read-only below if the file itself refuses writes. */
		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
			ro = 1;
	}
	if (ro)
		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
	if (IS_ERR(filp)) {
		LINFO(curlun, "unable to open backing file: %s\n", filename);
		return PTR_ERR(filp);
	}
	if (!(filp->f_mode & FMODE_WRITE))
		ro = 1;
	inode = filp->f_mapping->host;
	/* Only regular files and block devices make sense as backing store. */
	if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
		LINFO(curlun, "invalid file type: %s\n", filename);
		goto out;
	}
	/*
	 * If we can't read the file, it's no good.
	 * If we can't write the file, use it read-only.
	 */
	if (!(filp->f_mode & FMODE_CAN_READ)) {
		LINFO(curlun, "file not readable: %s\n", filename);
		goto out;
	}
	if (!(filp->f_mode & FMODE_CAN_WRITE))
		ro = 1;
	size = i_size_read(inode);
	if (size < 0) {
		LINFO(curlun, "unable to find file size: %s\n", filename);
		rc = (int) size;
		goto out;
	}
	/*
	 * Logical block size: 2048 for CD-ROM emulation, the device's own
	 * size for block devices, 512 for plain files.
	 */
	if (curlun->cdrom) {
		blksize = 2048;
		blkbits = 11;
	} else if (S_ISBLK(inode->i_mode)) {
		blksize = bdev_logical_block_size(I_BDEV(inode));
		blkbits = blksize_bits(blksize);
	} else {
		blksize = 512;
		blkbits = 9;
	}
	num_sectors = size >> blkbits; /* File size in logic-block-size blocks */
	min_sectors = 1;
	if (curlun->cdrom) {
		min_sectors = 300; /* Smallest track is 300 frames */
		/* MSF addressing cannot express more than 255 minutes. */
		if (num_sectors >= 256*60*75) {
			num_sectors = 256*60*75 - 1;
			LINFO(curlun, "file too big: %s\n", filename);
			LINFO(curlun, "using only first %d blocks\n",
			      (int) num_sectors);
		}
	}
	if (num_sectors < min_sectors) {
		LINFO(curlun, "file too small: %s\n", filename);
		rc = -ETOOSMALL;
		goto out;
	}
	/* All checks passed: swap in the new backing file atomically. */
	if (fsg_lun_is_open(curlun))
		fsg_lun_close(curlun);
	curlun->blksize = blksize;
	curlun->blkbits = blkbits;
	curlun->ro = ro;
	curlun->filp = filp;
	curlun->file_length = size;
	curlun->num_sectors = num_sectors;
	LDBG(curlun, "open backing file: %s\n", filename);
	return 0;
out:
	fput(filp);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_lun_open);
/*-------------------------------------------------------------------------*/
/*
 * Sync the file data, don't bother with the metadata.
 * A read-only or unbacked LUN has nothing to flush.
 */
int fsg_lun_fsync_sub(struct fsg_lun *curlun)
{
	if (curlun->ro || !curlun->filp)
		return 0;
	return vfs_fsync(curlun->filp, 1);
}
EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
/*
 * Encode a CD sector address into @dest: either as a raw big-endian LBA,
 * or (if @msf) in Minutes-Seconds-Frames form for 2048-byte sectors.
 */
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
	if (!msf) {
		/* Absolute sector, big-endian. */
		put_unaligned_be32(addr, dest);
		return;
	}

	/*
	 * MSF form: add the 2-second lead-in, then split into
	 * 75 frames per second and 60 seconds per minute.
	 */
	addr += 2 * 75;
	dest[0] = 0;			/* Reserved */
	dest[1] = addr / (75 * 60);	/* Minutes */
	dest[2] = (addr / 75) % 60;	/* Seconds */
	dest[3] = addr % 75;		/* Frames */
}
EXPORT_SYMBOL_GPL(store_cdrom_address);
/*-------------------------------------------------------------------------*/
/* Report the effective RO flag: live state if backed, configured otherwise. */
ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf)
{
	int ro;

	if (fsg_lun_is_open(curlun))
		ro = curlun->ro;
	else
		ro = curlun->initially_ro;
	return sprintf(buf, "%d\n", ro);
}
EXPORT_SYMBOL_GPL(fsg_show_ro);
/* Report whether Force Unit Access handling is disabled for this LUN. */
ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf)
{
	return sprintf(buf, "%u\n", curlun->nofua ? 1 : 0);
}
EXPORT_SYMBOL_GPL(fsg_show_nofua);
/*
 * Write the full pathname of the backing file (plus a trailing newline)
 * into @buf, or an empty string when no file is attached.  Holds
 * @filesem for reading so the file cannot change underneath us.
 */
ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
		      char *buf)
{
	ssize_t rc = 0;

	down_read(filesem);
	if (!fsg_lun_is_open(curlun)) {
		/* No backing file: report an empty string. */
		buf[0] = 0;
	} else {
		/* file_path() builds the name at the tail of the buffer. */
		char *path = file_path(curlun->filp, buf, PAGE_SIZE - 1);

		if (IS_ERR(path)) {
			rc = PTR_ERR(path);
		} else {
			rc = strlen(path);
			memmove(buf, path, rc);
			buf[rc++] = '\n';	/* Add a newline */
			buf[rc] = 0;
		}
	}
	up_read(filesem);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_show_file);
/* Report whether the LUN emulates a CD-ROM drive. */
ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf)
{
	return sprintf(buf, "%u\n", curlun->cdrom ? 1 : 0);
}
EXPORT_SYMBOL_GPL(fsg_show_cdrom);
/* Report whether the LUN advertises removable media. */
ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf)
{
	return sprintf(buf, "%u\n", curlun->removable ? 1 : 0);
}
EXPORT_SYMBOL_GPL(fsg_show_removable);
/* Report the SCSI INQUIRY vendor/product/revision string for this LUN. */
ssize_t fsg_show_inquiry_string(struct fsg_lun *curlun, char *buf)
{
	const char *inq = curlun->inquiry_string;

	return sprintf(buf, "%s\n", inq);
}
EXPORT_SYMBOL_GPL(fsg_show_inquiry_string);
/*
 * Update the read-only flag.  Refused while a backing file is open, since
 * the host has already been told the current write-protect state.
 * The caller must hold fsg->filesem for reading when calling this function.
 */
static ssize_t _fsg_store_ro(struct fsg_lun *curlun, bool ro)
{
	if (fsg_lun_is_open(curlun)) {
		LDBG(curlun, "read-only status change prevented\n");
		return -EBUSY;
	}

	curlun->ro = ro;
	curlun->initially_ro = ro;
	LDBG(curlun, "read-only status set to %d\n", curlun->ro);

	return 0;
}
/*
 * Store handler for "ro": parse a boolean and update the LUN's read-only
 * state.  The write-enable status may only change while the backing file
 * is closed, which holding @filesem guarantees for the duration.
 */
ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem,
		     const char *buf, size_t count)
{
	bool ro;
	ssize_t rc = kstrtobool(buf, &ro);

	if (rc)
		return rc;

	down_read(filesem);
	rc = _fsg_store_ro(curlun, ro);
	up_read(filesem);

	return rc ? rc : count;
}
EXPORT_SYMBOL_GPL(fsg_store_ro);
/*
 * Store handler for "nofua".  When FUA handling is re-enabled
 * (async -> sync), flush any data written while it was off.
 */
ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count)
{
	bool nofua;
	int rc = kstrtobool(buf, &nofua);

	if (rc)
		return rc;

	/* Sync data when switching from async mode to sync */
	if (curlun->nofua && !nofua)
		fsg_lun_fsync_sub(curlun);

	curlun->nofua = nofua;
	return count;
}
EXPORT_SYMBOL_GPL(fsg_store_nofua);
/*
 * Store handler for "file": attach a new backing file (the written
 * pathname) or detach the current one (empty write).  Ejection is refused
 * while the host has locked the medium via PREVENT MEDIUM REMOVAL.
 */
ssize_t fsg_store_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
		       const char *buf, size_t count)
{
	int rc = 0;
	if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
		LDBG(curlun, "eject attempt prevented\n");
		return -EBUSY; /* "Door is locked" */
	}
	/* Remove a trailing newline */
	if (count > 0 && buf[count-1] == '\n')
		/*
		 * NOTE(review): casting away const mutates the caller's
		 * buffer in place — relies on sysfs/configfs handing us a
		 * writable copy.
		 */
		((char *) buf)[count-1] = 0; /* Ugh! */
	/* Load new medium */
	down_write(filesem);
	if (count > 0 && buf[0]) {
		/* fsg_lun_open() will close existing file if any. */
		rc = fsg_lun_open(curlun, buf);
		if (rc == 0)
			curlun->unit_attention_data =
				SS_NOT_READY_TO_READY_TRANSITION;
	} else if (fsg_lun_is_open(curlun)) {
		/* Empty filename: eject the current medium. */
		fsg_lun_close(curlun);
		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
	}
	up_write(filesem);
	return (rc < 0 ? rc : count);
}
EXPORT_SYMBOL_GPL(fsg_store_file);
/*
 * Store handler for "cdrom".  Emulating a CD-ROM forces the LUN
 * read-only; like "ro", the setting can only change while no backing
 * file is open.
 */
ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem,
			const char *buf, size_t count)
{
	bool cdrom;
	int ret = kstrtobool(buf, &cdrom);

	if (ret)
		return ret;

	down_read(filesem);
	ret = cdrom ? _fsg_store_ro(curlun, true) : 0;
	if (ret == 0) {
		curlun->cdrom = cdrom;
		ret = count;
	}
	up_read(filesem);

	return ret;
}
EXPORT_SYMBOL_GPL(fsg_store_cdrom);
/* Store handler for "removable": parse a boolean and record it. */
ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
			    size_t count)
{
	bool removable;
	int rc = kstrtobool(buf, &removable);

	if (rc)
		return rc;

	curlun->removable = removable;
	return count;
}
EXPORT_SYMBOL_GPL(fsg_store_removable);
/*
 * Store handler for "inquiry_string".  The string is space-padded to the
 * fixed 28-byte INQUIRY response width; an empty write or one starting
 * with a newline clears it.
 */
ssize_t fsg_store_inquiry_string(struct fsg_lun *curlun, const char *buf,
				 size_t count)
{
	const size_t len = min(count, sizeof(curlun->inquiry_string));

	if (len == 0 || buf[0] == '\n') {
		curlun->inquiry_string[0] = 0;
		return count;
	}

	snprintf(curlun->inquiry_string, sizeof(curlun->inquiry_string),
		 "%-28s", buf);
	/* A trailing newline copied from the input becomes padding. */
	if (curlun->inquiry_string[len - 1] == '\n')
		curlun->inquiry_string[len - 1] = ' ';

	return count;
}
EXPORT_SYMBOL_GPL(fsg_store_inquiry_string);
/*
 * Store handler for "forced_eject": unconditionally detach the backing
 * file, overriding any PREVENT MEDIUM REMOVAL lock issued by the host.
 */
ssize_t fsg_store_forced_eject(struct fsg_lun *curlun, struct rw_semaphore *filesem,
			       const char *buf, size_t count)
{
	int rc;

	/* Clear the host's removal lock, then eject via an empty filename. */
	curlun->prevent_medium_removal = 0;
	rc = fsg_store_file(curlun, filesem, "", 0);

	return rc < 0 ? rc : count;
}
EXPORT_SYMBOL_GPL(fsg_store_forced_eject);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/storage_common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_gadget.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart ([email protected])
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/g_uvc.h>
#include <linux/usb/video.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include "uvc.h"
#include "uvc_configfs.h"
#include "uvc_v4l2.h"
#include "uvc_video.h"
/* Trace level bitmask, writable at runtime via the "trace" module param. */
unsigned int uvc_gadget_trace_param;
module_param_named(trace, uvc_gadget_trace_param, uint, 0644);
MODULE_PARM_DESC(trace, "Trace level bitmask");
/* --------------------------------------------------------------------------
 * Function descriptors
 */
/* string IDs are assigned dynamically */
static struct usb_string uvc_en_us_strings[] = {
	/* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */
	[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
	{ }
};
static struct usb_gadget_strings uvc_stringtab = {
	.language = 0x0409, /* en-us */
	.strings = uvc_en_us_strings,
};
/* NULL-terminated list of string tables attached at bind time. */
static struct usb_gadget_strings *uvc_function_strings[] = {
	&uvc_stringtab,
	NULL,
};
/* Interface positions within the function; relocated during bind. */
#define UVC_INTF_VIDEO_CONTROL 0
#define UVC_INTF_VIDEO_STREAMING 1
#define UVC_STATUS_MAX_PACKET_SIZE 16 /* 16 bytes status */
/* IAD grouping the VideoControl + VideoStreaming interfaces into one function. */
static struct usb_interface_assoc_descriptor uvc_iad = {
	.bLength = sizeof(uvc_iad),
	.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
	.bFirstInterface = 0,
	.bInterfaceCount = 2,
	.bFunctionClass = USB_CLASS_VIDEO,
	.bFunctionSubClass = UVC_SC_VIDEO_INTERFACE_COLLECTION,
	.bFunctionProtocol = 0x00,
	.iFunction = 0,
};
/* VideoControl interface (alt 0 only; interrupt EP added when enabled). */
static struct usb_interface_descriptor uvc_control_intf = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
	.bAlternateSetting = 0,
	.bNumEndpoints = 0,
	.bInterfaceClass = USB_CLASS_VIDEO,
	.bInterfaceSubClass = UVC_SC_VIDEOCONTROL,
	.bInterfaceProtocol = 0x00,
	.iInterface = 0,
};
/* Optional interrupt (status) endpoint of the control interface. */
static struct usb_endpoint_descriptor uvc_interrupt_ep = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE),
	.bInterval = 8,
};
/* SuperSpeed companion for the interrupt endpoint. */
static struct usb_ss_ep_comp_descriptor uvc_ss_interrupt_comp = {
	.bLength = sizeof(uvc_ss_interrupt_comp),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* The following 3 values can be tweaked if necessary. */
	.bMaxBurst = 0,
	.bmAttributes = 0,
	.wBytesPerInterval = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE),
};
/* Class-specific descriptor describing the interrupt endpoint. */
static struct uvc_control_endpoint_descriptor uvc_interrupt_cs_ep = {
	.bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_CS_ENDPOINT,
	.bDescriptorSubType = UVC_EP_INTERRUPT,
	.wMaxTransferSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE),
};
/* VideoStreaming interface, alt 0: no endpoints (zero-bandwidth idle). */
static struct usb_interface_descriptor uvc_streaming_intf_alt0 = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
	.bAlternateSetting = 0,
	.bNumEndpoints = 0,
	.bInterfaceClass = USB_CLASS_VIDEO,
	.bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
	.bInterfaceProtocol = 0x00,
	.iInterface = 0,
};
/* VideoStreaming interface, alt 1: one isochronous IN endpoint, streaming. */
static struct usb_interface_descriptor uvc_streaming_intf_alt1 = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
	.bAlternateSetting = 1,
	.bNumEndpoints = 1,
	.bInterfaceClass = USB_CLASS_VIDEO,
	.bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
	.bInterfaceProtocol = 0x00,
	.iInterface = 0,
};
static struct usb_endpoint_descriptor uvc_fs_streaming_ep = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
		      | USB_ENDPOINT_XFER_ISOC,
	/*
	 * The wMaxPacketSize and bInterval values will be initialized from
	 * module parameters.
	 */
};
static struct usb_endpoint_descriptor uvc_hs_streaming_ep = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
		      | USB_ENDPOINT_XFER_ISOC,
	/*
	 * The wMaxPacketSize and bInterval values will be initialized from
	 * module parameters.
	 */
};
static struct usb_endpoint_descriptor uvc_ss_streaming_ep = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
		      | USB_ENDPOINT_XFER_ISOC,
	/*
	 * The wMaxPacketSize and bInterval values will be initialized from
	 * module parameters.
	 */
};
static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = {
	.bLength = sizeof(uvc_ss_streaming_comp),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/*
	 * The bMaxBurst, bmAttributes and wBytesPerInterval values will be
	 * initialized from module parameters.
	 */
};
/* Per-speed streaming descriptor chains appended by uvc_copy_descriptors(). */
static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
	(struct usb_descriptor_header *) &uvc_fs_streaming_ep,
	NULL,
};
static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
	(struct usb_descriptor_header *) &uvc_hs_streaming_ep,
	NULL,
};
static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
	(struct usb_descriptor_header *) &uvc_ss_streaming_ep,
	(struct usb_descriptor_header *) &uvc_ss_streaming_comp,
	NULL,
};
/* --------------------------------------------------------------------------
* Control requests
*/
/*
 * ep0 completion handler: when a control OUT data stage finishes, forward
 * the received payload to userspace as a UVC_EVENT_DATA v4l2 event.
 */
static void
uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_device *uvc = req->context;
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
	unsigned int len;

	if (!uvc->event_setup_out)
		return;
	uvc->event_setup_out = 0;

	memset(&v4l2_event, 0, sizeof(v4l2_event));
	v4l2_event.type = UVC_EVENT_DATA;
	len = min_t(unsigned int, req->actual, sizeof(uvc_event->data.data));
	uvc_event->data.length = len;
	memcpy(&uvc_event->data.data, req->buf, len);
	v4l2_event_queue(&uvc->vdev, &v4l2_event);
}
/*
 * Handle class-specific control requests on ep0.  The request is not
 * decoded here: it is forwarded to userspace as a UVC_EVENT_SETUP event,
 * and the userspace helper answers it via UVCIOC_SEND_RESPONSE.
 */
static int
uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct uvc_device *uvc = to_uvc(f);
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
	/* Low byte of wIndex carries the interface number for class requests. */
	unsigned int interface = le16_to_cpu(ctrl->wIndex) & 0xff;
	struct usb_ctrlrequest *mctrl;
	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
		uvcg_info(f, "invalid request type\n");
		return -EINVAL;
	}
	/* Stall too big requests. */
	if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
		return -EINVAL;
	/*
	 * Tell the complete callback to generate an event for the next request
	 * that will be enqueued by UVCIOC_SEND_RESPONSE.
	 */
	uvc->event_setup_out = !(ctrl->bRequestType & USB_DIR_IN);
	uvc->event_length = le16_to_cpu(ctrl->wLength);
	memset(&v4l2_event, 0, sizeof(v4l2_event));
	v4l2_event.type = UVC_EVENT_SETUP;
	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
	/* check for the interface number, fixup the interface number in
	 * the ctrl request so the userspace doesn't have to bother with
	 * offset and configfs parsing
	 */
	mctrl = &uvc_event->req;
	mctrl->wIndex &= ~cpu_to_le16(0xff);
	/*
	 * NOTE(review): for streaming-interface requests the whole wIndex is
	 * overwritten ('=' rather than '|='), discarding the high byte the
	 * mask above just preserved — presumably that byte is always zero
	 * for the streaming interface; confirm against the userspace ABI.
	 */
	if (interface == uvc->streaming_intf)
		mctrl->wIndex = cpu_to_le16(UVC_STRING_STREAMING_IDX);
	v4l2_event_queue(&uvc->vdev, &v4l2_event);
	return 0;
}
/*
 * Resume the control transfer that set_alt() parked with
 * USB_GADGET_DELAYED_STATUS once userspace has finished starting the stream.
 */
void uvc_function_setup_continue(struct uvc_device *uvc)
{
	usb_composite_setup_continue(uvc->func.config->cdev);
}
/*
 * Report the active alternate setting: the control interface only has
 * alt 0; the streaming interface is at alt 1 exactly while its video
 * endpoint is enabled.
 */
static int
uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
	struct uvc_device *uvc = to_uvc(f);

	uvcg_info(f, "%s(%u)\n", __func__, interface);

	if (interface == uvc->control_intf)
		return 0;
	if (interface == uvc->streaming_intf)
		return uvc->video.ep->enabled ? 1 : 0;
	return -EINVAL;
}
/*
 * SET_INTERFACE handler.  Control interface: re-arm the optional interrupt
 * endpoint and report the (re)connection to userspace.  Streaming
 * interface: alt 0 stops streaming, alt 1 (re)enables the video endpoint
 * and defers the status stage until userspace has started the stream.
 */
static int
uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
{
	struct uvc_device *uvc = to_uvc(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
	int ret;
	uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt);
	if (interface == uvc->control_intf) {
		/* The control interface has no alternate settings. */
		if (alt)
			return -EINVAL;
		if (uvc->enable_interrupt_ep) {
			uvcg_info(f, "reset UVC interrupt endpoint\n");
			usb_ep_disable(uvc->interrupt_ep);
			/* Pick the descriptor matching the negotiated speed. */
			if (!uvc->interrupt_ep->desc)
				if (config_ep_by_speed(cdev->gadget, f,
						       uvc->interrupt_ep))
					return -EINVAL;
			usb_ep_enable(uvc->interrupt_ep);
		}
		/* First set_alt after enumeration: tell userspace we're live. */
		if (uvc->state == UVC_STATE_DISCONNECTED) {
			memset(&v4l2_event, 0, sizeof(v4l2_event));
			v4l2_event.type = UVC_EVENT_CONNECT;
			uvc_event->speed = cdev->gadget->speed;
			v4l2_event_queue(&uvc->vdev, &v4l2_event);
			uvc->state = UVC_STATE_CONNECTED;
		}
		return 0;
	}
	if (interface != uvc->streaming_intf)
		return -EINVAL;
	/* TODO
	if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
		return alt ? -EINVAL : 0;
	*/
	switch (alt) {
	case 0:
		/* Zero-bandwidth setting: stop streaming if active. */
		if (uvc->state != UVC_STATE_STREAMING)
			return 0;
		if (uvc->video.ep)
			usb_ep_disable(uvc->video.ep);
		memset(&v4l2_event, 0, sizeof(v4l2_event));
		v4l2_event.type = UVC_EVENT_STREAMOFF;
		v4l2_event_queue(&uvc->vdev, &v4l2_event);
		uvc->state = UVC_STATE_CONNECTED;
		return 0;
	case 1:
		if (uvc->state != UVC_STATE_CONNECTED)
			return 0;
		if (!uvc->video.ep)
			return -EINVAL;
		uvcg_info(f, "reset UVC\n");
		usb_ep_disable(uvc->video.ep);
		ret = config_ep_by_speed(f->config->cdev->gadget,
					 &(uvc->func), uvc->video.ep);
		if (ret)
			return ret;
		usb_ep_enable(uvc->video.ep);
		memset(&v4l2_event, 0, sizeof(v4l2_event));
		v4l2_event.type = UVC_EVENT_STREAMON;
		v4l2_event_queue(&uvc->vdev, &v4l2_event);
		/*
		 * Delay the status stage; userspace completes it through
		 * uvc_function_setup_continue() once streaming has started.
		 */
		return USB_GADGET_DELAYED_STATUS;
	default:
		return -EINVAL;
	}
}
/*
 * Function disable callback: the host has deconfigured or reset the
 * device.  Report the disconnect to userspace, then shut every endpoint
 * down.
 */
static void
uvc_function_disable(struct usb_function *f)
{
	struct uvc_device *uvc = to_uvc(f);
	struct v4l2_event v4l2_event;
	uvcg_info(f, "%s()\n", __func__);
	/* Queue the DISCONNECT event before tearing down endpoint state. */
	memset(&v4l2_event, 0, sizeof(v4l2_event));
	v4l2_event.type = UVC_EVENT_DISCONNECT;
	v4l2_event_queue(&uvc->vdev, &v4l2_event);
	uvc->state = UVC_STATE_DISCONNECTED;
	usb_ep_disable(uvc->video.ep);
	if (uvc->enable_interrupt_ep)
		usb_ep_disable(uvc->interrupt_ep);
}
/* --------------------------------------------------------------------------
* Connection / disconnection
*/
/* Soft-connect the function (pull up D+/enable the UDC for it). */
void
uvc_function_connect(struct uvc_device *uvc)
{
	int ret = usb_function_activate(&uvc->func);

	if (ret < 0)
		uvcg_info(&uvc->func, "UVC connect failed with %d\n", ret);
}
/* Soft-disconnect the function. */
void
uvc_function_disconnect(struct uvc_device *uvc)
{
	int ret = usb_function_deactivate(&uvc->func);

	if (ret < 0)
		uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret);
}
/* --------------------------------------------------------------------------
* USB probe and disconnect
*/
/* sysfs "function_name" attribute: the configfs name of this instance. */
static ssize_t function_name_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct uvc_device *uvc = dev_get_drvdata(dev);
	const char *name = uvc->func.fi->group.cg_item.ci_name;

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(function_name);
/*
 * Initialise and register the v4l2 video device userspace uses to drive
 * the gadget.  On success a "function_name" sysfs attribute is attached;
 * failure to create it unregisters the device again.
 */
static int
uvc_register_video(struct uvc_device *uvc)
{
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	int ret;
	/* TODO reference counting. */
	memset(&uvc->vdev, 0, sizeof(uvc->vdev));
	uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
	uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
	uvc->vdev.fops = &uvc_v4l2_fops;
	uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops;
	uvc->vdev.release = video_device_release_empty;
	/* Output device: userspace writes frames that go to the host. */
	uvc->vdev.vfl_dir = VFL_DIR_TX;
	uvc->vdev.lock = &uvc->video.mutex;
	uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
	strscpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
	video_set_drvdata(&uvc->vdev, uvc);
	ret = video_register_device(&uvc->vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		return ret;
	ret = device_create_file(&uvc->vdev.dev, &dev_attr_function_name);
	if (ret < 0) {
		video_unregister_device(&uvc->vdev);
		return ret;
	}
	return 0;
}
/*
 * Copy one descriptor into the memory pool at @mem, append a pointer to
 * the copy to the header array at @dst, and advance both cursors.
 * The stray trailing ';' after the do/while(0) has been removed: it made
 * the macro expand to two statements, which breaks when the macro is used
 * as the body of an unbraced if/else.
 */
#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
	do { \
		memcpy(mem, desc, (desc)->bLength); \
		*(dst)++ = mem; \
		mem += (desc)->bLength; \
	} while (0)
/* Copy an entire NULL-terminated descriptor chain, advancing the cursors. */
#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
	do { \
		const struct usb_descriptor_header * const *__src; \
		for (__src = src; *__src; ++__src) { \
			memcpy(mem, *__src, (*__src)->bLength); \
			*dst++ = mem; \
			mem += (*__src)->bLength; \
		} \
	} while (0)
/*
 * Extension Unit descriptors have two variable-length arrays (baSourceID
 * and bmControls), so they must be serialised piecewise rather than with
 * a single memcpy of bLength bytes.
 */
#define UVC_COPY_XU_DESCRIPTOR(mem, dst, desc) \
	do { \
		*(dst)++ = mem; \
		memcpy(mem, desc, 22); /* bLength to bNrInPins */ \
		mem += 22; \
		\
		memcpy(mem, (desc)->baSourceID, (desc)->bNrInPins); \
		mem += (desc)->bNrInPins; \
		\
		memcpy(mem, &(desc)->bControlSize, 1); \
		mem++; \
		\
		memcpy(mem, (desc)->bmControls, (desc)->bControlSize); \
		mem += (desc)->bControlSize; \
		\
		memcpy(mem, &(desc)->iExtension, 1); \
		mem++; \
	} while (0)
/*
 * Build the flat, NULL-terminated descriptor array for @speed by
 * concatenating the fixed framework descriptors with the configfs-provided
 * class-specific control/streaming chains and any extension units.
 * Returns a single kmalloc'd allocation (pointer table + descriptor copies),
 * NULL on allocation failure, or ERR_PTR(-ENODEV) when the needed
 * class descriptors were not configured.
 */
static struct usb_descriptor_header **
uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
{
	struct uvc_input_header_descriptor *uvc_streaming_header;
	struct uvc_header_descriptor *uvc_control_header;
	const struct uvc_descriptor_header * const *uvc_control_desc;
	const struct uvc_descriptor_header * const *uvc_streaming_cls;
	const struct usb_descriptor_header * const *uvc_streaming_std;
	const struct usb_descriptor_header * const *src;
	struct usb_descriptor_header **dst;
	struct usb_descriptor_header **hdr;
	struct uvcg_extension *xu;
	unsigned int control_size;
	unsigned int streaming_size;
	unsigned int n_desc;
	unsigned int bytes;
	void *mem;
	/*
	 * Select the per-speed source chains.  Note the class control
	 * descriptors have no HS variant: high speed reuses fs_control.
	 */
	switch (speed) {
	case USB_SPEED_SUPER:
		uvc_control_desc = uvc->desc.ss_control;
		uvc_streaming_cls = uvc->desc.ss_streaming;
		uvc_streaming_std = uvc_ss_streaming;
		break;
	case USB_SPEED_HIGH:
		uvc_control_desc = uvc->desc.fs_control;
		uvc_streaming_cls = uvc->desc.hs_streaming;
		uvc_streaming_std = uvc_hs_streaming;
		break;
	case USB_SPEED_FULL:
	default:
		uvc_control_desc = uvc->desc.fs_control;
		uvc_streaming_cls = uvc->desc.fs_streaming;
		uvc_streaming_std = uvc_fs_streaming;
		break;
	}
	if (!uvc_control_desc || !uvc_streaming_cls)
		return ERR_PTR(-ENODEV);
	/*
	 * Descriptors layout
	 *
	 * uvc_iad
	 * uvc_control_intf
	 * Class-specific UVC control descriptors
	 * uvc_interrupt_ep
	 * uvc_interrupt_cs_ep
	 * uvc_ss_interrupt_comp (for SS only)
	 * uvc_streaming_intf_alt0
	 * Class-specific UVC streaming descriptors
	 * uvc_{fs|hs}_streaming
	 */
	/* Count descriptors and compute their size. */
	control_size = 0;
	streaming_size = 0;
	bytes = uvc_iad.bLength + uvc_control_intf.bLength
	      + uvc_streaming_intf_alt0.bLength;
	n_desc = 3;
	if (uvc->enable_interrupt_ep) {
		bytes += uvc_interrupt_ep.bLength + uvc_interrupt_cs_ep.bLength;
		n_desc += 2;
		if (speed == USB_SPEED_SUPER) {
			bytes += uvc_ss_interrupt_comp.bLength;
			n_desc += 1;
		}
	}
	for (src = (const struct usb_descriptor_header **)uvc_control_desc;
	     *src; ++src) {
		control_size += (*src)->bLength;
		bytes += (*src)->bLength;
		n_desc++;
	}
	/* Extension units count toward the class control total length. */
	list_for_each_entry(xu, uvc->desc.extension_units, list) {
		control_size += xu->desc.bLength;
		bytes += xu->desc.bLength;
		n_desc++;
	}
	for (src = (const struct usb_descriptor_header **)uvc_streaming_cls;
	     *src; ++src) {
		streaming_size += (*src)->bLength;
		bytes += (*src)->bLength;
		n_desc++;
	}
	for (src = uvc_streaming_std; *src; ++src) {
		bytes += (*src)->bLength;
		n_desc++;
	}
	/* One allocation: (n_desc + 1) pointers followed by the raw bytes. */
	mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
	if (mem == NULL)
		return NULL;
	hdr = mem;
	dst = mem;
	mem += (n_desc + 1) * sizeof(*src);
	/* Copy the descriptors. */
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad);
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf);
	/* Remember the copied class header to patch its totals below. */
	uvc_control_header = mem;
	UVC_COPY_DESCRIPTORS(mem, dst,
		(const struct usb_descriptor_header **)uvc_control_desc);
	list_for_each_entry(xu, uvc->desc.extension_units, list)
		UVC_COPY_XU_DESCRIPTOR(mem, dst, &xu->desc);
	uvc_control_header->wTotalLength = cpu_to_le16(control_size);
	uvc_control_header->bInCollection = 1;
	uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
	if (uvc->enable_interrupt_ep) {
		UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_ep);
		if (speed == USB_SPEED_SUPER)
			UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_interrupt_comp);
		UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_cs_ep);
	}
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
	uvc_streaming_header = mem;
	UVC_COPY_DESCRIPTORS(mem, dst,
		(const struct usb_descriptor_header**)uvc_streaming_cls);
	uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
	uvc_streaming_header->bEndpointAddress = uvc->video.ep->address;
	UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);
	*dst = NULL;
	return hdr;
}
/*
 * Bind the UVC function to a configuration: validate and apply the
 * streaming endpoint parameters, allocate endpoints and interface IDs,
 * attach strings, build the per-speed descriptor arrays, preallocate the
 * ep0 request, and register the v4l2 device userspace will drive.
 */
static int
uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct uvc_device *uvc = to_uvc(f);
	struct uvcg_extension *xu;
	struct usb_string *us;
	unsigned int max_packet_mult;
	unsigned int max_packet_size;
	struct usb_ep *ep;
	struct f_uvc_opts *opts;
	int ret = -EINVAL;
	uvcg_info(f, "%s()\n", __func__);
	opts = fi_to_f_uvc_opts(f->fi);
	/* Sanity check the streaming endpoint module parameters. */
	opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U);
	opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
	opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
	/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
	if (opts->streaming_maxburst &&
	    (opts->streaming_maxpacket % 1024) != 0) {
		opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
		uvcg_info(f, "overriding streaming_maxpacket to %d\n",
			  opts->streaming_maxpacket);
	}
	/*
	 * Fill in the FS/HS/SS Video Streaming specific descriptors from the
	 * module parameters.
	 *
	 * NOTE: We assume that the user knows what they are doing and won't
	 * give parameters that their UDC doesn't support.
	 */
	if (opts->streaming_maxpacket <= 1024) {
		max_packet_mult = 1;
		max_packet_size = opts->streaming_maxpacket;
	} else if (opts->streaming_maxpacket <= 2048) {
		max_packet_mult = 2;
		max_packet_size = opts->streaming_maxpacket / 2;
	} else {
		max_packet_mult = 3;
		max_packet_size = opts->streaming_maxpacket / 3;
	}
	/* FS isochronous packets are capped at 1023 bytes. */
	uvc_fs_streaming_ep.wMaxPacketSize =
		cpu_to_le16(min(opts->streaming_maxpacket, 1023U));
	uvc_fs_streaming_ep.bInterval = opts->streaming_interval;
	/* HS packs the multiplier into bits 12:11 of wMaxPacketSize. */
	uvc_hs_streaming_ep.wMaxPacketSize =
		cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
	/* A high-bandwidth endpoint must specify a bInterval value of 1 */
	if (max_packet_mult > 1)
		uvc_hs_streaming_ep.bInterval = 1;
	else
		uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
	uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
	uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
	uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1;
	uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
	uvc_ss_streaming_comp.wBytesPerInterval =
		cpu_to_le16(max_packet_size * max_packet_mult *
			    (opts->streaming_maxburst + 1));
	/* Allocate endpoints. */
	if (opts->enable_interrupt_ep) {
		ep = usb_ep_autoconfig(cdev->gadget, &uvc_interrupt_ep);
		if (!ep) {
			uvcg_info(f, "Unable to allocate interrupt EP\n");
			goto error;
		}
		uvc->interrupt_ep = ep;
		uvc_control_intf.bNumEndpoints = 1;
	}
	uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
	ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
	if (!ep) {
		uvcg_info(f, "Unable to allocate streaming EP\n");
		goto error;
	}
	uvc->video.ep = ep;
	/* HS/SS reuse the address autoconfig picked for the FS endpoint. */
	uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
	uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
	/*
	 * XUs can have an arbitrary string descriptor describing them. If they
	 * have one pick up the ID.
	 */
	list_for_each_entry(xu, &opts->extension_units, list)
		if (xu->string_descriptor_index)
			xu->desc.iExtension = cdev->usb_strings[xu->string_descriptor_index].id;
	/*
	 * We attach the hard-coded defaults incase the user does not provide
	 * any more appropriate strings through configfs.
	 */
	uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
	us = usb_gstrings_attach(cdev, uvc_function_strings,
				 ARRAY_SIZE(uvc_en_us_strings));
	if (IS_ERR(us)) {
		ret = PTR_ERR(us);
		goto error;
	}
	/* Prefer configfs-provided string IDs over the built-in defaults. */
	uvc_iad.iFunction = opts->iad_index ? cdev->usb_strings[opts->iad_index].id :
			    us[UVC_STRING_CONTROL_IDX].id;
	uvc_streaming_intf_alt0.iInterface = opts->vs0_index ?
					     cdev->usb_strings[opts->vs0_index].id :
					     us[UVC_STRING_STREAMING_IDX].id;
	uvc_streaming_intf_alt1.iInterface = opts->vs1_index ?
					     cdev->usb_strings[opts->vs1_index].id :
					     us[UVC_STRING_STREAMING_IDX].id;
	/* Allocate interface IDs. */
	if ((ret = usb_interface_id(c, f)) < 0)
		goto error;
	uvc_iad.bFirstInterface = ret;
	uvc_control_intf.bInterfaceNumber = ret;
	uvc->control_intf = ret;
	opts->control_interface = ret;
	if ((ret = usb_interface_id(c, f)) < 0)
		goto error;
	uvc_streaming_intf_alt0.bInterfaceNumber = ret;
	uvc_streaming_intf_alt1.bInterfaceNumber = ret;
	uvc->streaming_intf = ret;
	opts->streaming_interface = ret;
	/* Copy descriptors */
	f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
	if (IS_ERR(f->fs_descriptors)) {
		ret = PTR_ERR(f->fs_descriptors);
		f->fs_descriptors = NULL;
		goto error;
	}
	f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
	if (IS_ERR(f->hs_descriptors)) {
		ret = PTR_ERR(f->hs_descriptors);
		f->hs_descriptors = NULL;
		goto error;
	}
	f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER);
	if (IS_ERR(f->ss_descriptors)) {
		ret = PTR_ERR(f->ss_descriptors);
		f->ss_descriptors = NULL;
		goto error;
	}
	/* Preallocate control endpoint request. */
	uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
	uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
	if (uvc->control_req == NULL || uvc->control_buf == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	uvc->control_req->buf = uvc->control_buf;
	uvc->control_req->complete = uvc_function_ep0_complete;
	uvc->control_req->context = uvc;
	if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
		uvcg_err(f, "failed to register V4L2 device\n");
		goto error;
	}
	/* Initialise video. */
	ret = uvcg_video_init(&uvc->video, uvc);
	if (ret < 0)
		goto v4l2_error;
	/* Register a V4L2 device. */
	ret = uvc_register_video(uvc);
	if (ret < 0) {
		uvcg_err(f, "failed to register video device\n");
		goto v4l2_error;
	}
	return 0;
v4l2_error:
	v4l2_device_unregister(&uvc->v4l2_dev);
error:
	if (uvc->control_req)
		usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
	kfree(uvc->control_buf);
	usb_free_all_descriptors(f);
	return ret;
}
/* --------------------------------------------------------------------------
* USB gadget function
*/
/* Free a UVC function instance; inverse of uvc_alloc_inst(). */
static void uvc_free_inst(struct usb_function_instance *f)
{
	struct f_uvc_opts *opts = fi_to_f_uvc_opts(f);

	mutex_destroy(&opts->lock);
	kfree(opts);
}
/*
 * Allocate and initialise a UVC function instance (configfs opts).
 *
 * Builds a default control pipeline of three units — camera terminal
 * (ID 1) -> processing unit (ID 2) -> output terminal (ID 3) — and
 * prepares the NULL-terminated control class descriptor arrays used by
 * configfs-based gadgets.
 *
 * Returns the new usb_function_instance or an ERR_PTR on failure.
 */
static struct usb_function_instance *uvc_alloc_inst(void)
{
	struct f_uvc_opts *opts;
	struct uvc_camera_terminal_descriptor *cd;
	struct uvc_processing_unit_descriptor *pd;
	struct uvc_output_terminal_descriptor *od;
	struct uvc_descriptor_header **ctl_cls;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	opts->func_inst.free_func_inst = uvc_free_inst;
	mutex_init(&opts->lock);

	/* Camera (input) terminal descriptor, unit ID 1. */
	cd = &opts->uvc_camera_terminal;
	cd->bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3);
	cd->bDescriptorType = USB_DT_CS_INTERFACE;
	cd->bDescriptorSubType = UVC_VC_INPUT_TERMINAL;
	cd->bTerminalID = 1;
	cd->wTerminalType = cpu_to_le16(0x0201);	/* presumably ITT_CAMERA — confirm against UVC spec */
	cd->bAssocTerminal = 0;
	cd->iTerminal = 0;
	cd->wObjectiveFocalLengthMin = cpu_to_le16(0);
	cd->wObjectiveFocalLengthMax = cpu_to_le16(0);
	cd->wOcularFocalLength = cpu_to_le16(0);
	cd->bControlSize = 3;
	cd->bmControls[0] = 2;
	cd->bmControls[1] = 0;
	cd->bmControls[2] = 0;

	/* Processing unit descriptor, unit ID 2, sourced from the CT. */
	pd = &opts->uvc_processing;
	pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(2);
	pd->bDescriptorType = USB_DT_CS_INTERFACE;
	pd->bDescriptorSubType = UVC_VC_PROCESSING_UNIT;
	pd->bUnitID = 2;
	pd->bSourceID = 1;
	pd->wMaxMultiplier = cpu_to_le16(16*1024);
	pd->bControlSize = 2;
	pd->bmControls[0] = 1;
	pd->bmControls[1] = 0;
	pd->iProcessing = 0;
	pd->bmVideoStandards = 0;

	/* Output terminal descriptor, unit ID 3, sourced from the PU. */
	od = &opts->uvc_output_terminal;
	od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE;
	od->bDescriptorType = USB_DT_CS_INTERFACE;
	od->bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL;
	od->bTerminalID = 3;
	od->wTerminalType = cpu_to_le16(0x0101);	/* presumably TT_STREAMING — confirm */
	od->bAssocTerminal = 0;
	od->bSourceID = 2;
	od->iTerminal = 0;

	/*
	 * With the ability to add XUs to the UVC function graph, we need to be
	 * able to allocate unique unit IDs to them. The IDs are 1-based, with
	 * the CT, PU and OT above consuming the first 3.
	 */
	opts->last_unit_id = 3;

	/* Prepare fs control class descriptors for configfs-based gadgets */
	ctl_cls = opts->uvc_fs_control_cls;
	ctl_cls[0] = NULL;	/* assigned elsewhere by configfs */
	ctl_cls[1] = (struct uvc_descriptor_header *)cd;
	ctl_cls[2] = (struct uvc_descriptor_header *)pd;
	ctl_cls[3] = (struct uvc_descriptor_header *)od;
	ctl_cls[4] = NULL;	/* NULL-terminate */
	opts->fs_control =
		(const struct uvc_descriptor_header * const *)ctl_cls;

	/* Prepare ss control class descriptors for configfs-based gadgets */
	ctl_cls = opts->uvc_ss_control_cls;
	ctl_cls[0] = NULL;	/* assigned elsewhere by configfs */
	ctl_cls[1] = (struct uvc_descriptor_header *)cd;
	ctl_cls[2] = (struct uvc_descriptor_header *)pd;
	ctl_cls[3] = (struct uvc_descriptor_header *)od;
	ctl_cls[4] = NULL;	/* NULL-terminate */
	opts->ss_control =
		(const struct uvc_descriptor_header * const *)ctl_cls;

	INIT_LIST_HEAD(&opts->extension_units);

	opts->streaming_interval = 1;
	opts->streaming_maxpacket = 1024;
	snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");

	ret = uvcg_attach_configfs(opts);
	if (ret < 0) {
		kfree(opts);
		return ERR_PTR(ret);
	}

	return &opts->func_inst;
}
/*
 * Free the uvc_device allocated by uvc_alloc(). Drops the configfs
 * reference on the streaming header taken in uvc_alloc() and releases
 * the refcount held on the function instance options.
 */
static void uvc_free(struct usb_function *f)
{
	struct uvc_device *uvc = to_uvc(f);
	struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
					       func_inst);

	config_item_put(&uvc->header->item);
	/* NOTE(review): refcnt is decremented without opts->lock here,
	 * unlike the increment in uvc_alloc() — presumably serialized by
	 * the composite framework; confirm. */
	--opts->refcnt;
	kfree(uvc);
}
/*
 * unbind handler: tear down the V4L2 side and free bind-time resources.
 *
 * If userspace still holds the device open, allow it a grace period to
 * close out both before and after the video node is removed, so that
 * resources are not freed while still in use.
 */
static void uvc_function_unbind(struct usb_configuration *c,
				struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct uvc_device *uvc = to_uvc(f);
	struct uvc_video *video = &uvc->video;
	long wait_ret = 1;

	uvcg_info(f, "%s()\n", __func__);

	if (video->async_wq)
		destroy_workqueue(video->async_wq);

	/*
	 * If we know we're connected via v4l2, then there should be a cleanup
	 * of the device from userspace either via UVC_EVENT_DISCONNECT or
	 * though the video device removal uevent. Allow some time for the
	 * application to close out before things get deleted.
	 */
	if (uvc->func_connected) {
		uvcg_dbg(f, "waiting for clean disconnect\n");
		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
				uvc->func_connected == false, msecs_to_jiffies(500));
		uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
	}

	device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
	video_unregister_device(&uvc->vdev);
	v4l2_device_unregister(&uvc->v4l2_dev);

	if (uvc->func_connected) {
		/*
		 * Wait for the release to occur to ensure there are no longer any
		 * pending operations that may cause panics when resources are cleaned
		 * up.
		 */
		uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
				uvc->func_connected == false, msecs_to_jiffies(1000));
		uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
	}

	usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
	kfree(uvc->control_buf);

	usb_free_all_descriptors(f);
}
/*
 * Allocate a uvc_device and wire it to its configfs-supplied descriptors.
 *
 * Looks up "streaming/header/h" in the function instance's configfs group
 * and takes a reference on it (dropped in uvc_free()). Fails with -ENOENT
 * when the configfs hierarchy is incomplete, and -EBUSY when the header
 * is not linked.
 */
static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
{
	struct uvc_device *uvc;
	struct f_uvc_opts *opts;
	struct uvc_descriptor_header **strm_cls;
	struct config_item *streaming, *header, *h;

	uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
	if (uvc == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&uvc->video.mutex);
	uvc->state = UVC_STATE_DISCONNECTED;
	init_waitqueue_head(&uvc->func_connected_queue);
	opts = fi_to_f_uvc_opts(fi);

	mutex_lock(&opts->lock);
	/* Adopt any streaming class descriptors configfs has populated. */
	if (opts->uvc_fs_streaming_cls) {
		strm_cls = opts->uvc_fs_streaming_cls;
		opts->fs_streaming =
			(const struct uvc_descriptor_header * const *)strm_cls;
	}
	if (opts->uvc_hs_streaming_cls) {
		strm_cls = opts->uvc_hs_streaming_cls;
		opts->hs_streaming =
			(const struct uvc_descriptor_header * const *)strm_cls;
	}
	if (opts->uvc_ss_streaming_cls) {
		strm_cls = opts->uvc_ss_streaming_cls;
		opts->ss_streaming =
			(const struct uvc_descriptor_header * const *)strm_cls;
	}

	uvc->desc.fs_control = opts->fs_control;
	uvc->desc.ss_control = opts->ss_control;
	uvc->desc.fs_streaming = opts->fs_streaming;
	uvc->desc.hs_streaming = opts->hs_streaming;
	uvc->desc.ss_streaming = opts->ss_streaming;

	/* Walk streaming/header/h; each intermediate ref is put immediately. */
	streaming = config_group_find_item(&opts->func_inst.group, "streaming");
	if (!streaming)
		goto err_config;

	header = config_group_find_item(to_config_group(streaming), "header");
	config_item_put(streaming);
	if (!header)
		goto err_config;

	h = config_group_find_item(to_config_group(header), "h");
	config_item_put(header);
	if (!h)
		goto err_config;

	uvc->header = to_uvcg_streaming_header(h);
	if (!uvc->header->linked) {
		/* NOTE(review): the reference taken on 'h' above is not put
		 * on this path — verify this is not a refcount leak. */
		mutex_unlock(&opts->lock);
		kfree(uvc);
		return ERR_PTR(-EBUSY);
	}

	uvc->desc.extension_units = &opts->extension_units;

	++opts->refcnt;
	mutex_unlock(&opts->lock);

	/* Register the function. */
	uvc->func.name = "uvc";
	uvc->func.bind = uvc_function_bind;
	uvc->func.unbind = uvc_function_unbind;
	uvc->func.get_alt = uvc_function_get_alt;
	uvc->func.set_alt = uvc_function_set_alt;
	uvc->func.disable = uvc_function_disable;
	uvc->func.setup = uvc_function_setup;
	uvc->func.free_func = uvc_free;
	uvc->func.bind_deactivated = true;

	return &uvc->func;

err_config:
	mutex_unlock(&opts->lock);
	kfree(uvc);
	return ERR_PTR(-ENOENT);
}
DECLARE_USB_FUNCTION_INIT(uvc, uvc_alloc_inst, uvc_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laurent Pinchart");
/* linux-master | drivers/usb/gadget/function/f_uvc.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* f_midi.c -- USB MIDI class function driver
*
* Copyright (C) 2006 Thumtronics Pty Ltd.
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <[email protected]>
*
* Rewritten for the composite framework
* Copyright (C) 2011 Daniel Mack <[email protected]>
*
* Based on drivers/usb/gadget/f_audio.c,
* Copyright (C) 2008 Bryan Wu <[email protected]>
* Copyright (C) 2008 Analog Devices, Inc
*
* and drivers/usb/gadget/midi.c,
* Copyright (C) 2006 Thumtronics Pty Ltd.
* Ben Williamson <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
#include "u_f.h"
#include "u_midi.h"
MODULE_AUTHOR("Ben Williamson");
MODULE_LICENSE("GPL v2");
static const char f_midi_shortname[] = "f_midi";
static const char f_midi_longname[] = "MIDI Gadget";
/*
* We can only handle 16 cables on one single endpoint, as cable numbers are
* stored in 4-bit fields. And as the interface currently only holds one
* single endpoint, this is the maximum number of ports we can allow.
*/
#define MAX_PORTS 16
/* MIDI message states for the per-port encoder in f_midi_transmit_byte() */
enum {
	STATE_INITIAL = 0,	/* pseudo state */
	STATE_1PARAM,		/* status seen, one parameter byte expected */
	STATE_2PARAM_1,		/* status seen, first of two parameters expected */
	STATE_2PARAM_2,		/* second of two parameter bytes expected */
	STATE_SYSEX_0,		/* inside SysEx, 0 data bytes buffered */
	STATE_SYSEX_1,		/* inside SysEx, 1 data byte buffered */
	STATE_SYSEX_2,		/* inside SysEx, 2 data bytes buffered */
	STATE_REAL_TIME,	/* real-time byte interleaved; previous state resumes */
	STATE_FINISHED,		/* pseudo state */
};
/*
 * This is a gadget, and the IN/OUT naming is from the host's perspective.
 * USB -> OUT endpoint -> rawmidi
 * USB <- IN endpoint <- rawmidi
 */
struct gmidi_in_port {
	struct snd_rawmidi_substream *substream; /* rawmidi OUTPUT feeding USB IN */
	int active;		/* set/cleared by f_midi_in_trigger(), cleared on drain */
	uint8_t cable;		/* cable number, packed into packet header high nibble */
	uint8_t state;		/* encoder state (STATE_*) */
	uint8_t data[2];	/* buffered status/parameter bytes */
};
/* Per-instance state of the MIDI function. */
struct f_midi {
	struct usb_function	func;
	struct usb_gadget	*gadget;
	struct usb_ep		*in_ep, *out_ep;
	struct snd_card		*card;
	struct snd_rawmidi	*rmidi;
	u8			ms_id;	/* MIDIStreaming interface number */

	struct snd_rawmidi_substream *out_substream[MAX_PORTS];

	unsigned long		out_triggered;	/* bitmap of triggered OUT substreams */
	struct work_struct	work;		/* defers f_midi_transmit() from trigger */
	unsigned int in_ports;
	unsigned int out_ports;
	int index;		/* ALSA card index */
	char *id;		/* ALSA card id string */
	unsigned int buflen, qlen;
	/* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
	DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
	spinlock_t transmit_lock;	/* serializes f_midi_do_transmit() */
	unsigned int in_last_port;	/* round-robin resume point for transmit */
	unsigned char free_ref;		/* NOTE(review): bumped in f_midi_register_card — confirm drop site */

	struct gmidi_in_port	in_ports_array[/* in_ports */];
};
/* Map the embedded usb_function back to its enclosing f_midi. */
static inline struct f_midi *func_to_midi(struct usb_function *f)
{
	return container_of(f, struct f_midi, func);
}
static void f_midi_transmit(struct f_midi *midi);
static void f_midi_rmidi_free(struct snd_rawmidi *rmidi);
static void f_midi_free_inst(struct usb_function_instance *f);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	/* .bNumEndpoints = DYNAMIC */
	.bInterfaceClass = USB_CLASS_AUDIO,
	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
	/* .iInterface = DYNAMIC */
};

/* B.3.2 Class-Specific AC Interface Descriptor */
static struct uac1_ac_header_descriptor_1 ac_header_desc = {
	.bLength = UAC_DT_AC_HEADER_SIZE(1),
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubtype = USB_MS_HEADER,
	.bcdADC = cpu_to_le16(0x0100),
	.wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
	.bInCollection = 1,
	/* .baInterfaceNr = DYNAMIC */
};

/* B.4.1 Standard MS Interface Descriptor */
static struct usb_interface_descriptor ms_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints = 2,
	.bInterfaceClass = USB_CLASS_AUDIO,
	.bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
	/* .iInterface = DYNAMIC */
};

/* B.4.2 Class-Specific MS Interface Descriptor */
static struct usb_ms_header_descriptor ms_header_desc = {
	.bLength = USB_DT_MS_HEADER_SIZE,
	.bDescriptorType = USB_DT_CS_INTERFACE,
	.bDescriptorSubtype = USB_MS_HEADER,
	.bcdMSC = cpu_to_le16(0x0100),
	/* .wTotalLength = DYNAMIC */
};

/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor bulk_out_desc = {
	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

/* SuperSpeed companion for the bulk OUT endpoint (no burst/streams). */
static struct usb_ss_ep_comp_descriptor bulk_out_ss_comp_desc = {
	.bLength = sizeof(bulk_out_ss_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
};

/* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_out_desc = {
	/* .bLength = DYNAMIC */
	.bDescriptorType = USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype = USB_MS_GENERAL,
	/* .bNumEmbMIDIJack = DYNAMIC */
	/* .baAssocJackID = DYNAMIC */
};

/* B.6.1 Standard Bulk IN Endpoint Descriptor */
static struct usb_endpoint_descriptor bulk_in_desc = {
	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

/* SuperSpeed companion for the bulk IN endpoint (no burst/streams). */
static struct usb_ss_ep_comp_descriptor bulk_in_ss_comp_desc = {
	.bLength = sizeof(bulk_in_ss_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
};

/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
	/* .bLength = DYNAMIC */
	.bDescriptorType = USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype = USB_MS_GENERAL,
	/* .bNumEmbMIDIJack = DYNAMIC */
	/* .baAssocJackID = DYNAMIC */
};

/* string IDs are assigned dynamically */

#define STRING_FUNC_IDX 0

static struct usb_string midi_string_defs[] = {
	[STRING_FUNC_IDX].s = "MIDI function",
	{ } /* end of list */
};

static struct usb_gadget_strings midi_stringtab = {
	.language = 0x0409, /* en-us */
	.strings = midi_string_defs,
};

static struct usb_gadget_strings *midi_strings[] = {
	&midi_stringtab,
	NULL,
};
/* Thin wrapper over alloc_ep_req(); allocates a request with a buffer of
 * 'length' bytes on the given endpoint. */
static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
						    unsigned length)
{
	return alloc_ep_req(ep, length);
}
/*
 * Payload length for each Code Index Number, indexed by the low nibble of
 * the USB-MIDI packet header (see f_midi_handle_out_data()). Values follow
 * the USB MIDI 1.0 spec's CIN table — confirm against Table 4-1.
 */
static const uint8_t f_midi_cin_length[] = {
	0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
};
/*
 * Deliver one decoded chunk of MIDI data to the rawmidi substream bound to
 * 'cable'. Data is silently discarded when no substream is open or the
 * cable has not been triggered.
 */
static void f_midi_read_data(struct usb_ep *ep, int cable,
			     uint8_t *data, int length)
{
	struct f_midi *midi = ep->driver_data;
	struct snd_rawmidi_substream *sub = midi->out_substream[cable];

	/* Only forward when somebody is listening and the cable is live. */
	if (sub && test_bit(cable, &midi->out_triggered))
		snd_rawmidi_receive(sub, data, length);
}
/*
 * Split a completed OUT request into 4-byte USB-MIDI event packets and
 * forward each non-empty packet's payload to the matching cable.
 */
static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req)
{
	u8 *buf = req->buf;
	unsigned int off;

	for (off = 0; off + 3 < req->actual; off += 4) {
		u8 header = buf[off];

		if (header == 0)
			continue;	/* empty packet slot */

		/* High nibble: cable; low nibble: CIN -> payload length. */
		f_midi_read_data(ep, header >> 4, &buf[off + 1],
				 f_midi_cin_length[header & 0x0f]);
	}
}
/*
 * Completion handler shared by the IN and OUT bulk endpoints.
 *
 * OUT completions hand received packets to rawmidi and requeue the request;
 * IN completions return the (now empty) request to the transmit path.
 */
static void
f_midi_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_midi *midi = ep->driver_data;
	struct usb_composite_dev *cdev = midi->func.config->cdev;
	int status = req->status;

	switch (status) {
	case 0: /* normal completion */
		if (ep == midi->out_ep) {
			/* We received stuff. req is queued again, below */
			f_midi_handle_out_data(ep, req);
		} else if (ep == midi->in_ep) {
			/* Our transmit completed. See if there's more to go.
			 * f_midi_transmit eats req, don't queue it again. */
			req->length = 0;
			f_midi_transmit(midi);
			return;
		}
		break;

	/* this endpoint is normally active while we're configured */
	case -ECONNABORTED:	/* hardware forced ep reset */
	case -ECONNRESET:	/* request dequeued */
	case -ESHUTDOWN:	/* disconnect from host */
		VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
		     req->actual, req->length);
		if (ep == midi->out_ep) {
			f_midi_handle_out_data(ep, req);
			/* We don't need to free IN requests because it's handled
			 * by the midi->in_req_fifo. */
			free_ep_req(ep, req);
		}
		return;

	case -EOVERFLOW: /* buffer overrun on read means that
			  * we didn't provide a big enough buffer.
			  */
	default:
		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
		    status, req->actual, req->length);
		break;
	case -EREMOTEIO: /* short read */
		/* Deliberately placed after default: short reads are requeued
		 * below without the DBG log above. */
		break;
	}

	/* Requeue the request (OUT data path and recoverable errors). */
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n",
		      ep->name, req->length, status);
		usb_ep_set_halt(ep);
		/* FIXME recover later ... somehow */
	}
}
/* Abort pending rawmidi output on every active IN-direction port. */
static void f_midi_drop_out_substreams(struct f_midi *midi)
{
	unsigned int idx;

	for (idx = 0; idx < midi->in_ports; idx++) {
		struct gmidi_in_port *port = &midi->in_ports_array[idx];

		if (port->active && port->substream)
			snd_rawmidi_drop_output(port->substream);
	}
}
static int f_midi_start_ep(struct f_midi *midi,
struct usb_function *f,
struct usb_ep *ep)
{
int err;
struct usb_composite_dev *cdev = f->config->cdev;
usb_ep_disable(ep);
err = config_ep_by_speed(midi->gadget, f, ep);
if (err) {
ERROR(cdev, "can't configure %s: %d\n", ep->name, err);
return err;
}
err = usb_ep_enable(ep);
if (err) {
ERROR(cdev, "can't start %s: %d\n", ep->name, err);
return err;
}
ep->driver_data = midi;
return 0;
}
/*
 * set_alt handler: (re)start both bulk endpoints and prime the request
 * pools. IN requests are pre-allocated into in_req_fifo for
 * f_midi_transmit(); OUT requests are queued immediately so the host can
 * send at any time. Returns 0 or a negative errno.
 */
static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_midi *midi = func_to_midi(f);
	unsigned i;
	int err;

	/* we only set alt for MIDIStreaming interface */
	if (intf != midi->ms_id)
		return 0;

	err = f_midi_start_ep(midi, f, midi->in_ep);
	if (err)
		return err;

	err = f_midi_start_ep(midi, f, midi->out_ep);
	if (err)
		return err;

	/* pre-allocate write usb requests to use on f_midi_transmit. */
	while (kfifo_avail(&midi->in_req_fifo)) {
		struct usb_request *req =
			midi_alloc_ep_req(midi->in_ep, midi->buflen);

		if (req == NULL)
			/* requests already in the fifo are released later by
			 * f_midi_disable() */
			return -ENOMEM;

		req->length = 0;
		req->complete = f_midi_complete;

		kfifo_put(&midi->in_req_fifo, req);
	}

	/* allocate a bunch of read buffers and queue them all at once. */
	for (i = 0; i < midi->qlen && err == 0; i++) {
		struct usb_request *req =
			midi_alloc_ep_req(midi->out_ep, midi->buflen);

		if (req == NULL)
			return -ENOMEM;

		req->complete = f_midi_complete;
		err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
		if (err) {
			ERROR(midi, "%s: couldn't enqueue request: %d\n",
			      midi->out_ep->name, err);
			if (req->buf != NULL)
				free_ep_req(midi->out_ep, req);
			return err;
		}
	}

	return 0;
}
/*
 * disable handler: quiesce both endpoints, free the pre-allocated IN
 * requests and abort pending rawmidi output.
 */
static void f_midi_disable(struct usb_function *f)
{
	struct f_midi *midi = func_to_midi(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = NULL;

	DBG(cdev, "disable\n");

	/*
	 * just disable endpoints, forcing completion of pending i/o.
	 * all our completion handlers free their requests in this case.
	 */
	usb_ep_disable(midi->in_ep);
	usb_ep_disable(midi->out_ep);

	/* release IN requests */
	while (kfifo_get(&midi->in_req_fifo, &req))
		free_ep_req(midi->in_ep, req);

	f_midi_drop_out_substreams(midi);
}
/* snd_device_ops.dev_free callback — nothing extra to release here. */
static int f_midi_snd_free(struct snd_device *device)
{
	return 0;
}
/*
 * Converts MIDI commands to USB MIDI packets.
 *
 * Feeds one raw MIDI byte 'b' into the per-port state machine and, when a
 * complete 4-byte USB-MIDI event is ready, appends it to req->buf and bumps
 * req->length by 4. Caller must guarantee at least 4 bytes of headroom
 * (checked in f_midi_do_transmit()).
 */
static void f_midi_transmit_byte(struct usb_request *req,
				 struct gmidi_in_port *port, uint8_t b)
{
	uint8_t p[4] = { port->cable << 4, 0, 0, 0 };
	uint8_t next_state = STATE_INITIAL;

	switch (b) {
	case 0xf8 ... 0xff:
		/* System Real-Time Messages */
		p[0] |= 0x0f;
		p[1] = b;
		/* remember the interrupted state so parsing resumes there */
		next_state = port->state;
		port->state = STATE_REAL_TIME;
		break;

	case 0xf7:
		/* End of SysEx */
		switch (port->state) {
		case STATE_SYSEX_0:
			p[0] |= 0x05;
			p[1] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		case STATE_SYSEX_1:
			p[0] |= 0x06;
			p[1] = port->data[0];
			p[2] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		case STATE_SYSEX_2:
			p[0] |= 0x07;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = 0xf7;
			next_state = STATE_FINISHED;
			break;
		default:
			/* Ignore byte */
			next_state = port->state;
			port->state = STATE_INITIAL;
		}
		break;

	case 0xf0 ... 0xf6:
		/* System Common Messages */
		port->data[0] = port->data[1] = 0;
		port->state = STATE_INITIAL;
		switch (b) {
		case 0xf0:
			/* SysEx start byte becomes the first buffered byte */
			port->data[0] = b;
			port->data[1] = 0;
			next_state = STATE_SYSEX_1;
			break;
		case 0xf1:
		case 0xf3:
			port->data[0] = b;
			next_state = STATE_1PARAM;
			break;
		case 0xf2:
			port->data[0] = b;
			next_state = STATE_2PARAM_1;
			break;
		case 0xf4:
		case 0xf5:
			next_state = STATE_INITIAL;
			break;
		case 0xf6:
			p[0] |= 0x05;
			p[1] = 0xf6;
			next_state = STATE_FINISHED;
			break;
		}
		break;

	case 0x80 ... 0xef:
		/*
		 * Channel Voice Messages, Channel Mode Messages
		 * and Control Change Messages.
		 */
		port->data[0] = b;
		port->data[1] = 0;
		port->state = STATE_INITIAL;
		/* 0xc0..0xdf status bytes take a single parameter byte */
		if (b >= 0xc0 && b <= 0xdf)
			next_state = STATE_1PARAM;
		else
			next_state = STATE_2PARAM_1;
		break;

	case 0x00 ... 0x7f:
		/* Message parameters */
		switch (port->state) {
		case STATE_1PARAM:
			if (port->data[0] < 0xf0)
				p[0] |= port->data[0] >> 4;
			else
				p[0] |= 0x02;
			p[1] = port->data[0];
			p[2] = b;
			/* This is to allow Running State Messages */
			next_state = STATE_1PARAM;
			break;
		case STATE_2PARAM_1:
			port->data[1] = b;
			next_state = STATE_2PARAM_2;
			break;
		case STATE_2PARAM_2:
			if (port->data[0] < 0xf0)
				p[0] |= port->data[0] >> 4;
			else
				p[0] |= 0x03;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = b;
			/* This is to allow Running State Messages */
			next_state = STATE_2PARAM_1;
			break;
		case STATE_SYSEX_0:
			port->data[0] = b;
			next_state = STATE_SYSEX_1;
			break;
		case STATE_SYSEX_1:
			port->data[1] = b;
			next_state = STATE_SYSEX_2;
			break;
		case STATE_SYSEX_2:
			p[0] |= 0x04;
			p[1] = port->data[0];
			p[2] = port->data[1];
			p[3] = b;
			next_state = STATE_SYSEX_0;
			break;
		}
		break;
	}

	/* States where we have to write into the USB request */
	if (next_state == STATE_FINISHED ||
	    port->state == STATE_SYSEX_2 ||
	    port->state == STATE_1PARAM ||
	    port->state == STATE_2PARAM_2 ||
	    port->state == STATE_REAL_TIME) {

		unsigned int length = req->length;
		u8 *buf = (u8 *)req->buf + length;

		memcpy(buf, p, sizeof(p));
		req->length = length + sizeof(p);

		if (next_state == STATE_FINISHED) {
			next_state = STATE_INITIAL;
			port->data[0] = port->data[1] = 0;
		}
	}

	port->state = next_state;
}
/*
 * Fill one pre-allocated IN request with encoded MIDI data and queue it.
 *
 * Returns <0 on a fatal error, 0 when there is nothing (more) to send,
 * and >0 when a port is still active and the caller should iterate again.
 * Must be called with midi->transmit_lock held (see f_midi_transmit()).
 */
static int f_midi_do_transmit(struct f_midi *midi, struct usb_ep *ep)
{
	struct usb_request *req = NULL;
	unsigned int len, i;
	bool active = false;
	int err;

	/*
	 * We peek the request in order to reuse it if it fails to enqueue on
	 * its endpoint
	 */
	len = kfifo_peek(&midi->in_req_fifo, &req);
	if (len != 1) {
		ERROR(midi, "%s: Couldn't get usb request\n", __func__);
		return -1;
	}

	/*
	 * If buffer overrun, then we ignore this transmission.
	 * IMPORTANT: This will cause the user-space rawmidi device to block
	 * until a) usb requests have been completed or b) snd_rawmidi_write()
	 * times out.
	 */
	if (req->length > 0)
		return 0;

	/* Round-robin over IN ports, resuming where we last stopped. */
	for (i = midi->in_last_port; i < midi->in_ports; ++i) {
		struct gmidi_in_port *port = midi->in_ports_array + i;
		struct snd_rawmidi_substream *substream = port->substream;

		if (!port->active || !substream)
			continue;

		/* +3: one rawmidi byte may expand to a 4-byte packet. */
		while (req->length + 3 < midi->buflen) {
			uint8_t b;

			if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
				port->active = 0;
				break;
			}
			f_midi_transmit_byte(req, port, b);
		}

		active = !!port->active;
		if (active)
			break;
	}
	midi->in_last_port = active ? i : 0;

	if (req->length <= 0)
		goto done;

	err = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (err < 0) {
		ERROR(midi, "%s failed to queue req: %d\n",
		      midi->in_ep->name, err);
		req->length = 0; /* Re-use request next time. */
	} else {
		/* Upon success, put request at the back of the queue. */
		kfifo_skip(&midi->in_req_fifo);
		kfifo_put(&midi->in_req_fifo, req);
	}

done:
	return active;
}
/*
 * Drain rawmidi output into USB IN requests while the IN endpoint is
 * enabled. On error or when the endpoint is disabled, pending rawmidi
 * output is dropped instead.
 */
static void f_midi_transmit(struct f_midi *midi)
{
	struct usb_ep *ep = midi->in_ep;
	int ret;
	unsigned long flags;

	/* We only care about USB requests if IN endpoint is enabled */
	if (!ep || !ep->enabled)
		goto drop_out;

	spin_lock_irqsave(&midi->transmit_lock, flags);

	do {
		ret = f_midi_do_transmit(midi, ep);
		if (ret < 0) {
			spin_unlock_irqrestore(&midi->transmit_lock, flags);
			goto drop_out;
		}
	} while (ret);

	spin_unlock_irqrestore(&midi->transmit_lock, flags);

	return;

drop_out:
	f_midi_drop_out_substreams(midi);
}
/* Workqueue bounce: run the transmit path in process context. */
static void f_midi_in_work(struct work_struct *work)
{
	f_midi_transmit(container_of(work, struct f_midi, work));
}
/* rawmidi OUTPUT open: bind the substream to its gadget IN port. */
static int f_midi_in_open(struct snd_rawmidi_substream *substream)
{
	struct f_midi *midi = substream->rmidi->private_data;
	struct gmidi_in_port *port;

	if (substream->number >= midi->in_ports)
		return -EINVAL;

	VDBG(midi, "%s()\n", __func__);
	port = midi->in_ports_array + substream->number;
	port->substream = substream;
	port->state = STATE_INITIAL;	/* reset the packet encoder */
	return 0;
}
/* rawmidi OUTPUT close: nothing to tear down. */
static int f_midi_in_close(struct snd_rawmidi_substream *substream)
{
	struct f_midi *midi = substream->rmidi->private_data;

	VDBG(midi, "%s()\n", __func__);
	return 0;
}
/*
 * rawmidi OUTPUT trigger: mark the port (in)active. Transmission is
 * deferred to the workqueue rather than performed inline here.
 */
static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct f_midi *midi = substream->rmidi->private_data;

	if (substream->number >= midi->in_ports)
		return;

	VDBG(midi, "%s() %d\n", __func__, up);

	midi->in_ports_array[substream->number].active = up;
	if (up)
		queue_work(system_highpri_wq, &midi->work);
}
/* rawmidi INPUT open: record where to deliver data for this cable. */
static int f_midi_out_open(struct snd_rawmidi_substream *substream)
{
	struct f_midi *midi = substream->rmidi->private_data;

	if (substream->number >= MAX_PORTS)
		return -EINVAL;

	VDBG(midi, "%s()\n", __func__);
	midi->out_substream[substream->number] = substream;
	return 0;
}
/* rawmidi INPUT close: nothing to tear down. */
static int f_midi_out_close(struct snd_rawmidi_substream *substream)
{
	struct f_midi *midi = substream->rmidi->private_data;

	VDBG(midi, "%s()\n", __func__);
	return 0;
}
/* rawmidi INPUT trigger: gate delivery in f_midi_read_data() via bitmap. */
static void f_midi_out_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct f_midi *midi = substream->rmidi->private_data;

	VDBG(midi, "%s()\n", __func__);

	if (up)
		set_bit(substream->number, &midi->out_triggered);
	else
		clear_bit(substream->number, &midi->out_triggered);
}
/* rawmidi OUTPUT (host-bound, USB IN) substream callbacks. */
static const struct snd_rawmidi_ops gmidi_in_ops = {
	.open = f_midi_in_open,
	.close = f_midi_in_close,
	.trigger = f_midi_in_trigger,
};

/* rawmidi INPUT (host-sourced, USB OUT) substream callbacks. */
static const struct snd_rawmidi_ops gmidi_out_ops = {
	.open = f_midi_out_open,
	.close = f_midi_out_close,
	.trigger = f_midi_out_trigger
};
/* Release the ALSA card, if any, and clear the stale pointer. */
static inline void f_midi_unregister_card(struct f_midi *midi)
{
	struct snd_card *card = midi->card;

	if (!card)
		return;

	midi->card = NULL;
	snd_card_free(card);
}
/* register as a sound "card" */
/*
 * Create and register the ALSA card that exposes this function as a
 * rawmidi device. On failure, everything registered so far is torn down
 * again via f_midi_unregister_card(). Returns 0 or a negative errno.
 */
static int f_midi_register_card(struct f_midi *midi)
{
	struct snd_card *card;
	struct snd_rawmidi *rmidi;
	int err;
	static struct snd_device_ops ops = {
		.dev_free = f_midi_snd_free,
	};

	err = snd_card_new(&midi->gadget->dev, midi->index, midi->id,
			   THIS_MODULE, 0, &card);
	if (err < 0) {
		ERROR(midi, "snd_card_new() failed\n");
		goto fail;
	}
	midi->card = card;

	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, midi, &ops);
	if (err < 0) {
		ERROR(midi, "snd_device_new() failed: error %d\n", err);
		goto fail;
	}

	strcpy(card->driver, f_midi_longname);
	strcpy(card->longname, f_midi_longname);
	strcpy(card->shortname, f_midi_shortname);

	/* Set up rawmidi */
	snd_component_add(card, "MIDI");
	err = snd_rawmidi_new(card, card->longname, 0,
			      midi->out_ports, midi->in_ports, &rmidi);
	if (err < 0) {
		ERROR(midi, "snd_rawmidi_new() failed: error %d\n", err);
		goto fail;
	}
	midi->rmidi = rmidi;
	midi->in_last_port = 0;
	strcpy(rmidi->name, card->shortname);
	rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
			    SNDRV_RAWMIDI_INFO_INPUT |
			    SNDRV_RAWMIDI_INFO_DUPLEX;
	rmidi->private_data = midi;
	rmidi->private_free = f_midi_rmidi_free;
	/* presumably dropped by f_midi_rmidi_free() — confirm */
	midi->free_ref++;

	/*
	 * Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
	 * It's an upside-down world being a gadget.
	 */
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);

	/* register it - we're ready to go */
	err = snd_card_register(card);
	if (err < 0) {
		ERROR(midi, "snd_card_register() failed\n");
		goto fail;
	}

	VDBG(midi, "%s() finished ok\n", __func__);
	return 0;

fail:
	f_midi_unregister_card(midi);
	return err;
}
/* MIDI function driver setup/binding */
static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0;
midi->gadget = cdev->gadget;
INIT_WORK(&midi->work, f_midi_in_work);
status = f_midi_register_card(midi);
if (status < 0)
goto fail_register;
/* maybe allocate device-global string ID */
us = usb_gstrings_attach(c->cdev, midi_strings,
ARRAY_SIZE(midi_string_defs));
if (IS_ERR(us)) {
status = PTR_ERR(us);
goto fail;
}
ac_interface_desc.iInterface = us[STRING_FUNC_IDX].id;
/* We have two interfaces, AudioControl and MIDIStreaming */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ac_interface_desc.bInterfaceNumber = status;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ms_interface_desc.bInterfaceNumber = status;
ac_header_desc.baInterfaceNr[0] = status;
midi->ms_id = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
if (!midi->in_ep)
goto fail;
midi->out_ep = usb_ep_autoconfig(cdev->gadget, &bulk_out_desc);
if (!midi->out_ep)
goto fail;
/* allocate temporary function list */
midi_function = kcalloc((MAX_PORTS * 4) + 11, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
goto fail;
}
/*
* construct the function's descriptor set. As the number of
* input and output MIDI ports is configurable, we have to do
* it that way.
*/
/* add the headers - these are always the same */
midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
+ (midi->in_ports + midi->out_ports) *
(USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
/* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
in_ext->bLength = USB_DT_MIDI_IN_SIZE;
in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
in_ext->bJackType = USB_MS_EXTERNAL;
in_ext->bJackID = jack++;
in_ext->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_ext;
out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
out_emb->bJackType = USB_MS_EMBEDDED;
out_emb->bJackID = jack++;
out_emb->bNrInputPins = 1;
out_emb->pins[0].baSourcePin = 1;
out_emb->pins[0].baSourceID = in_ext->bJackID;
out_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) out_emb;
/* link it to the endpoint */
ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
}
/* configure the external OUT jacks, each linked to an embedded IN jack */
for (n = 0; n < midi->out_ports; n++) {
struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
in_emb->bLength = USB_DT_MIDI_IN_SIZE;
in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
in_emb->bJackType = USB_MS_EMBEDDED;
in_emb->bJackID = jack++;
in_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_emb;
out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
out_ext->bJackType = USB_MS_EXTERNAL;
out_ext->bJackID = jack++;
out_ext->bNrInputPins = 1;
out_ext->iJack = 0;
out_ext->pins[0].baSourceID = in_emb->bJackID;
out_ext->pins[0].baSourcePin = 1;
midi_function[i++] = (struct usb_descriptor_header *) out_ext;
/* link it to the endpoint */
ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
}
/* configure the endpoint descriptors ... */
ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
/* ... and add them to the list */
endpoint_descriptor_index = i;
midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_out_desc;
midi_function[i++] = (struct usb_descriptor_header *) &bulk_in_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
midi_function[i++] = NULL;
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
/* copy descriptors, and track endpoint copies */
f->fs_descriptors = usb_copy_descriptors(midi_function);
if (!f->fs_descriptors)
goto fail_f_midi;
bulk_in_desc.wMaxPacketSize = cpu_to_le16(512);
bulk_out_desc.wMaxPacketSize = cpu_to_le16(512);
f->hs_descriptors = usb_copy_descriptors(midi_function);
if (!f->hs_descriptors)
goto fail_f_midi;
bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
i = endpoint_descriptor_index;
midi_function[i++] = (struct usb_descriptor_header *)
&bulk_out_desc;
midi_function[i++] = (struct usb_descriptor_header *)
&bulk_out_ss_comp_desc;
midi_function[i++] = (struct usb_descriptor_header *)
&ms_out_desc;
midi_function[i++] = (struct usb_descriptor_header *)
&bulk_in_desc;
midi_function[i++] = (struct usb_descriptor_header *)
&bulk_in_ss_comp_desc;
midi_function[i++] = (struct usb_descriptor_header *)
&ms_in_desc;
f->ss_descriptors = usb_copy_descriptors(midi_function);
if (!f->ss_descriptors)
goto fail_f_midi;
kfree(midi_function);
return 0;
fail_f_midi:
kfree(midi_function);
usb_free_all_descriptors(f);
fail:
f_midi_unregister_card(midi);
fail_register:
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
/* Map a configfs item back to its enclosing f_midi_opts instance. */
static inline struct f_midi_opts *to_f_midi_opts(struct config_item *item)
{
	struct config_group *group = to_config_group(item);

	return container_of(group, struct f_midi_opts, func_inst.group);
}
static void midi_attr_release(struct config_item *item)
{
struct f_midi_opts *opts = to_f_midi_opts(item);
usb_put_function_instance(&opts->func_inst);
}
/* configfs item operations: only a release hook is required. */
static struct configfs_item_operations midi_item_ops = {
	.release	= midi_attr_release,
};
/*
 * F_MIDI_OPT() - generate configfs show/store handlers for an unsigned
 * f_midi_opts field.
 * @name:	the opts field (and attribute) name
 * @test_limit:	when true, reject stored values greater than @limit
 * @limit:	upper bound applied when @test_limit is set
 *
 * The store handler refuses changes (-EBUSY) while the options instance
 * is referenced by a bound function (refcnt > 1); both handlers take
 * opts->lock to serialise against each other and against bind/free.
 */
#define F_MIDI_OPT(name, test_limit, limit)				\
static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
{									\
	struct f_midi_opts *opts = to_f_midi_opts(item);		\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%u\n", opts->name);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_midi_opts_##name##_store(struct config_item *item,	\
					 const char *page, size_t len)	\
{									\
	struct f_midi_opts *opts = to_f_midi_opts(item);		\
	int ret;							\
	u32 num;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt > 1) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou32(page, 0, &num);					\
	if (ret)							\
		goto end;						\
									\
	if (test_limit && num > limit) {				\
		ret = -EINVAL;						\
		goto end;						\
	}								\
	opts->name = num;						\
	ret = len;							\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
CONFIGFS_ATTR(f_midi_opts_, name);
/*
 * F_MIDI_OPT_SIGNED() - like F_MIDI_OPT() but for a signed field.
 *
 * Needed for "index", whose default (SNDRV_DEFAULT_IDX1) is negative;
 * only the upper bound is checked, so negative values pass through.
 */
#define F_MIDI_OPT_SIGNED(name, test_limit, limit)			\
static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
{									\
	struct f_midi_opts *opts = to_f_midi_opts(item);		\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%d\n", opts->name);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_midi_opts_##name##_store(struct config_item *item,	\
					 const char *page, size_t len)	\
{									\
	struct f_midi_opts *opts = to_f_midi_opts(item);		\
	int ret;							\
	s32 num;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt > 1) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtos32(page, 0, &num);					\
	if (ret)							\
		goto end;						\
									\
	if (test_limit && num > limit) {				\
		ret = -EINVAL;						\
		goto end;						\
	}								\
	opts->name = num;						\
	ret = len;							\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
CONFIGFS_ATTR(f_midi_opts_, name);
/* Instantiate the configfs attributes for the tunable options. */
F_MIDI_OPT_SIGNED(index, true, SNDRV_CARDS);
F_MIDI_OPT(buflen, false, 0);
F_MIDI_OPT(qlen, false, 0);
F_MIDI_OPT(in_ports, true, MAX_PORTS);
F_MIDI_OPT(out_ports, true, MAX_PORTS);
/*
 * Show the ALSA card ID string, or an empty string if none is set.
 * Takes opts->lock to serialise against a concurrent id store.
 */
static ssize_t f_midi_opts_id_show(struct config_item *item, char *page)
{
	struct f_midi_opts *opts = to_f_midi_opts(item);
	int result;

	mutex_lock(&opts->lock);
	if (opts->id) {
		/*
		 * strscpy() instead of the deprecated strlcpy(): it never
		 * reads past a short source buffer and always terminates
		 * the destination.  On truncation it returns -E2BIG, which
		 * is reported to the reader instead of a clipped length.
		 */
		result = strscpy(page, opts->id, PAGE_SIZE);
	} else {
		page[0] = 0;
		result = 0;
	}
	mutex_unlock(&opts->lock);
	return result;
}
/*
 * Store a new ALSA card ID string.  Rejected with -EBUSY while the
 * function is bound (more than one reference on the options).
 */
static ssize_t f_midi_opts_id_store(struct config_item *item,
				    const char *page, size_t len)
{
	struct f_midi_opts *opts = to_f_midi_opts(item);
	char *dup;
	int ret = len;

	mutex_lock(&opts->lock);

	if (opts->refcnt > 1) {
		ret = -EBUSY;
		goto end;
	}

	dup = kstrndup(page, len, GFP_KERNEL);
	if (!dup) {
		ret = -ENOMEM;
		goto end;
	}

	/* The previous id is only ours to free if we allocated it. */
	if (opts->id_allocated)
		kfree(opts->id);
	opts->id = dup;
	opts->id_allocated = true;

end:
	mutex_unlock(&opts->lock);
	return ret;
}

CONFIGFS_ATTR(f_midi_opts_, id);
/* NULL-terminated list of configfs attributes exposed per instance. */
static struct configfs_attribute *midi_attrs[] = {
	&f_midi_opts_attr_index,
	&f_midi_opts_attr_buflen,
	&f_midi_opts_attr_qlen,
	&f_midi_opts_attr_in_ports,
	&f_midi_opts_attr_out_ports,
	&f_midi_opts_attr_id,
	NULL,
};
/* configfs type for the f_midi function directory. */
static const struct config_item_type midi_func_type = {
	.ct_item_ops	= &midi_item_ops,
	.ct_attrs	= midi_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * Drop one reference on the options instance; free all associated
 * memory once the last reference is gone.
 */
static void f_midi_free_inst(struct usb_function_instance *f)
{
	struct f_midi_opts *opts = container_of(f, struct f_midi_opts,
						func_inst);
	bool last_ref;

	mutex_lock(&opts->lock);
	last_ref = (--opts->refcnt == 0);
	mutex_unlock(&opts->lock);

	if (!last_ref)
		return;

	/* The id string is only ours to free if it was duplicated. */
	if (opts->id_allocated)
		kfree(opts->id);
	kfree(opts);
}
/*
 * Allocate a new f_midi options instance with sensible defaults and
 * register its configfs group.
 */
static struct usb_function_instance *f_midi_alloc_inst(void)
{
	struct f_midi_opts *opts = kzalloc(sizeof(*opts), GFP_KERNEL);

	if (!opts)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = f_midi_free_inst;

	/* Defaults: one port each way, standard ALSA card index/id. */
	opts->index	= SNDRV_DEFAULT_IDX1;
	opts->id	= SNDRV_DEFAULT_STR1;
	opts->buflen	= 512;
	opts->qlen	= 32;
	opts->in_ports	= 1;
	opts->out_ports	= 1;
	opts->refcnt	= 1;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &midi_func_type);

	return &opts->func_inst;
}
/*
 * Drop one reference on the f_midi instance.  The instance is shared
 * between the USB function and the ALSA rawmidi device (free_ref); on
 * the final put its memory is released and the reference taken on the
 * options instance in f_midi_alloc() is dropped as well.
 */
static void f_midi_free(struct usb_function *f)
{
	struct f_midi *midi;
	struct f_midi_opts *opts;
	bool free = false;
	midi = func_to_midi(f);
	opts = container_of(f->fi, struct f_midi_opts, func_inst);
	/* opts->lock also serialises free_ref against concurrent puts. */
	mutex_lock(&opts->lock);
	if (!--midi->free_ref) {
		kfree(midi->id);
		kfifo_free(&midi->in_req_fifo);
		kfree(midi);
		free = true;
	}
	mutex_unlock(&opts->lock);
	/* Must run outside the lock: f_midi_free_inst() takes it again. */
	if (free)
		f_midi_free_inst(&opts->func_inst);
}
/* snd_rawmidi private_free callback: drop the rawmidi side's reference. */
static void f_midi_rmidi_free(struct snd_rawmidi *rmidi)
{
	struct usb_function *fn = rmidi->private_data;

	f_midi_free(fn);
}
/*
 * Unbind callback: tear down the ALSA card and free the descriptor
 * copies made in the bind path.
 */
static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	struct f_midi *midi = func_to_midi(f);
	struct snd_card *card;
	DBG(cdev, "unbind\n");
	/* just to be sure */
	f_midi_disable(f);
	/*
	 * Clear midi->card before releasing it so other paths see the
	 * card as gone; snd_card_free_when_closed() defers the actual
	 * free until userspace has closed the sound device.
	 */
	card = midi->card;
	midi->card = NULL;
	if (card)
		snd_card_free_when_closed(card);
	usb_free_all_descriptors(f);
}
/*
 * Allocate a new f_midi function from its options instance.
 *
 * Validates the configured port counts, allocates the instance
 * (including the trailing in_ports_array flexible member), copies the
 * options, and wires up the usb_function callbacks.  Takes one extra
 * reference on @fi, dropped again in f_midi_free().
 *
 * Returns the new usb_function or an ERR_PTR() on failure.
 */
static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
{
	struct f_midi *midi = NULL;
	struct f_midi_opts *opts;
	int status, i;
	opts = container_of(fi, struct f_midi_opts, func_inst);
	/* opts->lock keeps the options stable while we copy them. */
	mutex_lock(&opts->lock);
	/* sanity check */
	if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) {
		status = -EINVAL;
		goto setup_fail;
	}
	/* allocate and initialize one new instance */
	midi = kzalloc(struct_size(midi, in_ports_array, opts->in_ports),
		       GFP_KERNEL);
	if (!midi) {
		status = -ENOMEM;
		goto setup_fail;
	}
	/* Each input port gets its own USB-MIDI cable number. */
	for (i = 0; i < opts->in_ports; i++)
		midi->in_ports_array[i].cable = i;
	/* set up ALSA midi devices */
	midi->id = kstrdup(opts->id, GFP_KERNEL);
	/* kstrdup(NULL) returns NULL, so only a non-NULL id can fail. */
	if (opts->id && !midi->id) {
		status = -ENOMEM;
		goto midi_free;
	}
	midi->in_ports = opts->in_ports;
	midi->out_ports = opts->out_ports;
	midi->index = opts->index;
	midi->buflen = opts->buflen;
	midi->qlen = opts->qlen;
	midi->in_last_port = 0;
	/* One reference for the function; rawmidi takes another later. */
	midi->free_ref = 1;
	status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
	if (status)
		goto midi_free;
	spin_lock_init(&midi->transmit_lock);
	/* Pin the options while the function exists (see f_midi_free()). */
	++opts->refcnt;
	mutex_unlock(&opts->lock);
	midi->func.name = "gmidi function";
	midi->func.bind = f_midi_bind;
	midi->func.unbind = f_midi_unbind;
	midi->func.set_alt = f_midi_set_alt;
	midi->func.disable = f_midi_disable;
	midi->func.free_func = f_midi_free;
	return &midi->func;
midi_free:
	if (midi)
		kfree(midi->id);
	kfree(midi);
setup_fail:
	mutex_unlock(&opts->lock);
	return ERR_PTR(status);
}
DECLARE_USB_FUNCTION_INIT(midi, f_midi_alloc_inst, f_midi_alloc);
| linux-master | drivers/usb/gadget/function/f_midi.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.