// SPDX-License-Identifier: GPL-2.0+
/*
* f_subset.c -- "CDC Subset" Ethernet link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_gether.h"
/*
* This function packages a simple "CDC Subset" Ethernet port with no real
* control mechanisms; just raw data transfer over two bulk endpoints.
* The data transfer model is exactly that of CDC Ethernet, which is
* why we call it the "CDC Subset".
*
* Because it's not standardized, this has some interoperability issues.
* They mostly relate to driver binding, since the data transfer model is
* so simple (CDC Ethernet). The original versions of this protocol used
* specific product/vendor IDs: byteswapped IDs for Digital Equipment's
* SA-1100 "Itsy" board, which could run Linux 2.4 kernels and supported
* daughtercards with USB peripheral connectors. (It was used more often
* with other boards, using the Itsy identifiers.) Linux hosts recognized
* this with CONFIG_USB_ARMLINUX; these devices have only one configuration
* and one interface.
*
* At some point, MCCI defined a (nonconformant) CDC MDLM variant called
* "SAFE", which happens to have a mode which is identical to the "CDC
* Subset" in terms of data transfer and lack of control model. This was
* adopted by later Sharp Zaurus models, and by some other software which
* Linux hosts recognize with CONFIG_USB_NET_ZAURUS.
*
* Because Microsoft's RNDIS drivers are far from robust, we added a few
* descriptors to the CDC Subset code, making this code look like a SAFE
* implementation. This lets you use MCCI's host side MS-Windows drivers
* if you get fed up with RNDIS. It also makes it easier for composite
* drivers to work, since they can use class based binding instead of
* caring about specific product and vendor IDs.
*/
struct f_gether {
struct gether port;
char ethaddr[14];
};
static inline struct f_gether *func_to_geth(struct usb_function *f)
{
return container_of(f, struct f_gether, port.func);
}
/*-------------------------------------------------------------------------*/
/*
* "Simple" CDC-subset option is a simple vendor-neutral model that most
* full speed controllers can handle: one interface, two bulk endpoints.
* To assist host side drivers, we fancy it up a bit, and add descriptors so
* some host side drivers will understand it as a "SAFE" variant.
*
* "SAFE" loosely follows CDC WMC MDLM, violating the spec in various ways.
* Data endpoints live in the control interface, there's no data interface.
* And it's not used to talk to a cell phone radio.
*/
/* interface descriptor: */
static struct usb_interface_descriptor subset_data_intf = {
.bLength = sizeof subset_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 0,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
static struct usb_cdc_header_desc mdlm_header_desc = {
.bLength = sizeof mdlm_header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
.bcdCDC = cpu_to_le16(0x0110),
};
static struct usb_cdc_mdlm_desc mdlm_desc = {
.bLength = sizeof mdlm_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_MDLM_TYPE,
.bcdVersion = cpu_to_le16(0x0100),
.bGUID = {
0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
},
};
/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
* can't really use its struct. All we do here is say that we're using
* the submode of "SAFE" which directly matches the CDC Subset.
*/
static u8 mdlm_detail_desc[] = {
6,
USB_DT_CS_INTERFACE,
USB_CDC_MDLM_DETAIL_TYPE,
0, /* "SAFE" */
0, /* network control capabilities (none) */
0, /* network data capabilities ("raw" encapsulation) */
};
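/*
 * For reference, the six bytes above line up with the fields of
 * struct usb_cdc_mdlm_detail_desc from <linux/usb/cdc.h>; a sketch of
 * the same descriptor in struct form (not compiled here, since the
 * trailing detail data is variable length) would read:
 *
 *	.bLength		= 6
 *	.bDescriptorType	= USB_DT_CS_INTERFACE
 *	.bDescriptorSubType	= USB_CDC_MDLM_DETAIL_TYPE
 *	.bGuidDescriptorType	= 0		("SAFE")
 *	.bDetailData		= { 0, 0 }	(no control, raw data)
 */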
static struct usb_cdc_ether_desc ether_desc = {
.bLength = sizeof ether_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
/* this descriptor actually adds value, surprise! */
/* .iMACAddress = DYNAMIC */
.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
.wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
.wNumberMCFilters = cpu_to_le16(0),
.bNumberPowerFilters = 0,
};
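/* ETH_FRAME_LEN is 1514: a 14 byte Ethernet header plus a 1500 byte
 * payload, so wMaxSegmentSize above advertises full-sized (non-jumbo)
 * frames.
 */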
/* full speed support: */
static struct usb_endpoint_descriptor fs_subset_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor fs_subset_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *fs_eth_function[] = {
(struct usb_descriptor_header *) &subset_data_intf,
(struct usb_descriptor_header *) &mdlm_header_desc,
(struct usb_descriptor_header *) &mdlm_desc,
(struct usb_descriptor_header *) &mdlm_detail_desc,
(struct usb_descriptor_header *) &ether_desc,
(struct usb_descriptor_header *) &fs_subset_in_desc,
(struct usb_descriptor_header *) &fs_subset_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor hs_subset_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor hs_subset_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *hs_eth_function[] = {
(struct usb_descriptor_header *) &subset_data_intf,
(struct usb_descriptor_header *) &mdlm_header_desc,
(struct usb_descriptor_header *) &mdlm_desc,
(struct usb_descriptor_header *) &mdlm_detail_desc,
(struct usb_descriptor_header *) &ether_desc,
(struct usb_descriptor_header *) &hs_subset_in_desc,
(struct usb_descriptor_header *) &hs_subset_out_desc,
NULL,
};
/* super speed support: */
static struct usb_endpoint_descriptor ss_subset_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor ss_subset_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = {
.bLength = sizeof ss_subset_bulk_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
static struct usb_descriptor_header *ss_eth_function[] = {
(struct usb_descriptor_header *) &subset_data_intf,
(struct usb_descriptor_header *) &mdlm_header_desc,
(struct usb_descriptor_header *) &mdlm_desc,
(struct usb_descriptor_header *) &mdlm_detail_desc,
(struct usb_descriptor_header *) &ether_desc,
(struct usb_descriptor_header *) &ss_subset_in_desc,
(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
(struct usb_descriptor_header *) &ss_subset_out_desc,
(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
NULL,
};
/* string descriptors: */
static struct usb_string geth_string_defs[] = {
[0].s = "CDC Ethernet Subset/SAFE",
[1].s = "",
{ } /* end of list */
};
static struct usb_gadget_strings geth_string_table = {
.language = 0x0409, /* en-us */
.strings = geth_string_defs,
};
static struct usb_gadget_strings *geth_strings[] = {
&geth_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gether *geth = func_to_geth(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct net_device *net;
/* we know alt == 0, so this is an activation or a reset */
if (geth->port.in_ep->enabled) {
DBG(cdev, "reset cdc subset\n");
gether_disconnect(&geth->port);
}
DBG(cdev, "init + activate cdc subset\n");
if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) ||
config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) {
geth->port.in_ep->desc = NULL;
geth->port.out_ep->desc = NULL;
return -EINVAL;
}
net = gether_connect(&geth->port);
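	/* gether_connect() hands back the net_device or an ERR_PTR();
	 * PTR_ERR_OR_ZERO() maps success to 0 and failure to the negative
	 * errno encoded in the pointer.
	 */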
return PTR_ERR_OR_ZERO(net);
}
static void geth_disable(struct usb_function *f)
{
struct f_gether *geth = func_to_geth(f);
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "net deactivated\n");
gether_disconnect(&geth->port);
}
/*-------------------------------------------------------------------------*/
/* serial function driver setup/binding */
static int
geth_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_gether *geth = func_to_geth(f);
struct usb_string *us;
int status;
struct usb_ep *ep;
struct f_gether_opts *gether_opts;
gether_opts = container_of(f->fi, struct f_gether_opts, func_inst);
/*
* in drivers/usb/gadget/configfs.c:configfs_composite_bind()
* configurations are bound in sequence with list_for_each_entry,
* in each configuration its functions are bound in sequence
* with list_for_each_entry, so we assume no race condition
* with regard to gether_opts->bound access
*/
if (!gether_opts->bound) {
mutex_lock(&gether_opts->lock);
gether_set_gadget(gether_opts->net, cdev->gadget);
status = gether_register_netdev(gether_opts->net);
mutex_unlock(&gether_opts->lock);
if (status)
return status;
gether_opts->bound = true;
}
us = usb_gstrings_attach(cdev, geth_strings,
ARRAY_SIZE(geth_string_defs));
if (IS_ERR(us))
return PTR_ERR(us);
subset_data_intf.iInterface = us[0].id;
ether_desc.iMACAddress = us[1].id;
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
subset_data_intf.bInterfaceNumber = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc);
if (!ep)
goto fail;
geth->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc);
if (!ep)
goto fail;
geth->port.out_ep = ep;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
hs_subset_in_desc.bEndpointAddress = fs_subset_in_desc.bEndpointAddress;
hs_subset_out_desc.bEndpointAddress =
fs_subset_out_desc.bEndpointAddress;
ss_subset_in_desc.bEndpointAddress = fs_subset_in_desc.bEndpointAddress;
ss_subset_out_desc.bEndpointAddress =
fs_subset_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
ss_eth_function, ss_eth_function);
if (status)
goto fail;
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
* until we're activated via set_alt().
*/
DBG(cdev, "CDC Subset: IN/%s OUT/%s\n",
geth->port.in_ep->name, geth->port.out_ep->name);
return 0;
fail:
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static inline struct f_gether_opts *to_f_gether_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_gether_opts,
func_inst.group);
}
/* f_gether_item_ops */
USB_ETHERNET_CONFIGFS_ITEM(gether);
/* f_gether_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(gether);
/* f_gether_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(gether);
/* f_gether_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(gether);
/* f_gether_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(gether);
static struct configfs_attribute *gether_attrs[] = {
&gether_opts_attr_dev_addr,
&gether_opts_attr_host_addr,
&gether_opts_attr_qmult,
&gether_opts_attr_ifname,
NULL,
};
static const struct config_item_type gether_func_type = {
.ct_item_ops = &gether_item_ops,
.ct_attrs = gether_attrs,
.ct_owner = THIS_MODULE,
};
static void geth_free_inst(struct usb_function_instance *f)
{
struct f_gether_opts *opts;
opts = container_of(f, struct f_gether_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
else
free_netdev(opts->net);
kfree(opts);
}
static struct usb_function_instance *geth_alloc_inst(void)
{
struct f_gether_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = geth_free_inst;
opts->net = gether_setup_default();
if (IS_ERR(opts->net)) {
struct net_device *net = opts->net;
kfree(opts);
return ERR_CAST(net);
}
config_group_init_type_name(&opts->func_inst.group, "",
&gether_func_type);
return &opts->func_inst;
}
static void geth_free(struct usb_function *f)
{
struct f_gether *eth;
eth = func_to_geth(f);
kfree(eth);
}
static void geth_unbind(struct usb_configuration *c, struct usb_function *f)
{
geth_string_defs[0].id = 0;
usb_free_all_descriptors(f);
}
static struct usb_function *geth_alloc(struct usb_function_instance *fi)
{
struct f_gether *geth;
struct f_gether_opts *opts;
int status;
/* allocate and initialize one new instance */
geth = kzalloc(sizeof(*geth), GFP_KERNEL);
if (!geth)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_gether_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt++;
/* export host's Ethernet address in CDC format */
status = gether_get_host_addr_cdc(opts->net, geth->ethaddr,
sizeof(geth->ethaddr));
if (status < 12) {
kfree(geth);
mutex_unlock(&opts->lock);
return ERR_PTR(-EINVAL);
}
geth_string_defs[1].s = geth->ethaddr;
geth->port.ioport = netdev_priv(opts->net);
mutex_unlock(&opts->lock);
geth->port.cdc_filter = DEFAULT_FILTER;
geth->port.func.name = "cdc_subset";
geth->port.func.bind = geth_bind;
geth->port.func.unbind = geth_unbind;
geth->port.func.set_alt = geth_set_alt;
geth->port.func.disable = geth_disable;
geth->port.func.free_func = geth_free;
return &geth->port.func;
}
DECLARE_USB_FUNCTION_INIT(geth, geth_alloc_inst, geth_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
/* end of linux-master: drivers/usb/gadget/function/f_subset.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac2.c -- USB Audio Class 2.0 Function
*
* Copyright (C) 2011
* Yadwinder Singh ([email protected])
* Jaswinder Singh ([email protected])
*
* Copyright (C) 2020
* Ruslan Bilovol ([email protected])
*/
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/module.h>
#include "u_audio.h"
#include "u_uac2.h"
/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
#define UAC2_CHANNEL_MASK 0x07FFFFFF
/*
* The driver implements a simple UAC2 topology.
* USB-OUT -> IT_1 -> FU -> OT_3 -> ALSA_Capture
* ALSA_Playback -> IT_2 -> FU -> OT_4 -> USB-IN
* Capture and Playback sampling rates are independently
* controlled by two clock sources:
* CLK_5 := c_srate, and CLK_6 := p_srate
*/
#define USB_OUT_CLK_ID (out_clk_src_desc.bClockID)
#define USB_IN_CLK_ID (in_clk_src_desc.bClockID)
#define USB_OUT_FU_ID (out_feature_unit_desc->bUnitID)
#define USB_IN_FU_ID (in_feature_unit_desc->bUnitID)
#define CONTROL_ABSENT 0
#define CONTROL_RDONLY 1
#define CONTROL_RDWR 3
#define CLK_FREQ_CTRL 0
#define CLK_VLD_CTRL 2
#define FU_MUTE_CTRL 0
#define FU_VOL_CTRL 2
#define COPY_CTRL 0
#define CONN_CTRL 2
#define OVRLD_CTRL 4
#define CLSTR_CTRL 6
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
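/*
 * bmControls packs one 2-bit field per control at the offsets above
 * (0 = absent, 1 = read-only, 3 = read/write). As a worked example, a
 * hypothetical clock source with a host-writable sampling frequency
 * and a read-only validity flag would advertise
 *
 *	(CONTROL_RDWR << CLK_FREQ_CTRL) | (CONTROL_RDONLY << CLK_VLD_CTRL)
 *		= (3 << 0) | (1 << 2) = 0x07
 */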
#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
#define FUIN_EN(_opts) (EPIN_EN(_opts) \
&& ((_opts)->p_mute_present \
|| (_opts)->p_volume_present))
#define FUOUT_EN(_opts) (EPOUT_EN(_opts) \
&& ((_opts)->c_mute_present \
|| (_opts)->c_volume_present))
#define EPOUT_FBACK_IN_EN(_opts) ((_opts)->c_sync == USB_ENDPOINT_SYNC_ASYNC)
struct f_uac2 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
struct usb_ctrlrequest setup_cr; /* will be used in data stage */
/* Interrupt IN endpoint of AC interface */
struct usb_ep *int_ep;
atomic_t int_count;
/* transient state, only valid during handling of a single control request */
int clock_id;
};
static inline struct f_uac2 *func_to_uac2(struct usb_function *f)
{
return container_of(f, struct f_uac2, g_audio.func);
}
static inline
struct f_uac2_opts *g_audio_to_uac2_opts(struct g_audio *agdev)
{
return container_of(agdev->func.fi, struct f_uac2_opts, func_inst);
}
static int afunc_notify(struct g_audio *agdev, int unit_id, int cs);
/* --------- USB Function Interface ------------- */
enum {
STR_ASSOC,
STR_IF_CTRL,
STR_CLKSRC_IN,
STR_CLKSRC_OUT,
STR_USB_IT,
STR_IO_IT,
STR_USB_OT,
STR_IO_OT,
STR_FU_IN,
STR_FU_OUT,
STR_AS_OUT_ALT0,
STR_AS_OUT_ALT1,
STR_AS_IN_ALT0,
STR_AS_IN_ALT1,
};
static struct usb_string strings_fn[] = {
/* [STR_ASSOC].s = DYNAMIC, */
[STR_IF_CTRL].s = "Topology Control",
[STR_CLKSRC_IN].s = "Input Clock",
[STR_CLKSRC_OUT].s = "Output Clock",
[STR_USB_IT].s = "USBH Out",
[STR_IO_IT].s = "USBD Out",
[STR_USB_OT].s = "USBH In",
[STR_IO_OT].s = "USBD In",
[STR_FU_IN].s = "Capture Volume",
[STR_FU_OUT].s = "Playback Volume",
[STR_AS_OUT_ALT0].s = "Playback Inactive",
[STR_AS_OUT_ALT1].s = "Playback Active",
[STR_AS_IN_ALT0].s = "Capture Inactive",
[STR_AS_IN_ALT1].s = "Capture Active",
{ },
};
static const char *const speed_names[] = {
[USB_SPEED_UNKNOWN] = "UNKNOWN",
[USB_SPEED_LOW] = "LS",
[USB_SPEED_FULL] = "FS",
[USB_SPEED_HIGH] = "HS",
[USB_SPEED_WIRELESS] = "W",
[USB_SPEED_SUPER] = "SS",
[USB_SPEED_SUPER_PLUS] = "SS+",
};
static struct usb_gadget_strings str_fn = {
.language = 0x0409, /* en-us */
.strings = strings_fn,
};
static struct usb_gadget_strings *fn_strings[] = {
&str_fn,
NULL,
};
static struct usb_interface_assoc_descriptor iad_desc = {
.bLength = sizeof iad_desc,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
.bFirstInterface = 0,
.bInterfaceCount = 3,
.bFunctionClass = USB_CLASS_AUDIO,
.bFunctionSubClass = UAC2_FUNCTION_SUBCLASS_UNDEFINED,
.bFunctionProtocol = UAC_VERSION_2,
};
/* Audio Control Interface */
static struct usb_interface_descriptor std_ac_if_desc = {
.bLength = sizeof std_ac_if_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
/* .bNumEndpoints = DYNAMIC */
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
.bInterfaceProtocol = UAC_VERSION_2,
};
/* Clock source for IN traffic */
static struct uac_clock_source_descriptor in_clk_src_desc = {
.bLength = sizeof in_clk_src_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
/* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDWR << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
};
/* Clock source for OUT traffic */
static struct uac_clock_source_descriptor out_clk_src_desc = {
.bLength = sizeof out_clk_src_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
/* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDWR << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
};
/* Input Terminal for USB_OUT */
static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bLength = sizeof usb_out_it_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Input Terminal for I/O-In */
static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bLength = sizeof io_in_it_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Output Terminal for USB_IN */
static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bLength = sizeof usb_in_ot_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Output Terminal for I/O-Out */
static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bLength = sizeof io_out_ot_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
static struct uac2_feature_unit_descriptor *in_feature_unit_desc;
static struct uac2_feature_unit_descriptor *out_feature_unit_desc;
static struct uac2_ac_header_descriptor ac_hdr_desc = {
.bLength = sizeof ac_hdr_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_MS_HEADER,
.bcdADC = cpu_to_le16(0x200),
.bCategory = UAC2_FUNCTION_IO_BOX,
/* .wTotalLength = DYNAMIC */
.bmControls = 0,
};
/* AC IN Interrupt Endpoint */
static struct usb_endpoint_descriptor fs_ep_int_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(6),
.bInterval = 1,
};
static struct usb_endpoint_descriptor hs_ep_int_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(6),
.bInterval = 4,
};
static struct usb_endpoint_descriptor ss_ep_int_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(6),
.bInterval = 4,
};
static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
.bLength = sizeof(ss_ep_int_desc_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.wBytesPerInterval = cpu_to_le16(6),
};
/* Audio Streaming OUT Interface - Alt0 */
static struct usb_interface_descriptor std_as_out_if0_desc = {
.bLength = sizeof std_as_out_if0_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
.bInterfaceProtocol = UAC_VERSION_2,
};
/* Audio Streaming OUT Interface - Alt1 */
static struct usb_interface_descriptor std_as_out_if1_desc = {
.bLength = sizeof std_as_out_if1_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
.bInterfaceProtocol = UAC_VERSION_2,
};
/* Audio Stream OUT Interface Desc */
static struct uac2_as_header_descriptor as_out_hdr_desc = {
.bLength = sizeof as_out_hdr_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
/* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
.iChannelNames = 0,
};
/* Audio USB_OUT Format */
static struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
.bLength = sizeof as_out_fmt1_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
.bFormatType = UAC_FORMAT_TYPE_I,
};
/* STD AS ISO OUT Endpoint */
static struct usb_endpoint_descriptor fs_epout_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
/* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
static struct usb_endpoint_descriptor hs_epout_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
/* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
/* .bInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor ss_epout_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
/* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
/* .bInterval = DYNAMIC */
};
static struct usb_ss_ep_comp_descriptor ss_epout_desc_comp = {
.bLength = sizeof(ss_epout_desc_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
/* wBytesPerInterval = DYNAMIC */
};
/* CS AS ISO OUT Endpoint */
static struct uac2_iso_endpoint_descriptor as_iso_out_desc = {
.bLength = sizeof as_iso_out_desc,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 0,
.bmControls = 0,
.bLockDelayUnits = 0,
.wLockDelay = 0,
};
/* STD AS ISO IN Feedback Endpoint */
static struct usb_endpoint_descriptor fs_epin_fback_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
.wMaxPacketSize = cpu_to_le16(3),
.bInterval = 1,
};
static struct usb_endpoint_descriptor hs_epin_fback_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
.wMaxPacketSize = cpu_to_le16(4),
.bInterval = 4,
};
static struct usb_endpoint_descriptor ss_epin_fback_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
.wMaxPacketSize = cpu_to_le16(4),
.bInterval = 4,
};
static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
.bLength = sizeof(ss_epin_fback_desc_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = cpu_to_le16(4),
};
/* Audio Streaming IN Interface - Alt0 */
static struct usb_interface_descriptor std_as_in_if0_desc = {
.bLength = sizeof std_as_in_if0_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
.bInterfaceProtocol = UAC_VERSION_2,
};
/* Audio Streaming IN Interface - Alt1 */
static struct usb_interface_descriptor std_as_in_if1_desc = {
.bLength = sizeof std_as_in_if1_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
.bInterfaceProtocol = UAC_VERSION_2,
};
/* Audio Stream IN Interface Desc */
static struct uac2_as_header_descriptor as_in_hdr_desc = {
.bLength = sizeof as_in_hdr_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
/* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
.iChannelNames = 0,
};
/* Audio USB_IN Format */
static struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
.bLength = sizeof as_in_fmt1_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
.bFormatType = UAC_FORMAT_TYPE_I,
};
/* STD AS ISO IN Endpoint */
static struct usb_endpoint_descriptor fs_epin_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
/* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
static struct usb_endpoint_descriptor hs_epin_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
/* .wMaxPacketSize = DYNAMIC */
/* .bInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor ss_epin_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
/* .wMaxPacketSize = DYNAMIC */
/* .bInterval = DYNAMIC */
};
static struct usb_ss_ep_comp_descriptor ss_epin_desc_comp = {
.bLength = sizeof(ss_epin_desc_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
/* wBytesPerInterval = DYNAMIC */
};
/* CS AS ISO IN Endpoint */
static struct uac2_iso_endpoint_descriptor as_iso_in_desc = {
.bLength = sizeof as_iso_in_desc,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 0,
.bmControls = 0,
.bLockDelayUnits = 0,
.wLockDelay = 0,
};
static struct usb_descriptor_header *fs_audio_desc[] = {
(struct usb_descriptor_header *)&iad_desc,
(struct usb_descriptor_header *)&std_ac_if_desc,
(struct usb_descriptor_header *)&ac_hdr_desc,
(struct usb_descriptor_header *)&in_clk_src_desc,
(struct usb_descriptor_header *)&out_clk_src_desc,
(struct usb_descriptor_header *)&usb_out_it_desc,
(struct usb_descriptor_header *)&out_feature_unit_desc,
(struct usb_descriptor_header *)&io_in_it_desc,
(struct usb_descriptor_header *)&usb_in_ot_desc,
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
(struct usb_descriptor_header *)&fs_ep_int_desc,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
(struct usb_descriptor_header *)&as_out_hdr_desc,
(struct usb_descriptor_header *)&as_out_fmt1_desc,
(struct usb_descriptor_header *)&fs_epout_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&fs_epin_fback_desc,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
(struct usb_descriptor_header *)&as_in_hdr_desc,
(struct usb_descriptor_header *)&as_in_fmt1_desc,
(struct usb_descriptor_header *)&fs_epin_desc,
(struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
static struct usb_descriptor_header *hs_audio_desc[] = {
(struct usb_descriptor_header *)&iad_desc,
(struct usb_descriptor_header *)&std_ac_if_desc,
(struct usb_descriptor_header *)&ac_hdr_desc,
(struct usb_descriptor_header *)&in_clk_src_desc,
(struct usb_descriptor_header *)&out_clk_src_desc,
(struct usb_descriptor_header *)&usb_out_it_desc,
(struct usb_descriptor_header *)&out_feature_unit_desc,
(struct usb_descriptor_header *)&io_in_it_desc,
(struct usb_descriptor_header *)&usb_in_ot_desc,
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
(struct usb_descriptor_header *)&hs_ep_int_desc,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
(struct usb_descriptor_header *)&as_out_hdr_desc,
(struct usb_descriptor_header *)&as_out_fmt1_desc,
(struct usb_descriptor_header *)&hs_epout_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&hs_epin_fback_desc,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
(struct usb_descriptor_header *)&as_in_hdr_desc,
(struct usb_descriptor_header *)&as_in_fmt1_desc,
(struct usb_descriptor_header *)&hs_epin_desc,
(struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&iad_desc,
(struct usb_descriptor_header *)&std_ac_if_desc,
(struct usb_descriptor_header *)&ac_hdr_desc,
(struct usb_descriptor_header *)&in_clk_src_desc,
(struct usb_descriptor_header *)&out_clk_src_desc,
(struct usb_descriptor_header *)&usb_out_it_desc,
(struct usb_descriptor_header *)&out_feature_unit_desc,
(struct usb_descriptor_header *)&io_in_it_desc,
(struct usb_descriptor_header *)&usb_in_ot_desc,
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
(struct usb_descriptor_header *)&ss_ep_int_desc,
(struct usb_descriptor_header *)&ss_ep_int_desc_comp,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
(struct usb_descriptor_header *)&as_out_hdr_desc,
(struct usb_descriptor_header *)&as_out_fmt1_desc,
(struct usb_descriptor_header *)&ss_epout_desc,
(struct usb_descriptor_header *)&ss_epout_desc_comp,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&ss_epin_fback_desc,
(struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
(struct usb_descriptor_header *)&as_in_hdr_desc,
(struct usb_descriptor_header *)&as_in_fmt1_desc,
(struct usb_descriptor_header *)&ss_epin_desc,
(struct usb_descriptor_header *)&ss_epin_desc_comp,
(struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
struct cntrl_cur_lay2 {
__le16 wCUR;
};
struct cntrl_range_lay2 {
__le16 wNumSubRanges;
__le16 wMIN;
__le16 wMAX;
__le16 wRES;
} __packed;
struct cntrl_cur_lay3 {
__le32 dCUR;
};
struct cntrl_subrange_lay3 {
__le32 dMIN;
__le32 dMAX;
__le32 dRES;
} __packed;
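/*
 * Per the UAC2 spec (5.2.3, control request parameter blocks), layout 2
 * blocks carry 16-bit values (used here for volume, in 1/256 dB steps)
 * and layout 3 blocks carry 32-bit values (used here for sample rates
 * in Hz).
 */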
#define ranges_lay3_size(c) (sizeof(c.wNumSubRanges) \
+ le16_to_cpu(c.wNumSubRanges) \
* sizeof(struct cntrl_subrange_lay3))
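/*
 * e.g. a RANGE reply listing three discrete rates occupies
 * sizeof(wNumSubRanges) + 3 * sizeof(struct cntrl_subrange_lay3)
 *	= 2 + 3 * 12 = 38 bytes.
 */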
#define DECLARE_UAC2_CNTRL_RANGES_LAY3(k, n) \
struct cntrl_ranges_lay3_##k { \
__le16 wNumSubRanges; \
struct cntrl_subrange_lay3 r[n]; \
} __packed
DECLARE_UAC2_CNTRL_RANGES_LAY3(srates, UAC_MAX_RATES);
static int get_max_srate(const int *srates)
{
int i, max_srate = 0;
for (i = 0; i < UAC_MAX_RATES; i++) {
if (srates[i] == 0)
break;
if (srates[i] > max_srate)
max_srate = srates[i];
}
return max_srate;
}
static int get_max_bw_for_bint(const struct f_uac2_opts *uac2_opts,
u8 bint, unsigned int factor, bool is_playback)
{
int chmask, srate, ssize;
u16 max_size_bw;
if (is_playback) {
chmask = uac2_opts->p_chmask;
srate = get_max_srate(uac2_opts->p_srates);
ssize = uac2_opts->p_ssize;
} else {
chmask = uac2_opts->c_chmask;
srate = get_max_srate(uac2_opts->c_srates);
ssize = uac2_opts->c_ssize;
}
if (is_playback || (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
// playback is always async, capture only when configured
// Win10 requires max packet size + 1 frame
srate = srate * (1000 + uac2_opts->fb_max) / 1000;
// updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
max_size_bw = num_channels(chmask) * ssize *
(DIV_ROUND_UP(srate, factor / (1 << (bint - 1))));
} else {
// adding 1 frame provision for Win10
max_size_bw = num_channels(chmask) * ssize *
(DIV_ROUND_UP(srate, factor / (1 << (bint - 1))) + 1);
}
return max_size_bw;
}
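/*
 * Worked example for a hypothetical configuration: stereo 16-bit
 * playback (2 channels, ssize = 2) at 48000 Hz on high speed with
 * bint = 4, factor = 8000 and fb_max = 5 (the default):
 *
 *	srate	= 48000 * (1000 + 5) / 1000 = 48240
 *	packets	= factor / (1 << (bint - 1)) = 8000 / 8 = 1000 per second
 *	max_size_bw = 2 * 2 * DIV_ROUND_UP(48240, 1000) = 4 * 49 = 196
 */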
static int set_ep_max_packet_size_bint(struct device *dev, const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
enum usb_device_speed speed, bool is_playback)
{
u16 max_size_bw, max_size_ep;
u8 bint, opts_bint;
char *dir;
switch (speed) {
case USB_SPEED_FULL:
max_size_ep = 1023;
// fixed
bint = ep_desc->bInterval;
max_size_bw = get_max_bw_for_bint(uac2_opts, bint, 1000, is_playback);
break;
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
max_size_ep = 1024;
if (is_playback)
opts_bint = uac2_opts->p_hs_bint;
else
opts_bint = uac2_opts->c_hs_bint;
if (opts_bint > 0) {
/* fixed bint */
bint = opts_bint;
max_size_bw = get_max_bw_for_bint(uac2_opts, bint, 8000, is_playback);
} else {
/* check bInterval from 4 down to 1 until the required bandwidth fits */
for (bint = 4; bint > 0; --bint) {
max_size_bw = get_max_bw_for_bint(
uac2_opts, bint, 8000, is_playback);
if (max_size_bw <= max_size_ep)
break;
}
}
break;
default:
return -EINVAL;
}
if (is_playback)
dir = "Playback";
else
dir = "Capture";
if (max_size_bw <= max_size_ep)
dev_dbg(dev,
"%s %s: Would use wMaxPacketSize %d and bInterval %d\n",
speed_names[speed], dir, max_size_bw, bint);
else {
dev_warn(dev,
"%s %s: Req. wMaxPacketSize %d at bInterval %d > max ISOC %d, may drop data!\n",
speed_names[speed], dir, max_size_bw, bint, max_size_ep);
max_size_bw = max_size_ep;
}
ep_desc->wMaxPacketSize = cpu_to_le16(max_size_bw);
ep_desc->bInterval = bint;
return 0;
}
static struct uac2_feature_unit_descriptor *build_fu_desc(int chmask)
{
struct uac2_feature_unit_descriptor *fu_desc;
int channels = num_channels(chmask);
int fu_desc_size = UAC2_DT_FEATURE_UNIT_SIZE(channels);
fu_desc = kzalloc(fu_desc_size, GFP_KERNEL);
if (!fu_desc)
return NULL;
fu_desc->bLength = fu_desc_size;
fu_desc->bDescriptorType = USB_DT_CS_INTERFACE;
fu_desc->bDescriptorSubtype = UAC_FEATURE_UNIT;
/* bUnitID, bSourceID and bmaControls will be defined later */
return fu_desc;
}
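/*
 * Per the UAC2 spec, a feature unit descriptor carries one 32-bit
 * bmaControls word for the master channel plus one per logical
 * channel, so e.g. a stereo unit occupies
 * UAC2_DT_FEATURE_UNIT_SIZE(2) = 6 + (2 + 1) * 4 = 18 bytes.
 */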
/* Use macro to overcome line length limitation */
#define USBDHDR(p) (struct usb_descriptor_header *)(p)
static void setup_headers(struct f_uac2_opts *opts,
struct usb_descriptor_header **headers,
enum usb_device_speed speed)
{
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
struct usb_endpoint_descriptor *ep_int_desc;
int i;
switch (speed) {
case USB_SPEED_FULL:
epout_desc = &fs_epout_desc;
epin_desc = &fs_epin_desc;
epin_fback_desc = &fs_epin_fback_desc;
ep_int_desc = &fs_ep_int_desc;
break;
case USB_SPEED_HIGH:
epout_desc = &hs_epout_desc;
epin_desc = &hs_epin_desc;
epin_fback_desc = &hs_epin_fback_desc;
ep_int_desc = &hs_ep_int_desc;
break;
default:
epout_desc = &ss_epout_desc;
epin_desc = &ss_epin_desc;
epout_desc_comp = &ss_epout_desc_comp;
epin_desc_comp = &ss_epin_desc_comp;
epin_fback_desc = &ss_epin_fback_desc;
epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
ep_int_desc_comp = &ss_ep_int_desc_comp;
}
i = 0;
headers[i++] = USBDHDR(&iad_desc);
headers[i++] = USBDHDR(&std_ac_if_desc);
headers[i++] = USBDHDR(&ac_hdr_desc);
if (EPIN_EN(opts))
headers[i++] = USBDHDR(&in_clk_src_desc);
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&out_clk_src_desc);
headers[i++] = USBDHDR(&usb_out_it_desc);
if (FUOUT_EN(opts))
headers[i++] = USBDHDR(out_feature_unit_desc);
}
if (EPIN_EN(opts)) {
headers[i++] = USBDHDR(&io_in_it_desc);
if (FUIN_EN(opts))
headers[i++] = USBDHDR(in_feature_unit_desc);
headers[i++] = USBDHDR(&usb_in_ot_desc);
}
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
if (FUOUT_EN(opts) || FUIN_EN(opts)) {
headers[i++] = USBDHDR(ep_int_desc);
if (ep_int_desc_comp)
headers[i++] = USBDHDR(ep_int_desc_comp);
}
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
headers[i++] = USBDHDR(&std_as_out_if1_desc);
headers[i++] = USBDHDR(&as_out_hdr_desc);
headers[i++] = USBDHDR(&as_out_fmt1_desc);
headers[i++] = USBDHDR(epout_desc);
if (epout_desc_comp)
headers[i++] = USBDHDR(epout_desc_comp);
headers[i++] = USBDHDR(&as_iso_out_desc);
if (EPOUT_FBACK_IN_EN(opts)) {
headers[i++] = USBDHDR(epin_fback_desc);
if (epin_fback_desc_comp)
headers[i++] = USBDHDR(epin_fback_desc_comp);
}
}
if (EPIN_EN(opts)) {
headers[i++] = USBDHDR(&std_as_in_if0_desc);
headers[i++] = USBDHDR(&std_as_in_if1_desc);
headers[i++] = USBDHDR(&as_in_hdr_desc);
headers[i++] = USBDHDR(&as_in_fmt1_desc);
headers[i++] = USBDHDR(epin_desc);
if (epin_desc_comp)
headers[i++] = USBDHDR(epin_desc_comp);
headers[i++] = USBDHDR(&as_iso_in_desc);
}
headers[i] = NULL;
}
static void setup_descriptor(struct f_uac2_opts *opts)
{
/* patch descriptors */
int i = 1; /* IDs start at 1 */
if (EPOUT_EN(opts))
usb_out_it_desc.bTerminalID = i++;
if (EPIN_EN(opts))
io_in_it_desc.bTerminalID = i++;
if (EPOUT_EN(opts))
io_out_ot_desc.bTerminalID = i++;
if (EPIN_EN(opts))
usb_in_ot_desc.bTerminalID = i++;
if (FUOUT_EN(opts))
out_feature_unit_desc->bUnitID = i++;
if (FUIN_EN(opts))
in_feature_unit_desc->bUnitID = i++;
if (EPOUT_EN(opts))
out_clk_src_desc.bClockID = i++;
if (EPIN_EN(opts))
in_clk_src_desc.bClockID = i++;
usb_out_it_desc.bCSourceID = out_clk_src_desc.bClockID;
if (FUIN_EN(opts)) {
usb_in_ot_desc.bSourceID = in_feature_unit_desc->bUnitID;
in_feature_unit_desc->bSourceID = io_in_it_desc.bTerminalID;
} else {
usb_in_ot_desc.bSourceID = io_in_it_desc.bTerminalID;
}
usb_in_ot_desc.bCSourceID = in_clk_src_desc.bClockID;
io_in_it_desc.bCSourceID = in_clk_src_desc.bClockID;
io_out_ot_desc.bCSourceID = out_clk_src_desc.bClockID;
if (FUOUT_EN(opts)) {
io_out_ot_desc.bSourceID = out_feature_unit_desc->bUnitID;
out_feature_unit_desc->bSourceID = usb_out_it_desc.bTerminalID;
} else {
io_out_ot_desc.bSourceID = usb_out_it_desc.bTerminalID;
}
as_out_hdr_desc.bTerminalLink = usb_out_it_desc.bTerminalID;
as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
iad_desc.bInterfaceCount = 1;
ac_hdr_desc.wTotalLength = cpu_to_le16(sizeof(ac_hdr_desc));
if (EPIN_EN(opts)) {
u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
len += sizeof(in_clk_src_desc);
len += sizeof(usb_in_ot_desc);
if (FUIN_EN(opts))
len += in_feature_unit_desc->bLength;
len += sizeof(io_in_it_desc);
ac_hdr_desc.wTotalLength = cpu_to_le16(len);
iad_desc.bInterfaceCount++;
}
if (EPOUT_EN(opts)) {
u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
len += sizeof(out_clk_src_desc);
len += sizeof(usb_out_it_desc);
if (FUOUT_EN(opts))
len += out_feature_unit_desc->bLength;
len += sizeof(io_out_ot_desc);
ac_hdr_desc.wTotalLength = cpu_to_le16(len);
iad_desc.bInterfaceCount++;
}
setup_headers(opts, fs_audio_desc, USB_SPEED_FULL);
setup_headers(opts, hs_audio_desc, USB_SPEED_HIGH);
setup_headers(opts, ss_audio_desc, USB_SPEED_SUPER);
}
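/*
 * For illustration: with both stream directions and both feature units
 * enabled, the sequential assignment above yields
 *
 *	usb_out_it = 1, io_in_it = 2, io_out_ot = 3, usb_in_ot = 4,
 *	out_feature_unit = 5, in_feature_unit = 6,
 *	out_clk_src = 7, in_clk_src = 8
 *
 * so the fixed IDs in the topology comment near the top of this file
 * match only the configurations without feature units.
 */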
static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
{
struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
const char *msg = NULL;
if (!opts->p_chmask && !opts->c_chmask)
msg = "no playback and capture channels";
else if (opts->p_chmask & ~UAC2_CHANNEL_MASK)
msg = "unsupported playback channels mask";
else if (opts->c_chmask & ~UAC2_CHANNEL_MASK)
msg = "unsupported capture channels mask";
else if ((opts->p_ssize < 1) || (opts->p_ssize > 4))
msg = "incorrect playback sample size";
else if ((opts->c_ssize < 1) || (opts->c_ssize > 4))
msg = "incorrect capture sample size";
else if (!opts->p_srates[0])
msg = "incorrect playback sampling rate";
else if (!opts->c_srates[0])
msg = "incorrect capture sampling rate";
else if (opts->p_volume_max <= opts->p_volume_min)
msg = "incorrect playback volume max/min";
else if (opts->c_volume_max <= opts->c_volume_min)
msg = "incorrect capture volume max/min";
else if (opts->p_volume_res <= 0)
msg = "negative/zero playback volume resolution";
else if (opts->c_volume_res <= 0)
msg = "negative/zero capture volume resolution";
else if ((opts->p_volume_max - opts->p_volume_min) % opts->p_volume_res)
msg = "incorrect playback volume resolution";
else if ((opts->c_volume_max - opts->c_volume_min) % opts->c_volume_res)
msg = "incorrect capture volume resolution";
else if ((opts->p_hs_bint < 0) || (opts->p_hs_bint > 4))
msg = "incorrect playback HS/SS bInterval (1-4: fixed, 0: auto)";
else if ((opts->c_hs_bint < 0) || (opts->c_hs_bint > 4))
msg = "incorrect capture HS/SS bInterval (1-4: fixed, 0: auto)";
if (msg) {
dev_err(dev, "Error: %s\n", msg);
return -EINVAL;
}
return 0;
}
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
struct f_uac2 *uac2 = func_to_uac2(fn);
struct g_audio *agdev = func_to_g_audio(fn);
struct usb_composite_dev *cdev = cfg->cdev;
struct usb_gadget *gadget = cdev->gadget;
struct device *dev = &gadget->dev;
struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
struct usb_string *us;
int ret;
ret = afunc_validate_opts(agdev, dev);
if (ret)
return ret;
strings_fn[STR_ASSOC].s = uac2_opts->function_name;
us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
if (IS_ERR(us))
return PTR_ERR(us);
if (FUOUT_EN(uac2_opts)) {
out_feature_unit_desc = build_fu_desc(uac2_opts->c_chmask);
if (!out_feature_unit_desc)
return -ENOMEM;
}
if (FUIN_EN(uac2_opts)) {
in_feature_unit_desc = build_fu_desc(uac2_opts->p_chmask);
if (!in_feature_unit_desc) {
ret = -ENOMEM;
goto err_free_fu;
}
}
iad_desc.iFunction = us[STR_ASSOC].id;
std_ac_if_desc.iInterface = us[STR_IF_CTRL].id;
in_clk_src_desc.iClockSource = us[STR_CLKSRC_IN].id;
out_clk_src_desc.iClockSource = us[STR_CLKSRC_OUT].id;
usb_out_it_desc.iTerminal = us[STR_USB_IT].id;
io_in_it_desc.iTerminal = us[STR_IO_IT].id;
usb_in_ot_desc.iTerminal = us[STR_USB_OT].id;
io_out_ot_desc.iTerminal = us[STR_IO_OT].id;
std_as_out_if0_desc.iInterface = us[STR_AS_OUT_ALT0].id;
std_as_out_if1_desc.iInterface = us[STR_AS_OUT_ALT1].id;
std_as_in_if0_desc.iInterface = us[STR_AS_IN_ALT0].id;
std_as_in_if1_desc.iInterface = us[STR_AS_IN_ALT1].id;
if (FUOUT_EN(uac2_opts)) {
u8 *i_feature = (u8 *)out_feature_unit_desc +
out_feature_unit_desc->bLength - 1;
*i_feature = us[STR_FU_OUT].id;
}
if (FUIN_EN(uac2_opts)) {
u8 *i_feature = (u8 *)in_feature_unit_desc +
in_feature_unit_desc->bLength - 1;
*i_feature = us[STR_FU_IN].id;
}
/* Initialize the configurable parameters */
usb_out_it_desc.bNrChannels = num_channels(uac2_opts->c_chmask);
usb_out_it_desc.bmChannelConfig = cpu_to_le32(uac2_opts->c_chmask);
io_in_it_desc.bNrChannels = num_channels(uac2_opts->p_chmask);
io_in_it_desc.bmChannelConfig = cpu_to_le32(uac2_opts->p_chmask);
as_out_hdr_desc.bNrChannels = num_channels(uac2_opts->c_chmask);
as_out_hdr_desc.bmChannelConfig = cpu_to_le32(uac2_opts->c_chmask);
as_in_hdr_desc.bNrChannels = num_channels(uac2_opts->p_chmask);
as_in_hdr_desc.bmChannelConfig = cpu_to_le32(uac2_opts->p_chmask);
as_out_fmt1_desc.bSubslotSize = uac2_opts->c_ssize;
as_out_fmt1_desc.bBitResolution = uac2_opts->c_ssize * 8;
as_in_fmt1_desc.bSubslotSize = uac2_opts->p_ssize;
as_in_fmt1_desc.bBitResolution = uac2_opts->p_ssize * 8;
if (FUOUT_EN(uac2_opts)) {
__le32 *bma = (__le32 *)&out_feature_unit_desc->bmaControls[0];
u32 control = 0;
if (uac2_opts->c_mute_present)
control |= CONTROL_RDWR << FU_MUTE_CTRL;
if (uac2_opts->c_volume_present)
control |= CONTROL_RDWR << FU_VOL_CTRL;
*bma = cpu_to_le32(control);
}
if (FUIN_EN(uac2_opts)) {
__le32 *bma = (__le32 *)&in_feature_unit_desc->bmaControls[0];
u32 control = 0;
if (uac2_opts->p_mute_present)
control |= CONTROL_RDWR << FU_MUTE_CTRL;
if (uac2_opts->p_volume_present)
control |= CONTROL_RDWR << FU_VOL_CTRL;
*bma = cpu_to_le32(control);
}
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
goto err_free_fu;
}
iad_desc.bFirstInterface = ret;
std_ac_if_desc.bInterfaceNumber = ret;
uac2->ac_intf = ret;
uac2->ac_alt = 0;
if (EPOUT_EN(uac2_opts)) {
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
goto err_free_fu;
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bNumEndpoints = 1;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;
if (EPOUT_FBACK_IN_EN(uac2_opts)) {
fs_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
hs_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
ss_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
std_as_out_if1_desc.bNumEndpoints++;
} else {
fs_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
hs_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
ss_epout_desc.bmAttributes =
USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
}
}
if (EPIN_EN(uac2_opts)) {
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
goto err_free_fu;
}
std_as_in_if0_desc.bInterfaceNumber = ret;
std_as_in_if1_desc.bInterfaceNumber = ret;
uac2->as_in_intf = ret;
uac2->as_in_alt = 0;
}
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
if (!uac2->int_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
ret = -ENODEV;
goto err_free_fu;
}
std_ac_if_desc.bNumEndpoints = 1;
}
hs_epin_desc.bInterval = uac2_opts->p_hs_bint;
ss_epin_desc.bInterval = uac2_opts->p_hs_bint;
hs_epout_desc.bInterval = uac2_opts->c_hs_bint;
ss_epout_desc.bInterval = uac2_opts->c_hs_bint;
/* Calculate wMaxPacketSize according to audio bandwidth */
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &fs_epin_desc,
USB_SPEED_FULL, true);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &fs_epout_desc,
USB_SPEED_FULL, false);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &hs_epin_desc,
USB_SPEED_HIGH, true);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &hs_epout_desc,
USB_SPEED_HIGH, false);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &ss_epin_desc,
USB_SPEED_SUPER, true);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
ret = set_ep_max_packet_size_bint(dev, uac2_opts, &ss_epout_desc,
USB_SPEED_SUPER, false);
if (ret < 0) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
ret = -ENODEV;
goto err_free_fu;
}
if (EPOUT_FBACK_IN_EN(uac2_opts)) {
agdev->in_ep_fback = usb_ep_autoconfig(gadget,
&fs_epin_fback_desc);
if (!agdev->in_ep_fback) {
dev_err(dev, "%s:%d Error!\n",
__func__, __LINE__);
ret = -ENODEV;
goto err_free_fu;
}
}
}
if (EPIN_EN(uac2_opts)) {
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
if (!agdev->in_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
ret = -ENODEV;
goto err_free_fu;
}
}
agdev->in_ep_maxpsize = max_t(u16,
le16_to_cpu(fs_epin_desc.wMaxPacketSize),
le16_to_cpu(hs_epin_desc.wMaxPacketSize));
agdev->out_ep_maxpsize = max_t(u16,
le16_to_cpu(fs_epout_desc.wMaxPacketSize),
le16_to_cpu(hs_epout_desc.wMaxPacketSize));
agdev->in_ep_maxpsize = max_t(u16, agdev->in_ep_maxpsize,
le16_to_cpu(ss_epin_desc.wMaxPacketSize));
agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
le16_to_cpu(ss_epout_desc.wMaxPacketSize));
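	/*
	 * With bMaxBurst = 0 a SuperSpeed isochronous endpoint moves at
	 * most one wMaxPacketSize packet per service interval, so the
	 * companion descriptor's wBytesPerInterval can simply mirror
	 * wMaxPacketSize.
	 */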
ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
// HS and SS endpoint addresses are copied from autoconfigured FS descriptors
hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
ss_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
ss_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
ss_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
ss_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
setup_descriptor(uac2_opts);
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, ss_audio_desc,
ss_audio_desc);
if (ret)
goto err_free_fu;
agdev->gadget = gadget;
agdev->params.p_chmask = uac2_opts->p_chmask;
memcpy(agdev->params.p_srates, uac2_opts->p_srates,
sizeof(agdev->params.p_srates));
agdev->params.p_ssize = uac2_opts->p_ssize;
if (FUIN_EN(uac2_opts)) {
agdev->params.p_fu.id = USB_IN_FU_ID;
agdev->params.p_fu.mute_present = uac2_opts->p_mute_present;
agdev->params.p_fu.volume_present = uac2_opts->p_volume_present;
agdev->params.p_fu.volume_min = uac2_opts->p_volume_min;
agdev->params.p_fu.volume_max = uac2_opts->p_volume_max;
agdev->params.p_fu.volume_res = uac2_opts->p_volume_res;
}
agdev->params.c_chmask = uac2_opts->c_chmask;
memcpy(agdev->params.c_srates, uac2_opts->c_srates,
sizeof(agdev->params.c_srates));
agdev->params.c_ssize = uac2_opts->c_ssize;
if (FUOUT_EN(uac2_opts)) {
agdev->params.c_fu.id = USB_OUT_FU_ID;
agdev->params.c_fu.mute_present = uac2_opts->c_mute_present;
agdev->params.c_fu.volume_present = uac2_opts->c_volume_present;
agdev->params.c_fu.volume_min = uac2_opts->c_volume_min;
agdev->params.c_fu.volume_max = uac2_opts->c_volume_max;
agdev->params.c_fu.volume_res = uac2_opts->c_volume_res;
}
agdev->params.req_number = uac2_opts->req_number;
agdev->params.fb_max = uac2_opts->fb_max;
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts))
agdev->notify = afunc_notify;
ret = g_audio_setup(agdev, "UAC2 PCM", "UAC2_Gadget");
if (ret)
goto err_free_descs;
return 0;
err_free_descs:
usb_free_all_descriptors(fn);
agdev->gadget = NULL;
err_free_fu:
kfree(out_feature_unit_desc);
out_feature_unit_desc = NULL;
kfree(in_feature_unit_desc);
in_feature_unit_desc = NULL;
return ret;
}
static void
afunc_notify_complete(struct usb_ep *_ep, struct usb_request *req)
{
struct g_audio *agdev = req->context;
struct f_uac2 *uac2 = func_to_uac2(&agdev->func);
atomic_dec(&uac2->int_count);
kfree(req->buf);
usb_ep_free_request(_ep, req);
}
static int
afunc_notify(struct g_audio *agdev, int unit_id, int cs)
{
struct f_uac2 *uac2 = func_to_uac2(&agdev->func);
struct usb_request *req;
struct uac2_interrupt_data_msg *msg;
u16 w_index, w_value;
int ret;
if (!uac2->int_ep->enabled)
return 0;
if (atomic_inc_return(&uac2->int_count) > UAC2_DEF_INT_REQ_NUM) {
atomic_dec(&uac2->int_count);
return 0;
}
req = usb_ep_alloc_request(uac2->int_ep, GFP_ATOMIC);
if (req == NULL) {
ret = -ENOMEM;
goto err_dec_int_count;
}
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (msg == NULL) {
ret = -ENOMEM;
goto err_free_request;
}
w_index = unit_id << 8 | uac2->ac_intf;
w_value = cs << 8;
msg->bInfo = 0; /* Non-vendor, interface interrupt */
msg->bAttribute = UAC2_CS_CUR;
msg->wIndex = cpu_to_le16(w_index);
msg->wValue = cpu_to_le16(w_value);
req->length = sizeof(*msg);
req->buf = msg;
req->context = agdev;
req->complete = afunc_notify_complete;
ret = usb_ep_queue(uac2->int_ep, req, GFP_ATOMIC);
if (ret)
goto err_free_msg;
return 0;
err_free_msg:
kfree(msg);
err_free_request:
usb_ep_free_request(uac2->int_ep, req);
err_dec_int_count:
atomic_dec(&uac2->int_count);
return ret;
}
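/*
 * The queued message is the 6-byte interrupt data message defined by
 * the UAC2 spec (6.1). As a hypothetical example, a volume change on
 * feature unit 5 behind AC interface 0 is reported with
 *
 *	bInfo = 0, bAttribute = UAC2_CS_CUR,
 *	wValue = UAC_FU_VOLUME << 8 = 0x0200,
 *	wIndex = 5 << 8 | 0 = 0x0500
 *
 * after which the host re-reads the control with a class-specific CUR
 * request.
 */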
static int
afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = fn->config->cdev;
struct f_uac2 *uac2 = func_to_uac2(fn);
struct g_audio *agdev = func_to_g_audio(fn);
struct usb_gadget *gadget = cdev->gadget;
struct device *dev = &gadget->dev;
int ret = 0;
/* No i/f has more than 2 alt settings */
if (alt > 1) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
if (intf == uac2->ac_intf) {
/* Control I/f has only 1 AltSetting - 0 */
if (alt) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
/* restart interrupt endpoint */
if (uac2->int_ep) {
usb_ep_disable(uac2->int_ep);
config_ep_by_speed(gadget, &agdev->func, uac2->int_ep);
usb_ep_enable(uac2->int_ep);
}
return 0;
}
if (intf == uac2->as_out_intf) {
uac2->as_out_alt = alt;
if (alt)
ret = u_audio_start_capture(&uac2->g_audio);
else
u_audio_stop_capture(&uac2->g_audio);
} else if (intf == uac2->as_in_intf) {
uac2->as_in_alt = alt;
if (alt)
ret = u_audio_start_playback(&uac2->g_audio);
else
u_audio_stop_playback(&uac2->g_audio);
} else {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
return ret;
}
static int
afunc_get_alt(struct usb_function *fn, unsigned intf)
{
struct f_uac2 *uac2 = func_to_uac2(fn);
struct g_audio *agdev = func_to_g_audio(fn);
if (intf == uac2->ac_intf)
return uac2->ac_alt;
else if (intf == uac2->as_out_intf)
return uac2->as_out_alt;
else if (intf == uac2->as_in_intf)
return uac2->as_in_alt;
else
dev_err(&agdev->gadget->dev,
"%s:%d Invalid Interface %d!\n",
__func__, __LINE__, intf);
return -EINVAL;
}
static void
afunc_disable(struct usb_function *fn)
{
struct f_uac2 *uac2 = func_to_uac2(fn);
uac2->as_in_alt = 0;
uac2->as_out_alt = 0;
u_audio_stop_capture(&uac2->g_audio);
u_audio_stop_playback(&uac2->g_audio);
if (uac2->int_ep)
usb_ep_disable(uac2->int_ep);
}
static void
afunc_suspend(struct usb_function *fn)
{
struct f_uac2 *uac2 = func_to_uac2(fn);
u_audio_suspend(&uac2->g_audio);
}
static int
in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
u32 p_srate, c_srate;
u_audio_get_playback_srate(agdev, &p_srate);
u_audio_get_capture_srate(agdev, &c_srate);
if ((entity_id == USB_IN_CLK_ID) || (entity_id == USB_OUT_CLK_ID)) {
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
c.dCUR = cpu_to_le32(c_srate);
value = min_t(unsigned int, w_length, sizeof(c));
memcpy(req->buf, &c, value);
} else if (control_selector == UAC2_CS_CONTROL_CLOCK_VALID) {
*(u8 *)req->buf = 1;
value = min_t(unsigned int, w_length, 1);
} else {
dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_MUTE) {
unsigned int mute;
u_audio_get_mute(agdev, is_playback, &mute);
*(u8 *)req->buf = mute;
value = min_t(unsigned int, w_length, 1);
} else if (control_selector == UAC_FU_VOLUME) {
struct cntrl_cur_lay2 c;
s16 volume;
memset(&c, 0, sizeof(struct cntrl_cur_lay2));
u_audio_get_volume(agdev, is_playback, &volume);
c.wCUR = cpu_to_le16(volume);
value = min_t(unsigned int, w_length, sizeof(c));
memcpy(req->buf, &c, value);
} else {
dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&agdev->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static int
in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
if ((entity_id == USB_IN_CLK_ID) || (entity_id == USB_OUT_CLK_ID)) {
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_ranges_lay3_srates rs;
int i;
int wNumSubRanges = 0;
int srate;
int *srates;
if (entity_id == USB_IN_CLK_ID)
srates = opts->p_srates;
else if (entity_id == USB_OUT_CLK_ID)
srates = opts->c_srates;
else
return -EOPNOTSUPP;
for (i = 0; i < UAC_MAX_RATES; i++) {
srate = srates[i];
if (srate == 0)
break;
rs.r[wNumSubRanges].dMIN = cpu_to_le32(srate);
rs.r[wNumSubRanges].dMAX = cpu_to_le32(srate);
rs.r[wNumSubRanges].dRES = 0;
wNumSubRanges++;
dev_dbg(&agdev->gadget->dev,
"%s(): clk %d: rate ID %d: %d\n",
__func__, entity_id, wNumSubRanges, srate);
}
rs.wNumSubRanges = cpu_to_le16(wNumSubRanges);
value = min_t(unsigned int, w_length, ranges_lay3_size(rs));
			dev_dbg(&agdev->gadget->dev, "%s(): sending %d rates, size %d\n",
				__func__, wNumSubRanges, value);
memcpy(req->buf, &rs, value);
} else {
dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_VOLUME) {
struct cntrl_range_lay2 r;
s16 max_db, min_db, res_db;
if (is_playback) {
max_db = opts->p_volume_max;
min_db = opts->p_volume_min;
res_db = opts->p_volume_res;
} else {
max_db = opts->c_volume_max;
min_db = opts->c_volume_min;
res_db = opts->c_volume_res;
}
r.wMAX = cpu_to_le16(max_db);
r.wMIN = cpu_to_le16(min_db);
r.wRES = cpu_to_le16(res_db);
r.wNumSubRanges = cpu_to_le16(1);
value = min_t(unsigned int, w_length, sizeof(r));
memcpy(req->buf, &r, value);
} else {
dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&agdev->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static int
ac_rq_in(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
if (cr->bRequest == UAC2_CS_CUR)
return in_rq_cur(fn, cr);
else if (cr->bRequest == UAC2_CS_RANGE)
return in_rq_range(fn, cr);
else
return -EOPNOTSUPP;
}
static void uac2_cs_control_sam_freq(struct usb_ep *ep, struct usb_request *req)
{
struct usb_function *fn = ep->driver_data;
struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2 *uac2 = func_to_uac2(fn);
u32 val;
if (req->actual != 4)
return;
val = le32_to_cpu(*((__le32 *)req->buf));
dev_dbg(&agdev->gadget->dev, "%s val: %d.\n", __func__, val);
if (uac2->clock_id == USB_IN_CLK_ID) {
u_audio_set_playback_srate(agdev, val);
} else if (uac2->clock_id == USB_OUT_CLK_ID) {
u_audio_set_capture_srate(agdev, val);
}
}
static void
out_rq_cur_complete(struct usb_ep *ep, struct usb_request *req)
{
struct g_audio *agdev = req->context;
struct usb_composite_dev *cdev = agdev->func.config->cdev;
struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
struct f_uac2 *uac2 = func_to_uac2(&agdev->func);
struct usb_ctrlrequest *cr = &uac2->setup_cr;
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
if (req->status != 0) {
dev_dbg(&cdev->gadget->dev, "completion err %d\n", req->status);
return;
}
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_MUTE) {
u8 mute = *(u8 *)req->buf;
u_audio_set_mute(agdev, is_playback, mute);
return;
} else if (control_selector == UAC_FU_VOLUME) {
struct cntrl_cur_lay2 *c = req->buf;
s16 volume;
volume = le16_to_cpu(c->wCUR);
u_audio_set_volume(agdev, is_playback, volume);
return;
} else {
dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
usb_ep_set_halt(ep);
}
}
}
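/*
 * Control OUT requests are handled in two stages: out_rq_cur() below only
 * validates the request and returns wLength so that the composite core
 * queues the ep0 data stage; the new value is applied afterwards from the
 * request completion handler above.
 */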
static int
out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_composite_dev *cdev = fn->config->cdev;
struct usb_request *req = fn->config->cdev->req;
struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
struct f_uac2 *uac2 = func_to_uac2(fn);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
u8 clock_id = w_index >> 8;
if ((entity_id == USB_IN_CLK_ID) || (entity_id == USB_OUT_CLK_ID)) {
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
dev_dbg(&agdev->gadget->dev,
"control_selector UAC2_CS_CONTROL_SAM_FREQ, clock: %d\n", clock_id);
cdev->gadget->ep0->driver_data = fn;
uac2->clock_id = clock_id;
req->complete = uac2_cs_control_sam_freq;
return w_length;
}
} else if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
memcpy(&uac2->setup_cr, cr, sizeof(*cr));
req->context = agdev;
req->complete = out_rq_cur_complete;
return w_length;
} else {
dev_err(&agdev->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return -EOPNOTSUPP;
}
static int
setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct f_uac2 *uac2 = func_to_uac2(fn);
struct g_audio *agdev = func_to_g_audio(fn);
u16 w_index = le16_to_cpu(cr->wIndex);
u8 intf = w_index & 0xff;
if (intf != uac2->ac_intf) {
dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
return -EOPNOTSUPP;
}
if (cr->bRequestType & USB_DIR_IN)
return ac_rq_in(fn, cr);
else if (cr->bRequest == UAC2_CS_CUR)
return out_rq_cur(fn, cr);
return -EOPNOTSUPP;
}
static int
afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_composite_dev *cdev = fn->config->cdev;
struct g_audio *agdev = func_to_g_audio(fn);
struct usb_request *req = cdev->req;
u16 w_length = le16_to_cpu(cr->wLength);
int value = -EOPNOTSUPP;
	/* Only class-specific requests are supposed to reach here */
if ((cr->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS)
return -EOPNOTSUPP;
if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE)
value = setup_rq_inf(fn, cr);
else
dev_err(&agdev->gadget->dev, "%s:%d Error!\n",
__func__, __LINE__);
if (value >= 0) {
req->length = value;
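		/* a reply shorter than wLength ending on a packet boundary needs a ZLP */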
req->zero = value < w_length;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
req->status = 0;
}
}
return value;
}
static inline struct f_uac2_opts *to_f_uac2_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uac2_opts,
func_inst.group);
}
static void f_uac2_attr_release(struct config_item *item)
{
struct f_uac2_opts *opts = to_f_uac2_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations f_uac2_item_ops = {
.release = f_uac2_attr_release,
};
#define uac2_kstrtou8 kstrtou8
#define uac2_kstrtou32 kstrtou32
#define uac2_kstrtos16 kstrtos16
#define uac2_kstrtobool(s, base, res) kstrtobool((s), (res))
static const char *u8_fmt = "%u\n";
static const char *u32_fmt = "%u\n";
static const char *s16_fmt = "%hd\n";
static const char *bool_fmt = "%u\n";
#define UAC2_ATTRIBUTE(type, name) \
static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = sprintf(page, type##_fmt, opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int ret; \
type num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = uac2_kstrto##type(page, 0, &num); \
if (ret) \
goto end; \
\
opts->name = num; \
ret = len; \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac2_opts_, name)
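/*
 * For example, UAC2_ATTRIBUTE(u32, p_chmask) expands to
 * f_uac2_opts_p_chmask_show()/f_uac2_opts_p_chmask_store() and, via
 * CONFIGFS_ATTR(), the f_uac2_opts_attr_p_chmask attribute listed in
 * f_uac2_attrs[] below.
 */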
#define UAC2_ATTRIBUTE_SYNC(name) \
static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int result; \
char *str; \
\
mutex_lock(&opts->lock); \
switch (opts->name) { \
case USB_ENDPOINT_SYNC_ASYNC: \
str = "async"; \
break; \
case USB_ENDPOINT_SYNC_ADAPTIVE: \
str = "adaptive"; \
break; \
default: \
str = "unknown"; \
break; \
} \
result = sprintf(page, "%s\n", str); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int ret = 0; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
if (!strncmp(page, "async", 5)) \
opts->name = USB_ENDPOINT_SYNC_ASYNC; \
else if (!strncmp(page, "adaptive", 8)) \
opts->name = USB_ENDPOINT_SYNC_ADAPTIVE; \
else { \
ret = -EINVAL; \
goto end; \
} \
\
ret = len; \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac2_opts_, name)
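/*
 * The generated c_sync store accepts the literal strings "async" and
 * "adaptive", e.g. (from userspace, path illustrative):
 *	echo adaptive > functions/uac2.0/c_sync
 */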
#define UAC2_RATE_ATTRIBUTE(name) \
static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int result = 0; \
int i; \
\
mutex_lock(&opts->lock); \
page[0] = '\0'; \
for (i = 0; i < UAC_MAX_RATES; i++) { \
if (opts->name##s[i] == 0) \
break; \
result += sprintf(page + strlen(page), "%u,", \
opts->name##s[i]); \
} \
if (strlen(page) > 0) \
page[strlen(page) - 1] = '\n'; \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
	char *split_page = NULL;					\
	char *cursor;							\
	int ret = -EINVAL;						\
	char *token;							\
	u32 num;							\
	int i;								\
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
	i = 0;								\
	memset(opts->name##s, 0x00, sizeof(opts->name##s));		\
	split_page = kstrdup(page, GFP_KERNEL);				\
	if (!split_page) {						\
		ret = -ENOMEM;						\
		goto end;						\
	}								\
	/* strsep() advances its cursor, so keep split_page for kfree() */ \
	cursor = split_page;						\
	while ((token = strsep(&cursor, ",")) != NULL) {		\
		if (i == UAC_MAX_RATES) {				\
			ret = -EINVAL;					\
			goto end;					\
		}							\
									\
		ret = kstrtou32(token, 0, &num);			\
		if (ret)						\
			goto end;					\
									\
		opts->name##s[i++] = num;				\
		ret = len;						\
	}								\
\
end: \
kfree(split_page); \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac2_opts_, name)
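/*
 * Rate attributes take a comma-separated list of up to UAC_MAX_RATES
 * sample rates, e.g. (illustrative): echo "48000,44100" > p_srate;
 * the first zero entry terminates the list on read.
 */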
#define UAC2_ATTRIBUTE_STRING(name) \
static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = snprintf(page, sizeof(opts->name), "%s", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac2_opts *opts = to_f_uac2_opts(item); \
int ret = 0; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = snprintf(opts->name, min(sizeof(opts->name), len), \
"%s", page); \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac2_opts_, name)
UAC2_ATTRIBUTE(u32, p_chmask);
UAC2_RATE_ATTRIBUTE(p_srate);
UAC2_ATTRIBUTE(u32, p_ssize);
UAC2_ATTRIBUTE(u8, p_hs_bint);
UAC2_ATTRIBUTE(u32, c_chmask);
UAC2_RATE_ATTRIBUTE(c_srate);
UAC2_ATTRIBUTE_SYNC(c_sync);
UAC2_ATTRIBUTE(u32, c_ssize);
UAC2_ATTRIBUTE(u8, c_hs_bint);
UAC2_ATTRIBUTE(u32, req_number);
UAC2_ATTRIBUTE(bool, p_mute_present);
UAC2_ATTRIBUTE(bool, p_volume_present);
UAC2_ATTRIBUTE(s16, p_volume_min);
UAC2_ATTRIBUTE(s16, p_volume_max);
UAC2_ATTRIBUTE(s16, p_volume_res);
UAC2_ATTRIBUTE(bool, c_mute_present);
UAC2_ATTRIBUTE(bool, c_volume_present);
UAC2_ATTRIBUTE(s16, c_volume_min);
UAC2_ATTRIBUTE(s16, c_volume_max);
UAC2_ATTRIBUTE(s16, c_volume_res);
UAC2_ATTRIBUTE(u32, fb_max);
UAC2_ATTRIBUTE_STRING(function_name);
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
&f_uac2_opts_attr_p_srate,
&f_uac2_opts_attr_p_ssize,
&f_uac2_opts_attr_p_hs_bint,
&f_uac2_opts_attr_c_chmask,
&f_uac2_opts_attr_c_srate,
&f_uac2_opts_attr_c_ssize,
&f_uac2_opts_attr_c_hs_bint,
&f_uac2_opts_attr_c_sync,
&f_uac2_opts_attr_req_number,
&f_uac2_opts_attr_fb_max,
&f_uac2_opts_attr_p_mute_present,
&f_uac2_opts_attr_p_volume_present,
&f_uac2_opts_attr_p_volume_min,
&f_uac2_opts_attr_p_volume_max,
&f_uac2_opts_attr_p_volume_res,
&f_uac2_opts_attr_c_mute_present,
&f_uac2_opts_attr_c_volume_present,
&f_uac2_opts_attr_c_volume_min,
&f_uac2_opts_attr_c_volume_max,
&f_uac2_opts_attr_c_volume_res,
&f_uac2_opts_attr_function_name,
NULL,
};
static const struct config_item_type f_uac2_func_type = {
.ct_item_ops = &f_uac2_item_ops,
.ct_attrs = f_uac2_attrs,
.ct_owner = THIS_MODULE,
};
static void afunc_free_inst(struct usb_function_instance *f)
{
struct f_uac2_opts *opts;
opts = container_of(f, struct f_uac2_opts, func_inst);
kfree(opts);
}
static struct usb_function_instance *afunc_alloc_inst(void)
{
struct f_uac2_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = afunc_free_inst;
config_group_init_type_name(&opts->func_inst.group, "",
&f_uac2_func_type);
opts->p_chmask = UAC2_DEF_PCHMASK;
opts->p_srates[0] = UAC2_DEF_PSRATE;
opts->p_ssize = UAC2_DEF_PSSIZE;
opts->p_hs_bint = UAC2_DEF_PHSBINT;
opts->c_chmask = UAC2_DEF_CCHMASK;
opts->c_srates[0] = UAC2_DEF_CSRATE;
opts->c_ssize = UAC2_DEF_CSSIZE;
opts->c_hs_bint = UAC2_DEF_CHSBINT;
opts->c_sync = UAC2_DEF_CSYNC;
opts->p_mute_present = UAC2_DEF_MUTE_PRESENT;
opts->p_volume_present = UAC2_DEF_VOLUME_PRESENT;
opts->p_volume_min = UAC2_DEF_MIN_DB;
opts->p_volume_max = UAC2_DEF_MAX_DB;
opts->p_volume_res = UAC2_DEF_RES_DB;
opts->c_mute_present = UAC2_DEF_MUTE_PRESENT;
opts->c_volume_present = UAC2_DEF_VOLUME_PRESENT;
opts->c_volume_min = UAC2_DEF_MIN_DB;
opts->c_volume_max = UAC2_DEF_MAX_DB;
opts->c_volume_res = UAC2_DEF_RES_DB;
opts->req_number = UAC2_DEF_REQ_NUM;
opts->fb_max = FBACK_FAST_MAX;
snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
return &opts->func_inst;
}
static void afunc_free(struct usb_function *f)
{
struct g_audio *agdev;
struct f_uac2_opts *opts;
agdev = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac2_opts, func_inst);
kfree(agdev);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
}
static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct g_audio *agdev = func_to_g_audio(f);
g_audio_cleanup(agdev);
usb_free_all_descriptors(f);
agdev->gadget = NULL;
kfree(out_feature_unit_desc);
out_feature_unit_desc = NULL;
kfree(in_feature_unit_desc);
in_feature_unit_desc = NULL;
}
static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
{
struct f_uac2 *uac2;
struct f_uac2_opts *opts;
uac2 = kzalloc(sizeof(*uac2), GFP_KERNEL);
if (uac2 == NULL)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_uac2_opts, func_inst);
mutex_lock(&opts->lock);
++opts->refcnt;
mutex_unlock(&opts->lock);
uac2->g_audio.func.name = "uac2_func";
uac2->g_audio.func.bind = afunc_bind;
uac2->g_audio.func.unbind = afunc_unbind;
uac2->g_audio.func.set_alt = afunc_set_alt;
uac2->g_audio.func.get_alt = afunc_get_alt;
uac2->g_audio.func.disable = afunc_disable;
uac2->g_audio.func.suspend = afunc_suspend;
uac2->g_audio.func.setup = afunc_setup;
uac2->g_audio.func.free_func = afunc_free;
return &uac2->g_audio.func;
}
DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yadwinder Singh");
MODULE_AUTHOR("Jaswinder Singh");
MODULE_AUTHOR("Ruslan Bilovol");
| linux-master | drivers/usb/gadget/function/f_uac2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_serial.c - generic USB serial function driver
*
* Copyright (C) 2003 Al Borchers ([email protected])
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include "u_serial.h"
/*
* This function packages a simple "generic serial" port with no real
* control mechanisms, just raw data transfer over two bulk endpoints.
*
* Because it's not standardized, this isn't as interoperable as the
* CDC ACM driver. However, for many purposes it's just as functional
* if you can arrange appropriate host side drivers.
*/
struct f_gser {
struct gserial port;
u8 data_id;
u8 port_num;
};
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
return container_of(f, struct f_gser, port.func);
}
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
static struct usb_interface_descriptor gser_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor gser_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor gser_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *gser_fs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
(struct usb_descriptor_header *) &gser_fs_in_desc,
(struct usb_descriptor_header *) &gser_fs_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor gser_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor gser_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *gser_hs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
(struct usb_descriptor_header *) &gser_hs_in_desc,
(struct usb_descriptor_header *) &gser_hs_out_desc,
NULL,
};
static struct usb_endpoint_descriptor gser_ss_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor gser_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = {
.bLength = sizeof gser_ss_bulk_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_descriptor_header *gser_ss_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
(struct usb_descriptor_header *) &gser_ss_in_desc,
(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &gser_ss_out_desc,
(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
NULL,
};
/* string descriptors: */
static struct usb_string gser_string_defs[] = {
[0].s = "Generic Serial",
{ } /* end of list */
};
static struct usb_gadget_strings gser_string_table = {
.language = 0x0409, /* en-us */
.strings = gser_string_defs,
};
static struct usb_gadget_strings *gser_strings[] = {
&gser_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* we know alt == 0, so this is an activation or a reset */
if (gser->port.in->enabled) {
dev_dbg(&cdev->gadget->dev,
"reset generic ttyGS%d\n", gser->port_num);
gserial_disconnect(&gser->port);
}
if (!gser->port.in->desc || !gser->port.out->desc) {
dev_dbg(&cdev->gadget->dev,
"activate generic ttyGS%d\n", gser->port_num);
if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
}
}
gserial_connect(&gser->port, gser->port_num);
return 0;
}
static void gser_disable(struct usb_function *f)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
dev_dbg(&cdev->gadget->dev,
"generic ttyGS%d deactivated\n", gser->port_num);
gserial_disconnect(&gser->port);
}
/*-------------------------------------------------------------------------*/
/* serial function driver setup/binding */
static int gser_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_gser *gser = func_to_gser(f);
int status;
struct usb_ep *ep;
/* REVISIT might want instance-specific strings to help
* distinguish instances ...
*/
/* maybe allocate device-global string ID */
if (gser_string_defs[0].id == 0) {
status = usb_string_id(c->cdev);
if (status < 0)
return status;
gser_string_defs[0].id = status;
}
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
gser->data_id = status;
gser_interface_desc.bInterfaceNumber = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc);
if (!ep)
goto fail;
gser->port.in = ep;
ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc);
if (!ep)
goto fail;
gser->port.out = ep;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
gser_hs_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress;
gser_hs_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress;
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
gser_ss_function, gser_ss_function);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: IN/%s OUT/%s\n",
gser->port_num,
gser->port.in->name, gser->port.out->name);
return 0;
fail:
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_serial_opts,
func_inst.group);
}
static void serial_attr_release(struct config_item *item)
{
struct f_serial_opts *opts = to_f_serial_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations serial_item_ops = {
.release = serial_attr_release,
};
#ifdef CONFIG_U_SERIAL_CONSOLE
static ssize_t f_serial_console_store(struct config_item *item,
const char *page, size_t count)
{
return gserial_set_console(to_f_serial_opts(item)->port_num,
page, count);
}
static ssize_t f_serial_console_show(struct config_item *item, char *page)
{
return gserial_get_console(to_f_serial_opts(item)->port_num, page);
}
CONFIGFS_ATTR(f_serial_, console);
#endif /* CONFIG_U_SERIAL_CONSOLE */
static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
}
CONFIGFS_ATTR_RO(f_serial_, port_num);
static struct configfs_attribute *serial_attrs[] = {
#ifdef CONFIG_U_SERIAL_CONSOLE
&f_serial_attr_console,
#endif
&f_serial_attr_port_num,
NULL,
};
static const struct config_item_type serial_func_type = {
.ct_item_ops = &serial_item_ops,
	.ct_attrs = serial_attrs,
.ct_owner = THIS_MODULE,
};
static void gser_free_inst(struct usb_function_instance *f)
{
struct f_serial_opts *opts;
opts = container_of(f, struct f_serial_opts, func_inst);
gserial_free_line(opts->port_num);
kfree(opts);
}
static struct usb_function_instance *gser_alloc_inst(void)
{
struct f_serial_opts *opts;
int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = gser_free_inst;
ret = gserial_alloc_line(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
}
config_group_init_type_name(&opts->func_inst.group, "",
&serial_func_type);
return &opts->func_inst;
}
static void gser_free(struct usb_function *f)
{
struct f_gser *serial;
serial = func_to_gser(f);
kfree(serial);
}
static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_gser *gser = func_to_gser(f);
/* Ensure port is disconnected before unbinding */
gserial_disconnect(&gser->port);
usb_free_all_descriptors(f);
}
static void gser_resume(struct usb_function *f)
{
struct f_gser *gser = func_to_gser(f);
gserial_resume(&gser->port);
}
static void gser_suspend(struct usb_function *f)
{
struct f_gser *gser = func_to_gser(f);
gserial_suspend(&gser->port);
}
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
{
struct f_gser *gser;
struct f_serial_opts *opts;
/* allocate and initialize one new instance */
gser = kzalloc(sizeof(*gser), GFP_KERNEL);
if (!gser)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_serial_opts, func_inst);
gser->port_num = opts->port_num;
gser->port.func.name = "gser";
gser->port.func.strings = gser_strings;
gser->port.func.bind = gser_bind;
gser->port.func.unbind = gser_unbind;
gser->port.func.set_alt = gser_set_alt;
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
gser->port.func.resume = gser_resume;
gser->port.func.suspend = gser_suspend;
return &gser->port.func;
}
DECLARE_USB_FUNCTION_INIT(gser, gser_alloc_inst, gser_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Al Borchers");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/function/f_serial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_queue.c -- USB Video Class driver - Buffers management
*
* Copyright (C) 2005-2010
* Laurent Pinchart ([email protected])
*/
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-vmalloc.h>
#include "uvc.h"
/* ------------------------------------------------------------------------
* Video buffers queue management.
*
 * The video queue is initialized by uvcg_queue_init(). The function performs
* basic initialization of the uvc_video_queue struct and never fails.
*
* Video buffers are managed by videobuf2. The driver uses a mutex to protect
* the videobuf2 queue operations by serializing calls to videobuf2 and a
* spinlock to protect the IRQ queue that holds the buffers to be processed by
* the driver.
*/
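/*
 * Concretely: the videobuf2 entry points below run under queue->queue.lock
 * (the mutex passed to uvcg_queue_init()), while every access to
 * queue->irqqueue takes queue->irqlock, as in uvc_buffer_queue().
 */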
/* -----------------------------------------------------------------------------
* videobuf2 queue operations
*/
static int uvc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_video *video = container_of(queue, struct uvc_video, queue);
unsigned int req_size;
unsigned int nreq;
if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
*nbuffers = UVC_MAX_VIDEO_BUFFERS;
*nplanes = 1;
sizes[0] = video->imagesize;
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
* (video->ep->mult);
	/*
	 * Size the request pool on half a frame so that smaller frame
	 * sizes end up needing fewer requests.
	 */
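	/*
	 * Example (illustrative numbers): a 614400-byte 640x480 YUYV frame
	 * with 3072-byte requests (1024-byte packets * burst 3 * mult 1)
	 * gives DIV_ROUND_UP(307200, 3072) = 100, clamped to 64 below.
	 */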
nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
nreq = clamp(nreq, 4U, 64U);
video->uvc_num_requests = nreq;
return 0;
}
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
return -EINVAL;
}
if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
return -ENODEV;
buf->state = UVC_BUF_STATE_QUEUED;
if (queue->use_sg) {
buf->sgt = vb2_dma_sg_plane_desc(vb, 0);
buf->sg = buf->sgt->sgl;
} else {
buf->mem = vb2_plane_vaddr(vb, 0);
}
buf->length = vb2_plane_size(vb, 0);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
buf->bytesused = 0;
else
buf->bytesused = vb2_get_plane_payload(vb, 0);
return 0;
}
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
list_add_tail(&buf->queue, &queue->irqqueue);
} else {
/*
* If the device is disconnected return the buffer to userspace
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&queue->irqlock, flags);
}
static const struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
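/*
 * Note: vb2_ops_wait_prepare()/vb2_ops_wait_finish() drop and reacquire
 * queue->queue.lock around blocking waits in vb2_dqbuf(), so a blocked
 * reader does not starve other queue operations.
 */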
int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
struct mutex *lock)
{
struct uvc_video *video = container_of(queue, struct uvc_video, queue);
struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
int ret;
queue->queue.type = type;
queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue->queue.drv_priv = queue;
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
queue->queue.lock = lock;
if (cdev->gadget->sg_supported) {
queue->queue.mem_ops = &vb2_dma_sg_memops;
queue->use_sg = 1;
} else {
queue->queue.mem_ops = &vb2_vmalloc_memops;
}
queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY
| V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
queue->queue.dev = dev;
ret = vb2_queue_init(&queue->queue);
if (ret)
return ret;
spin_lock_init(&queue->irqlock);
INIT_LIST_HEAD(&queue->irqqueue);
queue->flags = 0;
return 0;
}
/*
* Free the video buffers.
*/
void uvcg_free_buffers(struct uvc_video_queue *queue)
{
vb2_queue_release(&queue->queue);
}
/*
* Allocate the video buffers.
*/
int uvcg_alloc_buffers(struct uvc_video_queue *queue,
struct v4l2_requestbuffers *rb)
{
int ret;
ret = vb2_reqbufs(&queue->queue, rb);
return ret ? ret : rb->count;
}
int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
return vb2_querybuf(&queue->queue, buf);
}
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
return vb2_qbuf(&queue->queue, NULL, buf);
}
/*
* Dequeue a video buffer. If nonblocking is false, block until a buffer is
* available.
*/
int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
int nonblocking)
{
return vb2_dqbuf(&queue->queue, buf, nonblocking);
}
/*
* Poll the video queue.
*
* This function implements video queue polling and is intended to be used by
* the device poll handler.
*/
__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
return vb2_poll(&queue->queue, file, wait);
}
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
return vb2_mmap(&queue->queue, vma);
}
#ifndef CONFIG_MMU
/*
* Get unmapped area.
*
* NO-MMU arch need this function to make mmap() work correctly.
*/
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
unsigned long pgoff)
{
return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif
/*
* Cancel the video buffers queue.
*
* Cancelling the queue marks all buffers on the irq queue as erroneous,
* wakes them up and removes them from the queue.
*
 * If the disconnect parameter is set, further calls to uvcg_queue_buffer()
 * will fail with -ENODEV.
*
* This function acquires the irq spinlock and can be called from interrupt
* context.
*/
void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
struct uvc_buffer *buf;
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
while (!list_empty(&queue->irqqueue)) {
buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
list_del(&buf->queue);
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
}
queue->buf_used = 0;
/*
* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvcg_queue_buffer() and the disconnection event
	 * that could result in an interruptible wait in uvcg_dequeue_buffer().
	 * Do not
* blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
* state outside the queue code.
*/
if (disconnect)
queue->flags |= UVC_QUEUE_DISCONNECTED;
spin_unlock_irqrestore(&queue->irqlock, flags);
}
/*
* Enable or disable the video buffers queue.
*
* The queue must be enabled before starting video acquisition and must be
* disabled after stopping it. This ensures that the video buffers queue
* state can be properly initialized before buffers are accessed from the
* interrupt handler.
*
* Enabling the video queue initializes parameters (such as sequence number,
* sync pattern, ...). If the queue is already enabled, return -EBUSY.
*
* Disabling the video queue cancels the queue and removes all buffers from
* the main queue.
*
* This function can't be called from interrupt context. Use
* uvcg_queue_cancel() instead.
*/
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
{
unsigned long flags;
int ret = 0;
if (enable) {
ret = vb2_streamon(&queue->queue, queue->queue.type);
if (ret < 0)
return ret;
queue->sequence = 0;
queue->buf_used = 0;
queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
} else {
ret = vb2_streamoff(&queue->queue, queue->queue.type);
if (ret < 0)
return ret;
spin_lock_irqsave(&queue->irqlock, flags);
INIT_LIST_HEAD(&queue->irqqueue);
/*
* FIXME: We need to clear the DISCONNECTED flag to ensure that
* applications will be able to queue buffers for the next
* streaming run. However, clearing it here doesn't guarantee
* that the device will be reconnected in the meantime.
*/
queue->flags &= ~UVC_QUEUE_DISCONNECTED;
spin_unlock_irqrestore(&queue->irqlock, flags);
}
return ret;
}
/* called with &queue->irqlock held. */
void uvcg_complete_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf)
{
if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) {
queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
buf->state = UVC_BUF_STATE_ERROR;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = queue->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
{
struct uvc_buffer *buf = NULL;
if (!list_empty(&queue->irqqueue))
buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
return buf;
}
| linux-master | drivers/usb/gadget/function/uvc_queue.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_v4l2.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart ([email protected])
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/usb/g_uvc.h>
#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "f_uvc.h"
#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_v4l2.h"
#include "uvc_configfs.h"
static const struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
{
char guid[16] = UVC_GUID_FORMAT_MJPEG;
const struct uvc_format_desc *format;
struct uvcg_uncompressed *unc;
if (uformat->type == UVCG_UNCOMPRESSED) {
unc = to_uvcg_uncompressed(&uformat->group.cg_item);
if (!unc)
return ERR_PTR(-EINVAL);
memcpy(guid, unc->desc.guidFormat, sizeof(guid));
}
format = uvc_format_by_guid(guid);
if (!format)
return ERR_PTR(-EINVAL);
return format;
}
static int uvc_v4l2_get_bytesperline(struct uvcg_format *uformat,
struct uvcg_frame *uframe)
{
struct uvcg_uncompressed *u;
if (uformat->type == UVCG_UNCOMPRESSED) {
u = to_uvcg_uncompressed(&uformat->group.cg_item);
if (!u)
return 0;
return u->desc.bBitsPerPixel * uframe->frame.w_width / 8;
}
return 0;
}
static int uvc_get_frame_size(struct uvcg_format *uformat,
struct uvcg_frame *uframe)
{
unsigned int bpl = uvc_v4l2_get_bytesperline(uformat, uframe);
return bpl ? bpl * uframe->frame.w_height :
uframe->frame.dw_max_video_frame_buffer_size;
}
static struct uvcg_format *find_format_by_index(struct uvc_device *uvc, int index)
{
struct uvcg_format_ptr *format;
struct uvcg_format *uformat = NULL;
int i = 1;
list_for_each_entry(format, &uvc->header->formats, entry) {
if (index == i) {
uformat = format->fmt;
break;
}
i++;
}
return uformat;
}
static struct uvcg_frame *find_frame_by_index(struct uvc_device *uvc,
struct uvcg_format *uformat,
int index)
{
struct uvcg_format_ptr *format;
struct uvcg_frame_ptr *frame;
struct uvcg_frame *uframe = NULL;
list_for_each_entry(format, &uvc->header->formats, entry) {
if (format->fmt->type != uformat->type)
continue;
list_for_each_entry(frame, &format->fmt->frames, entry) {
if (index == frame->frm->frame.b_frame_index) {
uframe = frame->frm;
break;
}
}
}
return uframe;
}
static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
u32 pixelformat)
{
struct uvcg_format_ptr *format;
struct uvcg_format *uformat = NULL;
list_for_each_entry(format, &uvc->header->formats, entry) {
const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
if (fmtdesc->fcc == pixelformat) {
uformat = format->fmt;
break;
}
}
return uformat;
}
static struct uvcg_frame *find_closest_frame_by_size(struct uvc_device *uvc,
struct uvcg_format *uformat,
u16 rw, u16 rh)
{
struct uvc_video *video = &uvc->video;
struct uvcg_format_ptr *format;
struct uvcg_frame_ptr *frame;
struct uvcg_frame *uframe = NULL;
unsigned int d, maxd;
/* Find the closest image size. The distance between image sizes is
* the size in pixels of the non-overlapping regions between the
* requested size and the frame-specified size.
*/
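	/*
	 * e.g. a 640x480 request against an 800x600 frame overlaps
	 * min(800, 640) * min(600, 480) = 307200 pixels, giving
	 * d = 480000 + 307200 - 2 * 307200 = 172800.
	 */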
maxd = (unsigned int)-1;
list_for_each_entry(format, &uvc->header->formats, entry) {
if (format->fmt->type != uformat->type)
continue;
list_for_each_entry(frame, &format->fmt->frames, entry) {
u16 w, h;
w = frame->frm->frame.w_width;
h = frame->frm->frame.w_height;
d = min(w, rw) * min(h, rh);
d = w*h + rw*rh - 2*d;
if (d < maxd) {
maxd = d;
uframe = frame->frm;
}
if (maxd == 0)
break;
}
}
if (!uframe)
uvcg_dbg(&video->uvc->func, "Unsupported size %ux%u\n", rw, rh);
return uframe;
}
/* --------------------------------------------------------------------------
* Requests handling
*/
static int
uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
struct usb_request *req = uvc->control_req;
if (data->length < 0)
return usb_ep_set_halt(cdev->gadget->ep0);
req->length = min_t(unsigned int, uvc->event_length, data->length);
req->zero = data->length < uvc->event_length;
memcpy(req->buf, data->data, req->length);
return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
}
/* --------------------------------------------------------------------------
* V4L2 ioctls
*/
static int
uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
strscpy(cap->driver, "g_uvc", sizeof(cap->driver));
strscpy(cap->card, cdev->gadget->name, sizeof(cap->card));
strscpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
return 0;
}
static int
uvc_v4l2_get_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
fmt->fmt.pix.pixelformat = video->fcc;
fmt->fmt.pix.width = video->width;
fmt->fmt.pix.height = video->height;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = video->bpp * video->width / 8;
fmt->fmt.pix.sizeimage = video->imagesize;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
fmt->fmt.pix.priv = 0;
return 0;
}
static int
uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
struct uvcg_format *uformat;
struct uvcg_frame *uframe;
u8 *fcc;
if (fmt->type != video->queue.queue.type)
return -EINVAL;
fcc = (u8 *)&fmt->fmt.pix.pixelformat;
uvcg_dbg(&uvc->func, "Trying format 0x%08x (%c%c%c%c): %ux%u\n",
fmt->fmt.pix.pixelformat,
fcc[0], fcc[1], fcc[2], fcc[3],
fmt->fmt.pix.width, fmt->fmt.pix.height);
uformat = find_format_by_pix(uvc, fmt->fmt.pix.pixelformat);
if (!uformat)
return -EINVAL;
uframe = find_closest_frame_by_size(uvc, uformat,
fmt->fmt.pix.width, fmt->fmt.pix.height);
if (!uframe)
return -EINVAL;
fmt->fmt.pix.width = uframe->frame.w_width;
fmt->fmt.pix.height = uframe->frame.w_height;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
fmt->fmt.pix.priv = 0;
return 0;
}
static int
uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret;
ret = uvc_v4l2_try_format(file, fh, fmt);
if (ret)
return ret;
	video->fcc = fmt->fmt.pix.pixelformat;
	video->width = fmt->fmt.pix.width;
	video->height = fmt->fmt.pix.height;
	/* compute bpp from the newly negotiated width, not the stale one */
	video->bpp = fmt->fmt.pix.bytesperline * 8 / video->width;
	video->imagesize = fmt->fmt.pix.sizeimage;
return ret;
}
static int
uvc_v4l2_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *fival)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvcg_format *uformat = NULL;
struct uvcg_frame *uframe = NULL;
struct uvcg_frame_ptr *frame;
uformat = find_format_by_pix(uvc, fival->pixel_format);
if (!uformat)
return -EINVAL;
list_for_each_entry(frame, &uformat->frames, entry) {
if (frame->frm->frame.w_width == fival->width &&
frame->frm->frame.w_height == fival->height) {
uframe = frame->frm;
break;
}
}
if (!uframe)
return -EINVAL;
if (fival->index >= uframe->frame.b_frame_interval_type)
return -EINVAL;
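	/*
	 * dw_frame_interval entries are expressed in 100 ns units, hence
	 * the 10000000 denominator below yields the interval in seconds.
	 */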
fival->discrete.numerator =
uframe->dw_frame_interval[fival->index];
/* TODO: handle V4L2_FRMIVAL_TYPE_STEPWISE */
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete.denominator = 10000000;
v4l2_simplify_fraction(&fival->discrete.numerator,
&fival->discrete.denominator, 8, 333);
return 0;
}
static int
uvc_v4l2_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvcg_format *uformat = NULL;
struct uvcg_frame *uframe = NULL;
uformat = find_format_by_pix(uvc, fsize->pixel_format);
if (!uformat)
return -EINVAL;
if (fsize->index >= uformat->num_frames)
return -EINVAL;
uframe = find_frame_by_index(uvc, uformat, fsize->index + 1);
if (!uframe)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = uframe->frame.w_width;
fsize->discrete.height = uframe->frame.w_height;
return 0;
}
static int
uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
const struct uvc_format_desc *fmtdesc;
struct uvcg_format *uformat;
if (f->index >= uvc->header->num_fmt)
return -EINVAL;
uformat = find_format_by_index(uvc, f->index + 1);
if (!uformat)
return -EINVAL;
fmtdesc = to_uvc_format(uformat);
f->pixelformat = fmtdesc->fcc;
return 0;
}
static int
uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
if (b->type != video->queue.queue.type)
return -EINVAL;
return uvcg_alloc_buffers(&video->queue, b);
}
static int
uvc_v4l2_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
return uvcg_query_buffer(&video->queue, b);
}
static int
uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret;
ret = uvcg_queue_buffer(&video->queue, b);
if (ret < 0)
return ret;
if (uvc->state == UVC_STATE_STREAMING)
queue_work(video->async_wq, &video->pump);
return ret;
}
static int
uvc_v4l2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK);
}
static int
uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret;
if (type != video->queue.queue.type)
return -EINVAL;
/* Enable UVC video. */
ret = uvcg_video_enable(video, 1);
if (ret < 0)
return ret;
/*
* Complete the alternate setting selection setup phase now that
* userspace is ready to provide video frames.
*/
uvc_function_setup_continue(uvc);
uvc->state = UVC_STATE_STREAMING;
return 0;
}
static int
uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
if (type != video->queue.queue.type)
return -EINVAL;
return uvcg_video_enable(video, 0);
}
static int
uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct uvc_device *uvc = video_get_drvdata(fh->vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(fh);
int ret;
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
return -EBUSY;
ret = v4l2_event_subscribe(fh, sub, 2, NULL);
if (ret < 0)
return ret;
if (sub->type == UVC_EVENT_SETUP) {
uvc->func_connected = true;
handle->is_uvc_app_handle = true;
uvc_function_connect(uvc);
}
return 0;
}
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0);
uvcg_free_buffers(&uvc->video.queue);
uvc->func_connected = false;
wake_up_interruptible(&uvc->func_connected_queue);
}
static int
uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct uvc_device *uvc = video_get_drvdata(fh->vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(fh);
int ret;
ret = v4l2_event_unsubscribe(fh, sub);
if (ret < 0)
return ret;
if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
uvc_v4l2_disable(uvc);
handle->is_uvc_app_handle = false;
}
return 0;
}
static long
uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
switch (cmd) {
case UVCIOC_SEND_RESPONSE:
return uvc_send_response(uvc, arg);
default:
return -ENOIOCTLCMD;
}
}
const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = {
.vidioc_querycap = uvc_v4l2_querycap,
.vidioc_try_fmt_vid_out = uvc_v4l2_try_format,
.vidioc_g_fmt_vid_out = uvc_v4l2_get_format,
.vidioc_s_fmt_vid_out = uvc_v4l2_set_format,
.vidioc_enum_frameintervals = uvc_v4l2_enum_frameintervals,
.vidioc_enum_framesizes = uvc_v4l2_enum_framesizes,
.vidioc_enum_fmt_vid_out = uvc_v4l2_enum_format,
.vidioc_reqbufs = uvc_v4l2_reqbufs,
.vidioc_querybuf = uvc_v4l2_querybuf,
.vidioc_qbuf = uvc_v4l2_qbuf,
.vidioc_dqbuf = uvc_v4l2_dqbuf,
.vidioc_streamon = uvc_v4l2_streamon,
.vidioc_streamoff = uvc_v4l2_streamoff,
.vidioc_subscribe_event = uvc_v4l2_subscribe_event,
.vidioc_unsubscribe_event = uvc_v4l2_unsubscribe_event,
.vidioc_default = uvc_v4l2_ioctl_default,
};
/* --------------------------------------------------------------------------
* V4L2
*/
static int
uvc_v4l2_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, vdev);
v4l2_fh_add(&handle->vfh);
handle->device = &uvc->video;
file->private_data = &handle->vfh;
return 0;
}
static int
uvc_v4l2_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
struct uvc_video *video = handle->device;
mutex_lock(&video->mutex);
if (handle->is_uvc_app_handle)
uvc_v4l2_disable(uvc);
mutex_unlock(&video->mutex);
file->private_data = NULL;
v4l2_fh_del(&handle->vfh);
v4l2_fh_exit(&handle->vfh);
kfree(handle);
return 0;
}
static int
uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
static __poll_t
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_poll(&uvc->video.queue, file, wait);
}
#ifndef CONFIG_MMU
static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff);
}
#endif
const struct v4l2_file_operations uvc_v4l2_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
.unlocked_ioctl = video_ioctl2,
.mmap = uvc_v4l2_mmap,
.poll = uvc_v4l2_poll,
#ifndef CONFIG_MMU
.get_unmapped_area = uvcg_v4l2_get_unmapped_area,
#endif
};
| linux-master | drivers/usb/gadget/function/uvc_v4l2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_sourcesink.c - USB peripheral source/sink configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
#include <linux/err.h>
#include "g_zero.h"
#include "u_f.h"
/*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
* controller drivers.
*
* This just sinks bulk packets OUT to the peripheral and sources them IN
* to the host, optionally with specific data patterns for integrity tests.
* As such it supports basic functionality and load tests.
*
* In terms of control messaging, this supports all the standard requests
* plus two that support control-OUT tests. If the optional "autoresume"
* mode is enabled, it provides good functional coverage for the "USBCV"
* test harness from USB-IF.
*/
struct f_sourcesink {
struct usb_function function;
struct usb_ep *in_ep;
struct usb_ep *out_ep;
struct usb_ep *iso_in_ep;
struct usb_ep *iso_out_ep;
int cur_alt;
unsigned pattern;
unsigned isoc_interval;
unsigned isoc_maxpacket;
unsigned isoc_mult;
unsigned isoc_maxburst;
unsigned buflen;
unsigned bulk_qlen;
unsigned iso_qlen;
};
static inline struct f_sourcesink *func_to_ss(struct usb_function *f)
{
return container_of(f, struct f_sourcesink, function);
}
/*-------------------------------------------------------------------------*/
static struct usb_interface_descriptor source_sink_intf_alt0 = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
/* .iInterface = DYNAMIC */
};
static struct usb_interface_descriptor source_sink_intf_alt1 = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
.bNumEndpoints = 4,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
/* .iInterface = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor fs_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor fs_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor fs_iso_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1023),
.bInterval = 4,
};
static struct usb_endpoint_descriptor fs_iso_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1023),
.bInterval = 4,
};
static struct usb_descriptor_header *fs_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &fs_sink_desc,
(struct usb_descriptor_header *) &fs_source_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt1,
#define FS_ALT_IFC_1_OFFSET 3
(struct usb_descriptor_header *) &fs_sink_desc,
(struct usb_descriptor_header *) &fs_source_desc,
(struct usb_descriptor_header *) &fs_iso_sink_desc,
(struct usb_descriptor_header *) &fs_iso_source_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor hs_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor hs_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor hs_iso_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = 4,
};
static struct usb_endpoint_descriptor hs_iso_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = 4,
};
static struct usb_descriptor_header *hs_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &hs_source_desc,
(struct usb_descriptor_header *) &hs_sink_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt1,
#define HS_ALT_IFC_1_OFFSET 3
(struct usb_descriptor_header *) &hs_source_desc,
(struct usb_descriptor_header *) &hs_sink_desc,
(struct usb_descriptor_header *) &hs_iso_source_desc,
(struct usb_descriptor_header *) &hs_iso_sink_desc,
NULL,
};
/* super speed support: */
static struct usb_endpoint_descriptor ss_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
};
static struct usb_endpoint_descriptor ss_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
};
static struct usb_endpoint_descriptor ss_iso_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = 4,
};
static struct usb_ss_ep_comp_descriptor ss_iso_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor ss_iso_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(1024),
.bInterval = 4,
};
static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = cpu_to_le16(1024),
};
static struct usb_descriptor_header *ss_source_sink_descs[] = {
(struct usb_descriptor_header *) &source_sink_intf_alt0,
(struct usb_descriptor_header *) &ss_source_desc,
(struct usb_descriptor_header *) &ss_source_comp_desc,
(struct usb_descriptor_header *) &ss_sink_desc,
(struct usb_descriptor_header *) &ss_sink_comp_desc,
(struct usb_descriptor_header *) &source_sink_intf_alt1,
#define SS_ALT_IFC_1_OFFSET 5
(struct usb_descriptor_header *) &ss_source_desc,
(struct usb_descriptor_header *) &ss_source_comp_desc,
(struct usb_descriptor_header *) &ss_sink_desc,
(struct usb_descriptor_header *) &ss_sink_comp_desc,
(struct usb_descriptor_header *) &ss_iso_source_desc,
(struct usb_descriptor_header *) &ss_iso_source_comp_desc,
(struct usb_descriptor_header *) &ss_iso_sink_desc,
(struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
NULL,
};
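/*
 * The FS/HS/SS_ALT_IFC_1_OFFSET macros above record the index of the
 * alt-setting-1 interface descriptor within each descriptor array.
 * When the UDC lacks isochronous endpoints, sourcesink_bind() stores
 * NULL at that offset, truncating the array so that only altsetting 0
 * (bulk endpoints only) is exported to the host.
 */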
/* function-specific strings: */
static struct usb_string strings_sourcesink[] = {
[0].s = "source and sink data",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_sourcesink = {
.language = 0x0409, /* en-us */
.strings = strings_sourcesink,
};
static struct usb_gadget_strings *sourcesink_strings[] = {
&stringtab_sourcesink,
NULL,
};
/*-------------------------------------------------------------------------*/
static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{
return alloc_ep_req(ep, len);
}
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
{
int value;
value = usb_ep_disable(ep);
if (value < 0)
DBG(cdev, "disable %s --> %d\n", ep->name, value);
}
void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out,
struct usb_ep *iso_in, struct usb_ep *iso_out)
{
disable_ep(cdev, in);
disable_ep(cdev, out);
if (iso_in)
disable_ep(cdev, iso_in);
if (iso_out)
disable_ep(cdev, iso_out);
}
static int
sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_sourcesink *ss = func_to_ss(f);
int id;
int ret;
/* allocate interface ID(s) */
id = usb_interface_id(c, f);
if (id < 0)
return id;
source_sink_intf_alt0.bInterfaceNumber = id;
source_sink_intf_alt1.bInterfaceNumber = id;
/* allocate bulk endpoints */
ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
if (!ss->in_ep) {
autoconf_fail:
ERROR(cdev, "%s: can't autoconfigure on %s\n",
f->name, cdev->gadget->name);
return -ENODEV;
}
ss->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_sink_desc);
if (!ss->out_ep)
goto autoconf_fail;
/* sanity check the isoc module parameters */
if (ss->isoc_interval < 1)
ss->isoc_interval = 1;
if (ss->isoc_interval > 16)
ss->isoc_interval = 16;
if (ss->isoc_mult > 2)
ss->isoc_mult = 2;
if (ss->isoc_maxburst > 15)
ss->isoc_maxburst = 15;
/* fill in the FS isoc descriptors from the module parameters */
fs_iso_source_desc.wMaxPacketSize = ss->isoc_maxpacket > 1023 ?
1023 : ss->isoc_maxpacket;
fs_iso_source_desc.bInterval = ss->isoc_interval;
fs_iso_sink_desc.wMaxPacketSize = ss->isoc_maxpacket > 1023 ?
1023 : ss->isoc_maxpacket;
fs_iso_sink_desc.bInterval = ss->isoc_interval;
/* allocate iso endpoints */
ss->iso_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_source_desc);
if (!ss->iso_in_ep)
goto no_iso;
ss->iso_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_sink_desc);
if (!ss->iso_out_ep) {
usb_ep_autoconfig_release(ss->iso_in_ep);
ss->iso_in_ep = NULL;
no_iso:
/*
* We still want to work even if the UDC doesn't have isoc
* endpoints, so null out the alt interface that contains
* them and continue.
*/
fs_source_sink_descs[FS_ALT_IFC_1_OFFSET] = NULL;
hs_source_sink_descs[HS_ALT_IFC_1_OFFSET] = NULL;
ss_source_sink_descs[SS_ALT_IFC_1_OFFSET] = NULL;
}
if (ss->isoc_maxpacket > 1024)
ss->isoc_maxpacket = 1024;
/* support high speed hardware */
hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
/*
* Fill in the HS isoc descriptors from the module parameters.
* We assume that the user knows what they are doing and won't
* give parameters that their UDC doesn't support.
*/
hs_iso_source_desc.wMaxPacketSize = ss->isoc_maxpacket;
hs_iso_source_desc.wMaxPacketSize |= ss->isoc_mult << 11;
hs_iso_source_desc.bInterval = ss->isoc_interval;
hs_iso_source_desc.bEndpointAddress =
fs_iso_source_desc.bEndpointAddress;
hs_iso_sink_desc.wMaxPacketSize = ss->isoc_maxpacket;
hs_iso_sink_desc.wMaxPacketSize |= ss->isoc_mult << 11;
hs_iso_sink_desc.bInterval = ss->isoc_interval;
hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
/* support super speed hardware */
ss_source_desc.bEndpointAddress =
fs_source_desc.bEndpointAddress;
ss_sink_desc.bEndpointAddress =
fs_sink_desc.bEndpointAddress;
/*
* Fill in the SS isoc descriptors from the module parameters.
* We assume that the user knows what they are doing and won't
* give parameters that their UDC doesn't support.
*/
ss_iso_source_desc.wMaxPacketSize = ss->isoc_maxpacket;
ss_iso_source_desc.bInterval = ss->isoc_interval;
ss_iso_source_comp_desc.bmAttributes = ss->isoc_mult;
ss_iso_source_comp_desc.bMaxBurst = ss->isoc_maxburst;
ss_iso_source_comp_desc.wBytesPerInterval = ss->isoc_maxpacket *
(ss->isoc_mult + 1) * (ss->isoc_maxburst + 1);
ss_iso_source_desc.bEndpointAddress =
fs_iso_source_desc.bEndpointAddress;
ss_iso_sink_desc.wMaxPacketSize = ss->isoc_maxpacket;
ss_iso_sink_desc.bInterval = ss->isoc_interval;
ss_iso_sink_comp_desc.bmAttributes = ss->isoc_mult;
ss_iso_sink_comp_desc.bMaxBurst = ss->isoc_maxburst;
ss_iso_sink_comp_desc.wBytesPerInterval = ss->isoc_maxpacket *
(ss->isoc_mult + 1) * (ss->isoc_maxburst + 1);
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
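	/*
	 * Worked example for the sizing above: with the maximum allowed
	 * parameters isoc_maxpacket = 1024, isoc_mult = 2 and
	 * isoc_maxburst = 15, wBytesPerInterval becomes
	 * 1024 * (2 + 1) * (15 + 1) = 49152 bytes per service interval,
	 * which matches the request size computed for SuperSpeed in
	 * source_sink_start_ep().
	 */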
ret = usb_assign_descriptors(f, fs_source_sink_descs,
hs_source_sink_descs, ss_source_sink_descs,
ss_source_sink_descs);
if (ret)
return ret;
DBG(cdev, "%s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
f->name, ss->in_ep->name, ss->out_ep->name,
ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
return 0;
}
static void
sourcesink_free_func(struct usb_function *f)
{
struct f_ss_opts *opts;
opts = container_of(f->fi, struct f_ss_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt--;
mutex_unlock(&opts->lock);
usb_free_all_descriptors(f);
kfree(func_to_ss(f));
}
/* optionally require specific source/sink data patterns */
static int check_read_data(struct f_sourcesink *ss, struct usb_request *req)
{
unsigned i;
u8 *buf = req->buf;
struct usb_composite_dev *cdev = ss->function.config->cdev;
int max_packet_size = le16_to_cpu(ss->out_ep->desc->wMaxPacketSize);
if (ss->pattern == 2)
return 0;
for (i = 0; i < req->actual; i++, buf++) {
switch (ss->pattern) {
/* all-zeroes has no synchronization issues */
case 0:
if (*buf == 0)
continue;
break;
/* "mod63" stays in sync with short-terminated transfers,
* OR otherwise when host and gadget agree on how large
* each usb transfer request should be. Resync is done
* with set_interface or set_config. (We *WANT* it to
* get quickly out of sync if controllers or their drivers
* stutter for any reason, including buffer duplication...)
*/
case 1:
if (*buf == (u8)((i % max_packet_size) % 63))
continue;
break;
}
ERROR(cdev, "bad OUT byte, buf[%d] = %d\n", i, *buf);
usb_ep_set_halt(ss->out_ep);
return -EINVAL;
}
return 0;
}
static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
{
unsigned i;
u8 *buf = req->buf;
int max_packet_size = le16_to_cpu(ep->desc->wMaxPacketSize);
struct f_sourcesink *ss = ep->driver_data;
switch (ss->pattern) {
case 0:
memset(req->buf, 0, req->length);
break;
case 1:
for (i = 0; i < req->length; i++)
*buf++ = (u8) ((i % max_packet_size) % 63);
break;
case 2:
break;
}
}
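/*
 * Example of the "mod63" pattern (pattern == 1): the byte at transfer
 * offset i is (i % wMaxPacketSize) % 63, so a 512-byte packet carries
 * 0, 1, ... 62, 0, 1, ... and ends mid-cycle at (511 % 63) == 7 before
 * the counter restarts with the next packet.  Because 63 does not
 * divide the packet size, dropped or duplicated data shows up quickly
 * as a pattern mismatch.
 */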
static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
{
struct usb_composite_dev *cdev;
struct f_sourcesink *ss = ep->driver_data;
int status = req->status;
/* driver_data will be null if ep has been disabled */
if (!ss)
return;
cdev = ss->function.config->cdev;
switch (status) {
case 0: /* normal completion? */
if (ep == ss->out_ep) {
check_read_data(ss, req);
if (ss->pattern != 2)
memset(req->buf, 0x55, req->length);
}
break;
/* this endpoint is normally active while we're configured */
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
req->actual, req->length);
if (ep == ss->out_ep)
check_read_data(ss, req);
free_ep_req(ep, req);
return;
case -EOVERFLOW: /* buffer overrun on read means that
* we didn't provide a big enough
* buffer.
*/
default:
		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
				status, req->actual, req->length);
		break;
case -EREMOTEIO: /* short read */
break;
}
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status) {
ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n",
ep->name, req->length, status);
usb_ep_set_halt(ep);
/* FIXME recover later ... somehow */
}
}
static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
bool is_iso, int speed)
{
struct usb_ep *ep;
struct usb_request *req;
int i, size, qlen, status = 0;
if (is_iso) {
switch (speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
size = ss->isoc_maxpacket *
(ss->isoc_mult + 1) *
(ss->isoc_maxburst + 1);
break;
case USB_SPEED_HIGH:
size = ss->isoc_maxpacket * (ss->isoc_mult + 1);
break;
default:
size = ss->isoc_maxpacket > 1023 ?
1023 : ss->isoc_maxpacket;
break;
}
ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
qlen = ss->iso_qlen;
} else {
ep = is_in ? ss->in_ep : ss->out_ep;
qlen = ss->bulk_qlen;
size = ss->buflen;
}
for (i = 0; i < qlen; i++) {
req = ss_alloc_ep_req(ep, size);
if (!req)
return -ENOMEM;
req->complete = source_sink_complete;
if (is_in)
reinit_write_data(ep, req);
else if (ss->pattern != 2)
memset(req->buf, 0x55, req->length);
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status) {
struct usb_composite_dev *cdev;
cdev = ss->function.config->cdev;
ERROR(cdev, "start %s%s %s --> %d\n",
is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
ep->name, status);
free_ep_req(ep, req);
return status;
}
}
return status;
}
static void disable_source_sink(struct f_sourcesink *ss)
{
struct usb_composite_dev *cdev;
cdev = ss->function.config->cdev;
disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
ss->iso_out_ep);
VDBG(cdev, "%s disabled\n", ss->function.name);
}
static int
enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
int alt)
{
int result = 0;
int speed = cdev->gadget->speed;
struct usb_ep *ep;
/* one bulk endpoint writes (sources) zeroes IN (to the host) */
ep = ss->in_ep;
result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
if (result)
return result;
result = usb_ep_enable(ep);
if (result < 0)
return result;
ep->driver_data = ss;
result = source_sink_start_ep(ss, true, false, speed);
if (result < 0) {
fail:
ep = ss->in_ep;
usb_ep_disable(ep);
return result;
}
/* one bulk endpoint reads (sinks) anything OUT (from the host) */
ep = ss->out_ep;
result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
if (result)
goto fail;
result = usb_ep_enable(ep);
if (result < 0)
goto fail;
ep->driver_data = ss;
result = source_sink_start_ep(ss, false, false, speed);
if (result < 0) {
fail2:
ep = ss->out_ep;
usb_ep_disable(ep);
goto fail;
}
if (alt == 0)
goto out;
/* one iso endpoint writes (sources) zeroes IN (to the host) */
ep = ss->iso_in_ep;
if (ep) {
result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
if (result)
goto fail2;
result = usb_ep_enable(ep);
if (result < 0)
goto fail2;
ep->driver_data = ss;
result = source_sink_start_ep(ss, true, true, speed);
if (result < 0) {
fail3:
ep = ss->iso_in_ep;
if (ep)
usb_ep_disable(ep);
goto fail2;
}
}
/* one iso endpoint reads (sinks) anything OUT (from the host) */
ep = ss->iso_out_ep;
if (ep) {
result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
if (result)
goto fail3;
result = usb_ep_enable(ep);
if (result < 0)
goto fail3;
ep->driver_data = ss;
result = source_sink_start_ep(ss, false, true, speed);
if (result < 0) {
usb_ep_disable(ep);
goto fail3;
}
}
out:
ss->cur_alt = alt;
DBG(cdev, "%s enabled, alt intf %d\n", ss->function.name, alt);
return result;
}
static int sourcesink_set_alt(struct usb_function *f,
unsigned intf, unsigned alt)
{
struct f_sourcesink *ss = func_to_ss(f);
struct usb_composite_dev *cdev = f->config->cdev;
disable_source_sink(ss);
return enable_source_sink(cdev, ss, alt);
}
static int sourcesink_get_alt(struct usb_function *f, unsigned intf)
{
struct f_sourcesink *ss = func_to_ss(f);
return ss->cur_alt;
}
static void sourcesink_disable(struct usb_function *f)
{
struct f_sourcesink *ss = func_to_ss(f);
disable_source_sink(ss);
}
/*-------------------------------------------------------------------------*/
static int sourcesink_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct usb_configuration *c = f->config;
struct usb_request *req = c->cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
req->length = USB_COMP_EP0_BUFSIZ;
/* composite driver infrastructure handles everything except
* the two control test requests.
*/
switch (ctrl->bRequest) {
/*
* These are the same vendor-specific requests supported by
* Intel's USB 2.0 compliance test devices. We exceed that
* device spec by allowing multiple-packet requests.
*
* NOTE: the Control-OUT data stays in req->buf ... better
* would be copying it into a scratch buffer, so that other
* requests may safely intervene.
*/
case 0x5b: /* control WRITE test -- fill the buffer */
if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
goto unknown;
if (w_value || w_index)
break;
/* just read that many bytes into the buffer */
if (w_length > req->length)
break;
value = w_length;
break;
case 0x5c: /* control READ test -- return the buffer */
if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
goto unknown;
if (w_value || w_index)
break;
/* expect those bytes are still in the buffer; send back */
if (w_length > req->length)
break;
value = w_length;
break;
default:
unknown:
VDBG(c->cdev,
"unknown control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (value >= 0) {
VDBG(c->cdev, "source/sink req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = 0;
req->length = value;
value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0)
ERROR(c->cdev, "source/sink response, err %d\n",
value);
}
/* device either stalls (value < 0) or reports success */
return value;
}
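/*
 * Illustrative host-side exercise of the two vendor requests above --
 * a sketch using libusb (not part of this driver; device discovery and
 * error handling omitted, and "devh" is a hypothetical open handle):
 *
 *	uint8_t buf[256] = { 0x55 };	// test pattern
 *
 *	// control WRITE test (0x5b): fill the gadget's ep0 buffer
 *	libusb_control_transfer(devh,
 *			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
 *			0x5b, 0, 0, buf, sizeof(buf), 1000);
 *
 *	// control READ test (0x5c): the same bytes should come back
 *	libusb_control_transfer(devh,
 *			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR,
 *			0x5c, 0, 0, buf, sizeof(buf), 1000);
 */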
static struct usb_function *source_sink_alloc_func(
struct usb_function_instance *fi)
{
struct f_sourcesink *ss;
struct f_ss_opts *ss_opts;
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
return ERR_PTR(-ENOMEM);
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
mutex_lock(&ss_opts->lock);
ss_opts->refcnt++;
mutex_unlock(&ss_opts->lock);
ss->pattern = ss_opts->pattern;
ss->isoc_interval = ss_opts->isoc_interval;
ss->isoc_maxpacket = ss_opts->isoc_maxpacket;
ss->isoc_mult = ss_opts->isoc_mult;
ss->isoc_maxburst = ss_opts->isoc_maxburst;
ss->buflen = ss_opts->bulk_buflen;
ss->bulk_qlen = ss_opts->bulk_qlen;
ss->iso_qlen = ss_opts->iso_qlen;
ss->function.name = "source/sink";
ss->function.bind = sourcesink_bind;
ss->function.set_alt = sourcesink_set_alt;
ss->function.get_alt = sourcesink_get_alt;
ss->function.disable = sourcesink_disable;
ss->function.setup = sourcesink_setup;
ss->function.strings = sourcesink_strings;
ss->function.free_func = sourcesink_free_func;
return &ss->function;
}
static inline struct f_ss_opts *to_f_ss_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_ss_opts,
func_inst.group);
}
static void ss_attr_release(struct config_item *item)
{
struct f_ss_opts *ss_opts = to_f_ss_opts(item);
usb_put_function_instance(&ss_opts->func_inst);
}
static struct configfs_item_operations ss_item_ops = {
.release = ss_attr_release,
};
static ssize_t f_ss_opts_pattern_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->pattern);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_pattern_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num != 0 && num != 1 && num != 2) {
ret = -EINVAL;
goto end;
}
opts->pattern = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, pattern);
static ssize_t f_ss_opts_isoc_interval_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->isoc_interval);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_isoc_interval_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num > 16) {
ret = -EINVAL;
goto end;
}
opts->isoc_interval = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, isoc_interval);
static ssize_t f_ss_opts_isoc_maxpacket_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->isoc_maxpacket);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_isoc_maxpacket_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u16 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou16(page, 0, &num);
if (ret)
goto end;
if (num > 1024) {
ret = -EINVAL;
goto end;
}
opts->isoc_maxpacket = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, isoc_maxpacket);
static ssize_t f_ss_opts_isoc_mult_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->isoc_mult);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_isoc_mult_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num > 2) {
ret = -EINVAL;
goto end;
}
opts->isoc_mult = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, isoc_mult);
static ssize_t f_ss_opts_isoc_maxburst_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->isoc_maxburst);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_isoc_maxburst_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
if (num > 15) {
ret = -EINVAL;
goto end;
}
opts->isoc_maxburst = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, isoc_maxburst);
static ssize_t f_ss_opts_bulk_buflen_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->bulk_buflen);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_bulk_buflen_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
opts->bulk_buflen = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, bulk_buflen);
static ssize_t f_ss_opts_bulk_qlen_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->bulk_qlen);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_bulk_qlen_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
opts->bulk_qlen = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, bulk_qlen);
static ssize_t f_ss_opts_iso_qlen_show(struct config_item *item, char *page)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%u\n", opts->iso_qlen);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_ss_opts_iso_qlen_store(struct config_item *item,
const char *page, size_t len)
{
struct f_ss_opts *opts = to_f_ss_opts(item);
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
opts->iso_qlen = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_ss_opts_, iso_qlen);
static struct configfs_attribute *ss_attrs[] = {
&f_ss_opts_attr_pattern,
&f_ss_opts_attr_isoc_interval,
&f_ss_opts_attr_isoc_maxpacket,
&f_ss_opts_attr_isoc_mult,
&f_ss_opts_attr_isoc_maxburst,
&f_ss_opts_attr_bulk_buflen,
&f_ss_opts_attr_bulk_qlen,
&f_ss_opts_attr_iso_qlen,
NULL,
};
static const struct config_item_type ss_func_type = {
.ct_item_ops = &ss_item_ops,
.ct_attrs = ss_attrs,
.ct_owner = THIS_MODULE,
};
static void source_sink_free_instance(struct usb_function_instance *fi)
{
struct f_ss_opts *ss_opts;
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
kfree(ss_opts);
}
static struct usb_function_instance *source_sink_alloc_inst(void)
{
struct f_ss_opts *ss_opts;
ss_opts = kzalloc(sizeof(*ss_opts), GFP_KERNEL);
if (!ss_opts)
return ERR_PTR(-ENOMEM);
mutex_init(&ss_opts->lock);
ss_opts->func_inst.free_func_inst = source_sink_free_instance;
ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
ss_opts->bulk_qlen = GZERO_SS_BULK_QLEN;
ss_opts->iso_qlen = GZERO_SS_ISO_QLEN;
config_group_init_type_name(&ss_opts->func_inst.group, "",
&ss_func_type);
return &ss_opts->func_inst;
}
DECLARE_USB_FUNCTION(SourceSink, source_sink_alloc_inst,
source_sink_alloc_func);
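/*
 * Illustrative configfs usage (a sketch; "g1" is a hypothetical gadget
 * name, and the "SourceSink" directory name comes from
 * DECLARE_USB_FUNCTION() above):
 *
 *	cd /sys/kernel/config/usb_gadget/g1
 *	mkdir functions/SourceSink.0
 *	echo 1 > functions/SourceSink.0/pattern
 *	ln -s functions/SourceSink.0 configs/c.1
 */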
static int __init sslb_modinit(void)
{
int ret;
ret = usb_function_register(&SourceSinkusb_func);
if (ret)
return ret;
ret = lb_modinit();
if (ret)
usb_function_unregister(&SourceSinkusb_func);
return ret;
}
static void __exit sslb_modexit(void)
{
usb_function_unregister(&SourceSinkusb_func);
lb_modexit();
}
module_init(sslb_modinit);
module_exit(sslb_modexit);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_sourcesink.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* u_audio.c -- interface to USB gadget "ALSA sound card" utilities
*
* Copyright (C) 2016
* Author: Ruslan Bilovol <[email protected]>
*
* Sound card implementation was cut-and-pasted with changes
* from f_uac2.c and has:
* Copyright (C) 2011
* Yadwinder Singh ([email protected])
* Jaswinder Singh ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <linux/usb/audio.h>
#include "u_audio.h"
#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
#define PRD_SIZE_MAX PAGE_SIZE
#define MIN_PERIODS 4
enum {
UAC_FBACK_CTRL,
UAC_P_PITCH_CTRL,
UAC_MUTE_CTRL,
UAC_VOLUME_CTRL,
UAC_RATE_CTRL,
};
/* Runtime data params for one stream */
struct uac_rtd_params {
struct snd_uac_chip *uac; /* parent chip */
bool ep_enabled; /* if the ep is enabled */
struct snd_pcm_substream *ss;
/* Ring buffer */
ssize_t hw_ptr;
void *rbuf;
unsigned int pitch; /* Stream pitch ratio to 1000000 */
unsigned int max_psize; /* MaxPacketSize of endpoint */
struct usb_request **reqs;
struct usb_request *req_fback; /* Feedback endpoint request */
bool fb_ep_enabled; /* if the ep is enabled */
/* Volume/Mute controls and their state */
int fu_id; /* Feature Unit ID */
struct snd_kcontrol *snd_kctl_volume;
struct snd_kcontrol *snd_kctl_mute;
s16 volume_min, volume_max, volume_res;
s16 volume;
int mute;
struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */
int srate; /* selected samplerate */
int active; /* playback/capture running */
spinlock_t lock; /* lock for control transfers */
};
struct snd_uac_chip {
struct g_audio *audio_dev;
struct uac_rtd_params p_prm;
struct uac_rtd_params c_prm;
struct snd_card *card;
struct snd_pcm *pcm;
/* pre-calculated values for playback iso completion */
unsigned long long p_residue_mil;
unsigned int p_interval;
unsigned int p_framesize;
};
static const struct snd_pcm_hardware uac_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
| SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
| SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
.buffer_bytes_max = BUFF_SIZE_MAX,
.period_bytes_max = PRD_SIZE_MAX,
.periods_min = MIN_PERIODS,
};
static void u_audio_set_fback_frequency(enum usb_device_speed speed,
struct usb_ep *out_ep,
unsigned long long freq,
unsigned int pitch,
void *buf)
{
u32 ff = 0;
const struct usb_endpoint_descriptor *ep_desc;
	/*
	 * Because the pitch base is 1000000, the final divider here
	 * will be 1000 * 1000000 = 1953125 << 9.
	 *
	 * Instead of dealing with big numbers, let's fold this 9-bit
	 * left shift into the per-speed frequency shifts below.
	 */
if (speed == USB_SPEED_FULL) {
/*
* Full-speed feedback endpoints report frequency
* in samples/frame
* Format is encoded in Q10.10 left-justified in the 24 bits,
* so that it has a Q10.14 format.
*
* ff = (freq << 14) / 1000
*/
freq <<= 5;
} else {
		/*
		 * High-speed feedback endpoints report frequency
		 * in samples/microframe.
		 * Format is encoded in Q12.13 fitted into four bytes so that
		 * the binary point is located between the second and the
		 * third byte (that is, a Q16.16 format).
		 *
		 * ff = (freq << 16) / 8000
		 *
		 * Win10 and OSX UAC2 drivers require the number of samples
		 * per packet in order to honor the feedback value.
		 * Linux snd-usb-audio detects the applied bit-shift
		 * automatically.
		 */
ep_desc = out_ep->desc;
freq <<= 4 + (ep_desc->bInterval - 1);
}
ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
*(__le32 *)buf = cpu_to_le32(ff);
}
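/*
 * Worked example for u_audio_set_fback_frequency(): a full-speed device
 * at 48 kHz with nominal pitch (1000000).  freq <<= 5 gives
 * 48000 * 32 = 1536000, and ff = 1536000 * 1000000 / 1953125
 * = 786432 = 0x000C0000, i.e. 48.0 samples/frame in the left-justified
 * Q10.14 encoding described above.
 */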
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned int pending;
unsigned int hw_ptr;
int status = req->status;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
struct uac_rtd_params *prm = req->context;
struct snd_uac_chip *uac = prm->uac;
unsigned int frames, p_pktsize;
unsigned long long pitched_rate_mil, p_pktsize_residue_mil,
residue_frames_mil, div_result;
/* i/f shutting down */
if (!prm->ep_enabled) {
usb_ep_free_request(ep, req);
return;
}
if (req->status == -ESHUTDOWN)
return;
	/*
	 * We can't really do much about bad xfers.
	 * After all, the ISOCH xfers could fail legitimately.
	 */
if (status)
pr_debug("%s: iso_complete status(%d) %d/%d\n",
__func__, status, req->actual, req->length);
substream = prm->ss;
/* Do nothing if ALSA isn't active */
if (!substream)
goto exit;
snd_pcm_stream_lock(substream);
runtime = substream->runtime;
if (!runtime || !snd_pcm_running(substream)) {
snd_pcm_stream_unlock(substream);
goto exit;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
* For each IN packet, take the quotient of the current data
* rate and the endpoint's interval as the base packet size.
* If there is a residue from this division, add it to the
* residue accumulator.
*/
unsigned long long p_interval_mil = uac->p_interval * 1000000ULL;
pitched_rate_mil = (unsigned long long) prm->srate * prm->pitch;
div_result = pitched_rate_mil;
do_div(div_result, uac->p_interval);
do_div(div_result, 1000000);
frames = (unsigned int) div_result;
pr_debug("p_srate %d, pitch %d, interval_mil %llu, frames %d\n",
prm->srate, prm->pitch, p_interval_mil, frames);
p_pktsize = min_t(unsigned int,
uac->p_framesize * frames,
ep->maxpacket);
if (p_pktsize < ep->maxpacket) {
residue_frames_mil = pitched_rate_mil - frames * p_interval_mil;
p_pktsize_residue_mil = uac->p_framesize * residue_frames_mil;
} else
p_pktsize_residue_mil = 0;
req->length = p_pktsize;
uac->p_residue_mil += p_pktsize_residue_mil;
		/*
		 * Whenever the accumulator p_residue_mil holds at least one
		 * additional sample frame's worth of residue, increase this
		 * packet's size by one frame and drain the accumulator.
		 */
div_result = uac->p_residue_mil;
do_div(div_result, uac->p_interval);
do_div(div_result, 1000000);
if ((unsigned int) div_result >= uac->p_framesize) {
req->length += uac->p_framesize;
uac->p_residue_mil -= uac->p_framesize * p_interval_mil;
pr_debug("increased req length to %d\n", req->length);
}
pr_debug("remains uac->p_residue_mil %llu\n", uac->p_residue_mil);
req->actual = req->length;
}
hw_ptr = prm->hw_ptr;
/* Pack USB load in ALSA ring buffer */
pending = runtime->dma_bytes - hw_ptr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (unlikely(pending < req->actual)) {
memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
memcpy(req->buf + pending, runtime->dma_area,
req->actual - pending);
} else {
memcpy(req->buf, runtime->dma_area + hw_ptr,
req->actual);
}
} else {
if (unlikely(pending < req->actual)) {
memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
memcpy(runtime->dma_area, req->buf + pending,
req->actual - pending);
} else {
memcpy(runtime->dma_area + hw_ptr, req->buf,
req->actual);
}
}
/* update hw_ptr after data is copied to memory */
prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
hw_ptr = prm->hw_ptr;
snd_pcm_stream_unlock(substream);
if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
snd_pcm_period_elapsed(substream);
exit:
if (usb_ep_queue(ep, req, GFP_ATOMIC))
dev_err(uac->card->dev, "%d Error!\n", __LINE__);
}
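/*
 * Worked example for the residue handling above: 44.1 kHz, 16-bit
 * stereo (p_framesize = 4) at high speed with bInterval = 4 gives
 * p_interval = 1000 packets/s.  Each packet then carries 44 frames
 * (176 bytes) and leaves 0.1 frame of residue, so (assuming
 * ep->maxpacket is large enough) every tenth packet the accumulator
 * reaches a full frame and req->length grows by p_framesize to
 * 180 bytes, keeping the long-term rate at exactly 44100 frames/s.
 */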
static void u_audio_iso_fback_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct uac_rtd_params *prm = req->context;
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
int status = req->status;
/* i/f shutting down */
if (!prm->fb_ep_enabled) {
kfree(req->buf);
usb_ep_free_request(ep, req);
return;
}
if (req->status == -ESHUTDOWN)
return;
	/*
	 * We can't really do much about bad xfers.
	 * After all, the ISOCH xfers could fail legitimately.
	 */
if (status)
pr_debug("%s: iso_complete status(%d) %d/%d\n",
__func__, status, req->actual, req->length);
u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
prm->srate, prm->pitch,
req->buf);
if (usb_ep_queue(ep, req, GFP_ATOMIC))
dev_err(uac->card->dev, "%d Error!\n", __LINE__);
}
static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
struct uac_rtd_params *prm;
struct g_audio *audio_dev;
struct uac_params *params;
int err = 0;
audio_dev = uac->audio_dev;
params = &audio_dev->params;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
/* Reset */
prm->hw_ptr = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
prm->ss = substream;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
prm->ss = NULL;
break;
default:
err = -EINVAL;
}
/* Clear buffer after Play stops */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
memset(prm->rbuf, 0, prm->max_psize * params->req_number);
return err;
}
static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
struct uac_rtd_params *prm;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
static u64 uac_ssize_to_fmt(int ssize)
{
u64 ret;
switch (ssize) {
case 3:
ret = SNDRV_PCM_FMTBIT_S24_3LE;
break;
case 4:
ret = SNDRV_PCM_FMTBIT_S32_LE;
break;
default:
ret = SNDRV_PCM_FMTBIT_S16_LE;
break;
}
return ret;
}
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct g_audio *audio_dev;
struct uac_params *params;
struct uac_rtd_params *prm;
int p_ssize, c_ssize;
int p_chmask, c_chmask;
audio_dev = uac->audio_dev;
params = &audio_dev->params;
p_ssize = params->p_ssize;
c_ssize = params->c_ssize;
p_chmask = params->p_chmask;
c_chmask = params->c_chmask;
uac->p_residue_mil = 0;
runtime->hw = uac_pcm_hardware;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
runtime->hw.formats = uac_ssize_to_fmt(p_ssize);
runtime->hw.channels_min = num_channels(p_chmask);
prm = &uac->p_prm;
} else {
runtime->hw.formats = uac_ssize_to_fmt(c_ssize);
runtime->hw.channels_min = num_channels(c_chmask);
prm = &uac->c_prm;
}
runtime->hw.period_bytes_min = 2 * prm->max_psize
/ runtime->hw.periods_min;
runtime->hw.rate_min = prm->srate;
runtime->hw.rate_max = runtime->hw.rate_min;
runtime->hw.channels_max = runtime->hw.channels_min;
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
return 0;
}
/* ALSA cries without these function pointers */
static int uac_pcm_null(struct snd_pcm_substream *substream)
{
return 0;
}
static const struct snd_pcm_ops uac_pcm_ops = {
.open = uac_pcm_open,
.close = uac_pcm_null,
.trigger = uac_pcm_trigger,
.pointer = uac_pcm_pointer,
.prepare = uac_pcm_null,
};
static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
{
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev;
struct uac_params *params;
int i;
if (!prm->ep_enabled)
return;
audio_dev = uac->audio_dev;
params = &audio_dev->params;
for (i = 0; i < params->req_number; i++) {
if (prm->reqs[i]) {
if (usb_ep_dequeue(ep, prm->reqs[i]))
usb_ep_free_request(ep, prm->reqs[i]);
/*
* If usb_ep_dequeue() cannot successfully dequeue the
* request, the request will be freed by the completion
* callback.
*/
prm->reqs[i] = NULL;
}
}
prm->ep_enabled = false;
if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
{
struct snd_uac_chip *uac = prm->uac;
if (!prm->fb_ep_enabled)
return;
if (prm->req_fback) {
if (usb_ep_dequeue(ep, prm->req_fback)) {
kfree(prm->req_fback->buf);
usb_ep_free_request(ep, prm->req_fback);
}
prm->req_fback = NULL;
}
prm->fb_ep_enabled = false;
if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
static void set_active(struct uac_rtd_params *prm, bool active)
{
	/* notifying through the Rate ctrl */
struct snd_kcontrol *kctl = prm->snd_kctl_rate;
unsigned long flags;
spin_lock_irqsave(&prm->lock, flags);
if (prm->active != active) {
prm->active = active;
snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
&kctl->id);
}
spin_unlock_irqrestore(&prm->lock, flags);
}
int u_audio_set_capture_srate(struct g_audio *audio_dev, int srate)
{
struct uac_params *params = &audio_dev->params;
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
int i;
unsigned long flags;
dev_dbg(&audio_dev->gadget->dev, "%s: srate %d\n", __func__, srate);
prm = &uac->c_prm;
for (i = 0; i < UAC_MAX_RATES; i++) {
if (params->c_srates[i] == srate) {
spin_lock_irqsave(&prm->lock, flags);
prm->srate = srate;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
if (params->c_srates[i] == 0)
break;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(u_audio_set_capture_srate);
int u_audio_get_capture_srate(struct g_audio *audio_dev, u32 *val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
prm = &uac->c_prm;
spin_lock_irqsave(&prm->lock, flags);
*val = prm->srate;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_get_capture_srate);
int u_audio_set_playback_srate(struct g_audio *audio_dev, int srate)
{
struct uac_params *params = &audio_dev->params;
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
int i;
unsigned long flags;
dev_dbg(&audio_dev->gadget->dev, "%s: srate %d\n", __func__, srate);
prm = &uac->p_prm;
for (i = 0; i < UAC_MAX_RATES; i++) {
if (params->p_srates[i] == srate) {
spin_lock_irqsave(&prm->lock, flags);
prm->srate = srate;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
if (params->p_srates[i] == 0)
break;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(u_audio_set_playback_srate);
int u_audio_get_playback_srate(struct g_audio *audio_dev, u32 *val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
prm = &uac->p_prm;
spin_lock_irqsave(&prm->lock, flags);
*val = prm->srate;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_get_playback_srate);
int u_audio_start_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct usb_gadget *gadget = audio_dev->gadget;
struct device *dev = &gadget->dev;
struct usb_request *req, *req_fback;
struct usb_ep *ep, *ep_fback;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
int req_len, i;
prm = &uac->c_prm;
dev_dbg(dev, "start capture with rate %d\n", prm->srate);
ep = audio_dev->out_ep;
config_ep_by_speed(gadget, &audio_dev->func, ep);
req_len = ep->maxpacket;
prm->ep_enabled = true;
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
prm->reqs[i] = req;
req->zero = 0;
req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
set_active(&uac->c_prm, true);
ep_fback = audio_dev->in_ep_fback;
if (!ep_fback)
return 0;
/* Setup feedback endpoint */
config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
prm->fb_ep_enabled = true;
usb_ep_enable(ep_fback);
req_len = ep_fback->maxpacket;
req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);
if (req_fback == NULL)
return -ENOMEM;
prm->req_fback = req_fback;
req_fback->zero = 0;
req_fback->context = prm;
req_fback->length = req_len;
req_fback->complete = u_audio_iso_fback_complete;
req_fback->buf = kzalloc(req_len, GFP_ATOMIC);
if (!req_fback->buf)
return -ENOMEM;
	/*
	 * Configure the feedback endpoint's reported frequency.
	 * Always start with the nominal frequency, since its deviation
	 * can't be measured at the start of playback.
	 */
prm->pitch = 1000000;
u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
prm->srate, prm->pitch,
req_fback->buf);
if (usb_ep_queue(ep_fback, req_fback, GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_start_capture);
void u_audio_stop_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
set_active(&uac->c_prm, false);
if (audio_dev->in_ep_fback)
free_ep_fback(&uac->c_prm, audio_dev->in_ep_fback);
free_ep(&uac->c_prm, audio_dev->out_ep);
}
EXPORT_SYMBOL_GPL(u_audio_stop_capture);
int u_audio_start_playback(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct usb_gadget *gadget = audio_dev->gadget;
struct device *dev = &gadget->dev;
struct usb_request *req;
struct usb_ep *ep;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
unsigned int p_pktsize;
prm = &uac->p_prm;
dev_dbg(dev, "start playback with rate %d\n", prm->srate);
ep = audio_dev->in_ep;
config_ep_by_speed(gadget, &audio_dev->func, ep);
ep_desc = ep->desc;
/*
* Always start with original frequency
*/
prm->pitch = 1000000;
/* pre-calculate the playback endpoint's interval */
if (gadget->speed == USB_SPEED_FULL)
factor = 1000;
else
factor = 8000;
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
p_pktsize = min_t(unsigned int,
uac->p_framesize *
(prm->srate / uac->p_interval),
ep->maxpacket);
req_len = p_pktsize;
uac->p_residue_mil = 0;
prm->ep_enabled = true;
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
prm->reqs[i] = req;
req->zero = 0;
req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
set_active(&uac->p_prm, true);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_start_playback);
void u_audio_stop_playback(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
set_active(&uac->p_prm, false);
free_ep(&uac->p_prm, audio_dev->in_ep);
}
EXPORT_SYMBOL_GPL(u_audio_stop_playback);
void u_audio_suspend(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
set_active(&uac->p_prm, false);
set_active(&uac->c_prm, false);
}
EXPORT_SYMBOL_GPL(u_audio_suspend);
int u_audio_get_volume(struct g_audio *audio_dev, int playback, s16 *val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
if (playback)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
spin_lock_irqsave(&prm->lock, flags);
*val = prm->volume;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_get_volume);
int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
int change = 0;
if (playback)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
spin_lock_irqsave(&prm->lock, flags);
val = clamp(val, prm->volume_min, prm->volume_max);
if (prm->volume != val) {
prm->volume = val;
change = 1;
}
spin_unlock_irqrestore(&prm->lock, flags);
if (change)
snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
&prm->snd_kctl_volume->id);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_set_volume);
int u_audio_get_mute(struct g_audio *audio_dev, int playback, int *val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
if (playback)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
spin_lock_irqsave(&prm->lock, flags);
*val = prm->mute;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_get_mute);
int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct uac_rtd_params *prm;
unsigned long flags;
int change = 0;
int mute;
if (playback)
prm = &uac->p_prm;
else
prm = &uac->c_prm;
mute = val ? 1 : 0;
spin_lock_irqsave(&prm->lock, flags);
if (prm->mute != mute) {
prm->mute = mute;
change = 1;
}
spin_unlock_irqrestore(&prm->lock, flags);
if (change)
snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
&prm->snd_kctl_mute->id);
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_set_mute);
static int u_audio_pitch_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
struct uac_params *params = &audio_dev->params;
unsigned int pitch_min, pitch_max;
pitch_min = (1000 - FBACK_SLOW_MAX) * 1000;
pitch_max = (1000 + params->fb_max) * 1000;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = pitch_min;
uinfo->value.integer.max = pitch_max;
uinfo->value.integer.step = 1;
return 0;
}
static int u_audio_pitch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = prm->pitch;
return 0;
}
static int u_audio_pitch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
struct uac_params *params = &audio_dev->params;
unsigned int val;
unsigned int pitch_min, pitch_max;
int change = 0;
pitch_min = (1000 - FBACK_SLOW_MAX) * 1000;
pitch_max = (1000 + params->fb_max) * 1000;
val = ucontrol->value.integer.value[0];
if (val < pitch_min)
val = pitch_min;
if (val > pitch_max)
val = pitch_max;
if (prm->pitch != val) {
prm->pitch = val;
change = 1;
}
return change;
}
static int u_audio_mute_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
uinfo->value.integer.step = 1;
return 0;
}
static int u_audio_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
unsigned long flags;
spin_lock_irqsave(&prm->lock, flags);
ucontrol->value.integer.value[0] = !prm->mute;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
static int u_audio_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
unsigned int val;
unsigned long flags;
int change = 0;
val = !ucontrol->value.integer.value[0];
spin_lock_irqsave(&prm->lock, flags);
if (val != prm->mute) {
prm->mute = val;
change = 1;
}
spin_unlock_irqrestore(&prm->lock, flags);
if (change && audio_dev->notify)
audio_dev->notify(audio_dev, prm->fu_id, UAC_FU_MUTE);
return change;
}
/*
* TLV callback for mixer volume controls
*/
static int u_audio_volume_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *_tlv)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
DECLARE_TLV_DB_MINMAX(scale, 0, 0);
if (size < sizeof(scale))
return -ENOMEM;
/* UAC volume resolution is 1/256 dB, TLV is 1/100 dB */
scale[2] = (prm->volume_min * 100) / 256;
scale[3] = (prm->volume_max * 100) / 256;
if (copy_to_user(_tlv, scale, sizeof(scale)))
return -EFAULT;
return 0;
}
static int u_audio_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max =
(prm->volume_max - prm->volume_min + prm->volume_res - 1)
/ prm->volume_res;
uinfo->value.integer.step = 1;
return 0;
}
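/*
 * Worked example for the volume controls: a feature unit reporting
 * volume_min = -7680, volume_max = 0 and volume_res = 256 (-30..0 dB in
 * UAC's 1/256 dB units, 1 dB steps) yields an ALSA control with integer
 * range 0..30, and u_audio_volume_tlv() reports dB limits of -3000..0
 * in TLV's 1/100 dB units.
 */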
static int u_audio_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
unsigned long flags;
spin_lock_irqsave(&prm->lock, flags);
ucontrol->value.integer.value[0] =
(prm->volume - prm->volume_min) / prm->volume_res;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
static int u_audio_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
unsigned int val;
s16 volume;
unsigned long flags;
int change = 0;
val = ucontrol->value.integer.value[0];
spin_lock_irqsave(&prm->lock, flags);
volume = (val * prm->volume_res) + prm->volume_min;
volume = clamp(volume, prm->volume_min, prm->volume_max);
if (volume != prm->volume) {
prm->volume = volume;
change = 1;
}
spin_unlock_irqrestore(&prm->lock, flags);
if (change && audio_dev->notify)
audio_dev->notify(audio_dev, prm->fu_id, UAC_FU_VOLUME);
return change;
}
static int get_max_srate(const int *srates)
{
int i, max_srate = 0;
for (i = 0; i < UAC_MAX_RATES; i++) {
if (srates[i] == 0)
break;
if (srates[i] > max_srate)
max_srate = srates[i];
}
return max_srate;
}
static int get_min_srate(const int *srates)
{
int i, min_srate = INT_MAX;
for (i = 0; i < UAC_MAX_RATES; i++) {
if (srates[i] == 0)
break;
if (srates[i] < min_srate)
min_srate = srates[i];
}
return min_srate;
}
static int u_audio_rate_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
const int *srates;
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
struct snd_uac_chip *uac = prm->uac;
struct g_audio *audio_dev = uac->audio_dev;
struct uac_params *params = &audio_dev->params;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
if (prm == &uac->c_prm)
srates = params->c_srates;
else
srates = params->p_srates;
uinfo->value.integer.min = get_min_srate(srates);
uinfo->value.integer.max = get_max_srate(srates);
return 0;
}
static int u_audio_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
unsigned long flags;
spin_lock_irqsave(&prm->lock, flags);
if (prm->active)
ucontrol->value.integer.value[0] = prm->srate;
else
/* not active: reporting zero rate */
ucontrol->value.integer.value[0] = 0;
spin_unlock_irqrestore(&prm->lock, flags);
return 0;
}
static struct snd_kcontrol_new u_audio_controls[] = {
[UAC_FBACK_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Capture Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
[UAC_P_PITCH_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Playback Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
[UAC_MUTE_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_mute_info,
.get = u_audio_mute_get,
.put = u_audio_mute_put,
},
[UAC_VOLUME_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_volume_info,
.get = u_audio_volume_get,
.put = u_audio_volume_put,
},
[UAC_RATE_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "", /* will be filled later */
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = u_audio_rate_info,
.get = u_audio_rate_get,
},
};
int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
const char *card_name)
{
struct snd_uac_chip *uac;
struct snd_card *card;
struct snd_pcm *pcm;
struct snd_kcontrol *kctl;
struct uac_params *params;
int p_chmask, c_chmask;
int i, err;
if (!g_audio)
return -EINVAL;
uac = kzalloc(sizeof(*uac), GFP_KERNEL);
if (!uac)
return -ENOMEM;
g_audio->uac = uac;
uac->audio_dev = g_audio;
params = &g_audio->params;
p_chmask = params->p_chmask;
c_chmask = params->c_chmask;
if (c_chmask) {
struct uac_rtd_params *prm = &uac->c_prm;
spin_lock_init(&prm->lock);
uac->c_prm.uac = uac;
prm->max_psize = g_audio->out_ep_maxpsize;
prm->srate = params->c_srates[0];
prm->reqs = kcalloc(params->req_number,
sizeof(struct usb_request *),
GFP_KERNEL);
if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
prm->rbuf = kcalloc(params->req_number, prm->max_psize,
GFP_KERNEL);
if (!prm->rbuf) {
prm->max_psize = 0;
err = -ENOMEM;
goto fail;
}
}
if (p_chmask) {
struct uac_rtd_params *prm = &uac->p_prm;
spin_lock_init(&prm->lock);
uac->p_prm.uac = uac;
prm->max_psize = g_audio->in_ep_maxpsize;
prm->srate = params->p_srates[0];
prm->reqs = kcalloc(params->req_number,
sizeof(struct usb_request *),
GFP_KERNEL);
if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
prm->rbuf = kcalloc(params->req_number, prm->max_psize,
GFP_KERNEL);
if (!prm->rbuf) {
prm->max_psize = 0;
err = -ENOMEM;
goto fail;
}
}
/* Choose any slot, with no id */
err = snd_card_new(&g_audio->gadget->dev,
-1, NULL, THIS_MODULE, 0, &card);
if (err < 0)
goto fail;
uac->card = card;
/*
* Create first PCM device
* Create a substream only for non-zero channel streams
*/
err = snd_pcm_new(uac->card, pcm_name, 0,
p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
if (err < 0)
goto snd_fail;
strscpy(pcm->name, pcm_name, sizeof(pcm->name));
pcm->private_data = uac;
uac->pcm = pcm;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
	/*
	 * Create mixer and controls.
	 * Create them only if the USB side requires them.
	 */
if ((c_chmask && g_audio->in_ep_fback)
|| (p_chmask && params->p_fu.id)
|| (c_chmask && params->c_fu.id))
		strscpy(card->mixername, card_name, sizeof(card->mixername));
if (c_chmask && g_audio->in_ep_fback) {
kctl = snd_ctl_new1(&u_audio_controls[UAC_FBACK_CTRL],
&uac->c_prm);
if (!kctl) {
err = -ENOMEM;
goto snd_fail;
}
kctl->id.device = pcm->device;
kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
}
if (p_chmask) {
kctl = snd_ctl_new1(&u_audio_controls[UAC_P_PITCH_CTRL],
&uac->p_prm);
if (!kctl) {
err = -ENOMEM;
goto snd_fail;
}
kctl->id.device = pcm->device;
kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
}
for (i = 0; i <= SNDRV_PCM_STREAM_LAST; i++) {
struct uac_rtd_params *prm;
struct uac_fu_params *fu;
char ctrl_name[24];
char *direction;
if (!pcm->streams[i].substream_count)
continue;
if (i == SNDRV_PCM_STREAM_PLAYBACK) {
prm = &uac->p_prm;
			fu = &params->p_fu;
direction = "Playback";
} else {
prm = &uac->c_prm;
			fu = &params->c_fu;
direction = "Capture";
}
prm->fu_id = fu->id;
if (fu->mute_present) {
snprintf(ctrl_name, sizeof(ctrl_name),
"PCM %s Switch", direction);
u_audio_controls[UAC_MUTE_CTRL].name = ctrl_name;
kctl = snd_ctl_new1(&u_audio_controls[UAC_MUTE_CTRL],
prm);
if (!kctl) {
err = -ENOMEM;
goto snd_fail;
}
kctl->id.device = pcm->device;
kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
prm->snd_kctl_mute = kctl;
prm->mute = 0;
}
if (fu->volume_present) {
snprintf(ctrl_name, sizeof(ctrl_name),
"PCM %s Volume", direction);
u_audio_controls[UAC_VOLUME_CTRL].name = ctrl_name;
kctl = snd_ctl_new1(&u_audio_controls[UAC_VOLUME_CTRL],
prm);
if (!kctl) {
err = -ENOMEM;
goto snd_fail;
}
kctl->id.device = pcm->device;
kctl->id.subdevice = 0;
kctl->tlv.c = u_audio_volume_tlv;
kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
prm->snd_kctl_volume = kctl;
prm->volume = fu->volume_max;
prm->volume_max = fu->volume_max;
prm->volume_min = fu->volume_min;
prm->volume_res = fu->volume_res;
}
/* Add rate control */
snprintf(ctrl_name, sizeof(ctrl_name),
"%s Rate", direction);
u_audio_controls[UAC_RATE_CTRL].name = ctrl_name;
kctl = snd_ctl_new1(&u_audio_controls[UAC_RATE_CTRL], prm);
if (!kctl) {
err = -ENOMEM;
goto snd_fail;
}
kctl->id.device = pcm->device;
kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
prm->snd_kctl_rate = kctl;
}
strscpy(card->driver, card_name, sizeof(card->driver));
strscpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
if (!err)
return 0;
snd_fail:
snd_card_free(card);
fail:
kfree(uac->p_prm.reqs);
kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
return err;
}
EXPORT_SYMBOL_GPL(g_audio_setup);
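/*
 * Illustrative call sequence from a UAC function driver. This is a
 * sketch only: it assumes g_audio_setup() keeps its usual
 * (g_audio, pcm_name, card_name) form, and the chmask/srate/req_number
 * values below are made up for the example.
 *
 *	g_audio->params.p_chmask = 0x3;		// stereo playback
 *	g_audio->params.p_srates[0] = 48000;
 *	g_audio->params.c_chmask = 0;		// no capture stream
 *	g_audio->params.req_number = 4;
 *	err = g_audio_setup(g_audio, "UAC2 PCM", "UAC2_Gadget");
 *	...
 *	g_audio_cleanup(g_audio);		// on function disable/unbind
 */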
void g_audio_cleanup(struct g_audio *g_audio)
{
struct snd_uac_chip *uac;
struct snd_card *card;
if (!g_audio || !g_audio->uac)
return;
uac = g_audio->uac;
card = uac->card;
if (card)
snd_card_free_when_closed(card);
kfree(uac->p_prm.reqs);
kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
}
EXPORT_SYMBOL_GPL(g_audio_cleanup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("USB gadget \"ALSA sound card\" utilities");
MODULE_AUTHOR("Ruslan Bilovol");
| linux-master | drivers/usb/gadget/function/u_audio.c |
// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/storage.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>
#include "tcm.h"
#include "u_tcm.h"
#include "configfs.h"
#define TPG_INSTANCES 1
struct tpg_instance {
struct usb_function_instance *func_inst;
struct usbg_tpg *tpg;
};
static struct tpg_instance tpg_instances[TPG_INSTANCES];
static DEFINE_MUTEX(tpg_instances_lock);
static inline struct f_uas *to_f_uas(struct usb_function *f)
{
return container_of(f, struct f_uas, function);
}
/* Start bot.c code */
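/*
 * BOT ("Bulk-Only Transport") in brief: the host sends a 31-byte CBW
 * (Command Block Wrapper) on the bulk-OUT pipe, data moves in the
 * direction the CBW announced, and the device answers with a 13-byte
 * CSW (Command Status Wrapper) on the bulk-IN pipe. The CSW Status
 * byte is US_BULK_STAT_OK (0x00), US_BULK_STAT_FAIL (0x01) or
 * US_BULK_STAT_PHASE (0x02), and its Residue field reports how many
 * of the announced bytes were not transferred.
 */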
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
{
int ret;
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
}
static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
return;
}
/* CSW completed, wait for next CBW */
bot_enqueue_cmd_cbw(fu);
}
static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
{
struct bulk_cs_wrap *csw = &fu->bot_status.csw;
int ret;
unsigned int csw_stat;
csw_stat = cmd->csw_code;
csw->Tag = cmd->bot_tag;
csw->Status = csw_stat;
fu->bot_status.req->context = cmd;
ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
if (ret)
pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
}
static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
if (req->status < 0)
pr_err("ERR %s(%d)\n", __func__, __LINE__);
if (cmd->data_len) {
if (cmd->data_len > ep->maxpacket) {
req->length = ep->maxpacket;
cmd->data_len -= ep->maxpacket;
} else {
req->length = cmd->data_len;
cmd->data_len = 0;
}
usb_ep_queue(ep, req, GFP_ATOMIC);
return;
}
bot_enqueue_sense_code(fu, cmd);
}
static void bot_send_bad_status(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct bulk_cs_wrap *csw = &fu->bot_status.csw;
struct usb_request *req;
struct usb_ep *ep;
csw->Residue = cpu_to_le32(cmd->data_len);
if (cmd->data_len) {
if (cmd->is_read) {
ep = fu->ep_in;
req = fu->bot_req_in;
} else {
ep = fu->ep_out;
req = fu->bot_req_out;
}
if (cmd->data_len > fu->ep_in->maxpacket) {
req->length = ep->maxpacket;
cmd->data_len -= ep->maxpacket;
} else {
req->length = cmd->data_len;
cmd->data_len = 0;
}
req->complete = bot_err_compl;
req->context = cmd;
req->buf = fu->cmd.buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
}
}
static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
{
struct f_uas *fu = cmd->fu;
struct bulk_cs_wrap *csw = &fu->bot_status.csw;
int ret;
if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
if (!moved_data && cmd->data_len) {
/*
* The host wants to move data but we don't. Fill or empty the
* pipe and then send the CSW with the residue set.
*/
cmd->csw_code = US_BULK_STAT_OK;
bot_send_bad_status(cmd);
return 0;
}
csw->Tag = cmd->bot_tag;
csw->Residue = cpu_to_le32(0);
csw->Status = US_BULK_STAT_OK;
fu->bot_status.req->context = cmd;
ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
if (ret)
pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
} else {
cmd->csw_code = US_BULK_STAT_FAIL;
bot_send_bad_status(cmd);
}
return 0;
}
/*
* Called after command (no data transfer) or after the write (to device)
* operation is completed
*/
static int bot_send_status_response(struct usbg_cmd *cmd)
{
bool moved_data = false;
if (!cmd->is_read)
moved_data = true;
return bot_send_status(cmd, moved_data);
}
/* Read request completed, now we have to send the CSW */
static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
if (req->status < 0)
pr_err("ERR %s(%d)\n", __func__, __LINE__);
bot_send_status(cmd, true);
}
static int bot_send_read_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
if (!cmd->data_len) {
cmd->csw_code = US_BULK_STAT_PHASE;
bot_send_bad_status(cmd);
return 0;
}
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
sg_copy_to_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
cmd->data_buf,
se_cmd->data_length);
fu->bot_req_in->buf = cmd->data_buf;
} else {
fu->bot_req_in->buf = NULL;
fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
fu->bot_req_in->sg = se_cmd->t_data_sg;
}
fu->bot_req_in->complete = bot_read_compl;
fu->bot_req_in->length = se_cmd->data_length;
fu->bot_req_in->context = cmd;
ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
return 0;
}
static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
cmd->fu = fu;
if (!cmd->data_len) {
cmd->csw_code = US_BULK_STAT_PHASE;
return -EINVAL;
}
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
if (!cmd->data_buf)
return -ENOMEM;
fu->bot_req_out->buf = cmd->data_buf;
} else {
fu->bot_req_out->buf = NULL;
fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
fu->bot_req_out->sg = se_cmd->t_data_sg;
}
fu->bot_req_out->complete = usbg_data_write_cmpl;
fu->bot_req_out->length = se_cmd->data_length;
fu->bot_req_out->context = cmd;
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
wait_for_completion(&cmd->write_complete);
target_execute_cmd(se_cmd);
cleanup:
return ret;
}
static int bot_submit_command(struct f_uas *, void *, unsigned int);
static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
int ret;
fu->flags &= ~USBG_BOT_CMD_PEND;
if (req->status < 0)
return;
ret = bot_submit_command(fu, req->buf, req->actual);
if (ret)
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
}
static int bot_prepare_reqs(struct f_uas *fu)
{
int ret;
fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!fu->bot_req_in)
goto err;
fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
if (!fu->bot_req_out)
goto err_out;
fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
if (!fu->cmd.req)
goto err_cmd;
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!fu->bot_status.req)
goto err_sts;
fu->bot_status.req->buf = &fu->bot_status.csw;
fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
if (!fu->cmd.buf)
goto err_buf;
fu->cmd.req->complete = bot_cmd_complete;
fu->cmd.req->buf = fu->cmd.buf;
fu->cmd.req->length = fu->ep_out->maxpacket;
fu->cmd.req->context = fu;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
kfree(fu->cmd.buf);
fu->cmd.buf = NULL;
err_buf:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
usb_ep_free_request(fu->ep_out, fu->cmd.req);
fu->cmd.req = NULL;
err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
err_out:
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
fu->bot_req_in = NULL;
err:
pr_err("BOT: endpoint setup failed\n");
return -ENOMEM;
}
static void bot_cleanup_old_alt(struct f_uas *fu)
{
if (!(fu->flags & USBG_ENABLED))
return;
usb_ep_disable(fu->ep_in);
usb_ep_disable(fu->ep_out);
if (!fu->bot_req_in)
return;
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
usb_ep_free_request(fu->ep_out, fu->cmd.req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
kfree(fu->cmd.buf);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
fu->cmd.req = NULL;
fu->bot_status.req = NULL;
fu->cmd.buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
{
struct usb_function *f = &fu->function;
struct usb_gadget *gadget = f->config->cdev->gadget;
int ret;
fu->flags = USBG_IS_BOT;
config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB);
ret = usb_ep_enable(fu->ep_in);
if (ret)
goto err_b_in;
config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB);
ret = usb_ep_enable(fu->ep_out);
if (ret)
goto err_b_out;
ret = bot_prepare_reqs(fu);
if (ret)
goto err_wq;
fu->flags |= USBG_ENABLED;
pr_info("Using the BOT protocol\n");
return;
err_wq:
usb_ep_disable(fu->ep_out);
err_b_out:
usb_ep_disable(fu->ep_in);
err_b_in:
fu->flags = USBG_IS_BOT;
}
static int usbg_bot_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct f_uas *fu = to_f_uas(f);
struct usb_composite_dev *cdev = f->config->cdev;
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
int luns;
u8 *ret_lun;
switch (ctrl->bRequest) {
case US_BULK_GET_MAX_LUN:
if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE))
return -ENOTSUPP;
if (w_length < 1)
return -EINVAL;
if (w_value != 0)
return -EINVAL;
luns = atomic_read(&fu->tpg->tpg_port_count);
if (!luns) {
pr_err("No LUNs configured?\n");
return -EINVAL;
}
/*
* If 4 LUNs are present we return 3, i.e. LUNs 0..3 can be
* accessed. The upper limit is 0xf.
*/
luns--;
if (luns > 0xf) {
pr_info_once("Limiting the number of luns to 16\n");
luns = 0xf;
}
ret_lun = cdev->req->buf;
*ret_lun = luns;
cdev->req->length = 1;
return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
case US_BULK_RESET_REQUEST:
/* XXX maybe we should remove previous requests for IN + OUT */
bot_enqueue_cmd_cbw(fu);
return 0;
}
return -ENOTSUPP;
}
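/*
 * For reference, the two class-specific ep0 requests handled above
 * look like this on the wire (per the USB MSC Bulk-Only spec):
 *
 *	Get Max LUN:	bmRequestType 0xA1, bRequest 0xFE
 *			(US_BULK_GET_MAX_LUN), wValue 0,
 *			wIndex <interface>, wLength 1
 *	BOT Reset:	bmRequestType 0x21, bRequest 0xFF
 *			(US_BULK_RESET_REQUEST), wValue 0,
 *			wIndex <interface>, wLength 0
 */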
/* Start uas.c code */
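/*
 * UAS replaces BOT's one-command-at-a-time lockstep with four pipes:
 * a command pipe (bulk-OUT), a status pipe (bulk-IN) and two data
 * pipes. Commands and responses travel as tagged IUs (Information
 * Units). On SuperSpeed the data and status pipes use bulk streams,
 * so several tagged commands can be in flight at once; below USB 3.0
 * there are no streams, so the target first sends a READ_READY or
 * WRITE_READY IU on the status pipe before moving data for a command.
 */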
static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
{
/* We have either all three allocated or none */
if (!stream->req_in)
return;
usb_ep_free_request(fu->ep_in, stream->req_in);
usb_ep_free_request(fu->ep_out, stream->req_out);
usb_ep_free_request(fu->ep_status, stream->req_status);
stream->req_in = NULL;
stream->req_out = NULL;
stream->req_status = NULL;
}
static void uasp_free_cmdreq(struct f_uas *fu)
{
usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
kfree(fu->cmd.buf);
fu->cmd.req = NULL;
fu->cmd.buf = NULL;
}
static void uasp_cleanup_old_alt(struct f_uas *fu)
{
int i;
if (!(fu->flags & USBG_ENABLED))
return;
usb_ep_disable(fu->ep_in);
usb_ep_disable(fu->ep_out);
usb_ep_disable(fu->ep_status);
usb_ep_disable(fu->ep_cmd);
for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
uasp_free_cmdreq(fu);
}
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
static int uasp_prepare_r_request(struct usbg_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
struct uas_stream *stream = cmd->stream;
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
sg_copy_to_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
cmd->data_buf,
se_cmd->data_length);
stream->req_in->buf = cmd->data_buf;
} else {
stream->req_in->buf = NULL;
stream->req_in->num_sgs = se_cmd->t_data_nents;
stream->req_in->sg = se_cmd->t_data_sg;
}
stream->req_in->is_last = 1;
stream->req_in->complete = uasp_status_data_cmpl;
stream->req_in->length = se_cmd->data_length;
stream->req_in->context = cmd;
cmd->state = UASP_SEND_STATUS;
return 0;
}
static void uasp_prepare_status(struct usbg_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct sense_iu *iu = &cmd->sense_iu;
struct uas_stream *stream = cmd->stream;
cmd->state = UASP_QUEUE_COMMAND;
iu->iu_id = IU_ID_STATUS;
iu->tag = cpu_to_be16(cmd->tag);
/*
* iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
*/
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
iu->status = se_cmd->scsi_status;
stream->req_status->is_last = 1;
stream->req_status->context = cmd;
stream->req_status->length = se_cmd->scsi_sense_length + 16;
stream->req_status->buf = iu;
stream->req_status->complete = uasp_status_data_cmpl;
}
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
int ret;
if (req->status < 0)
goto cleanup;
switch (cmd->state) {
case UASP_SEND_DATA:
ret = uasp_prepare_r_request(cmd);
if (ret)
goto cleanup;
ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
if (ret)
pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
break;
case UASP_RECEIVE_DATA:
ret = usbg_prepare_w_request(cmd, stream->req_out);
if (ret)
goto cleanup;
ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
if (ret)
pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
break;
case UASP_SEND_STATUS:
uasp_prepare_status(cmd);
ret = usb_ep_queue(fu->ep_status, stream->req_status,
GFP_ATOMIC);
if (ret)
pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
break;
case UASP_QUEUE_COMMAND:
transport_generic_free_cmd(&cmd->se_cmd, 0);
usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
break;
default:
BUG();
}
return;
cleanup:
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
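/*
 * The completion above drives a small per-command state machine:
 *
 *	UASP_SEND_DATA     -> queue the data-in request, then
 *	UASP_SEND_STATUS   -> queue the sense/status IU, then
 *	UASP_QUEUE_COMMAND -> free the command and re-arm the command ep
 *	UASP_RECEIVE_DATA  -> queue the data-out request (write path)
 *
 * uasp_prepare_r_request() and uasp_prepare_status() advance
 * cmd->state, so each completion only has to queue the next transfer
 * for the tag.
 */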
static int uasp_send_status_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct uas_stream *stream = cmd->stream;
struct sense_iu *iu = &cmd->sense_iu;
iu->tag = cpu_to_be16(cmd->tag);
stream->req_status->complete = uasp_status_data_cmpl;
stream->req_status->context = cmd;
cmd->fu = fu;
uasp_prepare_status(cmd);
return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
}
static int uasp_send_read_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct uas_stream *stream = cmd->stream;
struct sense_iu *iu = &cmd->sense_iu;
int ret;
cmd->fu = fu;
iu->tag = cpu_to_be16(cmd->tag);
if (fu->flags & USBG_USE_STREAMS) {
ret = uasp_prepare_r_request(cmd);
if (ret)
goto out;
ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
if (ret) {
pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
kfree(cmd->data_buf);
cmd->data_buf = NULL;
}
} else {
iu->iu_id = IU_ID_READ_READY;
iu->tag = cpu_to_be16(cmd->tag);
stream->req_status->complete = uasp_status_data_cmpl;
stream->req_status->context = cmd;
cmd->state = UASP_SEND_DATA;
stream->req_status->buf = iu;
stream->req_status->length = sizeof(struct iu);
ret = usb_ep_queue(fu->ep_status, stream->req_status,
GFP_ATOMIC);
if (ret)
pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
}
out:
return ret;
}
static int uasp_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct uas_stream *stream = cmd->stream;
struct sense_iu *iu = &cmd->sense_iu;
int ret;
init_completion(&cmd->write_complete);
cmd->fu = fu;
iu->tag = cpu_to_be16(cmd->tag);
if (fu->flags & USBG_USE_STREAMS) {
ret = usbg_prepare_w_request(cmd, stream->req_out);
if (ret)
goto cleanup;
ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
} else {
iu->iu_id = IU_ID_WRITE_READY;
iu->tag = cpu_to_be16(cmd->tag);
stream->req_status->complete = uasp_status_data_cmpl;
stream->req_status->context = cmd;
cmd->state = UASP_RECEIVE_DATA;
stream->req_status->buf = iu;
stream->req_status->length = sizeof(struct iu);
ret = usb_ep_queue(fu->ep_status, stream->req_status,
GFP_ATOMIC);
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
}
wait_for_completion(&cmd->write_complete);
target_execute_cmd(se_cmd);
cleanup:
return ret;
}
static int usbg_submit_command(struct f_uas *, void *, unsigned int);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
int ret;
if (req->status < 0)
return;
ret = usbg_submit_command(fu, req->buf, req->actual);
/*
* Once we tune for performance, enqueue the command request here again
* so we can receive a second command while we are processing this one.
* Pay attention to properly synchronize the STATUS endpoint with
* DATA IN + OUT so you don't break high speed.
*/
if (!ret)
return;
usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
{
stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!stream->req_in)
goto out;
stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
if (!stream->req_out)
goto err_out;
stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
if (!stream->req_status)
goto err_sts;
return 0;
err_sts:
usb_ep_free_request(fu->ep_out, stream->req_out);
stream->req_out = NULL;
err_out:
usb_ep_free_request(fu->ep_in, stream->req_in);
stream->req_in = NULL;
out:
return -ENOMEM;
}
static int uasp_alloc_cmd(struct f_uas *fu)
{
fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
if (!fu->cmd.req)
goto err;
fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
if (!fu->cmd.buf)
goto err_buf;
fu->cmd.req->complete = uasp_cmd_complete;
fu->cmd.req->buf = fu->cmd.buf;
fu->cmd.req->length = fu->ep_cmd->maxpacket;
fu->cmd.req->context = fu;
return 0;
err_buf:
usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
err:
return -ENOMEM;
}
static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
{
int i;
for (i = 0; i < max_streams; i++) {
struct uas_stream *s = &fu->stream[i];
s->req_in->stream_id = i + 1;
s->req_out->stream_id = i + 1;
s->req_status->stream_id = i + 1;
}
}
static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
int max_streams;
if (fu->flags & USBG_USE_STREAMS)
max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
else
max_streams = 1;
for (i = 0; i < max_streams; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
if (ret)
goto err_cleanup;
}
ret = uasp_alloc_cmd(fu);
if (ret)
goto err_free_stream;
uasp_setup_stream_res(fu, max_streams);
ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
if (ret)
goto err_free_stream;
return 0;
err_free_stream:
uasp_free_cmdreq(fu);
err_cleanup:
if (i) {
do {
uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
i--;
} while (i);
}
pr_err("UASP: endpoint setup failed\n");
return ret;
}
static void uasp_set_alt(struct f_uas *fu)
{
struct usb_function *f = &fu->function;
struct usb_gadget *gadget = f->config->cdev->gadget;
int ret;
fu->flags = USBG_IS_UAS;
if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_in);
if (ret)
goto err_b_in;
config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_out);
if (ret)
goto err_b_out;
config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_cmd);
if (ret)
goto err_cmd;
config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_status);
if (ret)
goto err_status;
ret = uasp_prepare_reqs(fu);
if (ret)
goto err_wq;
fu->flags |= USBG_ENABLED;
pr_info("Using the UAS protocol\n");
return;
err_wq:
usb_ep_disable(fu->ep_status);
err_status:
usb_ep_disable(fu->ep_cmd);
err_cmd:
usb_ep_disable(fu->ep_out);
err_b_out:
usb_ep_disable(fu->ep_in);
err_b_in:
fu->flags = 0;
}
static int get_cmd_dir(const unsigned char *cdb)
{
int ret;
switch (cdb[0]) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case SERVICE_ACTION_IN_16:
case MAINTENANCE_IN:
case PERSISTENT_RESERVE_IN:
case SECURITY_PROTOCOL_IN:
case ACCESS_CONTROL_IN:
case REPORT_LUNS:
case READ_BLOCK_LIMITS:
case READ_POSITION:
case READ_CAPACITY:
case READ_TOC:
case READ_FORMAT_CAPACITIES:
case REQUEST_SENSE:
ret = DMA_FROM_DEVICE;
break;
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case MODE_SELECT:
case MODE_SELECT_10:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
case PERSISTENT_RESERVE_OUT:
case MAINTENANCE_OUT:
case SECURITY_PROTOCOL_OUT:
case ACCESS_CONTROL_OUT:
ret = DMA_TO_DEVICE;
break;
case ALLOW_MEDIUM_REMOVAL:
case TEST_UNIT_READY:
case SYNCHRONIZE_CACHE:
case START_STOP:
case ERASE:
case REZERO_UNIT:
case SEEK_10:
case SPACE:
case VERIFY:
case WRITE_FILEMARKS:
ret = DMA_NONE;
break;
default:
#define CMD_DIR_MSG "target: Unknown data direction for SCSI Opcode 0x%02x\n"
pr_warn(CMD_DIR_MSG, cdb[0]);
#undef CMD_DIR_MSG
ret = -EINVAL;
}
return ret;
}
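/*
 * Example mappings from the table above: READ_10 and INQUIRY move
 * data device-to-host (DMA_FROM_DEVICE), WRITE_10 and MODE_SELECT
 * move data host-to-device (DMA_TO_DEVICE), and TEST_UNIT_READY moves
 * none (DMA_NONE). Unknown opcodes are rejected rather than guessed.
 */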
static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
struct se_cmd *se_cmd = &cmd->se_cmd;
if (req->status < 0) {
pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
goto cleanup;
}
if (req->num_sgs == 0) {
sg_copy_from_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
cmd->data_buf,
se_cmd->data_length);
}
complete(&cmd->write_complete);
return;
cleanup:
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
req->buf = cmd->data_buf;
} else {
req->buf = NULL;
req->num_sgs = se_cmd->t_data_nents;
req->sg = se_cmd->t_data_sg;
}
req->is_last = 1;
req->complete = usbg_data_write_cmpl;
req->length = se_cmd->data_length;
req->context = cmd;
return 0;
}
static int usbg_send_status_response(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
struct f_uas *fu = cmd->fu;
if (fu->flags & USBG_IS_BOT)
return bot_send_status_response(cmd);
else
return uasp_send_status_response(cmd);
}
static int usbg_send_write_request(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
struct f_uas *fu = cmd->fu;
if (fu->flags & USBG_IS_BOT)
return bot_send_write_request(cmd);
else
return uasp_send_write_request(cmd);
}
static int usbg_send_read_response(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
struct f_uas *fu = cmd->fu;
if (fu->flags & USBG_IS_BOT)
return bot_send_read_response(cmd);
else
return uasp_send_read_response(cmd);
}
static void usbg_cmd_work(struct work_struct *work)
{
struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
__target_init_cmd(se_cmd,
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense,
cmd->unpacked_lun, NULL);
goto out;
}
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
cmd->sense_iu.sense, cmd->unpacked_lun, 0,
cmd->prio_attr, dir, flags);
return;
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
{
struct se_session *se_sess = tv_nexus->tvn_se_sess;
struct usbg_cmd *cmd;
int tag, cpu;
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.map_cpu = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
return cmd;
}
static void usbg_release_cmd(struct se_cmd *);
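/*
 * Layout notes for the command IU parsed below: iu_id identifies the
 * IU type, tag is a big-endian 16-bit command tag, and the CDB starts
 * 16 bytes into the IU. The len field carries the number of CDB bytes
 * beyond the first 16 in multiples of 4, which is why the total is
 * computed as "(cmd_iu->len & ~0x3) + 16".
 */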
static int usbg_submit_command(struct f_uas *fu,
void *cmdbuf, unsigned int len)
{
struct command_iu *cmd_iu = cmdbuf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
u32 cmd_len;
u16 scsi_tag;
if (cmd_iu->iu_id != IU_ID_COMMAND) {
pr_err("Unsupported type %d\n", cmd_iu->iu_id);
return -EINVAL;
}
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
return -EINVAL;
}
cmd_len = (cmd_iu->len & ~0x3) + 16;
if (cmd_len > USBG_MAX_CMD)
return -EINVAL;
scsi_tag = be16_to_cpup(&cmd_iu->tag);
cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
if (fu->flags & USBG_USE_STREAMS) {
if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
goto err;
if (!cmd->tag)
cmd->stream = &fu->stream[0];
else
cmd->stream = &fu->stream[cmd->tag - 1];
} else {
cmd->stream = &fu->stream[0];
}
switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
cmd->prio_attr = TCM_HEAD_TAG;
break;
case UAS_ORDERED_TAG:
cmd->prio_attr = TCM_ORDERED_TAG;
break;
case UAS_ACA:
cmd->prio_attr = TCM_ACA_TAG;
break;
default:
pr_debug_once("Unsupported prio_attr: %02x.\n",
cmd_iu->prio_attr);
fallthrough;
case UAS_SIMPLE_TAG:
cmd->prio_attr = TCM_SIMPLE_TAG;
break;
}
cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
INIT_WORK(&cmd->work, usbg_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
return 0;
err:
usbg_release_cmd(&cmd->se_cmd);
return -EINVAL;
}
static void bot_cmd_work(struct work_struct *work)
{
struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
int dir;
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
__target_init_cmd(se_cmd,
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense,
cmd->unpacked_lun, NULL);
goto out;
}
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
cmd->data_len, cmd->prio_attr, dir, 0);
return;
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
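/*
 * The 31-byte CBW consumed below (struct bulk_cb_wrap, multi-byte
 * fields little-endian): Signature "USBC", a host-chosen Tag that is
 * echoed back in the CSW, DataTransferLength, Flags (bit 7 set means
 * data IN, see US_BULK_FLAG_IN), Lun, Length (1..16 valid CDB bytes)
 * and the 16-byte CDB itself.
 */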
static int bot_submit_command(struct f_uas *fu,
void *cmdbuf, unsigned int len)
{
struct bulk_cb_wrap *cbw = cmdbuf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
u32 cmd_len;
if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
pr_err("Wrong signature on CBW\n");
return -EINVAL;
}
if (len != 31) {
pr_err("Wrong length for CBW\n");
return -EINVAL;
}
cmd_len = cbw->Length;
if (cmd_len < 1 || cmd_len > 16)
return -EINVAL;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
return -ENODEV;
}
cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
cmd->bot_tag = cbw->Tag;
cmd->prio_attr = TCM_SIMPLE_TAG;
cmd->unpacked_lun = cbw->Lun;
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
INIT_WORK(&cmd->work, bot_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
return 0;
}
/* Start fabric.c code */
static int usbg_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct usbg_tpg *tpg = container_of(se_tpg,
struct usbg_tpg, se_tpg);
struct usbg_tport *tport = tpg->tport;
return &tport->tport_name[0];
}
static u16 usbg_get_tag(struct se_portal_group *se_tpg)
{
struct usbg_tpg *tpg = container_of(se_tpg,
struct usbg_tpg, se_tpg);
return tpg->tport_tpgt;
}
static void usbg_release_cmd(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
struct se_session *se_sess = se_cmd->se_sess;
kfree(cmd->data_buf);
target_free_tag(se_sess, se_cmd);
}
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
static void usbg_aborted_task(struct se_cmd *se_cmd)
{
}
static const char *usbg_check_wwn(const char *name)
{
const char *n;
unsigned int len;
n = strstr(name, "naa.");
if (!n)
return NULL;
n += 4;
len = strlen(n);
if (len == 0 || len > USBG_NAMELEN - 1)
return NULL;
return n;
}
static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
if (!usbg_check_wwn(name))
return -EINVAL;
return 0;
}
static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
const char *name)
{
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
tport_wwn);
struct usbg_tpg *tpg;
unsigned long tpgt;
int ret;
struct f_tcm_opts *opts;
unsigned i;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
return ERR_PTR(-EINVAL);
ret = -ENODEV;
mutex_lock(&tpg_instances_lock);
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].func_inst && !tpg_instances[i].tpg)
break;
if (i == TPG_INSTANCES)
goto unlock_inst;
opts = container_of(tpg_instances[i].func_inst, struct f_tcm_opts,
func_inst);
mutex_lock(&opts->dep_lock);
if (!opts->ready)
goto unlock_dep;
if (opts->has_dep) {
if (!try_module_get(opts->dependent))
goto unlock_dep;
} else {
ret = configfs_depend_item_unlocked(
wwn->wwn_group.cg_subsys,
&opts->func_inst.group.cg_item);
if (ret)
goto unlock_dep;
}
tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
ret = -ENOMEM;
if (!tpg)
goto unref_dep;
mutex_init(&tpg->tpg_mutex);
atomic_set(&tpg->tpg_port_count, 0);
tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
if (!tpg->workqueue)
goto free_tpg;
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
/*
* SPC doesn't assign a protocol identifier for USB-SCSI, so we
* pretend to be SAS.
*/
ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
if (ret < 0)
goto free_workqueue;
tpg_instances[i].tpg = tpg;
tpg->fi = tpg_instances[i].func_inst;
mutex_unlock(&opts->dep_lock);
mutex_unlock(&tpg_instances_lock);
return &tpg->se_tpg;
free_workqueue:
destroy_workqueue(tpg->workqueue);
free_tpg:
kfree(tpg);
unref_dep:
if (opts->has_dep)
module_put(opts->dependent);
else
configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
unlock_dep:
mutex_unlock(&opts->dep_lock);
unlock_inst:
mutex_unlock(&tpg_instances_lock);
return ERR_PTR(ret);
}
static int tcm_usbg_drop_nexus(struct usbg_tpg *);
static void usbg_drop_tpg(struct se_portal_group *se_tpg)
{
struct usbg_tpg *tpg = container_of(se_tpg,
struct usbg_tpg, se_tpg);
unsigned i;
struct f_tcm_opts *opts;
tcm_usbg_drop_nexus(tpg);
core_tpg_deregister(se_tpg);
destroy_workqueue(tpg->workqueue);
mutex_lock(&tpg_instances_lock);
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].tpg == tpg)
break;
if (i < TPG_INSTANCES) {
tpg_instances[i].tpg = NULL;
opts = container_of(tpg_instances[i].func_inst,
struct f_tcm_opts, func_inst);
mutex_lock(&opts->dep_lock);
if (opts->has_dep)
module_put(opts->dependent);
else
configfs_undepend_item_unlocked(
&opts->func_inst.group.cg_item);
mutex_unlock(&opts->dep_lock);
}
mutex_unlock(&tpg_instances_lock);
kfree(tpg);
}
static struct se_wwn *usbg_make_tport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct usbg_tport *tport;
const char *wwn_name;
u64 wwpn = 0;
wwn_name = usbg_check_wwn(name);
if (!wwn_name)
return ERR_PTR(-EINVAL);
tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
if (!tport)
return ERR_PTR(-ENOMEM);
tport->tport_wwpn = wwpn;
snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wwn_name);
return &tport->tport_wwn;
}
static void usbg_drop_tport(struct se_wwn *wwn)
{
struct usbg_tport *tport = container_of(wwn,
struct usbg_tport, tport_wwn);
kfree(tport);
}
/*
* If somebody feels like dropping the version property, go ahead.
*/
static ssize_t usbg_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "usb-gadget fabric module\n");
}
CONFIGFS_ATTR_RO(usbg_wwn_, version);
static struct configfs_attribute *usbg_wwn_attrs[] = {
&usbg_wwn_attr_version,
NULL,
};
static int usbg_attach(struct usbg_tpg *);
static void usbg_detach(struct usbg_tpg *);
static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
int ret = 0;
if (enable)
ret = usbg_attach(tpg);
else
usbg_detach(tpg);
if (ret)
return ret;
tpg->gadget_connect = enable;
return 0;
}
static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
struct tcm_usbg_nexus *tv_nexus;
ssize_t ret;
mutex_lock(&tpg->tpg_mutex);
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
ret = -ENODEV;
goto out;
}
ret = snprintf(page, PAGE_SIZE, "%s\n",
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
out:
mutex_unlock(&tpg->tpg_mutex);
return ret;
}
static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct usbg_tpg *tpg = container_of(se_tpg,
struct usbg_tpg, se_tpg);
tpg->tpg_nexus = p;
return 0;
}
static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
{
struct tcm_usbg_nexus *tv_nexus;
int ret = 0;
mutex_lock(&tpg->tpg_mutex);
if (tpg->tpg_nexus) {
ret = -EEXIST;
pr_debug("tpg->tpg_nexus already exists\n");
goto out_unlock;
}
tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
if (!tv_nexus) {
ret = -ENOMEM;
goto out_unlock;
}
tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
USB_G_DEFAULT_SESSION_TAGS,
sizeof(struct usbg_cmd),
TARGET_PROT_NORMAL, name,
tv_nexus, usbg_alloc_sess_cb);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
pr_debug(MAKE_NEXUS_MSG, name);
#undef MAKE_NEXUS_MSG
ret = PTR_ERR(tv_nexus->tvn_se_sess);
kfree(tv_nexus);
}
out_unlock:
mutex_unlock(&tpg->tpg_mutex);
return ret;
}
static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
{
struct se_session *se_sess;
struct tcm_usbg_nexus *tv_nexus;
int ret = -ENODEV;
mutex_lock(&tpg->tpg_mutex);
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus)
goto out;
se_sess = tv_nexus->tvn_se_sess;
if (!se_sess)
goto out;
if (atomic_read(&tpg->tpg_port_count)) {
ret = -EPERM;
#define MSG "Unable to remove Host I_T Nexus with active TPG port count: %d\n"
pr_err(MSG, atomic_read(&tpg->tpg_port_count));
#undef MSG
goto out;
}
pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
*/
target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
ret = 0;
out:
mutex_unlock(&tpg->tpg_mutex);
return ret;
}
static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
unsigned char i_port[USBG_NAMELEN], *ptr;
int ret;
if (!strncmp(page, "NULL", 4)) {
ret = tcm_usbg_drop_nexus(tpg);
return (!ret) ? count : ret;
}
if (strlen(page) >= USBG_NAMELEN) {
#define NEXUS_STORE_MSG "Emulated NAA Sas Address: %s, exceeds max: %d\n"
pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
#undef NEXUS_STORE_MSG
return -EINVAL;
}
snprintf(i_port, USBG_NAMELEN, "%s", page);
ptr = strstr(i_port, "naa.");
if (!ptr) {
pr_err("Missing 'naa.' prefix\n");
return -EINVAL;
}
if (i_port[strlen(i_port) - 1] == '\n')
i_port[strlen(i_port) - 1] = '\0';
ret = tcm_usbg_make_nexus(tpg, &i_port[0]);
if (ret < 0)
return ret;
return count;
}
CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
static struct configfs_attribute *usbg_base_attrs[] = {
&tcm_usbg_tpg_attr_nexus,
NULL,
};
static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
{
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
atomic_inc(&tpg->tpg_port_count);
smp_mb__after_atomic();
return 0;
}
static void usbg_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
atomic_dec(&tpg->tpg_port_count);
smp_mb__after_atomic();
}
static int usbg_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
}
static const struct target_core_fabric_ops usbg_ops = {
.module = THIS_MODULE,
.fabric_name = "usb_gadget",
.tpg_get_wwn = usbg_get_fabric_wwn,
.tpg_get_tag = usbg_get_tag,
.tpg_check_demo_mode = usbg_check_true,
.release_cmd = usbg_release_cmd,
.sess_get_initiator_sid = NULL,
.write_pending = usbg_send_write_request,
.queue_data_in = usbg_send_read_response,
.queue_status = usbg_send_status_response,
.queue_tm_rsp = usbg_queue_tm_rsp,
.aborted_task = usbg_aborted_task,
.check_stop_free = usbg_check_stop_free,
.fabric_make_wwn = usbg_make_tport,
.fabric_drop_wwn = usbg_drop_tport,
.fabric_make_tpg = usbg_make_tpg,
.fabric_enable_tpg = usbg_enable_tpg,
.fabric_drop_tpg = usbg_drop_tpg,
.fabric_post_link = usbg_port_link,
.fabric_pre_unlink = usbg_port_unlink,
.fabric_init_nodeacl = usbg_init_nodeacl,
.tfc_wwn_attrs = usbg_wwn_attrs,
.tfc_tpg_base_attrs = usbg_base_attrs,
};
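/*
 * Illustrative configfs bring-up for this fabric. A sketch only: the
 * WWN is made up, $BACKSTORE and the LUN link name depend on the
 * target core configuration, and paths may vary with the targetcli
 * layout in use.
 *
 *	mkdir -p /sys/kernel/config/target/usb_gadget/naa.0123456789abcdef/tpgt_1
 *	echo naa.0123456789abcdef > .../tpgt_1/nexus
 *	ln -s $BACKSTORE .../tpgt_1/lun/lun_0/<link>
 *	echo 1 > .../tpgt_1/enable
 */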
/* Start gadget.c code */
static struct usb_interface_descriptor bot_intf_desc = {
.bLength = sizeof(bot_intf_desc),
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 2,
.bAlternateSetting = USB_G_ALT_INT_BBB,
.bInterfaceClass = USB_CLASS_MASS_STORAGE,
.bInterfaceSubClass = USB_SC_SCSI,
.bInterfaceProtocol = USB_PR_BULK,
};
static struct usb_interface_descriptor uasp_intf_desc = {
.bLength = sizeof(uasp_intf_desc),
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 4,
.bAlternateSetting = USB_G_ALT_INT_UAS,
.bInterfaceClass = USB_CLASS_MASS_STORAGE,
.bInterfaceSubClass = USB_SC_SCSI,
.bInterfaceProtocol = USB_PR_UAS,
};
static struct usb_endpoint_descriptor uasp_bi_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
.bLength = sizeof(uasp_bi_pipe_desc),
.bDescriptorType = USB_DT_PIPE_USAGE,
.bPipeID = DATA_IN_PIPE_ID,
};
static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
.bLength = sizeof(uasp_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
.wBytesPerInterval = 0,
};
static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
.bLength = sizeof(bot_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
};
static struct usb_endpoint_descriptor uasp_bo_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
.bLength = sizeof(uasp_bo_pipe_desc),
.bDescriptorType = USB_DT_PIPE_USAGE,
.bPipeID = DATA_OUT_PIPE_ID,
};
static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
.bLength = sizeof(uasp_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};
static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
.bLength = sizeof(bot_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_endpoint_descriptor uasp_status_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor uasp_fs_status_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
.bLength = sizeof(uasp_status_pipe_desc),
.bDescriptorType = USB_DT_PIPE_USAGE,
.bPipeID = STATUS_PIPE_ID,
};
static struct usb_endpoint_descriptor uasp_ss_status_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
.bLength = sizeof(uasp_status_in_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};
static struct usb_endpoint_descriptor uasp_cmd_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
.bLength = sizeof(uasp_cmd_pipe_desc),
.bDescriptorType = USB_DT_PIPE_USAGE,
.bPipeID = CMD_PIPE_ID,
};
static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
.bLength = sizeof(uasp_cmd_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_descriptor_header *uasp_fs_function_desc[] = {
(struct usb_descriptor_header *) &bot_intf_desc,
(struct usb_descriptor_header *) &uasp_fs_bi_desc,
(struct usb_descriptor_header *) &uasp_fs_bo_desc,
(struct usb_descriptor_header *) &uasp_intf_desc,
(struct usb_descriptor_header *) &uasp_fs_bi_desc,
(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
(struct usb_descriptor_header *) &uasp_fs_bo_desc,
(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
(struct usb_descriptor_header *) &uasp_fs_status_desc,
(struct usb_descriptor_header *) &uasp_status_pipe_desc,
(struct usb_descriptor_header *) &uasp_fs_cmd_desc,
(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
NULL,
};
static struct usb_descriptor_header *uasp_hs_function_desc[] = {
(struct usb_descriptor_header *) &bot_intf_desc,
(struct usb_descriptor_header *) &uasp_bi_desc,
(struct usb_descriptor_header *) &uasp_bo_desc,
(struct usb_descriptor_header *) &uasp_intf_desc,
(struct usb_descriptor_header *) &uasp_bi_desc,
(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
(struct usb_descriptor_header *) &uasp_bo_desc,
(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
(struct usb_descriptor_header *) &uasp_status_desc,
(struct usb_descriptor_header *) &uasp_status_pipe_desc,
(struct usb_descriptor_header *) &uasp_cmd_desc,
(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
NULL,
};
static struct usb_descriptor_header *uasp_ss_function_desc[] = {
(struct usb_descriptor_header *) &bot_intf_desc,
(struct usb_descriptor_header *) &uasp_ss_bi_desc,
(struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
(struct usb_descriptor_header *) &uasp_ss_bo_desc,
(struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
(struct usb_descriptor_header *) &uasp_intf_desc,
(struct usb_descriptor_header *) &uasp_ss_bi_desc,
(struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
(struct usb_descriptor_header *) &uasp_ss_bo_desc,
(struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
(struct usb_descriptor_header *) &uasp_ss_status_desc,
(struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
(struct usb_descriptor_header *) &uasp_status_pipe_desc,
(struct usb_descriptor_header *) &uasp_ss_cmd_desc,
(struct usb_descriptor_header *) &uasp_cmd_comp_desc,
(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
NULL,
};
static struct usb_string tcm_us_strings[] = {
[USB_G_STR_INT_UAS].s = "USB Attached SCSI",
[USB_G_STR_INT_BBB].s = "Bulk Only Transport",
{ },
};
static struct usb_gadget_strings tcm_stringtab = {
.language = 0x0409,
.strings = tcm_us_strings,
};
static struct usb_gadget_strings *tcm_strings[] = {
&tcm_stringtab,
NULL,
};
static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
struct usb_string *us;
struct usb_gadget *gadget = c->cdev->gadget;
struct usb_ep *ep;
struct f_tcm_opts *opts;
int iface;
int ret;
opts = container_of(f->fi, struct f_tcm_opts, func_inst);
mutex_lock(&opts->dep_lock);
if (!opts->can_attach) {
mutex_unlock(&opts->dep_lock);
return -ENODEV;
}
mutex_unlock(&opts->dep_lock);
us = usb_gstrings_attach(c->cdev, tcm_strings,
ARRAY_SIZE(tcm_us_strings));
if (IS_ERR(us))
return PTR_ERR(us);
bot_intf_desc.iInterface = us[USB_G_STR_INT_BBB].id;
uasp_intf_desc.iInterface = us[USB_G_STR_INT_UAS].id;
iface = usb_interface_id(c, f);
if (iface < 0)
return iface;
bot_intf_desc.bInterfaceNumber = iface;
uasp_intf_desc.bInterfaceNumber = iface;
fu->iface = iface;
ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
&uasp_bi_ep_comp_desc);
if (!ep)
goto ep_fail;
fu->ep_in = ep;
ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
&uasp_bo_ep_comp_desc);
if (!ep)
goto ep_fail;
fu->ep_out = ep;
ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
&uasp_status_in_ep_comp_desc);
if (!ep)
goto ep_fail;
fu->ep_status = ep;
ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
&uasp_cmd_comp_desc);
if (!ep)
goto ep_fail;
fu->ep_cmd = ep;
/* Assume endpoint addresses are the same for both speeds */
uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
uasp_status_desc.bEndpointAddress =
uasp_ss_status_desc.bEndpointAddress;
uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
uasp_fs_status_desc.bEndpointAddress =
uasp_ss_status_desc.bEndpointAddress;
uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
uasp_hs_function_desc, uasp_ss_function_desc,
uasp_ss_function_desc);
if (ret)
goto ep_fail;
return 0;
ep_fail:
pr_err("Can't claim all required eps\n");
return -ENOTSUPP;
}
struct guas_setup_wq {
struct work_struct work;
struct f_uas *fu;
unsigned int alt;
};
static void tcm_delayed_set_alt(struct work_struct *wq)
{
struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
work);
struct f_uas *fu = work->fu;
int alt = work->alt;
kfree(work);
if (fu->flags & USBG_IS_BOT)
bot_cleanup_old_alt(fu);
if (fu->flags & USBG_IS_UAS)
uasp_cleanup_old_alt(fu);
if (alt == USB_G_ALT_INT_BBB)
bot_set_alt(fu);
else if (alt == USB_G_ALT_INT_UAS)
uasp_set_alt(fu);
usb_composite_setup_continue(fu->function.config->cdev);
}
static int tcm_get_alt(struct usb_function *f, unsigned intf)
{
if (intf == bot_intf_desc.bInterfaceNumber)
return USB_G_ALT_INT_BBB;
if (intf == uasp_intf_desc.bInterfaceNumber)
return USB_G_ALT_INT_UAS;
return -EOPNOTSUPP;
}
static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
struct guas_setup_wq *work;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;
INIT_WORK(&work->work, tcm_delayed_set_alt);
work->fu = fu;
work->alt = alt;
schedule_work(&work->work);
return USB_GADGET_DELAYED_STATUS;
}
return -EOPNOTSUPP;
}
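/*
 * Why the bounce through a workqueue: set_alt() is called from ep0
 * handling where we must not sleep, but enabling the endpoints and
 * allocating the request pools uses GFP_KERNEL. Returning
 * USB_GADGET_DELAYED_STATUS parks the control status stage until
 * tcm_delayed_set_alt() finishes and calls
 * usb_composite_setup_continue().
 */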
static void tcm_disable(struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
if (fu->flags & USBG_IS_UAS)
uasp_cleanup_old_alt(fu);
else if (fu->flags & USBG_IS_BOT)
bot_cleanup_old_alt(fu);
fu->flags = 0;
}
static int tcm_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct f_uas *fu = to_f_uas(f);
if (!(fu->flags & USBG_IS_BOT))
return -EOPNOTSUPP;
return usbg_bot_setup(f, ctrl);
}
static inline struct f_tcm_opts *to_f_tcm_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_tcm_opts,
func_inst.group);
}
static void tcm_attr_release(struct config_item *item)
{
struct f_tcm_opts *opts = to_f_tcm_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations tcm_item_ops = {
.release = tcm_attr_release,
};
static const struct config_item_type tcm_func_type = {
.ct_item_ops = &tcm_item_ops,
.ct_owner = THIS_MODULE,
};
static void tcm_free_inst(struct usb_function_instance *f)
{
struct f_tcm_opts *opts;
unsigned i;
opts = container_of(f, struct f_tcm_opts, func_inst);
mutex_lock(&tpg_instances_lock);
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].func_inst == f)
break;
if (i < TPG_INSTANCES)
tpg_instances[i].func_inst = NULL;
mutex_unlock(&tpg_instances_lock);
kfree(opts);
}
static int tcm_register_callback(struct usb_function_instance *f)
{
struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
mutex_lock(&opts->dep_lock);
opts->can_attach = true;
mutex_unlock(&opts->dep_lock);
return 0;
}
static void tcm_unregister_callback(struct usb_function_instance *f)
{
struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
mutex_lock(&opts->dep_lock);
unregister_gadget_item(
opts->func_inst.group.cg_item.ci_parent->ci_parent);
opts->can_attach = false;
mutex_unlock(&opts->dep_lock);
}
static int usbg_attach(struct usbg_tpg *tpg)
{
struct usb_function_instance *f = tpg->fi;
struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
if (opts->tcm_register_callback)
return opts->tcm_register_callback(f);
return 0;
}
static void usbg_detach(struct usbg_tpg *tpg)
{
struct usb_function_instance *f = tpg->fi;
struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
if (opts->tcm_unregister_callback)
opts->tcm_unregister_callback(f);
}
static int tcm_set_name(struct usb_function_instance *f, const char *name)
{
struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
pr_debug("tcm: Activating %s\n", name);
mutex_lock(&opts->dep_lock);
opts->ready = true;
mutex_unlock(&opts->dep_lock);
return 0;
}
static struct usb_function_instance *tcm_alloc_inst(void)
{
struct f_tcm_opts *opts;
int i;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_lock(&tpg_instances_lock);
for (i = 0; i < TPG_INSTANCES; ++i)
if (!tpg_instances[i].func_inst)
break;
if (i == TPG_INSTANCES) {
mutex_unlock(&tpg_instances_lock);
kfree(opts);
return ERR_PTR(-EBUSY);
}
tpg_instances[i].func_inst = &opts->func_inst;
mutex_unlock(&tpg_instances_lock);
mutex_init(&opts->dep_lock);
opts->func_inst.set_inst_name = tcm_set_name;
opts->func_inst.free_func_inst = tcm_free_inst;
opts->tcm_register_callback = tcm_register_callback;
opts->tcm_unregister_callback = tcm_unregister_callback;
config_group_init_type_name(&opts->func_inst.group, "",
&tcm_func_type);
return &opts->func_inst;
}
static void tcm_free(struct usb_function *f)
{
struct f_uas *tcm = to_f_uas(f);
kfree(tcm);
}
static void tcm_unbind(struct usb_configuration *c, struct usb_function *f)
{
usb_free_all_descriptors(f);
}
static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
{
struct f_uas *fu;
unsigned i;
mutex_lock(&tpg_instances_lock);
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].func_inst == fi)
break;
if (i == TPG_INSTANCES) {
mutex_unlock(&tpg_instances_lock);
return ERR_PTR(-ENODEV);
}
fu = kzalloc(sizeof(*fu), GFP_KERNEL);
if (!fu) {
mutex_unlock(&tpg_instances_lock);
return ERR_PTR(-ENOMEM);
}
fu->function.name = "Target Function";
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
fu->tpg = tpg_instances[i].tpg;
mutex_unlock(&tpg_instances_lock);
return &fu->function;
}
DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
static int __init tcm_init(void)
{
int ret;
ret = usb_function_register(&tcmusb_func);
if (ret)
return ret;
ret = target_register_template(&usbg_ops);
if (ret)
usb_function_unregister(&tcmusb_func);
return ret;
}
module_init(tcm_init);
static void __exit tcm_exit(void)
{
target_unregister_template(&usbg_ops);
usb_function_unregister(&tcmusb_func);
}
module_exit(tcm_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior");
| linux-master | drivers/usb/gadget/function/f_tcm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_acm.c -- USB CDC serial (ACM) function driver
*
* Copyright (C) 2003 Al Borchers ([email protected])
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
* Copyright (C) 2009 by Samsung Electronics
* Author: Michal Nazarewicz ([email protected])
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include "u_serial.h"
/*
* This CDC ACM function support just wraps control functions and
* notifications around the generic serial-over-usb code.
*
* Because CDC ACM is standardized by the USB-IF, many host operating
* systems have drivers for it. Accordingly, ACM is the preferred
* interop solution for serial-port type connections. The control
* models are often not necessary, and in any case don't do much in
* this bare-bones implementation.
*
* Note that even MS-Windows has some support for ACM. However, that
* support is somewhat broken because when you use ACM in a composite
* device, having multiple interfaces confuses the poor OS. It doesn't
* seem to understand CDC Union descriptors. The new "association"
* descriptors (roughly equivalent to CDC Unions) may sometimes help.
*/
struct f_acm {
struct gserial port;
u8 ctrl_id, data_id;
u8 port_num;
u8 pending;
/* lock is mostly for pending and notify_req ... they get accessed
* by callbacks both from tty (open/close/break) under its spinlock,
* and notify_req.complete() which can't use that lock.
*/
spinlock_t lock;
struct usb_ep *notify;
struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
u16 serial_state;
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
{
return container_of(f, struct f_acm, port.func);
}
static inline struct f_acm *port_to_acm(struct gserial *p)
{
return container_of(p, struct f_acm, port);
}
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
#define GS_NOTIFY_INTERVAL_MS 32
#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
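/* Informative: GS_NOTIFY_MAXPACKET is the 8-byte struct
 * usb_cdc_notification header plus the 2-byte SerialState bitmap
 * payload, i.e. 8 + 2 = 10 bytes per notification transfer.
 */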
/* interface and class descriptors: */
static struct usb_interface_assoc_descriptor
acm_iad_descriptor = {
.bLength = sizeof acm_iad_descriptor,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
/* .bFirstInterface = DYNAMIC, */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
.bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
/* .iFunction = DYNAMIC */
};
static struct usb_interface_descriptor acm_control_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
.bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
/* .iInterface = DYNAMIC */
};
static struct usb_interface_descriptor acm_data_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
static struct usb_cdc_header_desc acm_header_desc = {
.bLength = sizeof(acm_header_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
.bcdCDC = cpu_to_le16(0x0110),
};
static struct usb_cdc_call_mgmt_descriptor
acm_call_mgmt_descriptor = {
.bLength = sizeof(acm_call_mgmt_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
.bmCapabilities = 0,
/* .bDataInterface = DYNAMIC */
};
static struct usb_cdc_acm_descriptor acm_descriptor = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
.bmCapabilities = USB_CDC_CAP_LINE,
};
static struct usb_cdc_union_desc acm_union_desc = {
.bLength = sizeof(acm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
/* .bMasterInterface0 = DYNAMIC */
/* .bSlaveInterface0 = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor acm_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
.bInterval = GS_NOTIFY_INTERVAL_MS,
};
static struct usb_endpoint_descriptor acm_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor acm_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *acm_fs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
(struct usb_descriptor_header *) &acm_descriptor,
(struct usb_descriptor_header *) &acm_union_desc,
(struct usb_descriptor_header *) &acm_fs_notify_desc,
(struct usb_descriptor_header *) &acm_data_interface_desc,
(struct usb_descriptor_header *) &acm_fs_in_desc,
(struct usb_descriptor_header *) &acm_fs_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor acm_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
.bInterval = USB_MS_TO_HS_INTERVAL(GS_NOTIFY_INTERVAL_MS),
};
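/* Informative: at high speed, an interrupt endpoint's bInterval is an
 * exponent in 125-us microframe units (period = 2^(bInterval-1)
 * microframes), so USB_MS_TO_HS_INTERVAL(32) evaluates to 9:
 * 2^(9-1) * 125 us = 32 ms.
 */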
static struct usb_endpoint_descriptor acm_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acm_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *acm_hs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
(struct usb_descriptor_header *) &acm_descriptor,
(struct usb_descriptor_header *) &acm_union_desc,
(struct usb_descriptor_header *) &acm_hs_notify_desc,
(struct usb_descriptor_header *) &acm_data_interface_desc,
(struct usb_descriptor_header *) &acm_hs_in_desc,
(struct usb_descriptor_header *) &acm_hs_out_desc,
NULL,
};
static struct usb_endpoint_descriptor acm_ss_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor acm_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor acm_ss_bulk_comp_desc = {
.bLength = sizeof acm_ss_bulk_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_descriptor_header *acm_ss_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
(struct usb_descriptor_header *) &acm_descriptor,
(struct usb_descriptor_header *) &acm_union_desc,
(struct usb_descriptor_header *) &acm_hs_notify_desc,
(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &acm_data_interface_desc,
(struct usb_descriptor_header *) &acm_ss_in_desc,
(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &acm_ss_out_desc,
(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
NULL,
};
/* string descriptors: */
#define ACM_CTRL_IDX 0
#define ACM_DATA_IDX 1
#define ACM_IAD_IDX 2
/* static strings, in UTF-8 */
static struct usb_string acm_string_defs[] = {
[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
[ACM_DATA_IDX].s = "CDC ACM Data",
[ACM_IAD_IDX ].s = "CDC Serial",
{ } /* end of list */
};
static struct usb_gadget_strings acm_string_table = {
.language = 0x0409, /* en-us */
.strings = acm_string_defs,
};
static struct usb_gadget_strings *acm_strings[] = {
&acm_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
/* ACM control ... data handling is delegated to tty library code.
* The main task of this function is to activate and deactivate
* that code based on device state; track parameters like line
* speed, handshake state, and so on; and issue notifications.
*/
static void acm_complete_set_line_coding(struct usb_ep *ep,
struct usb_request *req)
{
struct f_acm *acm = ep->driver_data;
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
if (req->status != 0) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d completion, err %d\n",
acm->port_num, req->status);
return;
}
/* normal completion */
if (req->actual != sizeof(acm->port_line_coding)) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d short resp, len %d\n",
acm->port_num, req->actual);
usb_ep_set_halt(ep);
} else {
struct usb_cdc_line_coding *value = req->buf;
/* REVISIT: we currently just remember this data.
* If we change that, (a) validate it first, then
* (b) update whatever hardware needs updating,
* (c) worry about locking. This is information on
* the order of 9600-8-N-1 ... most of which means
* nothing unless we control a real RS232 line.
*/
acm->port_line_coding = *value;
}
}
static int acm_send_break(struct gserial *port, int duration);
static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
*
* Note CDC spec table 4 lists the ACM request profile. It requires
* encapsulated command support ... we don't handle any, and respond
* to them by stalling. Options include get/set/clear comm features
* (not that useful) and SEND_BREAK.
*/
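	/* Worked example (informative): SET_LINE_CODING arrives with
	 * bRequestType 0x21 (OUT | CLASS | INTERFACE) and bRequest 0x20
	 * (USB_CDC_REQ_SET_LINE_CODING), so the switch key below is
	 * (0x21 << 8) | 0x20 == 0x2120.
	 */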
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
/* SET_LINE_CODING ... just read and save what the host sends */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_LINE_CODING:
if (w_length != sizeof(struct usb_cdc_line_coding)
|| w_index != acm->ctrl_id)
goto invalid;
value = w_length;
cdev->gadget->ep0->driver_data = acm;
req->complete = acm_complete_set_line_coding;
break;
/* GET_LINE_CODING ... return what host sent, or initial value */
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_GET_LINE_CODING:
if (w_index != acm->ctrl_id)
goto invalid;
value = min_t(unsigned, w_length,
sizeof(struct usb_cdc_line_coding));
memcpy(req->buf, &acm->port_line_coding, value);
break;
/* SET_CONTROL_LINE_STATE ... save what the host sent */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
if (w_index != acm->ctrl_id)
goto invalid;
value = 0;
/* FIXME we should not allow data to flow until the
* host sets the USB_CDC_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SEND_BREAK:
if (w_index != acm->ctrl_id)
goto invalid;
acm_send_break(&acm->port, w_value);
break;
default:
invalid:
dev_vdbg(&cdev->gadget->dev,
"invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (value >= 0) {
dev_dbg(&cdev->gadget->dev,
"acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
acm->port_num, ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = 0;
req->length = value;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0)
ERROR(cdev, "acm response on ttyGS%d, err %d\n",
acm->port_num, value);
}
/* device either stalls (value < 0) or reports success */
return value;
}
static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
if (acm->notify->enabled) {
dev_vdbg(&cdev->gadget->dev,
"reset acm control interface %d\n", intf);
usb_ep_disable(acm->notify);
}
if (!acm->notify->desc)
if (config_ep_by_speed(cdev->gadget, f, acm->notify))
return -EINVAL;
usb_ep_enable(acm->notify);
} else if (intf == acm->data_id) {
if (acm->notify->enabled) {
dev_dbg(&cdev->gadget->dev,
"reset acm ttyGS%d\n", acm->port_num);
gserial_disconnect(&acm->port);
}
if (!acm->port.in->desc || !acm->port.out->desc) {
dev_dbg(&cdev->gadget->dev,
"activate acm ttyGS%d\n", acm->port_num);
if (config_ep_by_speed(cdev->gadget, f,
acm->port.in) ||
config_ep_by_speed(cdev->gadget, f,
acm->port.out)) {
acm->port.in->desc = NULL;
acm->port.out->desc = NULL;
return -EINVAL;
}
}
gserial_connect(&acm->port, acm->port_num);
} else
return -EINVAL;
return 0;
}
static void acm_disable(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d deactivated\n", acm->port_num);
gserial_disconnect(&acm->port);
usb_ep_disable(acm->notify);
}
/*-------------------------------------------------------------------------*/
/**
 * acm_cdc_notify - issue CDC notification to host
 * @acm: ACM function instance identifying the host to be notified
 * @type: notification type
 * @value: value for the notification's wValue field (see the CDC spec)
 * @data: data to be sent
 * @length: size of data
 * Context: irqs blocked, acm->lock held, acm->notify_req non-null
 *
 * Returns zero on success or a negative errno.
 *
 * See section 6.3.5 of the CDC 1.1 specification for information
 * about the only notification we issue: SerialState change.
 */
static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
void *data, unsigned length)
{
struct usb_ep *ep = acm->notify;
struct usb_request *req;
struct usb_cdc_notification *notify;
const unsigned len = sizeof(*notify) + length;
void *buf;
int status;
req = acm->notify_req;
acm->notify_req = NULL;
acm->pending = false;
req->length = len;
notify = req->buf;
buf = notify + 1;
notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
| USB_RECIP_INTERFACE;
notify->bNotificationType = type;
notify->wValue = cpu_to_le16(value);
notify->wIndex = cpu_to_le16(acm->ctrl_id);
notify->wLength = cpu_to_le16(length);
memcpy(buf, data, length);
/* ep_queue() can complete immediately if it fills the fifo... */
spin_unlock(&acm->lock);
status = usb_ep_queue(ep, req, GFP_ATOMIC);
spin_lock(&acm->lock);
if (status < 0) {
ERROR(acm->port.func.config->cdev,
"acm ttyGS%d can't notify serial state, %d\n",
acm->port_num, status);
acm->notify_req = req;
}
return status;
}
static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
__le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
}
spin_unlock(&acm->lock);
return status;
}
static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_acm *acm = req->context;
u8 doit = false;
/* on this call path we do NOT hold the port spinlock,
* which is why ACM needs its own spinlock
*/
spin_lock(&acm->lock);
if (req->status != -ESHUTDOWN)
doit = acm->pending;
acm->notify_req = req;
spin_unlock(&acm->lock);
if (doit)
acm_notify_serial_state(acm);
}
/* connect == the TTY link is open */
static void acm_connect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
acm->serial_state |= USB_CDC_SERIAL_STATE_DSR | USB_CDC_SERIAL_STATE_DCD;
acm_notify_serial_state(acm);
}
static void acm_disconnect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
acm->serial_state &= ~(USB_CDC_SERIAL_STATE_DSR | USB_CDC_SERIAL_STATE_DCD);
acm_notify_serial_state(acm);
}
static int acm_send_break(struct gserial *port, int duration)
{
struct f_acm *acm = port_to_acm(port);
u16 state;
state = acm->serial_state;
state &= ~USB_CDC_SERIAL_STATE_BREAK;
if (duration)
state |= USB_CDC_SERIAL_STATE_BREAK;
acm->serial_state = state;
return acm_notify_serial_state(acm);
}
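/* Informative: the resulting notification is a 10-byte interrupt
 * transfer: bmRequestType 0xa1 (IN | CLASS | INTERFACE),
 * bNotificationType 0x20 (USB_CDC_NOTIFY_SERIAL_STATE), wValue 0,
 * wIndex set to the control interface number, wLength 2, followed by
 * the two-byte little-endian serial_state bitmap.
 */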
/*-------------------------------------------------------------------------*/
/* ACM function driver setup/binding */
static int
acm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_acm *acm = func_to_acm(f);
struct usb_string *us;
int status;
struct usb_ep *ep;
/* REVISIT might want instance-specific strings to help
* distinguish instances ...
*/
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(cdev, acm_strings,
ARRAY_SIZE(acm_string_defs));
if (IS_ERR(us))
return PTR_ERR(us);
acm_control_interface_desc.iInterface = us[ACM_CTRL_IDX].id;
acm_data_interface_desc.iInterface = us[ACM_DATA_IDX].id;
acm_iad_descriptor.iFunction = us[ACM_IAD_IDX].id;
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
acm->ctrl_id = status;
acm_iad_descriptor.bFirstInterface = status;
acm_control_interface_desc.bInterfaceNumber = status;
acm_union_desc .bMasterInterface0 = status;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
acm->data_id = status;
acm_data_interface_desc.bInterfaceNumber = status;
acm_union_desc.bSlaveInterface0 = status;
acm_call_mgmt_descriptor.bDataInterface = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
if (!ep)
goto fail;
acm->port.in = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
if (!ep)
goto fail;
acm->port.out = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
if (!ep)
goto fail;
acm->notify = ep;
/* allocate notification */
acm->notify_req = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
GFP_KERNEL);
if (!acm->notify_req)
goto fail;
acm->notify_req->complete = acm_cdc_notify_complete;
acm->notify_req->context = acm;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
acm_hs_in_desc.bEndpointAddress = acm_fs_in_desc.bEndpointAddress;
acm_hs_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
acm_hs_notify_desc.bEndpointAddress =
acm_fs_notify_desc.bEndpointAddress;
acm_ss_in_desc.bEndpointAddress = acm_fs_in_desc.bEndpointAddress;
acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
acm_ss_function, acm_ss_function);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev,
"acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
acm->port.in->name, acm->port.out->name,
acm->notify->name);
return 0;
fail:
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
return status;
}
static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
acm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
}
static void acm_free_func(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
kfree(acm);
}
static void acm_resume(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
gserial_resume(&acm->port);
}
static void acm_suspend(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
gserial_suspend(&acm->port);
}
static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
{
struct f_serial_opts *opts;
struct f_acm *acm;
acm = kzalloc(sizeof(*acm), GFP_KERNEL);
if (!acm)
return ERR_PTR(-ENOMEM);
spin_lock_init(&acm->lock);
acm->port.connect = acm_connect;
acm->port.disconnect = acm_disconnect;
acm->port.send_break = acm_send_break;
acm->port.func.name = "acm";
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
acm->port.func.bind = acm_bind;
acm->port.func.set_alt = acm_set_alt;
acm->port.func.setup = acm_setup;
acm->port.func.disable = acm_disable;
opts = container_of(fi, struct f_serial_opts, func_inst);
acm->port_num = opts->port_num;
acm->port.func.unbind = acm_unbind;
acm->port.func.free_func = acm_free_func;
acm->port.func.resume = acm_resume;
acm->port.func.suspend = acm_suspend;
return &acm->port.func;
}
static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_serial_opts,
func_inst.group);
}
static void acm_attr_release(struct config_item *item)
{
struct f_serial_opts *opts = to_f_serial_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations acm_item_ops = {
.release = acm_attr_release,
};
#ifdef CONFIG_U_SERIAL_CONSOLE
static ssize_t f_acm_console_store(struct config_item *item,
const char *page, size_t count)
{
return gserial_set_console(to_f_serial_opts(item)->port_num,
page, count);
}
static ssize_t f_acm_console_show(struct config_item *item, char *page)
{
return gserial_get_console(to_f_serial_opts(item)->port_num, page);
}
CONFIGFS_ATTR(f_acm_, console);
#endif /* CONFIG_U_SERIAL_CONSOLE */
static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
}
CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
#ifdef CONFIG_U_SERIAL_CONSOLE
&f_acm_attr_console,
#endif
&f_acm_attr_port_num,
NULL,
};
static const struct config_item_type acm_func_type = {
.ct_item_ops = &acm_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
};
static void acm_free_instance(struct usb_function_instance *fi)
{
struct f_serial_opts *opts;
opts = container_of(fi, struct f_serial_opts, func_inst);
gserial_free_line(opts->port_num);
kfree(opts);
}
static struct usb_function_instance *acm_alloc_instance(void)
{
struct f_serial_opts *opts;
int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = acm_free_instance;
ret = gserial_alloc_line(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
}
config_group_init_type_name(&opts->func_inst.group, "",
&acm_func_type);
return &opts->func_inst;
}
DECLARE_USB_FUNCTION_INIT(acm, acm_alloc_instance, acm_alloc_func);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_acm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RNDIS MSG parser
*
* Authors: Benedikt Spranger, Pengutronix
* Robert Schwebel, Pengutronix
*
* This software was originally developed in conformance with
* Microsoft's Remote NDIS Specification License Agreement.
*
* 03/12/2004 Kai-Uwe Bloem <[email protected]>
* Fixed message length bug in init_response
*
* 03/25/2004 Kai-Uwe Bloem <[email protected]>
* Fixed rndis_rm_hdr length bug.
*
* Copyright (C) 2004 by David Brownell
* updates to merge with Linux 2.6, better match RNDIS spec
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/netdevice.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "u_rndis.h"
#undef VERBOSE_DEBUG
#include "rndis.h"
/* The driver for your USB chip needs to support ep0 OUT to work with
* RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
*
* Windows hosts need an INF file like Documentation/usb/linux.inf
* and will be happier if you provide the host_addr module parameter.
*/
#if 0
static int rndis_debug = 0;
module_param (rndis_debug, int, 0);
MODULE_PARM_DESC (rndis_debug, "enable debugging");
#else
#define rndis_debug 0
#endif
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#define NAME_TEMPLATE "driver/rndis-%03d"
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
static DEFINE_IDA(rndis_ida);
/* Driver Version */
static const __le32 rndis_driver_version = cpu_to_le32(1);
/* Function Prototypes */
static rndis_resp_t *rndis_add_response(struct rndis_params *params,
u32 length);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const struct proc_ops rndis_proc_ops;
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/* supported OIDs */
static const u32 oid_supported_list[] = {
/* the general stuff */
RNDIS_OID_GEN_SUPPORTED_LIST,
RNDIS_OID_GEN_HARDWARE_STATUS,
RNDIS_OID_GEN_MEDIA_SUPPORTED,
RNDIS_OID_GEN_MEDIA_IN_USE,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
RNDIS_OID_GEN_LINK_SPEED,
RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE,
RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE,
RNDIS_OID_GEN_VENDOR_ID,
RNDIS_OID_GEN_VENDOR_DESCRIPTION,
RNDIS_OID_GEN_VENDOR_DRIVER_VERSION,
RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
/* the statistical stuff */
RNDIS_OID_GEN_XMIT_OK,
RNDIS_OID_GEN_RCV_OK,
RNDIS_OID_GEN_XMIT_ERROR,
RNDIS_OID_GEN_RCV_ERROR,
RNDIS_OID_GEN_RCV_NO_BUFFER,
#ifdef RNDIS_OPTIONAL_STATS
RNDIS_OID_GEN_DIRECTED_BYTES_XMIT,
RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT,
RNDIS_OID_GEN_MULTICAST_BYTES_XMIT,
RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT,
RNDIS_OID_GEN_BROADCAST_BYTES_XMIT,
RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT,
RNDIS_OID_GEN_DIRECTED_BYTES_RCV,
RNDIS_OID_GEN_DIRECTED_FRAMES_RCV,
RNDIS_OID_GEN_MULTICAST_BYTES_RCV,
RNDIS_OID_GEN_MULTICAST_FRAMES_RCV,
RNDIS_OID_GEN_BROADCAST_BYTES_RCV,
RNDIS_OID_GEN_BROADCAST_FRAMES_RCV,
RNDIS_OID_GEN_RCV_CRC_ERROR,
RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH,
#endif /* RNDIS_OPTIONAL_STATS */
/* mandatory 802.3 */
/* the general stuff */
RNDIS_OID_802_3_PERMANENT_ADDRESS,
RNDIS_OID_802_3_CURRENT_ADDRESS,
RNDIS_OID_802_3_MULTICAST_LIST,
RNDIS_OID_802_3_MAC_OPTIONS,
RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
/* the statistical stuff */
RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT,
RNDIS_OID_802_3_XMIT_ONE_COLLISION,
RNDIS_OID_802_3_XMIT_MORE_COLLISIONS,
#ifdef RNDIS_OPTIONAL_STATS
RNDIS_OID_802_3_XMIT_DEFERRED,
RNDIS_OID_802_3_XMIT_MAX_COLLISIONS,
RNDIS_OID_802_3_RCV_OVERRUN,
RNDIS_OID_802_3_XMIT_UNDERRUN,
RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE,
RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST,
RNDIS_OID_802_3_XMIT_LATE_COLLISIONS,
#endif /* RNDIS_OPTIONAL_STATS */
#ifdef RNDIS_PM
/* PM and wakeup are "mandatory" for USB, but the RNDIS specs
* don't say what they mean ... and the NDIS specs are often
* confusing and/or ambiguous in this context. (That is, more
* so than their specs for the other OIDs.)
*
* FIXME someone who knows what these should do, please
* implement them!
*/
/* power management */
OID_PNP_CAPABILITIES,
OID_PNP_QUERY_POWER,
OID_PNP_SET_POWER,
#ifdef RNDIS_WAKEUP
/* wake up host */
OID_PNP_ENABLE_WAKE_UP,
OID_PNP_ADD_WAKE_UP_PATTERN,
OID_PNP_REMOVE_WAKE_UP_PATTERN,
#endif /* RNDIS_WAKEUP */
#endif /* RNDIS_PM */
};
/* NDIS Functions */
static int gen_ndis_query_resp(struct rndis_params *params, u32 OID, u8 *buf,
unsigned buf_len, rndis_resp_t *r)
{
int retval = -ENOTSUPP;
u32 length = 4; /* usually */
__le32 *outbuf;
int i, count;
rndis_query_cmplt_type *resp;
struct net_device *net;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
if (!r) return -ENOMEM;
resp = (rndis_query_cmplt_type *)r->buf;
if (!resp) return -ENOMEM;
if (buf_len && rndis_debug > 1) {
pr_debug("query OID %08x value, len %d:\n", OID, buf_len);
for (i = 0; i < buf_len; i += 16) {
pr_debug("%03d: %08x %08x %08x %08x\n", i,
get_unaligned_le32(&buf[i]),
get_unaligned_le32(&buf[i + 4]),
get_unaligned_le32(&buf[i + 8]),
get_unaligned_le32(&buf[i + 12]));
}
}
/* response goes here, right after the header */
outbuf = (__le32 *)&resp[1];
resp->InformationBufferOffset = cpu_to_le32(16);
net = params->dev;
stats = dev_get_stats(net, &temp);
switch (OID) {
/* general oids (table 4-1) */
/* mandatory */
case RNDIS_OID_GEN_SUPPORTED_LIST:
pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
length = sizeof(oid_supported_list);
count = length / sizeof(u32);
for (i = 0; i < count; i++)
outbuf[i] = cpu_to_le32(oid_supported_list[i]);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_HARDWARE_STATUS:
pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
/* Bogus question!
* Hardware must be ready to receive high level protocols.
* BTW:
* reddite ergo quae sunt Caesaris Caesari
* et quae sunt Dei Deo!
*/
*outbuf = cpu_to_le32(0);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_MEDIA_SUPPORTED:
pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
*outbuf = cpu_to_le32(params->medium);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_MEDIA_IN_USE:
pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
/* one medium, one transport... (maybe you do it better) */
*outbuf = cpu_to_le32(params->medium);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
if (params->dev) {
*outbuf = cpu_to_le32(params->dev->mtu);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_LINK_SPEED:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
if (params->media_state == RNDIS_MEDIA_STATE_DISCONNECTED)
*outbuf = cpu_to_le32(0);
else
*outbuf = cpu_to_le32(params->speed);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
if (params->dev) {
*outbuf = cpu_to_le32(params->dev->mtu);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
if (params->dev) {
*outbuf = cpu_to_le32(params->dev->mtu);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_VENDOR_ID:
pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
*outbuf = cpu_to_le32(params->vendorID);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
if (params->vendorDescr) {
length = strlen(params->vendorDescr);
memcpy(outbuf, params->vendorDescr, length);
} else {
outbuf[0] = 0;
}
retval = 0;
break;
case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION:
pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
/* Created as LE */
*outbuf = rndis_driver_version;
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
*outbuf = cpu_to_le32(*params->filter);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE:
pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
*outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
*outbuf = cpu_to_le32(params->media_state);
retval = 0;
break;
case RNDIS_OID_GEN_PHYSICAL_MEDIUM:
pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__);
*outbuf = cpu_to_le32(0);
retval = 0;
break;
/* The RNDIS specification is incomplete/wrong. Some versions
* of MS-Windows expect OIDs that aren't specified there. Other
* versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
*/
case RNDIS_OID_GEN_MAC_OPTIONS: /* from WinME */
pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__);
*outbuf = cpu_to_le32(
RNDIS_MAC_OPTION_RECEIVE_SERIALIZED
| RNDIS_MAC_OPTION_FULL_DUPLEX);
retval = 0;
break;
/* statistics OIDs (table 4-2) */
/* mandatory */
case RNDIS_OID_GEN_XMIT_OK:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->tx_packets
- stats->tx_errors - stats->tx_dropped);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_RCV_OK:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->rx_packets
- stats->rx_errors - stats->rx_dropped);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_XMIT_ERROR:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->tx_errors);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_RCV_ERROR:
if (rndis_debug > 1)
pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->rx_errors);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_GEN_RCV_NO_BUFFER:
pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->rx_dropped);
retval = 0;
}
break;
/* ieee802.3 OIDs (table 4-3) */
/* mandatory */
case RNDIS_OID_802_3_PERMANENT_ADDRESS:
pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
if (params->dev) {
length = ETH_ALEN;
memcpy(outbuf, params->host_mac, length);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_802_3_CURRENT_ADDRESS:
pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
if (params->dev) {
length = ETH_ALEN;
memcpy(outbuf, params->host_mac, length);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_802_3_MULTICAST_LIST:
pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
/* Multicast base address only */
*outbuf = cpu_to_le32(0xE0000000);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE:
pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
/* Multicast base address only */
*outbuf = cpu_to_le32(1);
retval = 0;
break;
case RNDIS_OID_802_3_MAC_OPTIONS:
pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__);
*outbuf = cpu_to_le32(0);
retval = 0;
break;
/* ieee802.3 statistics OIDs (table 4-4) */
/* mandatory */
case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
if (stats) {
*outbuf = cpu_to_le32(stats->rx_frame_errors);
retval = 0;
}
break;
/* mandatory */
case RNDIS_OID_802_3_XMIT_ONE_COLLISION:
pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__);
*outbuf = cpu_to_le32(0);
retval = 0;
break;
/* mandatory */
case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS:
pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
*outbuf = cpu_to_le32(0);
retval = 0;
break;
default:
pr_warn("%s: query unknown OID 0x%08X\n", __func__, OID);
}
if (retval < 0)
length = 0;
resp->InformationBufferLength = cpu_to_le32(length);
r->length = length + sizeof(*resp);
resp->MessageLength = cpu_to_le32(r->length);
return retval;
}
static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
u8 *buf, u32 buf_len, rndis_resp_t *r)
{
rndis_set_cmplt_type *resp;
int i, retval = -ENOTSUPP;
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
if (!resp)
return -ENOMEM;
if (buf_len && rndis_debug > 1) {
pr_debug("set OID %08x value, len %d:\n", OID, buf_len);
for (i = 0; i < buf_len; i += 16) {
pr_debug("%03d: %08x %08x %08x %08x\n", i,
get_unaligned_le32(&buf[i]),
get_unaligned_le32(&buf[i + 4]),
get_unaligned_le32(&buf[i + 8]),
get_unaligned_le32(&buf[i + 12]));
}
}
switch (OID) {
case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
/* these NDIS_PACKET_TYPE_* bitflags are shared with
* cdc_filter; it's not RNDIS-specific
* NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
* PROMISCUOUS, DIRECTED,
* MULTICAST, ALL_MULTICAST, BROADCAST
*/
*params->filter = (u16)get_unaligned_le32(buf);
pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n",
__func__, *params->filter);
/* this call has a significant side effect: it's
* what makes the packet flow start and stop, like
* activating the CDC Ethernet altsetting.
*/
retval = 0;
if (*params->filter) {
params->state = RNDIS_DATA_INITIALIZED;
netif_carrier_on(params->dev);
if (netif_running(params->dev))
netif_wake_queue(params->dev);
} else {
params->state = RNDIS_INITIALIZED;
netif_carrier_off(params->dev);
netif_stop_queue(params->dev);
}
break;
case RNDIS_OID_802_3_MULTICAST_LIST:
/* I think we can ignore this */
pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
retval = 0;
break;
default:
pr_warn("%s: set unknown OID 0x%08X, size %d\n",
__func__, OID, buf_len);
}
return retval;
}
/*
* Response Functions
*/
static int rndis_init_response(struct rndis_params *params,
rndis_init_msg_type *buf)
{
rndis_init_cmplt_type *resp;
rndis_resp_t *r;
if (!params->dev)
return -ENOTSUPP;
r = rndis_add_response(params, sizeof(rndis_init_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_init_cmplt_type *)r->buf;
resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C);
resp->MessageLength = cpu_to_le32(52);
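	/* Worked example (informative): 52 == sizeof(rndis_init_cmplt_type),
	 * i.e. thirteen 32-bit fields at 4 bytes each.
	 */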
resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION);
resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
resp->MaxPacketsPerTransfer = cpu_to_le32(1);
resp->MaxTransferSize = cpu_to_le32(
params->dev->mtu
+ sizeof(struct ethhdr)
+ sizeof(struct rndis_packet_msg_type)
+ 22);
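	/* Worked example (informative): with the default 1500-byte MTU this
	 * is 1500 + 14 (struct ethhdr) + 44 (RNDIS packet header) + 22 of
	 * headroom = 1580 bytes.
	 */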
resp->PacketAlignmentFactor = cpu_to_le32(0);
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
params->resp_avail(params->v);
return 0;
}
static int rndis_query_response(struct rndis_params *params,
rndis_query_msg_type *buf)
{
rndis_query_cmplt_type *resp;
rndis_resp_t *r;
/* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */
if (!params->dev)
return -ENOTSUPP;
/*
* we need more memory:
* gen_ndis_query_resp expects enough space for
* rndis_query_cmplt_type followed by data.
* oid_supported_list is the largest data reply
*/
r = rndis_add_response(params,
sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_query_cmplt_type *)r->buf;
resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
if (gen_ndis_query_resp(params, le32_to_cpu(buf->OID),
le32_to_cpu(buf->InformationBufferOffset)
+ 8 + (u8 *)buf,
le32_to_cpu(buf->InformationBufferLength),
r)) {
/* OID not supported */
resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
resp->MessageLength = cpu_to_le32(sizeof *resp);
resp->InformationBufferLength = cpu_to_le32(0);
resp->InformationBufferOffset = cpu_to_le32(0);
} else
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
params->resp_avail(params->v);
return 0;
}
static int rndis_set_response(struct rndis_params *params,
rndis_set_msg_type *buf)
{
u32 BufLength, BufOffset;
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
BufLength = le32_to_cpu(buf->InformationBufferLength);
BufOffset = le32_to_cpu(buf->InformationBufferOffset);
if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
(BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
(BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
return -EINVAL;
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
#ifdef VERBOSE_DEBUG
	{
		u32 i;

		pr_debug("%s: Length: %u\n", __func__, BufLength);
		pr_debug("%s: Offset: %u\n", __func__, BufOffset);
		pr_debug("%s: InfoBuffer: ", __func__);
		for (i = 0; i < BufLength; i++)
			pr_debug("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
		pr_debug("\n");
	}
#endif
resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
resp->MessageLength = cpu_to_le32(16);
resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
if (gen_ndis_set_resp(params, le32_to_cpu(buf->OID),
((u8 *)buf) + 8 + BufOffset, BufLength, r))
resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
else
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
params->resp_avail(params->v);
return 0;
}
static int rndis_reset_response(struct rndis_params *params,
rndis_reset_msg_type *buf)
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
u8 *xbuf;
u32 length;
/* drain the response queue */
while ((xbuf = rndis_get_next_response(params, &length)))
rndis_free_response(params, xbuf);
r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_reset_cmplt_type *)r->buf;
resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C);
resp->MessageLength = cpu_to_le32(16);
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
	/* addressing information must be re-sent by the host */
resp->AddressingReset = cpu_to_le32(1);
params->resp_avail(params->v);
return 0;
}
static int rndis_keepalive_response(struct rndis_params *params,
rndis_keepalive_msg_type *buf)
{
rndis_keepalive_cmplt_type *resp;
rndis_resp_t *r;
/* host "should" check only in RNDIS_DATA_INITIALIZED state */
r = rndis_add_response(params, sizeof(rndis_keepalive_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_keepalive_cmplt_type *)r->buf;
resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
resp->MessageLength = cpu_to_le32(16);
resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
params->resp_avail(params->v);
return 0;
}
/*
 * Device to Host Communication
*/
static int rndis_indicate_status_msg(struct rndis_params *params, u32 status)
{
rndis_indicate_status_msg_type *resp;
rndis_resp_t *r;
if (params->state == RNDIS_UNINITIALIZED)
return -ENOTSUPP;
r = rndis_add_response(params, sizeof(rndis_indicate_status_msg_type));
if (!r)
return -ENOMEM;
resp = (rndis_indicate_status_msg_type *)r->buf;
resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE);
resp->MessageLength = cpu_to_le32(20);
resp->Status = cpu_to_le32(status);
resp->StatusBufferLength = cpu_to_le32(0);
resp->StatusBufferOffset = cpu_to_le32(0);
params->resp_avail(params->v);
return 0;
}
int rndis_signal_connect(struct rndis_params *params)
{
params->media_state = RNDIS_MEDIA_STATE_CONNECTED;
return rndis_indicate_status_msg(params, RNDIS_STATUS_MEDIA_CONNECT);
}
EXPORT_SYMBOL_GPL(rndis_signal_connect);
int rndis_signal_disconnect(struct rndis_params *params)
{
params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
return rndis_indicate_status_msg(params, RNDIS_STATUS_MEDIA_DISCONNECT);
}
EXPORT_SYMBOL_GPL(rndis_signal_disconnect);
void rndis_uninit(struct rndis_params *params)
{
u8 *buf;
u32 length;
if (!params)
return;
params->state = RNDIS_UNINITIALIZED;
/* drain the response queue */
while ((buf = rndis_get_next_response(params, &length)))
rndis_free_response(params, buf);
}
EXPORT_SYMBOL_GPL(rndis_uninit);
void rndis_set_host_mac(struct rndis_params *params, const u8 *addr)
{
params->host_mac = addr;
}
EXPORT_SYMBOL_GPL(rndis_set_host_mac);
/*
* Message Parser
*/
int rndis_msg_parser(struct rndis_params *params, u8 *buf)
{
u32 MsgType, MsgLength;
__le32 *tmp;
if (!buf)
return -ENOMEM;
tmp = (__le32 *)buf;
MsgType = get_unaligned_le32(tmp++);
MsgLength = get_unaligned_le32(tmp++);
if (!params)
return -ENOTSUPP;
/* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for
* rx/tx statistics and link status, in addition to KEEPALIVE traffic
* and normal HC level polling to see if there's any IN traffic.
*/
/* For USB: responses may take up to 10 seconds */
switch (MsgType) {
case RNDIS_MSG_INIT:
pr_debug("%s: RNDIS_MSG_INIT\n",
__func__);
params->state = RNDIS_INITIALIZED;
return rndis_init_response(params, (rndis_init_msg_type *)buf);
case RNDIS_MSG_HALT:
pr_debug("%s: RNDIS_MSG_HALT\n",
__func__);
params->state = RNDIS_UNINITIALIZED;
if (params->dev) {
netif_carrier_off(params->dev);
netif_stop_queue(params->dev);
}
return 0;
case RNDIS_MSG_QUERY:
return rndis_query_response(params,
(rndis_query_msg_type *)buf);
case RNDIS_MSG_SET:
return rndis_set_response(params, (rndis_set_msg_type *)buf);
case RNDIS_MSG_RESET:
pr_debug("%s: RNDIS_MSG_RESET\n",
__func__);
return rndis_reset_response(params,
(rndis_reset_msg_type *)buf);
case RNDIS_MSG_KEEPALIVE:
/* For USB: host does this every 5 seconds */
if (rndis_debug > 1)
pr_debug("%s: RNDIS_MSG_KEEPALIVE\n",
__func__);
return rndis_keepalive_response(params,
(rndis_keepalive_msg_type *)
buf);
default:
/* At least Windows XP emits some undefined RNDIS messages.
* In one case those messages seemed to relate to the host
* suspending itself.
*/
pr_warn("%s: unknown RNDIS message 0x%08X len %d\n",
__func__, MsgType, MsgLength);
/* Garbled message can be huge, so limit what we display */
if (MsgLength > 16)
MsgLength = 16;
print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
buf, MsgLength);
break;
}
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(rndis_msg_parser);
static inline int rndis_get_nr(void)
{
return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
}
static inline void rndis_put_nr(int nr)
{
ida_simple_remove(&rndis_ida, nr);
}
struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
{
struct rndis_params *params;
int i;
if (!resp_avail)
return ERR_PTR(-EINVAL);
i = rndis_get_nr();
if (i < 0) {
pr_debug("failed\n");
return ERR_PTR(-ENODEV);
}
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params) {
rndis_put_nr(i);
return ERR_PTR(-ENOMEM);
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
{
struct proc_dir_entry *proc_entry;
char name[20];
sprintf(name, NAME_TEMPLATE, i);
proc_entry = proc_create_data(name, 0660, NULL,
&rndis_proc_ops, params);
if (!proc_entry) {
kfree(params);
rndis_put_nr(i);
return ERR_PTR(-EIO);
}
}
#endif
params->confignr = i;
params->used = 1;
params->state = RNDIS_UNINITIALIZED;
params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
params->resp_avail = resp_avail;
params->v = v;
INIT_LIST_HEAD(¶ms->resp_queue);
spin_lock_init(¶ms->resp_lock);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
}
EXPORT_SYMBOL_GPL(rndis_register);
void rndis_deregister(struct rndis_params *params)
{
int i;
pr_debug("%s:\n", __func__);
if (!params)
return;
i = params->confignr;
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
{
char name[20];
sprintf(name, NAME_TEMPLATE, i);
remove_proc_entry(name, NULL);
}
#endif
kfree(params);
rndis_put_nr(i);
}
EXPORT_SYMBOL_GPL(rndis_deregister);
int rndis_set_param_dev(struct rndis_params *params, struct net_device *dev,
u16 *cdc_filter)
{
pr_debug("%s:\n", __func__);
if (!dev)
return -EINVAL;
if (!params)
return -1;
params->dev = dev;
params->filter = cdc_filter;
return 0;
}
EXPORT_SYMBOL_GPL(rndis_set_param_dev);
int rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
const char *vendorDescr)
{
pr_debug("%s:\n", __func__);
if (!vendorDescr) return -1;
if (!params)
return -1;
params->vendorID = vendorID;
params->vendorDescr = vendorDescr;
return 0;
}
EXPORT_SYMBOL_GPL(rndis_set_param_vendor);
int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
{
pr_debug("%s: %u %u\n", __func__, medium, speed);
if (!params)
return -1;
params->medium = medium;
params->speed = speed;
return 0;
}
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
void rndis_add_hdr(struct sk_buff *skb)
{
struct rndis_packet_msg_type *header;
if (!skb)
return;
header = skb_push(skb, sizeof(*header));
memset(header, 0, sizeof *header);
header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
header->MessageLength = cpu_to_le32(skb->len);
header->DataOffset = cpu_to_le32(36);
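	/* Informative: per the RNDIS spec, DataOffset is measured from the
	 * start of the DataOffset field itself (byte 8 of the message), so
	 * 36 means the payload begins immediately after this 44-byte header
	 * (8 + 36 == sizeof(*header)).
	 */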
header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
}
EXPORT_SYMBOL_GPL(rndis_add_hdr);
void rndis_free_response(struct rndis_params *params, u8 *buf)
{
rndis_resp_t *r, *n;
spin_lock(¶ms->resp_lock);
list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) {
if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
}
spin_unlock(¶ms->resp_lock);
}
EXPORT_SYMBOL_GPL(rndis_free_response);
u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
{
rndis_resp_t *r, *n;
if (!length) return NULL;
spin_lock(¶ms->resp_lock);
list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
spin_unlock(¶ms->resp_lock);
return r->buf;
}
}
spin_unlock(¶ms->resp_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rndis_get_next_response);
static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
{
rndis_resp_t *r;
/* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... */
r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC);
if (!r) return NULL;
r->buf = (u8 *)(r + 1);
r->length = length;
r->send = 0;
spin_lock(¶ms->resp_lock);
list_add_tail(&r->list, ¶ms->resp_queue);
spin_unlock(¶ms->resp_lock);
return r;
}
int rndis_rm_hdr(struct gether *port,
struct sk_buff *skb,
struct sk_buff_head *list)
{
/* tmp points to a struct rndis_packet_msg_type */
__le32 *tmp = (void *)skb->data;
/* MessageType, MessageLength */
if (cpu_to_le32(RNDIS_MSG_PACKET)
!= get_unaligned(tmp++)) {
dev_kfree_skb_any(skb);
return -EINVAL;
}
tmp++;
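	/* Informative: DataOffset is relative to the DataOffset field at
	 * byte 8 of the message, so the "+ 8" below converts it into an
	 * offset from the start of the message before stripping the header.
	 */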
/* DataOffset, DataLength */
if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
dev_kfree_skb_any(skb);
return -EOVERFLOW;
}
skb_trim(skb, get_unaligned_le32(tmp++));
skb_queue_tail(list, skb);
return 0;
}
EXPORT_SYMBOL_GPL(rndis_rm_hdr);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static int rndis_proc_show(struct seq_file *m, void *v)
{
rndis_params *param = m->private;
seq_printf(m,
"Config Nr. %d\n"
"used : %s\n"
"state : %s\n"
"medium : 0x%08X\n"
"speed : %u\n"
"cable : %s\n"
"vendor ID : 0x%08X\n"
"vendor : %s\n",
param->confignr, (param->used) ? "y" : "n",
({ char *s = "?";
switch (param->state) {
case RNDIS_UNINITIALIZED:
s = "RNDIS_UNINITIALIZED"; break;
case RNDIS_INITIALIZED:
s = "RNDIS_INITIALIZED"; break;
case RNDIS_DATA_INITIALIZED:
s = "RNDIS_DATA_INITIALIZED"; break;
} s; }),
param->medium,
(param->media_state) ? 0 : param->speed*100,
(param->media_state) ? "disconnected" : "connected",
param->vendorID, param->vendorDescr);
return 0;
}
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
rndis_params *p = pde_data(file_inode(file));
u32 speed = 0;
int i, fl_speed = 0;
for (i = 0; i < count; i++) {
char c;
if (get_user(c, buffer))
return -EFAULT;
switch (c) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
fl_speed = 1;
speed = speed * 10 + c - '0';
break;
case 'C':
case 'c':
rndis_signal_connect(p);
break;
case 'D':
case 'd':
rndis_signal_disconnect(p);
break;
default:
if (fl_speed) p->speed = speed;
else pr_debug("%c is not valid\n", c);
break;
}
buffer++;
}
return count;
}
static int rndis_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rndis_proc_show, pde_data(inode));
}
static const struct proc_ops rndis_proc_ops = {
.proc_open = rndis_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = rndis_proc_write,
};
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
| linux-master | drivers/usb/gadget/function/rndis.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* u_serial.c - utilities for USB gadget "serial port"/TTY support
*
* Copyright (C) 2003 Al Borchers ([email protected])
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
*
* This code also borrows from usbserial.c, which is
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2000 Peter Berger ([email protected])
* Copyright (C) 2000 Al Borchers ([email protected])
*/
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include "u_serial.h"
/*
* This component encapsulates the TTY layer glue needed to provide basic
* "serial port" functionality through the USB gadget stack. Each such
* port is exposed through a /dev/ttyGS* node.
*
 * After this module has been loaded, individual TTY ports can be requested
 * (gserial_alloc_line()) and they stay available until they are removed
* (gserial_free_line()). Each one may be connected to a USB function
* (gserial_connect), or disconnected (with gserial_disconnect) when the USB
* host issues a config change event. Data can only flow when the port is
* connected to the host.
*
* A given TTY port can be made available in multiple configurations.
* For example, each one might expose a ttyGS0 node which provides a
* login application. In one case that might use CDC ACM interface 0,
* while another configuration might use interface 3 for that. The
* work to handle that (including descriptor management) is not part
* of this component.
*
* Configurations may expose more than one TTY port. For example, if
* ttyGS0 provides login service, then ttyGS1 might provide dialer access
* for a telephone or fax link. And ttyGS2 might be something that just
* needs a simple byte stream interface for some messaging protocol that
* is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
*
*
* gserial is the lifecycle interface, used by USB functions
* gs_port is the I/O nexus, used by the tty driver
* tty_struct links to the tty/filesystem framework
*
* gserial <---> gs_port ... links will be null when the USB link is
 * inactive; managed by gserial_{connect,disconnect}(). Each gserial
* instance can wrap its own USB control protocol.
* gserial->ioport == usb_ep->driver_data ... gs_port
* gs_port->port_usb ... gserial
*
* gs_port <---> tty_struct ... links will be null when the TTY file
* isn't opened; managed by gs_open()/gs_close()
* gserial->port_tty ... tty_struct
* tty_struct->driver_data ... gserial
*/
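/*
 * Illustrative usage sketch (informative only; the surrounding function
 * and its error handling are hypothetical). A USB function driver such
 * as f_acm typically drives this lifecycle API as follows:
 *
 *	u8 port_num;
 *	int status;
 *
 *	status = gserial_alloc_line(&port_num);	  // creates /dev/ttyGS<n>
 *	if (status)
 *		return status;
 *	...
 *	status = gserial_connect(gser, port_num); // host activated config
 *	...
 *	gserial_disconnect(gser);		  // host reset or disconnect
 *	gserial_free_line(port_num);		  // instance teardown
 */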
/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
* next layer of buffering. For TX that's a circular buffer; for RX
* consider it a NOP. A third layer is provided by the TTY code.
*/
#define QUEUE_SIZE 16
#define WRITE_BUF_SIZE 8192 /* TX only */
#define GS_CONSOLE_BUF_SIZE 8192
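/* Informative sizing example: at high speed (512-byte bulk maxpacket),
 * the TX side can hold up to QUEUE_SIZE * 512 = 8192 bytes in flight,
 * in addition to the WRITE_BUF_SIZE (8192-byte) kfifo above it.
 */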
/* Prevents race conditions while accessing gser->ioport */
static DEFINE_SPINLOCK(serial_port_lock);
/* console info */
struct gs_console {
struct console console;
struct work_struct work;
spinlock_t lock;
struct usb_request *req;
struct kfifo buf;
size_t missed;
};
/*
* The port structure holds info for each port, one for each minor number
* (and thus for each /dev/ node).
*/
struct gs_port {
struct tty_port port;
spinlock_t port_lock; /* guard port_* access */
struct gserial *port_usb;
#ifdef CONFIG_U_SERIAL_CONSOLE
struct gs_console *console;
#endif
u8 port_num;
struct list_head read_pool;
int read_started;
int read_allocated;
struct list_head read_queue;
unsigned n_read;
struct delayed_work push;
struct list_head write_pool;
int write_started;
int write_allocated;
struct kfifo port_write_buf;
wait_queue_head_t drain_wait; /* wait while writes drain */
bool write_busy;
wait_queue_head_t close_wait;
bool suspended; /* port suspended */
bool start_delayed; /* delay start when suspended */
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
};
static struct portmaster {
struct mutex lock; /* protect open/close */
struct gs_port *port;
} ports[MAX_U_SERIAL_PORTS];
#define GS_CLOSE_TIMEOUT 15 /* seconds */
#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
#endif
/*-------------------------------------------------------------------------*/
/* I/O glue between TTY (upper) and USB function (lower) driver layers */
/*
* gs_alloc_req
*
* Allocate a usb_request and its buffer. Returns a pointer to the
* usb_request or NULL if there is an error.
*/
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, kmalloc_flags);
if (req != NULL) {
req->length = len;
req->buf = kmalloc(len, kmalloc_flags);
if (req->buf == NULL) {
usb_ep_free_request(ep, req);
return NULL;
}
}
return req;
}
EXPORT_SYMBOL_GPL(gs_alloc_req);
/*
* gs_free_req
*
* Free a usb_request and its buffer.
*/
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
usb_ep_free_request(ep, req);
}
EXPORT_SYMBOL_GPL(gs_free_req);
/*
* gs_send_packet
*
* If there is data to send, a packet is built in the given
* buffer and the size is returned. If there is no data to
* send, 0 is returned.
*
* Called with port_lock held.
*/
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
unsigned len;
len = kfifo_len(&port->port_write_buf);
if (len < size)
size = len;
if (size != 0)
size = kfifo_out(&port->port_write_buf, packet, size);
return size;
}
/*
* gs_start_tx
*
* This function finds available write requests, calls
* gs_send_packet to fill these packets with data, and
* continues until either there are no more write requests
* available or no more data to send. This function is
* run whenever data arrives or write requests are available.
*
* Context: caller owns port_lock; port_usb is non-null.
*/
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
struct list_head *pool = &port->write_pool;
struct usb_ep *in;
int status = 0;
bool do_tty_wake = false;
if (!port->port_usb)
return status;
in = port->port_usb->in;
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
if (port->write_started >= QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
len = gs_send_packet(port, req->buf, in->maxpacket);
if (len == 0) {
wake_up_interruptible(&port->drain_wait);
break;
}
do_tty_wake = true;
req->length = len;
list_del(&req->list);
req->zero = kfifo_is_empty(&port->port_write_buf);
pr_vdebug("ttyGS%d: tx len=%d, %3ph ...\n", port->port_num, len, req->buf);
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
* happen too; maybe immediately before we queue this!
*
* NOTE that we may keep sending data for a while after
* the TTY closed (dev->ioport->port_tty is NULL).
*/
port->write_busy = true;
spin_unlock(&port->port_lock);
status = usb_ep_queue(in, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
port->write_busy = false;
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", in->name, status);
list_add(&req->list, pool);
break;
}
port->write_started++;
/* abort immediately after disconnect */
if (!port->port_usb)
break;
}
if (do_tty_wake && port->port.tty)
tty_wakeup(port->port.tty);
return status;
}
/*
* Context: caller owns port_lock, and port_usb is set
*/
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
struct list_head *pool = &port->read_pool;
struct usb_ep *out = port->port_usb->out;
while (!list_empty(pool)) {
struct usb_request *req;
int status;
struct tty_struct *tty;
/* no more rx if closed */
tty = port->port.tty;
if (!tty)
break;
if (port->read_started >= QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
list_del(&req->list);
req->length = out->maxpacket;
/* drop lock while we call out; the controller driver
* may need to call us back (e.g. for disconnect)
*/
spin_unlock(&port->port_lock);
status = usb_ep_queue(out, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", out->name, status);
list_add(&req->list, pool);
break;
}
port->read_started++;
/* abort immediately after disconnect */
if (!port->port_usb)
break;
}
return port->read_started;
}
/*
* RX work takes data out of the RX queue and hands it up to the TTY
* layer until it refuses to take any more data (or is throttled back).
* Then it issues reads for any further data.
*
* If the RX queue becomes full enough that no usb_request is queued,
* the OUT endpoint may begin NAKing as soon as its FIFO fills up.
* So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
* can be buffered ahead of the TTY layer's own buffers (currently 64 KB).
*/
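/*
* Worked example, assuming the driver's QUEUE_SIZE of 16 and 512-byte
* high-speed bulk packets: up to 16 * 512 = 8 KB can sit in this queue,
* plus whatever the endpoint FIFO holds, before pushback reaches the
* host.
*/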
static void gs_rx_push(struct work_struct *work)
{
struct delayed_work *w = to_delayed_work(work);
struct gs_port *port = container_of(w, struct gs_port, push);
struct tty_struct *tty;
struct list_head *queue = &port->read_queue;
bool disconnect = false;
bool do_push = false;
/* hand any queued data to the tty */
spin_lock_irq(&port->port_lock);
tty = port->port.tty;
while (!list_empty(queue)) {
struct usb_request *req;
req = list_first_entry(queue, struct usb_request, list);
/* leave data queued if tty was rx throttled */
if (tty && tty_throttled(tty))
break;
switch (req->status) {
case -ESHUTDOWN:
disconnect = true;
pr_vdebug("ttyGS%d: shutdown\n", port->port_num);
break;
default:
/* presumably a transient fault */
pr_warn("ttyGS%d: unexpected RX status %d\n",
port->port_num, req->status);
fallthrough;
case 0:
/* normal completion */
break;
}
/* push data to (open) tty */
if (req->actual && tty) {
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
int count;
/* we may have pushed part of this packet already... */
n = port->n_read;
if (n) {
packet += n;
size -= n;
}
count = tty_insert_flip_string(&port->port, packet,
size);
if (count)
do_push = true;
if (count != size) {
/* stop pushing; TTY layer can't handle more */
port->n_read += count;
pr_vdebug("ttyGS%d: rx block %d/%d\n",
port->port_num, count, req->actual);
break;
}
port->n_read = 0;
}
list_move(&req->list, &port->read_pool);
port->read_started--;
}
/* Push from tty to ldisc; this is handled by a workqueue,
* so we won't get callbacks and can hold port_lock
*/
if (do_push)
tty_flip_buffer_push(&port->port);
/* We want our data queue to become empty ASAP, keeping data
* in the tty and ldisc (not here). If we couldn't push any
* this time around, RX may be starved, so wait until next jiffy.
*
* We may leave non-empty queue only when there is a tty, and
* either it is throttled or there is no more room in flip buffer.
*/
if (!list_empty(queue) && !tty_throttled(tty))
schedule_delayed_work(&port->push, 1);
/* If we're still connected, refill the USB RX queue. */
if (!disconnect && port->port_usb)
gs_start_rx(port);
spin_unlock_irq(&port->port_lock);
}
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
/* Queue all received data until the tty layer is ready for it. */
spin_lock(&port->port_lock);
list_add_tail(&req->list, &port->read_queue);
schedule_delayed_work(&port->push, 0);
spin_unlock(&port->port_lock);
}
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
spin_lock(&port->port_lock);
list_add(&req->list, &port->write_pool);
port->write_started--;
switch (req->status) {
default:
/* presumably a transient fault */
pr_warn("%s: unexpected %s status %d\n",
__func__, ep->name, req->status);
fallthrough;
case 0:
/* normal completion */
gs_start_tx(port);
break;
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
break;
}
spin_unlock(&port->port_lock);
}
static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
int *allocated)
{
struct usb_request *req;
while (!list_empty(head)) {
req = list_entry(head->next, struct usb_request, list);
list_del(&req->list);
gs_free_req(ep, req);
if (allocated)
(*allocated)--;
}
}
static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
void (*fn)(struct usb_ep *, struct usb_request *),
int *allocated)
{
int i;
struct usb_request *req;
int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
* do quite that many this time, don't fail ... we just won't
* be as speedy as we might otherwise be.
*/
for (i = 0; i < n; i++) {
req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
if (!req)
return list_empty(head) ? -ENOMEM : 0;
req->complete = fn;
list_add_tail(&req->list, head);
if (allocated)
(*allocated)++;
}
return 0;
}
/**
* gs_start_io - start USB I/O streams
* @port: port to use
* Context: holding port_lock; port_tty and port_usb are non-null
*
* We only start I/O when something is connected to both sides of
* this port. If nothing is listening on the host side, we may
* be pointlessly filling up our TX buffers and FIFO.
*/
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
struct usb_ep *ep;
int status;
unsigned started;
if (!port->port_usb || !port->port.tty)
return -EIO;
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)
return status;
status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
gs_write_complete, &port->write_allocated);
if (status) {
gs_free_requests(ep, head, &port->read_allocated);
return status;
}
/* queue read requests */
port->n_read = 0;
started = gs_start_rx(port);
if (started) {
gs_start_tx(port);
/* Unblock any pending writes into our circular buffer, in case
* we didn't in gs_start_tx() */
tty_wakeup(port->port.tty);
} else {
gs_free_requests(ep, head, &port->read_allocated);
gs_free_requests(port->port_usb->in, &port->write_pool,
&port->write_allocated);
status = -EIO;
}
return status;
}
/*-------------------------------------------------------------------------*/
/* TTY Driver */
/*
* gs_open sets up the link between a gs_port and its associated TTY.
* That link is broken *only* by TTY close(), and all driver methods
* know that.
*/
static int gs_open(struct tty_struct *tty, struct file *file)
{
int port_num = tty->index;
struct gs_port *port;
int status = 0;
mutex_lock(&ports[port_num].lock);
port = ports[port_num].port;
if (!port) {
status = -ENODEV;
goto out;
}
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
/*
* portmaster's mutex still protects from simultaneous open(),
* and close() can't happen, yet.
*/
status = kfifo_alloc(&port->port_write_buf,
WRITE_BUF_SIZE, GFP_KERNEL);
if (status) {
pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
port_num, tty, file);
goto out;
}
spin_lock_irq(&port->port_lock);
}
/* already open? Great. */
if (port->port.count++)
goto exit_unlock_port;
tty->driver_data = port;
port->port.tty = tty;
/* if connected, start the I/O stream */
if (port->port_usb) {
/* if port is suspended, wait for resume to start the I/O stream */
if (!port->suspended) {
struct gserial *gser = port->port_usb;
pr_debug("gs_open: start ttyGS%d\n", port->port_num);
gs_start_io(port);
if (gser->connect)
gser->connect(gser);
} else {
pr_debug("delay start of ttyGS%d\n", port->port_num);
port->start_delayed = true;
}
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
exit_unlock_port:
spin_unlock_irq(&port->port_lock);
out:
mutex_unlock(&ports[port_num].lock);
return status;
}
static int gs_close_flush_done(struct gs_port *p)
{
int cond;
/* return true on disconnect or empty buffer or if raced with open() */
spin_lock_irq(&p->port_lock);
cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
p->port.count > 1;
spin_unlock_irq(&p->port_lock);
return cond;
}
static void gs_close(struct tty_struct *tty, struct file *file)
{
struct gs_port *port = tty->driver_data;
struct gserial *gser;
spin_lock_irq(&port->port_lock);
if (port->port.count != 1) {
raced_with_open:
if (port->port.count == 0)
WARN_ON(1);
else
--port->port.count;
goto exit;
}
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
gser = port->port_usb;
if (gser && !port->suspended && gser->disconnect)
gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
* most GS_CLOSE_TIMEOUT seconds; then discard the rest
*/
if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
gs_close_flush_done(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
if (port->port.count != 1)
goto raced_with_open;
gser = port->port_usb;
}
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
* let the push async work fire again until we're re-opened.
*/
if (gser == NULL)
kfifo_free(&port->port_write_buf);
else
kfifo_reset(&port->port_write_buf);
port->start_delayed = false;
port->port.count = 0;
port->port.tty = NULL;
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
wake_up(&port->close_wait);
exit:
spin_unlock_irq(&port->port_lock);
}
static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
pr_vdebug("gs_write: ttyGS%d (%p) writing %zu bytes\n",
port->port_num, tty, count);
spin_lock_irqsave(&port->port_lock, flags);
if (count)
count = kfifo_in(&port->port_write_buf, buf, count);
/* treat count == 0 as flush_chars() */
if (port->port_usb)
gs_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return count;
}
static int gs_put_char(struct tty_struct *tty, u8 ch)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
int status;
pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n",
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
status = kfifo_put(&port->port_write_buf, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
}
static void gs_flush_chars(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
gs_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static unsigned int gs_write_room(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
unsigned int room = 0;
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
room = kfifo_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
pr_vdebug("gs_write_room: (%d,%p) room=%u\n",
port->port_num, tty, room);
return room;
}
static unsigned int gs_chars_in_buffer(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
chars = kfifo_len(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%u\n",
port->port_num, tty, chars);
return chars;
}
/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb) {
/* Kickstart read queue processing. We don't do xon/xoff,
* rts/cts, or other handshaking with the host, but if the
* read queue backs up enough we'll be NAKing OUT packets.
*/
pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
schedule_delayed_work(&port->push, 0);
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
static int gs_break_ctl(struct tty_struct *tty, int duration)
{
struct gs_port *port = tty->driver_data;
int status = 0;
struct gserial *gser;
pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
port->port_num, duration);
spin_lock_irq(&port->port_lock);
gser = port->port_usb;
if (gser && gser->send_break)
status = gser->send_break(gser, duration);
spin_unlock_irq(&port->port_lock);
return status;
}
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
.write = gs_write,
.put_char = gs_put_char,
.flush_chars = gs_flush_chars,
.write_room = gs_write_room,
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
};
/*-------------------------------------------------------------------------*/
static struct tty_driver *gs_tty_driver;
#ifdef CONFIG_U_SERIAL_CONSOLE
static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
struct gs_console *cons = req->context;
switch (req->status) {
default:
pr_warn("%s: unexpected %s status %d\n",
__func__, ep->name, req->status);
fallthrough;
case 0:
/* normal completion */
spin_lock(&cons->lock);
req->length = 0;
schedule_work(&cons->work);
spin_unlock(&cons->lock);
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
break;
}
}
static void __gs_console_push(struct gs_console *cons)
{
struct usb_request *req = cons->req;
struct usb_ep *ep;
size_t size;
if (!req)
return; /* disconnected */
if (req->length)
return; /* busy */
ep = cons->console.data;
size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
if (!size)
return;
if (cons->missed && ep->maxpacket >= 64) {
char buf[64];
size_t len;
len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
kfifo_in(&cons->buf, buf, len);
cons->missed = 0;
}
req->length = size;
spin_unlock_irq(&cons->lock);
if (usb_ep_queue(ep, req, GFP_ATOMIC))
req->length = 0;
spin_lock_irq(&cons->lock);
}
static void gs_console_work(struct work_struct *work)
{
struct gs_console *cons = container_of(work, struct gs_console, work);
spin_lock_irq(&cons->lock);
__gs_console_push(cons);
spin_unlock_irq(&cons->lock);
}
static void gs_console_write(struct console *co,
const char *buf, unsigned count)
{
struct gs_console *cons = container_of(co, struct gs_console, console);
unsigned long flags;
size_t n;
spin_lock_irqsave(&cons->lock, flags);
n = kfifo_in(&cons->buf, buf, count);
if (n < count)
cons->missed += count - n;
if (cons->req && !cons->req->length)
schedule_work(&cons->work);
spin_unlock_irqrestore(&cons->lock, flags);
}
static struct tty_driver *gs_console_device(struct console *co, int *index)
{
*index = co->index;
return gs_tty_driver;
}
static int gs_console_connect(struct gs_port *port)
{
struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
if (!cons)
return 0;
ep = port->port_usb->in;
req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
if (!req)
return -ENOMEM;
req->complete = gs_console_complete_out;
req->context = cons;
req->length = 0;
spin_lock(&cons->lock);
cons->req = req;
cons->console.data = ep;
spin_unlock(&cons->lock);
pr_debug("ttyGS%d: console connected!\n", port->port_num);
schedule_work(&cons->work);
return 0;
}
static void gs_console_disconnect(struct gs_port *port)
{
struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
if (!cons)
return;
spin_lock(&cons->lock);
req = cons->req;
ep = cons->console.data;
cons->req = NULL;
spin_unlock(&cons->lock);
if (!req)
return;
usb_ep_dequeue(ep, req);
gs_free_req(ep, req);
}
static int gs_console_init(struct gs_port *port)
{
struct gs_console *cons;
int err;
if (port->console)
return 0;
cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
if (!cons)
return -ENOMEM;
strcpy(cons->console.name, "ttyGS");
cons->console.write = gs_console_write;
cons->console.device = gs_console_device;
cons->console.flags = CON_PRINTBUFFER;
cons->console.index = port->port_num;
INIT_WORK(&cons->work, gs_console_work);
spin_lock_init(&cons->lock);
err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
if (err) {
pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
kfree(cons);
return err;
}
port->console = cons;
register_console(&cons->console);
spin_lock_irq(&port->port_lock);
if (port->port_usb)
gs_console_connect(port);
spin_unlock_irq(&port->port_lock);
return 0;
}
static void gs_console_exit(struct gs_port *port)
{
struct gs_console *cons = port->console;
if (!cons)
return;
unregister_console(&cons->console);
spin_lock_irq(&port->port_lock);
if (cons->req)
gs_console_disconnect(port);
spin_unlock_irq(&port->port_lock);
cancel_work_sync(&cons->work);
kfifo_free(&cons->buf);
kfree(cons);
port->console = NULL;
}
ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
struct gs_port *port;
bool enable;
int ret;
ret = kstrtobool(page, &enable);
if (ret)
return ret;
mutex_lock(&ports[port_num].lock);
port = ports[port_num].port;
if (WARN_ON(port == NULL)) {
ret = -ENXIO;
goto out;
}
if (enable)
ret = gs_console_init(port);
else
gs_console_exit(port);
out:
mutex_unlock(&ports[port_num].lock);
return ret < 0 ? ret : count;
}
EXPORT_SYMBOL_GPL(gserial_set_console);
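/*
* Illustrative usage, assuming a configfs-built gadget whose serial
* function exposes a "console" attribute wired to this helper:
*
*	echo 1 > functions/acm.GS0/console	(enable the USB console)
*	echo 0 > functions/acm.GS0/console	(tear it down)
*
* The "acm.GS0" path is hypothetical; it depends on the gadget layout.
*/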
ssize_t gserial_get_console(unsigned char port_num, char *page)
{
struct gs_port *port;
ssize_t ret;
mutex_lock(&ports[port_num].lock);
port = ports[port_num].port;
if (WARN_ON(port == NULL))
ret = -ENXIO;
else
ret = sprintf(page, "%u\n", !!port->console);
mutex_unlock(&ports[port_num].lock);
return ret;
}
EXPORT_SYMBOL_GPL(gserial_get_console);
#else
static int gs_console_connect(struct gs_port *port)
{
return 0;
}
static void gs_console_disconnect(struct gs_port *port)
{
}
static int gs_console_init(struct gs_port *port)
{
return -ENOSYS;
}
static void gs_console_exit(struct gs_port *port)
{
}
#endif
static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
struct gs_port *port;
int ret = 0;
mutex_lock(&ports[port_num].lock);
if (ports[port_num].port) {
ret = -EBUSY;
goto out;
}
port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
if (port == NULL) {
ret = -ENOMEM;
goto out;
}
tty_port_init(&port->port);
spin_lock_init(&port->port_lock);
init_waitqueue_head(&port->drain_wait);
init_waitqueue_head(&port->close_wait);
INIT_DELAYED_WORK(&port->push, gs_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port_num = port_num;
port->port_line_coding = *coding;
ports[port_num].port = port;
out:
mutex_unlock(&ports[port_num].lock);
return ret;
}
static int gs_closed(struct gs_port *port)
{
int cond;
spin_lock_irq(&port->port_lock);
cond = port->port.count == 0;
spin_unlock_irq(&port->port_lock);
return cond;
}
static void gserial_free_port(struct gs_port *port)
{
cancel_delayed_work_sync(&port->push);
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
WARN_ON(port->port_usb != NULL);
tty_port_destroy(&port->port);
kfree(port);
}
void gserial_free_line(unsigned char port_num)
{
struct gs_port *port;
mutex_lock(&ports[port_num].lock);
if (!ports[port_num].port) {
mutex_unlock(&ports[port_num].lock);
return;
}
port = ports[port_num].port;
gs_console_exit(port);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
tty_unregister_device(gs_tty_driver, port_num);
}
EXPORT_SYMBOL_GPL(gserial_free_line);
int gserial_alloc_line_no_console(unsigned char *line_num)
{
struct usb_cdc_line_coding coding;
struct gs_port *port;
struct device *tty_dev;
int ret;
int port_num;
coding.dwDTERate = cpu_to_le32(9600);
coding.bCharFormat = 8;
coding.bParityType = USB_CDC_NO_PARITY;
coding.bDataBits = USB_CDC_1_STOP_BITS;
for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
ret = gs_port_alloc(port_num, &coding);
if (ret == -EBUSY)
continue;
if (ret)
return ret;
break;
}
if (ret)
return ret;
/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
port = ports[port_num].port;
tty_dev = tty_port_register_device(&port->port,
gs_tty_driver, port_num, NULL);
if (IS_ERR(tty_dev)) {
pr_err("%s: failed to register tty for port %d, err %ld\n",
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
mutex_lock(&ports[port_num].lock);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
*line_num = port_num;
err:
return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);
int gserial_alloc_line(unsigned char *line_num)
{
int ret = gserial_alloc_line_no_console(line_num);
if (!ret && !*line_num) /* the USB console lives on ttyGS0 */
gs_console_init(ports[*line_num].port);
return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
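/*
* Illustrative sketch (names hypothetical): a serial-style function
* driver allocates its line when the function instance is created and
* releases it symmetrically on teardown:
*
*	u8 line;
*	int ret = gserial_alloc_line(&line);
*	if (ret)
*		return ret;
*	...
*	gserial_free_line(line);
*/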
/**
* gserial_connect - notify TTY I/O glue that USB link is active
* @gser: the function, set up with endpoints and descriptors
* @port_num: which port is active
* Context: any (usually from irq)
*
* This is called to activate endpoints and let the TTY layer know that
* the connection is active ... not unlike "carrier detect". It won't
* necessarily start I/O queues; unless the TTY is held open by any
* task, there would be no point. However, the endpoints will be
* activated so the USB host can perform I/O, subject to basic USB
* hardware flow control.
*
* Caller needs to have set up the endpoints and USB function in @dev
* before calling this, as well as the appropriate (speed-specific)
* endpoint descriptors, and also have allocated @port_num by calling
* @gserial_alloc_line().
*
* Returns negative errno or zero.
* On success, ep->driver_data will be overwritten.
*/
int gserial_connect(struct gserial *gser, u8 port_num)
{
struct gs_port *port;
unsigned long flags;
int status;
if (port_num >= MAX_U_SERIAL_PORTS)
return -ENXIO;
port = ports[port_num].port;
if (!port) {
pr_err("serial line %d not allocated.\n", port_num);
return -EINVAL;
}
if (port->port_usb) {
pr_err("serial line %d is in use.\n", port_num);
return -EBUSY;
}
/* activate the endpoints */
status = usb_ep_enable(gser->in);
if (status < 0)
return status;
gser->in->driver_data = port;
status = usb_ep_enable(gser->out);
if (status < 0)
goto fail_out;
gser->out->driver_data = port;
/* then tell the tty glue that I/O can work */
spin_lock_irqsave(&port->port_lock, flags);
gser->ioport = port;
port->port_usb = gser;
/* REVISIT unclear how best to handle this state...
* we don't really couple it with the Linux TTY.
*/
gser->port_line_coding = port->port_line_coding;
/* REVISIT if waiting on "carrier detect", signal. */
/* if it's already open, start I/O ... and notify the serial
* protocol about open/close status (connect/disconnect).
*/
if (port->port.count) {
pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
gs_start_io(port);
if (gser->connect)
gser->connect(gser);
} else {
if (gser->disconnect)
gser->disconnect(gser);
}
status = gs_console_connect(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
fail_out:
usb_ep_disable(gser->in);
return status;
}
EXPORT_SYMBOL_GPL(gserial_connect);
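/*
* Illustrative sketch: a function's set_alt() handler typically brings
* the link up once its endpoints are chosen, assuming a struct gserial
* embedded in the function's private data:
*
*	gser->in = in_ep;
*	gser->out = out_ep;
*	status = gserial_connect(gser, port_num);
*/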
/**
* gserial_disconnect - notify TTY I/O glue that USB link is inactive
* @gser: the function, on which gserial_connect() was called
* Context: any (usually from irq)
*
* This is called to deactivate endpoints and let the TTY layer know
* that the connection went inactive ... not unlike "hangup".
*
* On return, the state is as if gserial_connect() had never been called;
* there is no active USB I/O on these endpoints.
*/
void gserial_disconnect(struct gserial *gser)
{
struct gs_port *port = gser->ioport;
unsigned long flags;
if (!port)
return;
spin_lock_irqsave(&serial_port_lock, flags);
/* tell the TTY glue not to do I/O here any more */
spin_lock(&port->port_lock);
gs_console_disconnect(port);
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
port->port_usb = NULL;
gser->ioport = NULL;
if (port->port.count > 0) {
wake_up_interruptible(&port->drain_wait);
if (port->port.tty)
tty_hangup(port->port.tty);
}
port->suspended = false;
spin_unlock(&port->port_lock);
spin_unlock_irqrestore(&serial_port_lock, flags);
/* disable endpoints, aborting down any active I/O */
usb_ep_disable(gser->out);
usb_ep_disable(gser->in);
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
if (port->port.count == 0)
kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
gs_free_requests(gser->in, &port->write_pool, NULL);
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
void gserial_suspend(struct gserial *gser)
{
struct gs_port *port;
unsigned long flags;
spin_lock_irqsave(&serial_port_lock, flags);
port = gser->ioport;
if (!port) {
spin_unlock_irqrestore(&serial_port_lock, flags);
return;
}
spin_lock(&port->port_lock);
spin_unlock(&serial_port_lock);
port->suspended = true;
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_suspend);
void gserial_resume(struct gserial *gser)
{
struct gs_port *port;
unsigned long flags;
spin_lock_irqsave(&serial_port_lock, flags);
port = gser->ioport;
if (!port) {
spin_unlock_irqrestore(&serial_port_lock, flags);
return;
}
spin_lock(&port->port_lock);
spin_unlock(&serial_port_lock);
port->suspended = false;
if (!port->start_delayed) {
spin_unlock_irqrestore(&port->port_lock, flags);
return;
}
pr_debug("delayed start ttyGS%d\n", port->port_num);
gs_start_io(port);
if (gser->connect)
gser->connect(gser);
port->start_delayed = false;
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_resume);
static int __init userial_init(void)
{
struct tty_driver *driver;
unsigned i;
int status;
driver = tty_alloc_driver(MAX_U_SERIAL_PORTS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(driver))
return PTR_ERR(driver);
driver->driver_name = "g_serial";
driver->name = "ttyGS";
/* uses dynamically assigned dev_t values */
driver->type = TTY_DRIVER_TYPE_SERIAL;
driver->subtype = SERIAL_TYPE_NORMAL;
driver->init_termios = tty_std_termios;
/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
* MS-Windows. Otherwise, most of these flags shouldn't affect
* anything unless we were to actually hook up to a serial line.
*/
driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
driver->init_termios.c_ispeed = 9600;
driver->init_termios.c_ospeed = 9600;
tty_set_operations(driver, &gs_tty_ops);
for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
mutex_init(&ports[i].lock);
/* export the driver ... */
status = tty_register_driver(driver);
if (status) {
pr_err("%s: cannot register, err %d\n",
__func__, status);
goto fail;
}
gs_tty_driver = driver;
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
MAX_U_SERIAL_PORTS,
(MAX_U_SERIAL_PORTS == 1) ? "" : "s");
return status;
fail:
tty_driver_kref_put(driver);
return status;
}
module_init(userial_init);
static void __exit userial_cleanup(void)
{
tty_unregister_driver(gs_tty_driver);
tty_driver_kref_put(gs_tty_driver);
gs_tty_driver = NULL;
}
module_exit(userial_cleanup);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/u_serial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
*
* Copyright (C) 2016 Ruslan Bilovol <[email protected]>
* Copyright (C) 2021 Julian Scheel <[email protected]>
*
* This driver doesn't expect any real audio codec to be present
* on the device - the audio streams are simply sunk into and
* sourced from a virtual ALSA sound card created by the driver.
*
* This file is based on f_uac1.c which is
* Copyright (C) 2008 Bryan Wu <[email protected]>
* Copyright (C) 2008 Analog Devices, Inc
*/
#include <linux/usb/audio.h>
#include <linux/module.h>
#include "u_audio.h"
#include "u_uac1.h"
/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
#define UAC1_CHANNEL_MASK 0x0FFF
#define USB_OUT_FU_ID (out_feature_unit_desc->bUnitID)
#define USB_IN_FU_ID (in_feature_unit_desc->bUnitID)
#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
#define FUIN_EN(_opts) ((_opts)->p_mute_present \
|| (_opts)->p_volume_present)
#define FUOUT_EN(_opts) ((_opts)->c_mute_present \
|| (_opts)->c_volume_present)
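/*
* For example, setting p_mute_present or p_volume_present makes
* FUIN_EN() true, which pulls an IN-path feature unit (and the AC
* interrupt endpoint) into the descriptor set built below.
*/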
struct f_uac1 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
struct usb_ctrlrequest setup_cr; /* will be used in data stage */
/* Interrupt IN endpoint of AC interface */
struct usb_ep *int_ep;
atomic_t int_count;
int ctl_id; /* EP id */
int c_srate; /* current capture srate */
int p_srate; /* current playback srate */
};
static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
{
return container_of(f, struct f_uac1, g_audio.func);
}
static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
{
return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
}
/*
* DESCRIPTORS ... most are static, but strings and full
* configuration descriptors are built on demand.
*/
/*
* We have three interfaces - one AudioControl and two AudioStreaming
*
* The driver implements a simple UAC_1 topology.
* USB-OUT -> IT_1 -> OT_2 -> ALSA_Capture
* ALSA_Playback -> IT_3 -> OT_4 -> USB-IN
*/
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bNumEndpoints = DYNAMIC */
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
};
/* B.3.2 Class-Specific AC Interface Descriptor */
static struct uac1_ac_header_descriptor *ac_header_desc;
static struct uac_input_terminal_descriptor usb_out_it_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
.wChannelConfig = cpu_to_le16(0x3),
};
static struct uac1_output_terminal_descriptor io_out_ot_desc = {
.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
};
static struct uac_input_terminal_descriptor io_in_it_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
.wChannelConfig = cpu_to_le16(0x3),
};
static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
};
static struct uac_feature_unit_descriptor *in_feature_unit_desc;
static struct uac_feature_unit_descriptor *out_feature_unit_desc;
/* AC IN Interrupt Endpoint */
static struct usb_endpoint_descriptor ac_int_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(2),
.bInterval = 4,
};
/* B.4.1 Standard AS Interface Descriptor */
static struct usb_interface_descriptor as_out_interface_alt_0_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
static struct usb_interface_descriptor as_out_interface_alt_1_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
static struct usb_interface_descriptor as_in_interface_alt_0_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
static struct usb_interface_descriptor as_in_interface_alt_1_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
/* B.4.2 Class-Specific AS Interface Descriptor */
static struct uac1_as_header_descriptor as_out_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
/* .bTerminalLink = DYNAMIC */
.bDelay = 1,
.wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
static struct uac1_as_header_descriptor as_in_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
/* .bTerminalLink = DYNAMIC */
.bDelay = 1,
.wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(UAC_MAX_RATES);
#define uac_format_type_i_discrete_descriptor \
uac_format_type_i_discrete_descriptor_##UAC_MAX_RATES
static struct uac_format_type_i_discrete_descriptor as_out_type_i_desc = {
.bLength = 0, /* filled on rate setup */
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
.bFormatType = UAC_FORMAT_TYPE_I,
.bSubframeSize = 2,
.bBitResolution = 16,
.bSamFreqType = 0, /* filled on rate setup */
};
/* Standard ISO OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor as_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
| USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
.bInterval = 4,
};
/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 1,
.bLockDelayUnits = 1,
.wLockDelay = cpu_to_le16(1),
};
static struct uac_format_type_i_discrete_descriptor as_in_type_i_desc = {
.bLength = 0, /* filled on rate setup */
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
.bFormatType = UAC_FORMAT_TYPE_I,
.bSubframeSize = 2,
.bBitResolution = 16,
.bSamFreqType = 0, /* filled on rate setup */
};
/* Standard ISO IN Endpoint Descriptor */
static struct usb_endpoint_descriptor as_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
| USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
.bInterval = 4,
};
/* Class-specific AS ISO IN Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 1,
.bLockDelayUnits = 0,
.wLockDelay = 0,
};
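/*
* Descriptor list template. setup_descriptor() rewrites every slot at
* bind time to match the configured topology, so the initializers below
* mainly document the maximal layout.
*/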
static struct usb_descriptor_header *f_audio_desc[] = {
(struct usb_descriptor_header *)&ac_interface_desc,
(struct usb_descriptor_header *)&ac_header_desc,
(struct usb_descriptor_header *)&usb_out_it_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
(struct usb_descriptor_header *)&out_feature_unit_desc,
(struct usb_descriptor_header *)&io_in_it_desc,
(struct usb_descriptor_header *)&usb_in_ot_desc,
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&ac_int_ep_desc,
(struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
(struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
(struct usb_descriptor_header *)&as_out_header_desc,
(struct usb_descriptor_header *)&as_out_type_i_desc,
(struct usb_descriptor_header *)&as_out_ep_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
(struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
(struct usb_descriptor_header *)&as_in_header_desc,
(struct usb_descriptor_header *)&as_in_type_i_desc,
(struct usb_descriptor_header *)&as_in_ep_desc,
(struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
enum {
STR_AC_IF,
STR_USB_OUT_IT,
STR_USB_OUT_IT_CH_NAMES,
STR_IO_OUT_OT,
STR_IO_IN_IT,
STR_IO_IN_IT_CH_NAMES,
STR_USB_IN_OT,
STR_FU_IN,
STR_FU_OUT,
STR_AS_OUT_IF_ALT0,
STR_AS_OUT_IF_ALT1,
STR_AS_IN_IF_ALT0,
STR_AS_IN_IF_ALT1,
};
static struct usb_string strings_uac1[] = {
/* [STR_AC_IF].s = DYNAMIC, */
[STR_USB_OUT_IT].s = "Playback Input terminal",
[STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
[STR_IO_OUT_OT].s = "Playback Output terminal",
[STR_IO_IN_IT].s = "Capture Input terminal",
[STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
[STR_USB_IN_OT].s = "Capture Output terminal",
[STR_FU_IN].s = "Capture Volume",
[STR_FU_OUT].s = "Playback Volume",
[STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
[STR_AS_OUT_IF_ALT1].s = "Playback Active",
[STR_AS_IN_IF_ALT0].s = "Capture Inactive",
[STR_AS_IN_IF_ALT1].s = "Capture Active",
{ },
};
static struct usb_gadget_strings str_uac1 = {
.language = 0x0409, /* en-us */
.strings = strings_uac1,
};
static struct usb_gadget_strings *uac1_strings[] = {
&str_uac1,
NULL,
};
/*
* This function exposes an ALSA sound card that follows the USB Audio
* Class 1.0 spec.
*/
static void uac_cs_attr_sample_rate(struct usb_ep *ep, struct usb_request *req)
{
struct usb_function *fn = ep->driver_data;
struct usb_composite_dev *cdev = fn->config->cdev;
struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac1 *uac1 = func_to_uac1(fn);
u8 *buf = (u8 *)req->buf;
u32 val = 0;
if (req->actual != 3) {
WARNING(cdev, "Invalid data size for UAC_EP_CS_ATTR_SAMPLE_RATE.\n");
return;
}
val = buf[0] | (buf[1] << 8) | (buf[2] << 16);
if (uac1->ctl_id == (USB_DIR_IN | 2)) {
uac1->p_srate = val;
u_audio_set_playback_srate(agdev, uac1->p_srate);
} else if (uac1->ctl_id == (USB_DIR_OUT | 1)) {
uac1->c_srate = val;
u_audio_set_capture_srate(agdev, uac1->c_srate);
}
}
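/*
* Example: a host selecting 48000 Hz sends the 3-byte little-endian
* value 0x80 0xbb 0x00 (48000 == 0x00bb80) in the data stage decoded
* above.
*/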
static void audio_notify_complete(struct usb_ep *_ep, struct usb_request *req)
{
struct g_audio *audio = req->context;
struct f_uac1 *uac1 = func_to_uac1(&audio->func);
atomic_dec(&uac1->int_count);
kfree(req->buf);
usb_ep_free_request(_ep, req);
}
static int audio_notify(struct g_audio *audio, int unit_id, int cs)
{
struct f_uac1 *uac1 = func_to_uac1(&audio->func);
struct usb_request *req;
struct uac1_status_word *msg;
int ret;
if (!uac1->int_ep->enabled)
return 0;
if (atomic_inc_return(&uac1->int_count) > UAC1_DEF_INT_REQ_NUM) {
atomic_dec(&uac1->int_count);
return 0;
}
req = usb_ep_alloc_request(uac1->int_ep, GFP_ATOMIC);
if (req == NULL) {
ret = -ENOMEM;
goto err_dec_int_count;
}
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
if (msg == NULL) {
ret = -ENOMEM;
goto err_free_request;
}
msg->bStatusType = UAC1_STATUS_TYPE_IRQ_PENDING
| UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF;
msg->bOriginator = unit_id;
req->length = sizeof(*msg);
req->buf = msg;
req->context = audio;
req->complete = audio_notify_complete;
ret = usb_ep_queue(uac1->int_ep, req, GFP_ATOMIC);
if (ret)
goto err_free_msg;
return 0;
err_free_msg:
kfree(msg);
err_free_request:
usb_ep_free_request(uac1->int_ep, req);
err_dec_int_count:
atomic_dec(&uac1->int_count);
return ret;
}
static int
in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *audio = func_to_g_audio(fn);
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_MUTE) {
unsigned int mute;
u_audio_get_mute(audio, is_playback, &mute);
*(u8 *)req->buf = mute;
value = min_t(unsigned int, w_length, 1);
} else if (control_selector == UAC_FU_VOLUME) {
__le16 c;
s16 volume;
u_audio_get_volume(audio, is_playback, &volume);
c = cpu_to_le16(volume);
value = min_t(unsigned int, w_length, sizeof(c));
memcpy(req->buf, &c, value);
} else {
dev_err(&audio->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static int
in_rq_min(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *audio = func_to_g_audio(fn);
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_VOLUME) {
__le16 r;
s16 min_db;
if (is_playback)
min_db = opts->p_volume_min;
else
min_db = opts->c_volume_min;
r = cpu_to_le16(min_db);
value = min_t(unsigned int, w_length, sizeof(r));
memcpy(req->buf, &r, value);
} else {
dev_err(&audio->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static int
in_rq_max(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *audio = func_to_g_audio(fn);
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_VOLUME) {
__le16 r;
s16 max_db;
if (is_playback)
max_db = opts->p_volume_max;
else
max_db = opts->c_volume_max;
r = cpu_to_le16(max_db);
value = min_t(unsigned int, w_length, sizeof(r));
memcpy(req->buf, &r, value);
} else {
dev_err(&audio->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static int
in_rq_res(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *audio = func_to_g_audio(fn);
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
int value = -EOPNOTSUPP;
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_VOLUME) {
__le16 r;
s16 res_db;
if (is_playback)
res_db = opts->p_volume_res;
else
res_db = opts->c_volume_res;
r = cpu_to_le16(res_db);
value = min_t(unsigned int, w_length, sizeof(r));
memcpy(req->buf, &r, value);
} else {
dev_err(&audio->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return value;
}
static void
out_rq_cur_complete(struct usb_ep *ep, struct usb_request *req)
{
struct g_audio *audio = req->context;
struct usb_composite_dev *cdev = audio->func.config->cdev;
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
struct f_uac1 *uac1 = func_to_uac1(&audio->func);
struct usb_ctrlrequest *cr = &uac1->setup_cr;
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
if (req->status != 0) {
dev_dbg(&cdev->gadget->dev, "completion err %d\n", req->status);
return;
}
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
unsigned int is_playback = 0;
if (FUIN_EN(opts) && (entity_id == USB_IN_FU_ID))
is_playback = 1;
if (control_selector == UAC_FU_MUTE) {
u8 mute = *(u8 *)req->buf;
u_audio_set_mute(audio, is_playback, mute);
return;
} else if (control_selector == UAC_FU_VOLUME) {
__le16 *c = req->buf;
s16 volume;
volume = le16_to_cpu(*c);
u_audio_set_volume(audio, is_playback, volume);
return;
} else {
dev_err(&audio->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
usb_ep_set_halt(ep);
}
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
usb_ep_set_halt(ep);
}
}
static int
out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
struct g_audio *audio = func_to_g_audio(fn);
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
struct f_uac1 *uac1 = func_to_uac1(&audio->func);
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
u16 w_value = le16_to_cpu(cr->wValue);
u8 entity_id = (w_index >> 8) & 0xff;
u8 control_selector = w_value >> 8;
if ((FUIN_EN(opts) && (entity_id == USB_IN_FU_ID)) ||
(FUOUT_EN(opts) && (entity_id == USB_OUT_FU_ID))) {
memcpy(&uac1->setup_cr, cr, sizeof(*cr));
req->context = audio;
req->complete = out_rq_cur_complete;
return w_length;
} else {
dev_err(&audio->gadget->dev,
"%s:%d entity_id=%d control_selector=%d TODO!\n",
__func__, __LINE__, entity_id, control_selector);
}
return -EOPNOTSUPP;
}
static int ac_rq_in(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = f->config->cdev;
int value = -EOPNOTSUPP;
u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
u16 len = le16_to_cpu(ctrl->wLength);
u16 w_value = le16_to_cpu(ctrl->wValue);
DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
case UAC_GET_CUR:
return in_rq_cur(f, ctrl);
case UAC_GET_MIN:
return in_rq_min(f, ctrl);
case UAC_GET_MAX:
return in_rq_max(f, ctrl);
case UAC_GET_RES:
return in_rq_res(f, ctrl);
case UAC_GET_MEM:
break;
case UAC_GET_STAT:
value = len;
break;
default:
break;
}
return value;
}
static int audio_set_endpoint_req(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = f->config->cdev->req;
struct f_uac1 *uac1 = func_to_uac1(f);
int value = -EOPNOTSUPP;
u16 ep = le16_to_cpu(ctrl->wIndex);
u16 len = le16_to_cpu(ctrl->wLength);
u16 w_value = le16_to_cpu(ctrl->wValue);
u8 cs = w_value >> 8;
DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
case UAC_SET_CUR: {
if (cs == UAC_EP_CS_ATTR_SAMPLE_RATE) {
cdev->gadget->ep0->driver_data = f;
uac1->ctl_id = ep;
req->complete = uac_cs_attr_sample_rate;
}
value = len;
break;
}
case UAC_SET_MIN:
break;
case UAC_SET_MAX:
break;
case UAC_SET_RES:
break;
case UAC_SET_MEM:
break;
default:
break;
}
return value;
}
static int audio_get_endpoint_req(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = f->config->cdev->req;
struct f_uac1 *uac1 = func_to_uac1(f);
u8 *buf = (u8 *)req->buf;
int value = -EOPNOTSUPP;
u8 ep = le16_to_cpu(ctrl->wIndex);
u16 len = le16_to_cpu(ctrl->wLength);
u16 w_value = le16_to_cpu(ctrl->wValue);
u8 cs = w_value >> 8;
u32 val = 0;
DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
case UAC_GET_CUR: {
if (cs == UAC_EP_CS_ATTR_SAMPLE_RATE) {
if (ep == (USB_DIR_IN | 2))
val = uac1->p_srate;
else if (ep == (USB_DIR_OUT | 1))
val = uac1->c_srate;
buf[2] = (val >> 16) & 0xff;
buf[1] = (val >> 8) & 0xff;
buf[0] = val & 0xff;
}
value = len;
break;
}
case UAC_GET_MIN:
case UAC_GET_MAX:
case UAC_GET_RES:
value = len;
break;
case UAC_GET_MEM:
break;
default:
break;
}
return value;
}
static int
f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
/* composite driver infrastructure handles everything; interface
* activation uses set_alt().
*/
switch (ctrl->bRequestType) {
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_set_endpoint_req(f, ctrl);
break;
case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_get_endpoint_req(f, ctrl);
break;
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
if (ctrl->bRequest == UAC_SET_CUR)
value = out_rq_cur(f, ctrl);
break;
case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
value = ac_rq_in(f, ctrl);
break;
default:
ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (value >= 0) {
DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = 0;
req->length = value;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0)
ERROR(cdev, "audio response on err %d\n", value);
}
/* device either stalls (value < 0) or reports success */
return value;
}
static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_gadget *gadget = cdev->gadget;
struct device *dev = &gadget->dev;
struct g_audio *audio = func_to_g_audio(f);
struct f_uac1 *uac1 = func_to_uac1(f);
int ret = 0;
/* No i/f has more than 2 alt settings */
if (alt > 1) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
if (intf == uac1->ac_intf) {
/* Control I/f has only 1 AltSetting - 0 */
if (alt) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
/* restart interrupt endpoint */
if (uac1->int_ep) {
usb_ep_disable(uac1->int_ep);
config_ep_by_speed(gadget, &audio->func, uac1->int_ep);
usb_ep_enable(uac1->int_ep);
}
return 0;
}
if (intf == uac1->as_out_intf) {
uac1->as_out_alt = alt;
if (alt)
ret = u_audio_start_capture(&uac1->g_audio);
else
u_audio_stop_capture(&uac1->g_audio);
} else if (intf == uac1->as_in_intf) {
uac1->as_in_alt = alt;
if (alt)
ret = u_audio_start_playback(&uac1->g_audio);
else
u_audio_stop_playback(&uac1->g_audio);
} else {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
return ret;
}
static int f_audio_get_alt(struct usb_function *f, unsigned intf)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_gadget *gadget = cdev->gadget;
struct device *dev = &gadget->dev;
struct f_uac1 *uac1 = func_to_uac1(f);
if (intf == uac1->ac_intf)
return uac1->ac_alt;
else if (intf == uac1->as_out_intf)
return uac1->as_out_alt;
else if (intf == uac1->as_in_intf)
return uac1->as_in_alt;
else
dev_err(dev, "%s:%d Invalid Interface %d!\n",
__func__, __LINE__, intf);
return -EINVAL;
}
static void f_audio_disable(struct usb_function *f)
{
struct f_uac1 *uac1 = func_to_uac1(f);
uac1->as_out_alt = 0;
uac1->as_in_alt = 0;
u_audio_stop_playback(&uac1->g_audio);
u_audio_stop_capture(&uac1->g_audio);
if (uac1->int_ep)
usb_ep_disable(uac1->int_ep);
}
static void
f_audio_suspend(struct usb_function *f)
{
struct f_uac1 *uac1 = func_to_uac1(f);
u_audio_suspend(&uac1->g_audio);
}
/*-------------------------------------------------------------------------*/
static struct uac_feature_unit_descriptor *build_fu_desc(int chmask)
{
struct uac_feature_unit_descriptor *fu_desc;
int channels = num_channels(chmask);
int fu_desc_size = UAC_DT_FEATURE_UNIT_SIZE(channels);
fu_desc = kzalloc(fu_desc_size, GFP_KERNEL);
if (!fu_desc)
return NULL;
fu_desc->bLength = fu_desc_size;
fu_desc->bDescriptorType = USB_DT_CS_INTERFACE;
fu_desc->bDescriptorSubtype = UAC_FEATURE_UNIT;
fu_desc->bControlSize = 2;
/* bUnitID, bSourceID and bmaControls will be defined later */
return fu_desc;
}
/* B.3.2 Class-Specific AC Interface Descriptor */
static struct
uac1_ac_header_descriptor *build_ac_header_desc(struct f_uac1_opts *opts)
{
struct uac1_ac_header_descriptor *ac_desc;
int ac_header_desc_size;
int num_ifaces = 0;
if (EPOUT_EN(opts))
num_ifaces++;
if (EPIN_EN(opts))
num_ifaces++;
ac_header_desc_size = UAC_DT_AC_HEADER_SIZE(num_ifaces);
ac_desc = kzalloc(ac_header_desc_size, GFP_KERNEL);
if (!ac_desc)
return NULL;
ac_desc->bLength = ac_header_desc_size;
ac_desc->bDescriptorType = USB_DT_CS_INTERFACE;
ac_desc->bDescriptorSubtype = UAC_HEADER;
ac_desc->bcdADC = cpu_to_le16(0x0100);
ac_desc->bInCollection = num_ifaces;
/* wTotalLength and baInterfaceNr will be defined later */
return ac_desc;
}
/* Use macro to overcome line length limitation */
#define USBDHDR(p) (struct usb_descriptor_header *)(p)
static void setup_descriptor(struct f_uac1_opts *opts)
{
/* patch descriptors */
int i = 1; /* IDs start with 1 */
if (EPOUT_EN(opts))
usb_out_it_desc.bTerminalID = i++;
if (EPIN_EN(opts))
io_in_it_desc.bTerminalID = i++;
if (EPOUT_EN(opts))
io_out_ot_desc.bTerminalID = i++;
if (EPIN_EN(opts))
usb_in_ot_desc.bTerminalID = i++;
if (FUOUT_EN(opts))
out_feature_unit_desc->bUnitID = i++;
if (FUIN_EN(opts))
in_feature_unit_desc->bUnitID = i++;
if (FUIN_EN(opts)) {
usb_in_ot_desc.bSourceID = in_feature_unit_desc->bUnitID;
in_feature_unit_desc->bSourceID = io_in_it_desc.bTerminalID;
} else {
usb_in_ot_desc.bSourceID = io_in_it_desc.bTerminalID;
}
if (FUOUT_EN(opts)) {
io_out_ot_desc.bSourceID = out_feature_unit_desc->bUnitID;
out_feature_unit_desc->bSourceID = usb_out_it_desc.bTerminalID;
} else {
io_out_ot_desc.bSourceID = usb_out_it_desc.bTerminalID;
}
as_out_header_desc.bTerminalLink = usb_out_it_desc.bTerminalID;
as_in_header_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
ac_header_desc->wTotalLength = cpu_to_le16(ac_header_desc->bLength);
if (EPIN_EN(opts)) {
u16 len = le16_to_cpu(ac_header_desc->wTotalLength);
len += sizeof(usb_in_ot_desc);
len += sizeof(io_in_it_desc);
if (FUIN_EN(opts))
len += in_feature_unit_desc->bLength;
ac_header_desc->wTotalLength = cpu_to_le16(len);
}
if (EPOUT_EN(opts)) {
u16 len = le16_to_cpu(ac_header_desc->wTotalLength);
len += sizeof(usb_out_it_desc);
len += sizeof(io_out_ot_desc);
if (FUOUT_EN(opts))
len += out_feature_unit_desc->bLength;
ac_header_desc->wTotalLength = cpu_to_le16(len);
}
i = 0;
f_audio_desc[i++] = USBDHDR(&ac_interface_desc);
f_audio_desc[i++] = USBDHDR(ac_header_desc);
if (EPOUT_EN(opts)) {
f_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
f_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
if (FUOUT_EN(opts))
f_audio_desc[i++] = USBDHDR(out_feature_unit_desc);
}
if (EPIN_EN(opts)) {
f_audio_desc[i++] = USBDHDR(&io_in_it_desc);
f_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
if (FUIN_EN(opts))
f_audio_desc[i++] = USBDHDR(in_feature_unit_desc);
}
if (FUOUT_EN(opts) || FUIN_EN(opts))
f_audio_desc[i++] = USBDHDR(&ac_int_ep_desc);
if (EPOUT_EN(opts)) {
f_audio_desc[i++] = USBDHDR(&as_out_interface_alt_0_desc);
f_audio_desc[i++] = USBDHDR(&as_out_interface_alt_1_desc);
f_audio_desc[i++] = USBDHDR(&as_out_header_desc);
f_audio_desc[i++] = USBDHDR(&as_out_type_i_desc);
f_audio_desc[i++] = USBDHDR(&as_out_ep_desc);
f_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
}
if (EPIN_EN(opts)) {
f_audio_desc[i++] = USBDHDR(&as_in_interface_alt_0_desc);
f_audio_desc[i++] = USBDHDR(&as_in_interface_alt_1_desc);
f_audio_desc[i++] = USBDHDR(&as_in_header_desc);
f_audio_desc[i++] = USBDHDR(&as_in_type_i_desc);
f_audio_desc[i++] = USBDHDR(&as_in_ep_desc);
f_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
}
f_audio_desc[i] = NULL;
}
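/*
 * Worked example (illustration only): for a gadget with both capture and
 * playback enabled and no feature units, the wTotalLength accumulation
 * above works out, assuming the standard UAC1 descriptor sizes from
 * <linux/usb/audio.h>, to:
 *
 *	UAC_DT_AC_HEADER_SIZE(2)	10 bytes (header, 2 interfaces)
 *	+ sizeof(usb_out_it_desc)	12 bytes (input terminal)
 *	+ sizeof(io_out_ot_desc)	 9 bytes (output terminal)
 *	+ sizeof(io_in_it_desc)		12 bytes (input terminal)
 *	+ sizeof(usb_in_ot_desc)	 9 bytes (output terminal)
 *	= 52 bytes
 */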
static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
{
struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
if (!opts->p_chmask && !opts->c_chmask) {
dev_err(dev, "Error: no playback and capture channels\n");
return -EINVAL;
} else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
dev_err(dev, "Error: unsupported playback channels mask\n");
return -EINVAL;
} else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
dev_err(dev, "Error: unsupported capture channels mask\n");
return -EINVAL;
} else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
dev_err(dev, "Error: incorrect playback sample size\n");
return -EINVAL;
} else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
dev_err(dev, "Error: incorrect capture sample size\n");
return -EINVAL;
} else if (!opts->p_srates[0]) {
dev_err(dev, "Error: incorrect playback sampling rate\n");
return -EINVAL;
} else if (!opts->c_srates[0]) {
dev_err(dev, "Error: incorrect capture sampling rate\n");
return -EINVAL;
}
if (opts->p_volume_max <= opts->p_volume_min) {
dev_err(dev, "Error: incorrect playback volume max/min\n");
return -EINVAL;
} else if (opts->c_volume_max <= opts->c_volume_min) {
dev_err(dev, "Error: incorrect capture volume max/min\n");
return -EINVAL;
} else if (opts->p_volume_res <= 0) {
dev_err(dev, "Error: negative/zero playback volume resolution\n");
return -EINVAL;
} else if (opts->c_volume_res <= 0) {
dev_err(dev, "Error: negative/zero capture volume resolution\n");
return -EINVAL;
}
if ((opts->p_volume_max - opts->p_volume_min) % opts->p_volume_res) {
dev_err(dev, "Error: incorrect playback volume resolution\n");
return -EINVAL;
} else if ((opts->c_volume_max - opts->c_volume_min) % opts->c_volume_res) {
dev_err(dev, "Error: incorrect capture volume resolution\n");
return -EINVAL;
}
return 0;
}
/* audio function driver setup/binding */
static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct usb_gadget *gadget = cdev->gadget;
struct device *dev = &gadget->dev;
struct f_uac1 *uac1 = func_to_uac1(f);
struct g_audio *audio = func_to_g_audio(f);
struct f_uac1_opts *audio_opts;
struct usb_ep *ep = NULL;
struct usb_string *us;
int ba_iface_id;
int status;
int idx, i;
status = f_audio_validate_opts(audio, dev);
if (status)
return status;
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
strings_uac1[STR_AC_IF].s = audio_opts->function_name;
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
if (IS_ERR(us))
return PTR_ERR(us);
ac_header_desc = build_ac_header_desc(audio_opts);
if (!ac_header_desc)
return -ENOMEM;
if (FUOUT_EN(audio_opts)) {
out_feature_unit_desc = build_fu_desc(audio_opts->c_chmask);
if (!out_feature_unit_desc) {
status = -ENOMEM;
goto fail;
}
}
if (FUIN_EN(audio_opts)) {
in_feature_unit_desc = build_fu_desc(audio_opts->p_chmask);
if (!in_feature_unit_desc) {
status = -ENOMEM;
goto err_free_fu;
}
}
ac_interface_desc.iInterface = us[STR_AC_IF].id;
usb_out_it_desc.iTerminal = us[STR_USB_OUT_IT].id;
usb_out_it_desc.iChannelNames = us[STR_USB_OUT_IT_CH_NAMES].id;
io_out_ot_desc.iTerminal = us[STR_IO_OUT_OT].id;
as_out_interface_alt_0_desc.iInterface = us[STR_AS_OUT_IF_ALT0].id;
as_out_interface_alt_1_desc.iInterface = us[STR_AS_OUT_IF_ALT1].id;
io_in_it_desc.iTerminal = us[STR_IO_IN_IT].id;
io_in_it_desc.iChannelNames = us[STR_IO_IN_IT_CH_NAMES].id;
usb_in_ot_desc.iTerminal = us[STR_USB_IN_OT].id;
as_in_interface_alt_0_desc.iInterface = us[STR_AS_IN_IF_ALT0].id;
as_in_interface_alt_1_desc.iInterface = us[STR_AS_IN_IF_ALT1].id;
if (FUOUT_EN(audio_opts)) {
u8 *i_feature;
i_feature = (u8 *)out_feature_unit_desc +
out_feature_unit_desc->bLength - 1;
*i_feature = us[STR_FU_OUT].id;
}
if (FUIN_EN(audio_opts)) {
u8 *i_feature;
i_feature = (u8 *)in_feature_unit_desc +
in_feature_unit_desc->bLength - 1;
*i_feature = us[STR_FU_IN].id;
}
/* Set channel numbers */
usb_out_it_desc.bNrChannels = num_channels(audio_opts->c_chmask);
usb_out_it_desc.wChannelConfig = cpu_to_le16(audio_opts->c_chmask);
as_out_type_i_desc.bNrChannels = num_channels(audio_opts->c_chmask);
as_out_type_i_desc.bSubframeSize = audio_opts->c_ssize;
as_out_type_i_desc.bBitResolution = audio_opts->c_ssize * 8;
io_in_it_desc.bNrChannels = num_channels(audio_opts->p_chmask);
io_in_it_desc.wChannelConfig = cpu_to_le16(audio_opts->p_chmask);
as_in_type_i_desc.bNrChannels = num_channels(audio_opts->p_chmask);
as_in_type_i_desc.bSubframeSize = audio_opts->p_ssize;
as_in_type_i_desc.bBitResolution = audio_opts->p_ssize * 8;
if (FUOUT_EN(audio_opts)) {
__le16 *bma = (__le16 *)&out_feature_unit_desc->bmaControls[0];
u32 control = 0;
if (audio_opts->c_mute_present)
control |= UAC_FU_MUTE;
if (audio_opts->c_volume_present)
control |= UAC_FU_VOLUME;
*bma = cpu_to_le16(control);
}
if (FUIN_EN(audio_opts)) {
__le16 *bma = (__le16 *)&in_feature_unit_desc->bmaControls[0];
u32 control = 0;
if (audio_opts->p_mute_present)
control |= UAC_FU_MUTE;
if (audio_opts->p_volume_present)
control |= UAC_FU_VOLUME;
*bma = cpu_to_le16(control);
}
/* Set sample rates */
for (i = 0, idx = 0; i < UAC_MAX_RATES; i++) {
if (audio_opts->c_srates[i] == 0)
break;
memcpy(as_out_type_i_desc.tSamFreq[idx++],
&audio_opts->c_srates[i], 3);
}
as_out_type_i_desc.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(idx);
as_out_type_i_desc.bSamFreqType = idx;
for (i = 0, idx = 0; i < UAC_MAX_RATES; i++) {
if (audio_opts->p_srates[i] == 0)
break;
memcpy(as_in_type_i_desc.tSamFreq[idx++],
&audio_opts->p_srates[i], 3);
}
as_in_type_i_desc.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(idx);
as_in_type_i_desc.bSamFreqType = idx;
uac1->p_srate = audio_opts->p_srates[0];
uac1->c_srate = audio_opts->c_srates[0];
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto err_free_fu;
ac_interface_desc.bInterfaceNumber = status;
uac1->ac_intf = status;
uac1->ac_alt = 0;
ba_iface_id = 0;
if (EPOUT_EN(audio_opts)) {
status = usb_interface_id(c, f);
if (status < 0)
goto err_free_fu;
as_out_interface_alt_0_desc.bInterfaceNumber = status;
as_out_interface_alt_1_desc.bInterfaceNumber = status;
ac_header_desc->baInterfaceNr[ba_iface_id++] = status;
uac1->as_out_intf = status;
uac1->as_out_alt = 0;
}
if (EPIN_EN(audio_opts)) {
status = usb_interface_id(c, f);
if (status < 0)
goto err_free_fu;
as_in_interface_alt_0_desc.bInterfaceNumber = status;
as_in_interface_alt_1_desc.bInterfaceNumber = status;
ac_header_desc->baInterfaceNr[ba_iface_id++] = status;
uac1->as_in_intf = status;
uac1->as_in_alt = 0;
}
audio->gadget = gadget;
status = -ENODEV;
ac_interface_desc.bNumEndpoints = 0;
/* allocate AC interrupt endpoint */
if (FUOUT_EN(audio_opts) || FUIN_EN(audio_opts)) {
ep = usb_ep_autoconfig(cdev->gadget, &ac_int_ep_desc);
if (!ep)
goto err_free_fu;
uac1->int_ep = ep;
uac1->int_ep->desc = &ac_int_ep_desc;
ac_interface_desc.bNumEndpoints = 1;
}
/* allocate instance-specific endpoints */
if (EPOUT_EN(audio_opts)) {
ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
if (!ep)
goto err_free_fu;
audio->out_ep = ep;
audio->out_ep->desc = &as_out_ep_desc;
}
if (EPIN_EN(audio_opts)) {
ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
if (!ep)
goto err_free_fu;
audio->in_ep = ep;
audio->in_ep->desc = &as_in_ep_desc;
}
setup_descriptor(audio_opts);
/* copy descriptors, and track endpoint copies */
status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
NULL);
if (status)
goto err_free_fu;
audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
audio->params.c_chmask = audio_opts->c_chmask;
memcpy(audio->params.c_srates, audio_opts->c_srates,
sizeof(audio->params.c_srates));
audio->params.c_ssize = audio_opts->c_ssize;
if (FUIN_EN(audio_opts)) {
audio->params.p_fu.id = USB_IN_FU_ID;
audio->params.p_fu.mute_present = audio_opts->p_mute_present;
audio->params.p_fu.volume_present =
audio_opts->p_volume_present;
audio->params.p_fu.volume_min = audio_opts->p_volume_min;
audio->params.p_fu.volume_max = audio_opts->p_volume_max;
audio->params.p_fu.volume_res = audio_opts->p_volume_res;
}
audio->params.p_chmask = audio_opts->p_chmask;
memcpy(audio->params.p_srates, audio_opts->p_srates,
sizeof(audio->params.p_srates));
audio->params.p_ssize = audio_opts->p_ssize;
if (FUOUT_EN(audio_opts)) {
audio->params.c_fu.id = USB_OUT_FU_ID;
audio->params.c_fu.mute_present = audio_opts->c_mute_present;
audio->params.c_fu.volume_present =
audio_opts->c_volume_present;
audio->params.c_fu.volume_min = audio_opts->c_volume_min;
audio->params.c_fu.volume_max = audio_opts->c_volume_max;
audio->params.c_fu.volume_res = audio_opts->c_volume_res;
}
audio->params.req_number = audio_opts->req_number;
audio->params.fb_max = FBACK_FAST_MAX;
if (FUOUT_EN(audio_opts) || FUIN_EN(audio_opts))
audio->notify = audio_notify;
status = g_audio_setup(audio, "UAC1_PCM", "UAC1_Gadget");
if (status)
goto err_card_register;
return 0;
err_card_register:
usb_free_all_descriptors(f);
err_free_fu:
kfree(out_feature_unit_desc);
out_feature_unit_desc = NULL;
kfree(in_feature_unit_desc);
in_feature_unit_desc = NULL;
fail:
kfree(ac_header_desc);
ac_header_desc = NULL;
return status;
}
/*-------------------------------------------------------------------------*/
static inline struct f_uac1_opts *to_f_uac1_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uac1_opts,
func_inst.group);
}
static void f_uac1_attr_release(struct config_item *item)
{
struct f_uac1_opts *opts = to_f_uac1_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations f_uac1_item_ops = {
.release = f_uac1_attr_release,
};
#define uac1_kstrtou32 kstrtou32
#define uac1_kstrtos16 kstrtos16
#define uac1_kstrtobool(s, base, res) kstrtobool((s), (res))
static const char *u32_fmt = "%u\n";
static const char *s16_fmt = "%hd\n";
static const char *bool_fmt = "%u\n";
#define UAC1_ATTRIBUTE(type, name) \
static ssize_t f_uac1_opts_##name##_show( \
struct config_item *item, \
char *page) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = sprintf(page, type##_fmt, opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac1_opts_##name##_store( \
struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int ret; \
type num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = uac1_kstrto##type(page, 0, &num); \
if (ret) \
goto end; \
\
opts->name = num; \
ret = len; \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac1_opts_, name)
#define UAC1_RATE_ATTRIBUTE(name) \
static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int result = 0; \
int i; \
\
mutex_lock(&opts->lock); \
page[0] = '\0'; \
for (i = 0; i < UAC_MAX_RATES; i++) { \
if (opts->name##s[i] == 0) \
break; \
result += sprintf(page + strlen(page), "%u,", \
opts->name##s[i]); \
} \
if (strlen(page) > 0) \
page[strlen(page) - 1] = '\n'; \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
char *split_page = NULL; \
int ret = -EINVAL; \
char *token; \
u32 num; \
int i; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
i = 0; \
memset(opts->name##s, 0x00, sizeof(opts->name##s)); \
split_page = kstrdup(page, GFP_KERNEL); \
while ((token = strsep(&split_page, ",")) != NULL) { \
ret = kstrtou32(token, 0, &num); \
if (ret) \
goto end; \
\
opts->name##s[i++] = num; \
ret = len; \
}; \
\
end: \
kfree(split_page); \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac1_opts_, name)
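/*
 * Usage sketch (illustration only; the function directory name is
 * hypothetical): rate attributes generated by UAC1_RATE_ATTRIBUTE()
 * accept a comma-separated list, so several discrete sample rates can
 * be offered at once:
 *
 *	echo "44100,48000,96000" > functions/uac1.usb0/p_srate
 *	cat functions/uac1.usb0/p_srate
 *	44100,48000,96000
 *
 * The first rate in the list becomes the initial rate; f_audio_bind()
 * copies p_srates[0] into uac1->p_srate.
 */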
#define UAC1_ATTRIBUTE_STRING(name) \
static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
char *page) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = snprintf(page, sizeof(opts->name), "%s", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int ret = 0; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = snprintf(opts->name, min(sizeof(opts->name), len), \
"%s", page); \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_uac1_opts_, name)
UAC1_ATTRIBUTE(u32, c_chmask);
UAC1_RATE_ATTRIBUTE(c_srate);
UAC1_ATTRIBUTE(u32, c_ssize);
UAC1_ATTRIBUTE(u32, p_chmask);
UAC1_RATE_ATTRIBUTE(p_srate);
UAC1_ATTRIBUTE(u32, p_ssize);
UAC1_ATTRIBUTE(u32, req_number);
UAC1_ATTRIBUTE(bool, p_mute_present);
UAC1_ATTRIBUTE(bool, p_volume_present);
UAC1_ATTRIBUTE(s16, p_volume_min);
UAC1_ATTRIBUTE(s16, p_volume_max);
UAC1_ATTRIBUTE(s16, p_volume_res);
UAC1_ATTRIBUTE(bool, c_mute_present);
UAC1_ATTRIBUTE(bool, c_volume_present);
UAC1_ATTRIBUTE(s16, c_volume_min);
UAC1_ATTRIBUTE(s16, c_volume_max);
UAC1_ATTRIBUTE(s16, c_volume_res);
UAC1_ATTRIBUTE_STRING(function_name);
static struct configfs_attribute *f_uac1_attrs[] = {
&f_uac1_opts_attr_c_chmask,
&f_uac1_opts_attr_c_srate,
&f_uac1_opts_attr_c_ssize,
&f_uac1_opts_attr_p_chmask,
&f_uac1_opts_attr_p_srate,
&f_uac1_opts_attr_p_ssize,
&f_uac1_opts_attr_req_number,
&f_uac1_opts_attr_p_mute_present,
&f_uac1_opts_attr_p_volume_present,
&f_uac1_opts_attr_p_volume_min,
&f_uac1_opts_attr_p_volume_max,
&f_uac1_opts_attr_p_volume_res,
&f_uac1_opts_attr_c_mute_present,
&f_uac1_opts_attr_c_volume_present,
&f_uac1_opts_attr_c_volume_min,
&f_uac1_opts_attr_c_volume_max,
&f_uac1_opts_attr_c_volume_res,
&f_uac1_opts_attr_function_name,
NULL,
};
static const struct config_item_type f_uac1_func_type = {
.ct_item_ops = &f_uac1_item_ops,
.ct_attrs = f_uac1_attrs,
.ct_owner = THIS_MODULE,
};
static void f_audio_free_inst(struct usb_function_instance *f)
{
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
kfree(opts);
}
static struct usb_function_instance *f_audio_alloc_inst(void)
{
struct f_uac1_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = f_audio_free_inst;
config_group_init_type_name(&opts->func_inst.group, "",
&f_uac1_func_type);
opts->c_chmask = UAC1_DEF_CCHMASK;
opts->c_srates[0] = UAC1_DEF_CSRATE;
opts->c_ssize = UAC1_DEF_CSSIZE;
opts->p_chmask = UAC1_DEF_PCHMASK;
opts->p_srates[0] = UAC1_DEF_PSRATE;
opts->p_ssize = UAC1_DEF_PSSIZE;
opts->p_mute_present = UAC1_DEF_MUTE_PRESENT;
opts->p_volume_present = UAC1_DEF_VOLUME_PRESENT;
opts->p_volume_min = UAC1_DEF_MIN_DB;
opts->p_volume_max = UAC1_DEF_MAX_DB;
opts->p_volume_res = UAC1_DEF_RES_DB;
opts->c_mute_present = UAC1_DEF_MUTE_PRESENT;
opts->c_volume_present = UAC1_DEF_VOLUME_PRESENT;
opts->c_volume_min = UAC1_DEF_MIN_DB;
opts->c_volume_max = UAC1_DEF_MAX_DB;
opts->c_volume_res = UAC1_DEF_RES_DB;
opts->req_number = UAC1_DEF_REQ_NUM;
snprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
return &opts->func_inst;
}
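/*
 * Configfs usage sketch (illustration only; the gadget and UDC names are
 * hypothetical).  A minimal UAC1 function relying on the defaults set in
 * f_audio_alloc_inst() could be composed like this:
 *
 *	cd /sys/kernel/config/usb_gadget/g1
 *	mkdir functions/uac1.usb0
 *	echo 3 > functions/uac1.usb0/p_chmask	# stereo playback
 *	ln -s functions/uac1.usb0 configs/c.1
 *	echo my-udc > UDC
 */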
static void f_audio_free(struct usb_function *f)
{
struct g_audio *audio;
struct f_uac1_opts *opts;
audio = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
}
static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct g_audio *audio = func_to_g_audio(f);
g_audio_cleanup(audio);
usb_free_all_descriptors(f);
kfree(out_feature_unit_desc);
out_feature_unit_desc = NULL;
kfree(in_feature_unit_desc);
in_feature_unit_desc = NULL;
kfree(ac_header_desc);
ac_header_desc = NULL;
audio->gadget = NULL;
}
static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
{
struct f_uac1 *uac1;
struct f_uac1_opts *opts;
/* allocate and initialize one new instance */
uac1 = kzalloc(sizeof(*uac1), GFP_KERNEL);
if (!uac1)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_uac1_opts, func_inst);
mutex_lock(&opts->lock);
++opts->refcnt;
mutex_unlock(&opts->lock);
uac1->g_audio.func.name = "uac1_func";
uac1->g_audio.func.bind = f_audio_bind;
uac1->g_audio.func.unbind = f_audio_unbind;
uac1->g_audio.func.set_alt = f_audio_set_alt;
uac1->g_audio.func.get_alt = f_audio_get_alt;
uac1->g_audio.func.setup = f_audio_setup;
uac1->g_audio.func.disable = f_audio_disable;
uac1->g_audio.func.suspend = f_audio_suspend;
uac1->g_audio.func.free_func = f_audio_free;
return &uac1->g_audio.func;
}
DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ruslan Bilovol");
| linux-master | drivers/usb/gadget/function/f_uac1.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_fs.c -- user mode file system API for USB composite function controllers
*
* Copyright (C) 2010 Samsung Electronics
* Author: Michal Nazarewicz <[email protected]>
*
* Based on inode.c (GadgetFS) which was:
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
*/
/* #define DEBUG */
/* #define VERBOSE_DEBUG */
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/fs_parser.h>
#include <linux/hid.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <linux/usb/ccid.h>
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
#include <linux/aio.h>
#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/eventfd.h>
#include "u_fs.h"
#include "u_f.h"
#include "u_os_desc.h"
#include "configfs.h"
#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
__attribute__((malloc));
/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
static void ffs_data_closed(struct ffs_data *ffs);
/* Called with ffs->mutex held; take over ownership of data. */
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
/* The function structure ***************************************************/
struct ffs_ep;
struct ffs_function {
struct usb_configuration *conf;
struct usb_gadget *gadget;
struct ffs_data *ffs;
struct ffs_ep *eps;
u8 eps_revmap[16];
short *interfaces_nums;
struct usb_function function;
};
static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
return container_of(f, struct ffs_function, function);
}
static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
return (enum ffs_setup_state)
cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}
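/*
 * Note on the idiom above: cmpxchg() atomically replaces
 * FFS_SETUP_CANCELLED with FFS_NO_SETUP and returns the *previous*
 * state.  Callers therefore compare the return value against
 * FFS_SETUP_CANCELLED to detect a setup request that was cancelled
 * since they last looked, while any other state is left untouched.
 */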
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);
static int ffs_func_bind(struct usb_configuration *,
struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
const struct usb_ctrlrequest *);
static bool ffs_func_req_match(struct usb_function *,
const struct usb_ctrlrequest *,
bool config0);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
/* The endpoints structures *************************************************/
struct ffs_ep {
struct usb_ep *ep; /* P: ffs->eps_lock */
struct usb_request *req; /* P: epfile->mutex */
/* [0]: full speed, [1]: high speed, [2]: super speed */
struct usb_endpoint_descriptor *descs[3];
u8 num;
};
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
struct ffs_data *ffs;
struct ffs_ep *ep; /* P: ffs->eps_lock */
struct dentry *dentry;
/*
* Buffer for holding data from partial reads which may happen since
* we’re rounding user read requests to a multiple of a max packet size.
*
	 * The pointer is initialised to NULL and may be set by the
	 * __ffs_epfile_read_data function to point to a temporary buffer.
*
* In normal operation, calls to __ffs_epfile_read_buffered will consume
* data from said buffer and eventually free it. Importantly, while the
* function is using the buffer, it sets the pointer to NULL. This is
* all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
* can never run concurrently (they are synchronised by epfile->mutex)
* so the latter will not assign a new value to the pointer.
*
* Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
* valid) and sets the pointer to READ_BUFFER_DROP value. This special
* value is crux of the synchronisation between ffs_func_eps_disable and
* __ffs_epfile_read_data.
*
* Once __ffs_epfile_read_data is about to finish it will try to set the
* pointer back to its old value (as described above), but seeing as the
* pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
* the buffer.
*
* == State transitions ==
*
* • ptr == NULL: (initial state)
* ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
* ◦ __ffs_epfile_read_buffered: nop
* ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
* ◦ reading finishes: n/a, not in ‘and reading’ state
* • ptr == DROP:
* ◦ __ffs_epfile_read_buffer_free: nop
* ◦ __ffs_epfile_read_buffered: go to ptr == NULL
* ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
* ◦ reading finishes: n/a, not in ‘and reading’ state
* • ptr == buf:
* ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
* ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
* ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
* is always called first
* ◦ reading finishes: n/a, not in ‘and reading’ state
* • ptr == NULL and reading:
* ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
* ◦ __ffs_epfile_read_buffered: n/a, mutex is held
* ◦ __ffs_epfile_read_data: n/a, mutex is held
* ◦ reading finishes and …
* … all data read: free buf, go to ptr == NULL
* … otherwise: go to ptr == buf and reading
* • ptr == DROP and reading:
* ◦ __ffs_epfile_read_buffer_free: nop
* ◦ __ffs_epfile_read_buffered: n/a, mutex is held
* ◦ __ffs_epfile_read_data: n/a, mutex is held
* ◦ reading finishes: free buf, go to ptr == DROP
*/
struct ffs_buffer *read_buffer;
#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
char name[5];
unsigned char in; /* P: ffs->eps_lock */
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
};
struct ffs_buffer {
size_t length;
char *data;
char storage[];
};
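/*
 * Note (illustration only): storage[] is a flexible array member, so a
 * buffer holding n bytes of payload is allocated in a single call:
 *
 *	buf = kmalloc(struct_size(buf, storage, n), GFP_KERNEL);
 *
 * which is what __ffs_epfile_read_data() below does; ->data then points
 * into storage[] and is advanced as the buffer is consumed.
 */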
/* ffs_io_data structure ***************************************************/
struct ffs_io_data {
bool aio;
bool read;
struct kiocb *kiocb;
struct iov_iter data;
const void *to_free;
char *buf;
struct mm_struct *mm;
struct work_struct work;
struct usb_ep *ep;
struct usb_request *req;
struct sg_table sgt;
bool use_sg;
struct ffs_data *ffs;
int status;
struct completion done;
};
struct ffs_desc_helper {
struct ffs_data *ffs;
unsigned interfaces_count;
unsigned eps_count;
};
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
static struct dentry *
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
const struct file_operations *fops);
/* Devices management *******************************************************/
DEFINE_MUTEX(ffs_lock);
EXPORT_SYMBOL_GPL(ffs_lock);
static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
static void ffs_release_dev(struct ffs_dev *ffs_dev);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);
/* Misc helper functions ****************************************************/
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
__attribute__((warn_unused_result, nonnull));
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
__attribute__((warn_unused_result, nonnull));
/* Control file aka ep0 *****************************************************/
static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
struct ffs_data *ffs = req->context;
complete(&ffs->ep0req_completion);
}
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
__releases(&ffs->ev.waitq.lock)
{
struct usb_request *req = ffs->ep0req;
int ret;
if (!req) {
spin_unlock_irq(&ffs->ev.waitq.lock);
return -EINVAL;
}
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
spin_unlock_irq(&ffs->ev.waitq.lock);
req->buf = data;
req->length = len;
	/*
	 * The UDC layer requires a buffer to be provided even for a ZLP,
	 * although it should not use it at all.  Provide a poisoned pointer
	 * to catch a possible bug in the driver.
	 */
if (req->buf == NULL)
req->buf = (void *)0xDEADBABE;
reinit_completion(&ffs->ep0req_completion);
ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
if (ret < 0)
return ret;
ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
if (ret) {
usb_ep_dequeue(ffs->gadget->ep0, req);
return -EINTR;
}
ffs->setup_state = FFS_NO_SETUP;
return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
if (ffs->ev.can_stall) {
pr_vdebug("ep0 stall\n");
usb_ep_set_halt(ffs->gadget->ep0);
ffs->setup_state = FFS_NO_SETUP;
return -EL2HLT;
} else {
pr_debug("bogus ep0 stall!\n");
return -ESRCH;
}
}
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
size_t len, loff_t *ptr)
{
struct ffs_data *ffs = file->private_data;
ssize_t ret;
char *data;
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
/* Check state */
switch (ffs->state) {
case FFS_READ_DESCRIPTORS:
case FFS_READ_STRINGS:
/* Copy data */
if (len < 16) {
ret = -EINVAL;
break;
}
data = ffs_prepare_buffer(buf, len);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
/* Handle data */
if (ffs->state == FFS_READ_DESCRIPTORS) {
pr_info("read descriptors\n");
ret = __ffs_data_got_descs(ffs, data, len);
if (ret < 0)
break;
ffs->state = FFS_READ_STRINGS;
ret = len;
} else {
pr_info("read strings\n");
ret = __ffs_data_got_strings(ffs, data, len);
if (ret < 0)
break;
ret = ffs_epfiles_create(ffs);
if (ret) {
ffs->state = FFS_CLOSING;
break;
}
ffs->state = FFS_ACTIVE;
mutex_unlock(&ffs->mutex);
ret = ffs_ready(ffs);
if (ret < 0) {
ffs->state = FFS_CLOSING;
return ret;
}
return len;
}
break;
case FFS_ACTIVE:
data = NULL;
		/*
		 * We're called from user space, so we can use _irq
		 * rather than _irqsave.
		 */
spin_lock_irq(&ffs->ev.waitq.lock);
switch (ffs_setup_state_clear_cancelled(ffs)) {
case FFS_SETUP_CANCELLED:
ret = -EIDRM;
goto done_spin;
case FFS_NO_SETUP:
ret = -ESRCH;
goto done_spin;
case FFS_SETUP_PENDING:
break;
}
/* FFS_SETUP_PENDING */
if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
spin_unlock_irq(&ffs->ev.waitq.lock);
ret = __ffs_ep0_stall(ffs);
break;
}
/* FFS_SETUP_PENDING and not stall */
len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
spin_unlock_irq(&ffs->ev.waitq.lock);
data = ffs_prepare_buffer(buf, len);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
spin_lock_irq(&ffs->ev.waitq.lock);
/*
* We are guaranteed to be still in FFS_ACTIVE state
* but the state of setup could have changed from
* FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
* to check for that. If that happened we copied data
* from user space in vain but it's unlikely.
*
* For sure we are not in FFS_NO_SETUP since this is
* the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
* transition can be performed and it's protected by
* mutex.
*/
if (ffs_setup_state_clear_cancelled(ffs) ==
FFS_SETUP_CANCELLED) {
ret = -EIDRM;
done_spin:
spin_unlock_irq(&ffs->ev.waitq.lock);
} else {
/* unlocks spinlock */
ret = __ffs_ep0_queue_wait(ffs, data, len);
}
kfree(data);
break;
default:
ret = -EBADFD;
break;
}
mutex_unlock(&ffs->mutex);
return ret;
}
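/*
 * User-space flow sketch (illustration only; error handling is omitted
 * and the mount point is hypothetical).  The state machine above expects
 * a daemon to feed ep0 with two writes and then service events:
 *
 *	int ep0 = open("/dev/ffs/ep0", O_RDWR);
 *	write(ep0, descs, descs_len);	// FFS_READ_DESCRIPTORS state
 *	write(ep0, strs, strs_len);	// FFS_READ_STRINGS state
 *	for (;;) {
 *		struct usb_functionfs_event event;
 *		read(ep0, &event, sizeof(event));	// FFS_ACTIVE
 *		// handle FUNCTIONFS_BIND/ENABLE/SETUP/... here
 *	}
 *
 * where "descs" is a blob starting with FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 * and "strs" one starting with FUNCTIONFS_STRINGS_MAGIC, as defined in
 * <linux/usb/functionfs.h>.
 */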
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
size_t n)
__releases(&ffs->ev.waitq.lock)
{
	/*
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * the size of the ffs->ev.types array (which is four), so that's how
	 * much space we reserve.
	 */
struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
const size_t size = n * sizeof *events;
unsigned i = 0;
memset(events, 0, size);
do {
events[i].type = ffs->ev.types[i];
if (events[i].type == FUNCTIONFS_SETUP) {
events[i].u.setup = ffs->ev.setup;
ffs->setup_state = FFS_SETUP_PENDING;
}
} while (++i < n);
ffs->ev.count -= n;
if (ffs->ev.count)
memmove(ffs->ev.types, ffs->ev.types + n,
ffs->ev.count * sizeof *ffs->ev.types);
spin_unlock_irq(&ffs->ev.waitq.lock);
mutex_unlock(&ffs->mutex);
return copy_to_user(buf, events, size) ? -EFAULT : size;
}
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
size_t len, loff_t *ptr)
{
struct ffs_data *ffs = file->private_data;
char *data = NULL;
size_t n;
int ret;
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
/* Check state */
if (ffs->state != FFS_ACTIVE) {
ret = -EBADFD;
goto done_mutex;
}
	/*
	 * We're called from user space, so we can use _irq rather than
	 * _irqsave.
	 */
spin_lock_irq(&ffs->ev.waitq.lock);
switch (ffs_setup_state_clear_cancelled(ffs)) {
case FFS_SETUP_CANCELLED:
ret = -EIDRM;
break;
case FFS_NO_SETUP:
n = len / sizeof(struct usb_functionfs_event);
if (!n) {
ret = -EINVAL;
break;
}
if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
ret = -EAGAIN;
break;
}
if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
ffs->ev.count)) {
ret = -EINTR;
break;
}
/* unlocks spinlock */
return __ffs_ep0_read_events(ffs, buf,
min(n, (size_t)ffs->ev.count));
case FFS_SETUP_PENDING:
if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
spin_unlock_irq(&ffs->ev.waitq.lock);
ret = __ffs_ep0_stall(ffs);
goto done_mutex;
}
len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
spin_unlock_irq(&ffs->ev.waitq.lock);
if (len) {
data = kmalloc(len, GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto done_mutex;
}
}
spin_lock_irq(&ffs->ev.waitq.lock);
/* See ffs_ep0_write() */
if (ffs_setup_state_clear_cancelled(ffs) ==
FFS_SETUP_CANCELLED) {
ret = -EIDRM;
break;
}
/* unlocks spinlock */
ret = __ffs_ep0_queue_wait(ffs, data, len);
if ((ret > 0) && (copy_to_user(buf, data, len)))
ret = -EFAULT;
goto done_mutex;
default:
ret = -EBADFD;
break;
}
spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
mutex_unlock(&ffs->mutex);
kfree(data);
return ret;
}
static int ffs_ep0_open(struct inode *inode, struct file *file)
{
struct ffs_data *ffs = inode->i_private;
if (ffs->state == FFS_CLOSING)
return -EBUSY;
file->private_data = ffs;
ffs_data_opened(ffs);
return stream_open(inode, file);
}
static int ffs_ep0_release(struct inode *inode, struct file *file)
{
struct ffs_data *ffs = file->private_data;
ffs_data_closed(ffs);
return 0;
}
static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
{
struct ffs_data *ffs = file->private_data;
struct usb_gadget *gadget = ffs->gadget;
long ret;
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
} else if (gadget && gadget->ops->ioctl) {
ret = gadget->ops->ioctl(gadget, code, value);
} else {
ret = -ENOTTY;
}
return ret;
}
static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
{
struct ffs_data *ffs = file->private_data;
__poll_t mask = EPOLLWRNORM;
int ret;
poll_wait(file, &ffs->ev.waitq, wait);
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
if (ret < 0)
return mask;
switch (ffs->state) {
case FFS_READ_DESCRIPTORS:
case FFS_READ_STRINGS:
mask |= EPOLLOUT;
break;
case FFS_ACTIVE:
switch (ffs->setup_state) {
case FFS_NO_SETUP:
if (ffs->ev.count)
mask |= EPOLLIN;
break;
case FFS_SETUP_PENDING:
case FFS_SETUP_CANCELLED:
mask |= (EPOLLIN | EPOLLOUT);
break;
}
break;
case FFS_CLOSING:
break;
case FFS_DEACTIVATED:
break;
}
mutex_unlock(&ffs->mutex);
return mask;
}
static const struct file_operations ffs_ep0_operations = {
.llseek = no_llseek,
.open = ffs_ep0_open,
.write = ffs_ep0_write,
.read = ffs_ep0_read,
.release = ffs_ep0_release,
.unlocked_ioctl = ffs_ep0_ioctl,
.poll = ffs_ep0_poll,
};
/* "Normal" endpoints operations ********************************************/
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
struct ffs_io_data *io_data = req->context;
if (req->status)
io_data->status = req->status;
else
io_data->status = req->actual;
complete(&io_data->done);
}
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
{
ssize_t ret = copy_to_iter(data, data_len, iter);
if (ret == data_len)
return ret;
if (iov_iter_count(iter))
return -EFAULT;
/*
* Dear user space developer!
*
* TL;DR: To stop getting below error message in your kernel log, change
* user space code using functionfs to align read buffers to a max
* packet size.
*
	 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
	 * packet size. When an unaligned buffer is passed to functionfs, it
	 * internally uses a larger, aligned buffer so that such UDCs are happy.
	 *
	 * Unfortunately, this means that the host may send more data than was
	 * requested in the read(2) system call. f_fs doesn’t know what to do
	 * with that excess data so it simply drops it.
	 *
	 * Had the buffer been aligned in the first place, no such problem
	 * would have happened.
	 *
	 * Data may be dropped only in AIO reads. Synchronous reads are handled
	 * by splitting a request into multiple parts. This splitting may still
	 * be a problem though, so it’s likely best to align the buffer
	 * regardless of whether it is AIO or not.
*
* This only affects OUT endpoints, i.e. reading data with a read(2),
* aio_read(2) etc. system calls. Writing data to an IN endpoint is not
* affected.
*/
pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
"Align read buffer size to max packet size to avoid the problem.\n",
data_len, ret);
return ret;
}
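/*
 * User-space sketch of the advice above (illustration only; ep_fd and
 * the 512-byte max packet size are assumptions): round the read size up
 * to a multiple of the endpoint's wMaxPacketSize so no excess data is
 * ever dropped:
 *
 *	size_t maxpkt = 512;	// from the endpoint descriptor
 *	size_t len = ((want + maxpkt - 1) / maxpkt) * maxpkt;
 *	char *buf = malloc(len);
 *	ssize_t n = read(ep_fd, buf, len);
 */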
/*
 * Allocate a virtually contiguous buffer and create a scatterlist
 * describing it.
 * @sgt - pointer to a place to be filled with sg_table contents
 * @sz  - required buffer size
 */
static void *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
{
struct page **pages;
void *vaddr, *ptr;
unsigned int n_pages;
int i;
vaddr = vmalloc(sz);
if (!vaddr)
return NULL;
n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
vfree(vaddr);
return NULL;
}
for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
pages[i] = vmalloc_to_page(ptr);
if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) {
kvfree(pages);
vfree(vaddr);
return NULL;
}
kvfree(pages);
return vaddr;
}
static inline void *ffs_alloc_buffer(struct ffs_io_data *io_data,
size_t data_len)
{
if (io_data->use_sg)
return ffs_build_sg_list(&io_data->sgt, data_len);
return kmalloc(data_len, GFP_KERNEL);
}
static inline void ffs_free_buffer(struct ffs_io_data *io_data)
{
if (!io_data->buf)
return;
if (io_data->use_sg) {
sg_free_table(&io_data->sgt);
vfree(io_data->buf);
} else {
kfree(io_data->buf);
}
}
static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work);
int ret = io_data->status;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
kthread_use_mm(io_data->mm);
ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
kthread_unuse_mm(io_data->mm);
}
io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
if (io_data->read)
kfree(io_data->to_free);
ffs_free_buffer(io_data);
kfree(io_data);
}
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
struct usb_request *req)
{
struct ffs_io_data *io_data = req->context;
struct ffs_data *ffs = io_data->ffs;
io_data->status = req->status ? req->status : req->actual;
usb_ep_free_request(_ep, req);
INIT_WORK(&io_data->work, ffs_user_copy_worker);
queue_work(ffs->io_completion_wq, &io_data->work);
}
static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
{
/*
* See comment in struct ffs_epfile for full read_buffer pointer
* synchronisation story.
*/
struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
if (buf && buf != READ_BUFFER_DROP)
kfree(buf);
}
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
struct iov_iter *iter)
{
/*
* Null out epfile->read_buffer so ffs_func_eps_disable does not free
* the buffer while we are using it. See comment in struct ffs_epfile
* for full read_buffer pointer synchronisation story.
*/
struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret;
if (!buf || buf == READ_BUFFER_DROP)
return 0;
ret = copy_to_iter(buf->data, buf->length, iter);
if (buf->length == ret) {
kfree(buf);
return ret;
}
if (iov_iter_count(iter)) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
if (cmpxchg(&epfile->read_buffer, NULL, buf))
kfree(buf);
return ret;
}
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
void *data, int data_len,
struct iov_iter *iter)
{
struct ffs_buffer *buf;
ssize_t ret = copy_to_iter(data, data_len, iter);
if (data_len == ret)
return ret;
if (iov_iter_count(iter))
return -EFAULT;
/* See ffs_copy_to_iter for more context. */
pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
data_len, ret);
data_len -= ret;
buf = kmalloc(struct_size(buf, storage, data_len), GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->length = data_len;
buf->data = buf->storage;
memcpy(buf->storage, data + ret, flex_array_size(buf, storage, data_len));
/*
* At this point read_buffer is NULL or READ_BUFFER_DROP (if
* ffs_func_eps_disable has been called in the meanwhile). See comment
* in struct ffs_epfile for full read_buffer pointer synchronisation
* story.
*/
if (cmpxchg(&epfile->read_buffer, NULL, buf))
kfree(buf);
return ret;
}
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
struct usb_request *req;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
int halt;
/* Are we still active? */
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
return -EINTR;
}
/* Do we halt? */
halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc)
return -EINVAL;
/* We will be using request and read_buffer */
ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
if (ret)
goto error;
/* Allocate & copy */
if (!halt) {
struct usb_gadget *gadget;
		/*
		 * Do we have buffered data from a previous partial read?
		 * Check for the synchronous case only, because we have no
		 * facility to ‘wake up’ a pending asynchronous read and push
		 * buffered data to it, which we would need to make things
		 * behave consistently.
		 */
if (!io_data->aio && io_data->read) {
ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
if (ret)
goto error_mutex;
}
		/*
		 * If we _do_ wait above, epfile->ffs->gadget might become
		 * NULL before the waiting completes, so do not assign to
		 * 'gadget' earlier.
		 */
gadget = epfile->ffs->gadget;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
if (epfile->ep != ep) {
ret = -ESHUTDOWN;
goto error_lock;
}
data_len = iov_iter_count(&io_data->data);
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
if (io_data->read)
data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
spin_unlock_irq(&epfile->ffs->eps_lock);
data = ffs_alloc_buffer(io_data, data_len);
if (!data) {
ret = -ENOMEM;
goto error_mutex;
}
if (!io_data->read &&
!copy_from_iter_full(data, data_len, &io_data->data)) {
ret = -EFAULT;
goto error_mutex;
}
}
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) {
/* In the meantime, endpoint got disabled or changed. */
ret = -ESHUTDOWN;
} else if (halt) {
ret = usb_ep_set_halt(ep->ep);
if (!ret)
ret = -EBADMSG;
} else if (data_len == -EINVAL) {
		/*
		 * Sanity check: data_len cannot actually be used
		 * uninitialized here, but some compilers complain about
		 * this situation anyway.  To keep the code free of
		 * warnings, data_len is initialized to -EINVAL at its
		 * declaration, which also means we can no longer rely on
		 * the compiler to warn if a future change really does use
		 * it uninitialized.  Hence this redundant runtime check.
		 */
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} else if (!io_data->aio) {
bool interrupted = false;
req = ep->req;
if (io_data->use_sg) {
req->buf = NULL;
req->sg = io_data->sgt.sgl;
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
req->num_sgs = 0;
}
req->length = data_len;
io_data->buf = data;
init_completion(&io_data->done);
req->context = io_data;
req->complete = ffs_epfile_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (ret < 0)
goto error_lock;
spin_unlock_irq(&epfile->ffs->eps_lock);
if (wait_for_completion_interruptible(&io_data->done)) {
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) {
ret = -ESHUTDOWN;
goto error_lock;
}
			/*
			 * To avoid a race condition with
			 * ffs_epfile_io_complete, dequeue the request first
			 * and only then check the status.  The
			 * usb_ep_dequeue() API should guarantee that there is
			 * no race with the req->complete callback.
			 */
usb_ep_dequeue(ep->ep, req);
spin_unlock_irq(&epfile->ffs->eps_lock);
wait_for_completion(&io_data->done);
interrupted = io_data->status < 0;
}
if (interrupted)
ret = -EINTR;
else if (io_data->read && io_data->status > 0)
ret = __ffs_epfile_read_data(epfile, data, io_data->status,
&io_data->data);
else
ret = io_data->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
} else {
if (io_data->use_sg) {
req->buf = NULL;
req->sg = io_data->sgt.sgl;
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
req->num_sgs = 0;
}
req->length = data_len;
io_data->buf = data;
io_data->ep = ep->ep;
io_data->req = req;
io_data->ffs = epfile->ffs;
req->context = io_data;
req->complete = ffs_epfile_async_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (ret) {
io_data->req = NULL;
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
ret = -EIOCBQUEUED;
/*
* Do not kfree the buffer in this function. It will be freed
* by ffs_user_copy_worker.
*/
data = NULL;
}
error_lock:
spin_unlock_irq(&epfile->ffs->eps_lock);
error_mutex:
mutex_unlock(&epfile->mutex);
error:
if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
ffs_free_buffer(io_data);
return ret;
}
static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
return stream_open(inode, file);
}
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
unsigned long flags;
int value;
spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
if (io_data && io_data->ep && io_data->req)
value = usb_ep_dequeue(io_data->ep, io_data->req);
else
value = -EINVAL;
spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
return value;
}
static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->aio = true;
} else {
memset(p, 0, sizeof(*p));
p->aio = false;
}
p->read = false;
p->kiocb = kiocb;
p->data = *from;
p->mm = current->mm;
kiocb->private = p;
if (p->aio)
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
return res;
if (p->aio)
kfree(p);
else
*from = p->data;
return res;
}
static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->aio = true;
} else {
memset(p, 0, sizeof(*p));
p->aio = false;
}
p->read = true;
p->kiocb = kiocb;
if (p->aio) {
p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
if (!iter_is_ubuf(&p->data) && !p->to_free) {
kfree(p);
return -ENOMEM;
}
} else {
p->data = *to;
p->to_free = NULL;
}
p->mm = current->mm;
kiocb->private = p;
if (p->aio)
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
return res;
if (p->aio) {
kfree(p->to_free);
kfree(p);
} else {
*to = p->data;
}
return res;
}
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
__ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
return 0;
}
static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
struct ffs_epfile *epfile = file->private_data;
struct ffs_ep *ep;
int ret;
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
return -EINTR;
}
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
if (epfile->ep != ep) {
spin_unlock_irq(&epfile->ffs->eps_lock);
return -ESHUTDOWN;
}
switch (code) {
case FUNCTIONFS_FIFO_STATUS:
ret = usb_ep_fifo_status(epfile->ep->ep);
break;
case FUNCTIONFS_FIFO_FLUSH:
usb_ep_fifo_flush(epfile->ep->ep);
ret = 0;
break;
case FUNCTIONFS_CLEAR_HALT:
ret = usb_ep_clear_halt(epfile->ep->ep);
break;
case FUNCTIONFS_ENDPOINT_REVMAP:
ret = epfile->ep->num;
break;
case FUNCTIONFS_ENDPOINT_DESC:
{
int desc_idx;
struct usb_endpoint_descriptor desc1, *desc;
switch (epfile->ffs->gadget->speed) {
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
desc_idx = 2;
break;
case USB_SPEED_HIGH:
desc_idx = 1;
break;
default:
desc_idx = 0;
}
desc = epfile->ep->descs[desc_idx];
memcpy(&desc1, desc, desc->bLength);
spin_unlock_irq(&epfile->ffs->eps_lock);
ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
if (ret)
ret = -EFAULT;
return ret;
}
default:
ret = -ENOTTY;
}
spin_unlock_irq(&epfile->ffs->eps_lock);
return ret;
}
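/*
 * User-space sketch (illustration only; ep_fd is an assumption): fetching
 * the descriptor currently in effect for an endpoint file, as handled by
 * the FUNCTIONFS_ENDPOINT_DESC case above:
 *
 *	struct usb_endpoint_descriptor desc;
 *
 *	if (ioctl(ep_fd, FUNCTIONFS_ENDPOINT_DESC, &desc) == 0)
 *		printf("wMaxPacketSize = %u\n",
 *		       le16toh(desc.wMaxPacketSize));
 */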
static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
.open = ffs_epfile_open,
.write_iter = ffs_epfile_write_iter,
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
/* File system and super block operations ***********************************/
/*
* Mounting the file system creates a controller file, used first for
* function configuration then later for event monitoring.
*/
static struct inode *__must_check
ffs_sb_make_inode(struct super_block *sb, void *data,
const struct file_operations *fops,
const struct inode_operations *iops,
struct ffs_file_perms *perms)
{
struct inode *inode;
inode = new_inode(sb);
if (inode) {
struct timespec64 ts = inode_set_ctime_current(inode);
inode->i_ino = get_next_ino();
inode->i_mode = perms->mode;
inode->i_uid = perms->uid;
inode->i_gid = perms->gid;
inode->i_atime = ts;
inode->i_mtime = ts;
inode->i_private = data;
if (fops)
inode->i_fop = fops;
if (iops)
inode->i_op = iops;
}
return inode;
}
/* Create "regular" file */
static struct dentry *ffs_sb_create_file(struct super_block *sb,
const char *name, void *data,
const struct file_operations *fops)
{
struct ffs_data *ffs = sb->s_fs_info;
struct dentry *dentry;
struct inode *inode;
dentry = d_alloc_name(sb->s_root, name);
if (!dentry)
return NULL;
inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
if (!inode) {
dput(dentry);
return NULL;
}
d_add(dentry, inode);
return dentry;
}
/* Super block */
static const struct super_operations ffs_sb_operations = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
struct ffs_sb_fill_data {
struct ffs_file_perms perms;
umode_t root_mode;
const char *dev_name;
bool no_disconnect;
struct ffs_data *ffs_data;
};
static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
{
struct ffs_sb_fill_data *data = fc->fs_private;
struct inode *inode;
struct ffs_data *ffs = data->ffs_data;
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = FUNCTIONFS_MAGIC;
sb->s_op = &ffs_sb_operations;
sb->s_time_gran = 1;
/* Root inode */
data->perms.mode = data->root_mode;
inode = ffs_sb_make_inode(sb, NULL,
&simple_dir_operations,
&simple_dir_inode_operations,
&data->perms);
sb->s_root = d_make_root(inode);
if (!sb->s_root)
return -ENOMEM;
/* EP0 file */
if (!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations))
return -ENOMEM;
return 0;
}
enum {
Opt_no_disconnect,
Opt_rmode,
Opt_fmode,
Opt_mode,
Opt_uid,
Opt_gid,
};
static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
fsparam_bool ("no_disconnect", Opt_no_disconnect),
fsparam_u32 ("rmode", Opt_rmode),
fsparam_u32 ("fmode", Opt_fmode),
fsparam_u32 ("mode", Opt_mode),
fsparam_u32 ("uid", Opt_uid),
fsparam_u32 ("gid", Opt_gid),
{}
};
static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct ffs_sb_fill_data *data = fc->fs_private;
struct fs_parse_result result;
int opt;
opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_no_disconnect:
data->no_disconnect = result.boolean;
break;
case Opt_rmode:
data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
break;
case Opt_fmode:
data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
break;
case Opt_mode:
data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
break;
case Opt_uid:
data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(data->perms.uid))
goto unmapped_value;
break;
case Opt_gid:
data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(data->perms.gid))
goto unmapped_value;
break;
default:
return -ENOPARAM;
}
return 0;
unmapped_value:
return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
}
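/*
 * Mount-time usage sketch (illustration only; the instance name and
 * mount point are hypothetical).  The parameters parsed above map to
 * mount options in the usual way:
 *
 *	mount -t functionfs \
 *	      -o no_disconnect=1,mode=0770,uid=1000,gid=1000 \
 *	      usb0 /dev/ffs
 *
 * where "usb0" (fc->source) must match the name of a registered
 * function instance, see ffs_fs_get_tree() below.
 */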
/*
* Set up the superblock for a mount.
*/
static int ffs_fs_get_tree(struct fs_context *fc)
{
struct ffs_sb_fill_data *ctx = fc->fs_private;
struct ffs_data *ffs;
int ret;
if (!fc->source)
return invalf(fc, "No source specified");
ffs = ffs_data_new(fc->source);
if (!ffs)
return -ENOMEM;
ffs->file_perms = ctx->perms;
ffs->no_disconnect = ctx->no_disconnect;
ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
if (!ffs->dev_name) {
ffs_data_put(ffs);
return -ENOMEM;
}
ret = ffs_acquire_dev(ffs->dev_name, ffs);
if (ret) {
ffs_data_put(ffs);
return ret;
}
ctx->ffs_data = ffs;
return get_tree_nodev(fc, ffs_sb_fill);
}
static void ffs_fs_free_fc(struct fs_context *fc)
{
struct ffs_sb_fill_data *ctx = fc->fs_private;
if (ctx) {
if (ctx->ffs_data) {
ffs_data_put(ctx->ffs_data);
}
kfree(ctx);
}
}
static const struct fs_context_operations ffs_fs_context_ops = {
.free = ffs_fs_free_fc,
.parse_param = ffs_fs_parse_param,
.get_tree = ffs_fs_get_tree,
};
static int ffs_fs_init_fs_context(struct fs_context *fc)
{
struct ffs_sb_fill_data *ctx;
ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->perms.mode = S_IFREG | 0600;
ctx->perms.uid = GLOBAL_ROOT_UID;
ctx->perms.gid = GLOBAL_ROOT_GID;
ctx->root_mode = S_IFDIR | 0500;
ctx->no_disconnect = false;
fc->fs_private = ctx;
fc->ops = &ffs_fs_context_ops;
return 0;
}
static void
ffs_fs_kill_sb(struct super_block *sb)
{
kill_litter_super(sb);
if (sb->s_fs_info)
ffs_data_closed(sb->s_fs_info);
}
static struct file_system_type ffs_fs_type = {
.owner = THIS_MODULE,
.name = "functionfs",
.init_fs_context = ffs_fs_init_fs_context,
.parameters = ffs_fs_fs_parameters,
.kill_sb = ffs_fs_kill_sb,
};
MODULE_ALIAS_FS("functionfs");
/* Driver's main init/cleanup functions *************************************/
static int functionfs_init(void)
{
int ret;
ret = register_filesystem(&ffs_fs_type);
if (!ret)
pr_info("file system registered\n");
else
pr_err("failed registering file system (%d)\n", ret);
return ret;
}
static void functionfs_cleanup(void)
{
pr_info("unloading\n");
unregister_filesystem(&ffs_fs_type);
}
/* ffs_data and ffs_function construction and destruction code **************/
static void ffs_data_clear(struct ffs_data *ffs);
static void ffs_data_reset(struct ffs_data *ffs);
static void ffs_data_get(struct ffs_data *ffs)
{
refcount_inc(&ffs->ref);
}
static void ffs_data_opened(struct ffs_data *ffs)
{
refcount_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
}
static void ffs_data_put(struct ffs_data *ffs)
{
if (refcount_dec_and_test(&ffs->ref)) {
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
ffs_release_dev(ffs->private_data);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name);
kfree(ffs);
}
}
static void ffs_data_closed(struct ffs_data *ffs)
{
struct ffs_epfile *epfiles;
unsigned long flags;
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
spin_lock_irqsave(&ffs->eps_lock, flags);
epfiles = ffs->epfiles;
ffs->epfiles = NULL;
spin_unlock_irqrestore(&ffs->eps_lock,
flags);
if (epfiles)
ffs_epfiles_destroy(epfiles,
ffs->eps_count);
if (ffs->setup_state == FFS_SETUP_PENDING)
__ffs_ep0_stall(ffs);
} else {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
}
if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
ffs_data_put(ffs);
}
static struct ffs_data *ffs_data_new(const char *dev_name)
{
struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
if (!ffs)
return NULL;
ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
if (!ffs->io_completion_wq) {
kfree(ffs);
return NULL;
}
refcount_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
mutex_init(&ffs->mutex);
spin_lock_init(&ffs->eps_lock);
init_waitqueue_head(&ffs->ev.waitq);
init_waitqueue_head(&ffs->wait);
init_completion(&ffs->ep0req_completion);
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
return ffs;
}
static void ffs_data_clear(struct ffs_data *ffs)
{
struct ffs_epfile *epfiles;
unsigned long flags;
ffs_closed(ffs);
BUG_ON(ffs->gadget);
spin_lock_irqsave(&ffs->eps_lock, flags);
epfiles = ffs->epfiles;
ffs->epfiles = NULL;
spin_unlock_irqrestore(&ffs->eps_lock, flags);
/*
 * A race is possible between ffs_func_eps_disable() and
 * ffs_epfile_release(); destroying a local copy of epfiles
 * (detached under eps_lock above) saves us from use-after-free.
 */
if (epfiles)
ffs_epfiles_destroy(epfiles, ffs->eps_count);
if (ffs->ffs_eventfd) {
eventfd_ctx_put(ffs->ffs_eventfd);
ffs->ffs_eventfd = NULL;
}
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
}
static void ffs_data_reset(struct ffs_data *ffs)
{
ffs_data_clear(ffs);
ffs->raw_descs_data = NULL;
ffs->raw_descs = NULL;
ffs->raw_strings = NULL;
ffs->stringtabs = NULL;
ffs->raw_descs_length = 0;
ffs->fs_descs_count = 0;
ffs->hs_descs_count = 0;
ffs->ss_descs_count = 0;
ffs->strings_count = 0;
ffs->interfaces_count = 0;
ffs->eps_count = 0;
ffs->ev.count = 0;
ffs->state = FFS_READ_DESCRIPTORS;
ffs->setup_state = FFS_NO_SETUP;
ffs->flags = 0;
ffs->ms_os_descs_ext_prop_count = 0;
ffs->ms_os_descs_ext_prop_name_len = 0;
ffs->ms_os_descs_ext_prop_data_len = 0;
}
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
struct usb_gadget_strings **lang;
int first_id;
if (WARN_ON(ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
first_id = usb_string_ids_n(cdev, ffs->strings_count);
if (first_id < 0)
return first_id;
ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
if (!ffs->ep0req)
return -ENOMEM;
ffs->ep0req->complete = ffs_ep0_complete;
ffs->ep0req->context = ffs;
lang = ffs->stringtabs;
if (lang) {
for (; *lang; ++lang) {
struct usb_string *str = (*lang)->strings;
int id = first_id;
for (; str->s; ++id, ++str)
str->id = id;
}
}
ffs->gadget = cdev->gadget;
ffs_data_get(ffs);
return 0;
}
static void functionfs_unbind(struct ffs_data *ffs)
{
if (!WARN_ON(!ffs->gadget)) {
/* dequeue before freeing ep0req */
usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
mutex_lock(&ffs->mutex);
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
mutex_unlock(&ffs->mutex);
ffs_data_put(ffs);
}
}
static int ffs_epfiles_create(struct ffs_data *ffs)
{
struct ffs_epfile *epfile, *epfiles;
unsigned i, count;
count = ffs->eps_count;
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
return -ENOMEM;
epfile = epfiles;
for (i = 1; i <= count; ++i, ++epfile) {
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
sprintf(epfile->name, "ep%u", i);
epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
epfile,
&ffs_epfile_operations);
if (!epfile->dentry) {
ffs_epfiles_destroy(epfiles, i - 1);
return -ENOMEM;
}
}
ffs->epfiles = epfiles;
return 0;
}
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
{
struct ffs_epfile *epfile = epfiles;
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex));
if (epfile->dentry) {
d_delete(epfile->dentry);
dput(epfile->dentry);
epfile->dentry = NULL;
}
}
kfree(epfiles);
}
static void ffs_func_eps_disable(struct ffs_function *func)
{
struct ffs_ep *ep;
struct ffs_epfile *epfile;
unsigned short count;
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
count = func->ffs->eps_count;
epfile = func->ffs->epfiles;
ep = func->eps;
while (count--) {
/* pending requests get nuked */
if (ep->ep)
usb_ep_disable(ep->ep);
++ep;
if (epfile) {
epfile->ep = NULL;
__ffs_epfile_read_buffer_free(epfile);
++epfile;
}
}
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
{
struct ffs_data *ffs;
struct ffs_ep *ep;
struct ffs_epfile *epfile;
unsigned short count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
ffs = func->ffs;
ep = func->eps;
epfile = ffs->epfiles;
count = ffs->eps_count;
while (count--) {
ep->ep->driver_data = ep;
ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
if (ret) {
pr_err("%s: config_ep_by_speed(%s) returned %d\n",
__func__, ep->ep->name, ret);
break;
}
ret = usb_ep_enable(ep->ep);
if (!ret) {
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ep->ep->desc);
epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
} else {
break;
}
++ep;
++epfile;
}
wake_up_interruptible(&ffs->wait);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
}
/* Parsing and building descriptors and strings *****************************/
/*
 * This validates that the buffer pointed to by data holds a valid USB
 * descriptor and records how many interfaces, endpoints and strings
 * are required by the given configuration. Returns the number of
 * bytes consumed by the descriptor, or a negative error code if the
 * data is invalid.
 */
enum ffs_entity_type {
FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
};
enum ffs_os_desc_type {
FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
};
typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
u8 *valuep,
struct usb_descriptor_header *desc,
void *priv);
typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
struct usb_os_desc_header *h, void *data,
unsigned len, void *priv);
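/*
 * A minimal entity callback, as an illustrative sketch (not part of this
 * driver): it merely counts endpoints while a descriptor blob is walked.
 *
 *	static int count_eps(enum ffs_entity_type type, u8 *valuep,
 *			     struct usb_descriptor_header *desc, void *priv)
 *	{
 *		if (type == FFS_ENDPOINT)
 *			++*(unsigned *)priv;
 *		return 0;
 *	}
 *
 * ffs_do_descs() below invokes such a callback once per parsed entity.
 */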
static int __must_check ffs_do_single_desc(char *data, unsigned len,
ffs_entity_callback entity,
void *priv, int *current_class)
{
struct usb_descriptor_header *_ds = (void *)data;
u8 length;
int ret;
/* At least two bytes are required: length and type */
if (len < 2) {
pr_vdebug("descriptor too short\n");
return -EINVAL;
}
/* Do we have at least as many bytes as the descriptor takes? */
length = _ds->bLength;
if (len < length) {
pr_vdebug("descriptor longer than available data\n");
return -EINVAL;
}
#define __entity_check_INTERFACE(val) 1
#define __entity_check_STRING(val) (val)
#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do { \
pr_vdebug("entity " #type "(%02x)\n", (val)); \
if (!__entity_check_ ##type(val)) { \
pr_vdebug("invalid entity's value\n"); \
return -EINVAL; \
} \
ret = entity(FFS_ ##type, &val, _ds, priv); \
if (ret < 0) { \
pr_debug("entity " #type "(%02x); ret = %d\n", \
(val), ret); \
return ret; \
} \
} while (0)
/* Parse descriptor depending on type. */
switch (_ds->bDescriptorType) {
case USB_DT_DEVICE:
case USB_DT_CONFIG:
case USB_DT_STRING:
case USB_DT_DEVICE_QUALIFIER:
/* function can't have any of those */
pr_vdebug("descriptor reserved for gadget: %d\n",
_ds->bDescriptorType);
return -EINVAL;
case USB_DT_INTERFACE: {
struct usb_interface_descriptor *ds = (void *)_ds;
pr_vdebug("interface descriptor\n");
if (length != sizeof *ds)
goto inv_length;
__entity(INTERFACE, ds->bInterfaceNumber);
if (ds->iInterface)
__entity(STRING, ds->iInterface);
*current_class = ds->bInterfaceClass;
}
break;
case USB_DT_ENDPOINT: {
struct usb_endpoint_descriptor *ds = (void *)_ds;
pr_vdebug("endpoint descriptor\n");
if (length != USB_DT_ENDPOINT_SIZE &&
length != USB_DT_ENDPOINT_AUDIO_SIZE)
goto inv_length;
__entity(ENDPOINT, ds->bEndpointAddress);
}
break;
case USB_TYPE_CLASS | 0x01:
if (*current_class == USB_INTERFACE_CLASS_HID) {
pr_vdebug("hid descriptor\n");
if (length != sizeof(struct hid_descriptor))
goto inv_length;
break;
} else if (*current_class == USB_INTERFACE_CLASS_CCID) {
pr_vdebug("ccid descriptor\n");
if (length != sizeof(struct ccid_descriptor))
goto inv_length;
break;
} else {
pr_vdebug("unknown descriptor: %d for class %d\n",
_ds->bDescriptorType, *current_class);
return -EINVAL;
}
case USB_DT_OTG:
if (length != sizeof(struct usb_otg_descriptor))
goto inv_length;
break;
case USB_DT_INTERFACE_ASSOCIATION: {
struct usb_interface_assoc_descriptor *ds = (void *)_ds;
pr_vdebug("interface association descriptor\n");
if (length != sizeof *ds)
goto inv_length;
if (ds->iFunction)
__entity(STRING, ds->iFunction);
}
break;
case USB_DT_SS_ENDPOINT_COMP:
pr_vdebug("EP SS companion descriptor\n");
if (length != sizeof(struct usb_ss_ep_comp_descriptor))
goto inv_length;
break;
case USB_DT_OTHER_SPEED_CONFIG:
case USB_DT_INTERFACE_POWER:
case USB_DT_DEBUG:
case USB_DT_SECURITY:
case USB_DT_CS_RADIO_CONTROL:
/* TODO */
pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
return -EINVAL;
default:
/* We should never be here */
pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
return -EINVAL;
inv_length:
pr_vdebug("invalid length: %d (descriptor %d)\n",
_ds->bLength, _ds->bDescriptorType);
return -EINVAL;
}
#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
return length;
}
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
ffs_entity_callback entity, void *priv)
{
const unsigned _len = len;
unsigned long num = 0;
int current_class = -1;
for (;;) {
int ret;
if (num == count)
data = NULL;
/* Record "descriptor" entity */
ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
if (ret < 0) {
pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
num, ret);
return ret;
}
if (!data)
return _len - len;
ret = ffs_do_single_desc(data, len, entity, priv,
&current_class);
if (ret < 0) {
pr_debug("%s returns %d\n", __func__, ret);
return ret;
}
len -= ret;
data += ret;
++num;
}
}
static int __ffs_data_do_entity(enum ffs_entity_type type,
u8 *valuep, struct usb_descriptor_header *desc,
void *priv)
{
struct ffs_desc_helper *helper = priv;
struct usb_endpoint_descriptor *d;
switch (type) {
case FFS_DESCRIPTOR:
break;
case FFS_INTERFACE:
/*
* Interfaces are indexed from zero so if we
* encountered interface "n" then there are at least
* "n+1" interfaces.
*/
if (*valuep >= helper->interfaces_count)
helper->interfaces_count = *valuep + 1;
break;
case FFS_STRING:
/*
* Strings are indexed from 1 (0 is reserved
* for languages list)
*/
if (*valuep > helper->ffs->strings_count)
helper->ffs->strings_count = *valuep;
break;
case FFS_ENDPOINT:
d = (void *)desc;
helper->eps_count++;
if (helper->eps_count >= FFS_MAX_EPS_COUNT)
return -EINVAL;
/* Check if descriptors for any speed were already parsed */
if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
helper->ffs->eps_addrmap[helper->eps_count] =
d->bEndpointAddress;
else if (helper->ffs->eps_addrmap[helper->eps_count] !=
d->bEndpointAddress)
return -EINVAL;
break;
}
return 0;
}
static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
struct usb_os_desc_header *desc)
{
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
u16 w_index = le16_to_cpu(desc->wIndex);
if (bcd_version == 0x1) {
pr_warn("bcdVersion must be 0x0100, stored in Little Endian order. "
"Userspace driver should be fixed, accepting 0x0001 for compatibility.\n");
} else if (bcd_version != 0x100) {
pr_vdebug("unsupported os descriptors version: 0x%x\n",
bcd_version);
return -EINVAL;
}
switch (w_index) {
case 0x4:
*next_type = FFS_OS_DESC_EXT_COMPAT;
break;
case 0x5:
*next_type = FFS_OS_DESC_EXT_PROP;
break;
default:
pr_vdebug("unsupported os descriptor type: %d", w_index);
return -EINVAL;
}
return sizeof(*desc);
}
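/*
 * For reference, the header parsed above follows the layout in the
 * FunctionFS uapi header (multi-byte fields are little-endian):
 *
 *	struct usb_os_desc_header {
 *		__u8	interface;
 *		__le32	dwLength;
 *		__le16	bcdVersion;	// must be 0x0100
 *		__le16	wIndex;		// 0x4: ext compat, 0x5: ext prop
 *		union {
 *			struct {
 *				__u8	bCount;
 *				__u8	Reserved;
 *			};
 *			__le16	wCount;
 *		};
 *	} __attribute__((packed));
 */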
/*
* Process all extended compatibility/extended property descriptors
* of a feature descriptor
*/
static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
enum ffs_os_desc_type type,
u16 feature_count,
ffs_os_desc_callback entity,
void *priv,
struct usb_os_desc_header *h)
{
int ret;
const unsigned _len = len;
/* loop over all ext compat/ext prop descriptors */
while (feature_count--) {
ret = entity(type, h, data, len, priv);
if (ret < 0) {
pr_debug("bad OS descriptor, type: %d\n", type);
return ret;
}
data += ret;
len -= ret;
}
return _len - len;
}
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
static int __must_check ffs_do_os_descs(unsigned count,
char *data, unsigned len,
ffs_os_desc_callback entity, void *priv)
{
const unsigned _len = len;
unsigned long num = 0;
for (num = 0; num < count; ++num) {
int ret;
enum ffs_os_desc_type type;
u16 feature_count;
struct usb_os_desc_header *desc = (void *)data;
if (len < sizeof(*desc))
return -EINVAL;
/*
 * Record the "descriptor" entity.
 * Process dwLength, bcdVersion and wIndex, and fetch b/wCount.
 * Then move the data pointer to the beginning of the extended
 * compatibility or extended property portion of the data.
 */
if (le32_to_cpu(desc->dwLength) > len)
return -EINVAL;
ret = __ffs_do_os_desc_header(&type, desc);
if (ret < 0) {
pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
num, ret);
return ret;
}
/*
 * A 16-bit little-endian value "?? 00" reads the same as the
 * 8-bit value "??", so wCount safely covers bCount as well.
 */
feature_count = le16_to_cpu(desc->wCount);
if (type == FFS_OS_DESC_EXT_COMPAT &&
(feature_count > 255 || desc->Reserved))
return -EINVAL;
len -= ret;
data += ret;
/*
* Process all function/property descriptors
* of this Feature Descriptor
*/
ret = ffs_do_single_os_desc(data, len, type,
feature_count, entity, priv, desc);
if (ret < 0) {
pr_debug("%s returns %d\n", __func__, ret);
return ret;
}
len -= ret;
data += ret;
}
return _len - len;
}
/*
* Validate contents of the buffer from userspace related to OS descriptors.
*/
static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
struct usb_os_desc_header *h, void *data,
unsigned len, void *priv)
{
struct ffs_data *ffs = priv;
u8 length;
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *d = data;
int i;
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count)
return -EINVAL;
if (d->Reserved1 != 1) {
/*
* According to the spec, Reserved1 must be set to 1
* but older kernels incorrectly rejected non-zero
* values. We fix it here to avoid returning EINVAL
* in response to values we used to accept.
*/
pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
d->Reserved1 = 1;
}
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
return -EINVAL;
length = sizeof(struct usb_ext_compat_desc);
}
break;
case FFS_OS_DESC_EXT_PROP: {
struct usb_ext_prop_desc *d = data;
u32 type, pdl;
u16 pnl;
if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
return -EINVAL;
length = le32_to_cpu(d->dwSize);
if (len < length)
return -EINVAL;
type = le32_to_cpu(d->dwPropertyDataType);
if (type < USB_EXT_PROP_UNICODE ||
type > USB_EXT_PROP_UNICODE_MULTI) {
pr_vdebug("unsupported os descriptor property type: %d",
type);
return -EINVAL;
}
pnl = le16_to_cpu(d->wPropertyNameLength);
if (length < 14 + pnl) {
pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
length, pnl, type);
return -EINVAL;
}
pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
if (length != 14 + pnl + pdl) {
pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
length, pnl, pdl, type);
return -EINVAL;
}
++ffs->ms_os_descs_ext_prop_count;
/* property name reported to the host as "WCHAR"s */
ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
ffs->ms_os_descs_ext_prop_data_len += pdl;
}
break;
default:
pr_vdebug("unknown descriptor: %d\n", type);
return -EINVAL;
}
return length;
}
static int __ffs_data_got_descs(struct ffs_data *ffs,
char *const _data, size_t len)
{
char *data = _data, *raw_descs;
unsigned os_descs_count = 0, counts[3], flags;
int ret = -EINVAL, i;
struct ffs_desc_helper helper;
if (get_unaligned_le32(data + 4) != len)
goto error;
switch (get_unaligned_le32(data)) {
case FUNCTIONFS_DESCRIPTORS_MAGIC:
flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
data += 8;
len -= 8;
break;
case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
flags = get_unaligned_le32(data + 8);
ffs->user_flags = flags;
if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
FUNCTIONFS_HAS_HS_DESC |
FUNCTIONFS_HAS_SS_DESC |
FUNCTIONFS_HAS_MS_OS_DESC |
FUNCTIONFS_VIRTUAL_ADDR |
FUNCTIONFS_EVENTFD |
FUNCTIONFS_ALL_CTRL_RECIP |
FUNCTIONFS_CONFIG0_SETUP)) {
ret = -ENOSYS;
goto error;
}
data += 12;
len -= 12;
break;
default:
goto error;
}
if (flags & FUNCTIONFS_EVENTFD) {
if (len < 4)
goto error;
ffs->ffs_eventfd =
eventfd_ctx_fdget((int)get_unaligned_le32(data));
if (IS_ERR(ffs->ffs_eventfd)) {
ret = PTR_ERR(ffs->ffs_eventfd);
ffs->ffs_eventfd = NULL;
goto error;
}
data += 4;
len -= 4;
}
/* Read fs_count, hs_count and ss_count (if present) */
for (i = 0; i < 3; ++i) {
if (!(flags & (1 << i))) {
counts[i] = 0;
} else if (len < 4) {
goto error;
} else {
counts[i] = get_unaligned_le32(data);
data += 4;
len -= 4;
}
}
/* Read os_descs_count if MS OS descriptors are present */
if (flags & (1 << i)) {
if (len < 4) {
goto error;
}
os_descs_count = get_unaligned_le32(data);
data += 4;
len -= 4;
}
/* Read descriptors */
raw_descs = data;
helper.ffs = ffs;
for (i = 0; i < 3; ++i) {
if (!counts[i])
continue;
helper.interfaces_count = 0;
helper.eps_count = 0;
ret = ffs_do_descs(counts[i], data, len,
__ffs_data_do_entity, &helper);
if (ret < 0)
goto error;
if (!ffs->eps_count && !ffs->interfaces_count) {
ffs->eps_count = helper.eps_count;
ffs->interfaces_count = helper.interfaces_count;
} else {
if (ffs->eps_count != helper.eps_count) {
ret = -EINVAL;
goto error;
}
if (ffs->interfaces_count != helper.interfaces_count) {
ret = -EINVAL;
goto error;
}
}
data += ret;
len -= ret;
}
if (os_descs_count) {
ret = ffs_do_os_descs(os_descs_count, data, len,
__ffs_data_do_os_desc, ffs);
if (ret < 0)
goto error;
data += ret;
len -= ret;
}
if (raw_descs == data || len) {
ret = -EINVAL;
goto error;
}
ffs->raw_descs_data = _data;
ffs->raw_descs = raw_descs;
ffs->raw_descs_length = data - raw_descs;
ffs->fs_descs_count = counts[0];
ffs->hs_descs_count = counts[1];
ffs->ss_descs_count = counts[2];
ffs->ms_os_descs_count = os_descs_count;
return 0;
error:
kfree(_data);
return ret;
}
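/*
 * Illustrative layout of a v2 blob as consumed above (all fields are
 * little-endian; which pieces are present depends on the flags):
 *
 *	__le32 magic;	// FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 *	__le32 length;	// of the whole blob
 *	__le32 flags;
 *	// eventfd (4 bytes), present iff FUNCTIONFS_EVENTFD is set
 *	// __le32 fs_count, hs_count, ss_count, os_count;
 *	//	each present iff the matching HAS_* flag is set
 *	// then fs, hs and ss descriptors and OS descriptors, in order
 */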
static int __ffs_data_got_strings(struct ffs_data *ffs,
char *const _data, size_t len)
{
u32 str_count, needed_count, lang_count;
struct usb_gadget_strings **stringtabs, *t;
const char *data = _data;
struct usb_string *s;
if (len < 16 ||
get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len)
goto error;
str_count = get_unaligned_le32(data + 8);
lang_count = get_unaligned_le32(data + 12);
/* if one is zero the other must be zero */
if (!str_count != !lang_count)
goto error;
/* Do we have at least as many strings as descriptors need? */
needed_count = ffs->strings_count;
if (str_count < needed_count)
goto error;
/*
* If we don't need any strings just return and free all
* memory.
*/
if (!needed_count) {
kfree(_data);
return 0;
}
/* Allocate everything in one chunk so there's less maintenance. */
{
unsigned i = 0;
vla_group(d);
vla_item(d, struct usb_gadget_strings *, stringtabs,
size_add(lang_count, 1));
vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
vla_item(d, struct usb_string, strings,
size_mul(lang_count, (needed_count + 1)));
char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
if (!vlabuf) {
kfree(_data);
return -ENOMEM;
}
/* Initialize the VLA pointers */
stringtabs = vla_ptr(vlabuf, d, stringtabs);
t = vla_ptr(vlabuf, d, stringtab);
i = lang_count;
do {
*stringtabs++ = t++;
} while (--i);
*stringtabs = NULL;
/* stringtabs = vlabuf = d_stringtabs for later kfree */
stringtabs = vla_ptr(vlabuf, d, stringtabs);
t = vla_ptr(vlabuf, d, stringtab);
s = vla_ptr(vlabuf, d, strings);
}
/* For each language */
data += 16;
len -= 16;
do { /* lang_count > 0 so we can use do-while */
unsigned needed = needed_count;
u32 str_per_lang = str_count;
if (len < 3)
goto error_free;
t->language = get_unaligned_le16(data);
t->strings = s;
++t;
data += 2;
len -= 2;
/* For each string */
do { /* str_count > 0 so we can use do-while */
size_t length = strnlen(data, len);
if (length == len)
goto error_free;
/*
 * The user may provide more strings than we need;
 * if that's the case we simply ignore the rest.
 */
if (needed) {
/*
* s->id will be set while adding
* function to configuration so for
* now just leave garbage here.
*/
s->s = data;
--needed;
++s;
}
data += length + 1;
len -= length + 1;
} while (--str_per_lang);
s->id = 0; /* terminator */
s->s = NULL;
++s;
} while (--lang_count);
/* Some garbage left? */
if (len)
goto error_free;
/* Done! */
ffs->stringtabs = stringtabs;
ffs->raw_strings = _data;
return 0;
error_free:
kfree(stringtabs);
error:
kfree(_data);
return -EINVAL;
}
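/*
 * Illustrative layout of the strings blob consumed above (fields are
 * little-endian, cf. struct usb_functionfs_strings_head in the uapi):
 *
 *	__le32 magic;		// FUNCTIONFS_STRINGS_MAGIC
 *	__le32 length;		// of the whole blob
 *	__le32 str_count;	// strings per language
 *	__le32 lang_count;
 *	// then for each language:
 *	//	__le16 code;	// e.g. 0x0409 for en-us
 *	//	str_count NUL-terminated UTF-8 strings
 */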
/* Events handling and management *******************************************/
static void __ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type type)
{
enum usb_functionfs_event_type rem_type1, rem_type2 = type;
int neg = 0;
/*
* Abort any unhandled setup
*
* We do not need to worry about some cmpxchg() changing value
* of ffs->setup_state without holding the lock because when
* state is FFS_SETUP_PENDING cmpxchg() in several places in
* the source does nothing.
*/
if (ffs->setup_state == FFS_SETUP_PENDING)
ffs->setup_state = FFS_SETUP_CANCELLED;
/*
* Logic of this function guarantees that there are at most four pending
* events on the ffs->ev.types queue. This is important because the queue
* has space for four elements only and __ffs_ep0_read_events function
* depends on that limit as well. If more event types are added, those
* limits have to be revisited or guaranteed to still hold.
*/
switch (type) {
case FUNCTIONFS_RESUME:
rem_type2 = FUNCTIONFS_SUSPEND;
fallthrough;
case FUNCTIONFS_SUSPEND:
case FUNCTIONFS_SETUP:
rem_type1 = type;
/* Discard all similar events */
break;
case FUNCTIONFS_BIND:
case FUNCTIONFS_UNBIND:
case FUNCTIONFS_DISABLE:
case FUNCTIONFS_ENABLE:
/* Discard everything other than power management. */
rem_type1 = FUNCTIONFS_SUSPEND;
rem_type2 = FUNCTIONFS_RESUME;
neg = 1;
break;
default:
WARN(1, "%d: unknown event, this should not happen\n", type);
return;
}
{
u8 *ev = ffs->ev.types, *out = ev;
unsigned n = ffs->ev.count;
for (; n; --n, ++ev)
if ((*ev == rem_type1 || *ev == rem_type2) == neg)
*out++ = *ev;
else
pr_vdebug("purging event %d\n", *ev);
ffs->ev.count = out - ffs->ev.types;
}
pr_vdebug("adding event %d\n", type);
ffs->ev.types[ffs->ev.count++] = type;
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
eventfd_signal(ffs->ffs_eventfd, 1);
}
static void ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type type)
{
unsigned long flags;
spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
__ffs_event_add(ffs, type);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
}
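/*
 * Userspace typically consumes these events by read()ing ep0, e.g.
 * (illustrative sketch, error handling omitted):
 *
 *	struct usb_functionfs_event event;
 *
 *	read(ep0_fd, &event, sizeof(event));
 *	switch (event.type) {
 *	case FUNCTIONFS_ENABLE:
 *		// endpoints are live; start I/O on the ep files
 *		break;
 *	}
 */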
/* Bind/unbind USB function hooks *******************************************/
static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
{
int i;
for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
if (ffs->eps_addrmap[i] == endpoint_address)
return i;
return -ENOENT;
}
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
struct usb_descriptor_header *desc,
void *priv)
{
struct usb_endpoint_descriptor *ds = (void *)desc;
struct ffs_function *func = priv;
struct ffs_ep *ffs_ep;
unsigned ep_desc_id;
int idx;
static const char *speed_names[] = { "full", "high", "super" };
if (type != FFS_DESCRIPTOR)
return 0;
/*
* If ss_descriptors is not NULL, we are reading super speed
* descriptors; if hs_descriptors is not NULL, we are reading high
* speed descriptors; otherwise, we are reading full speed
* descriptors.
*/
if (func->function.ss_descriptors) {
ep_desc_id = 2;
func->function.ss_descriptors[(long)valuep] = desc;
} else if (func->function.hs_descriptors) {
ep_desc_id = 1;
func->function.hs_descriptors[(long)valuep] = desc;
} else {
ep_desc_id = 0;
func->function.fs_descriptors[(long)valuep] = desc;
}
if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
return 0;
idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
if (idx < 0)
return idx;
ffs_ep = func->eps + idx;
if (ffs_ep->descs[ep_desc_id]) {
pr_err("two %sspeed descriptors for EP %d\n",
speed_names[ep_desc_id],
ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
return -EINVAL;
}
ffs_ep->descs[ep_desc_id] = ds;
ffs_dump_mem(": Original ep desc", ds, ds->bLength);
if (ffs_ep->ep) {
ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
if (!ds->wMaxPacketSize)
ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
} else {
struct usb_request *req;
struct usb_ep *ep;
u8 bEndpointAddress;
u16 wMaxPacketSize;
/*
* We back up bEndpointAddress because autoconfig overwrites
* it with physical endpoint address.
*/
bEndpointAddress = ds->bEndpointAddress;
/*
* We back up wMaxPacketSize because autoconfig treats
* endpoint descriptors as if they were full speed.
*/
wMaxPacketSize = ds->wMaxPacketSize;
pr_vdebug("autoconfig\n");
ep = usb_ep_autoconfig(func->gadget, ds);
if (!ep)
return -ENOTSUPP;
ep->driver_data = func->eps + idx;
req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!req)
return -ENOMEM;
ffs_ep->ep = ep;
ffs_ep->req = req;
func->eps_revmap[ds->bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK] = idx + 1;
/*
* If we use virtual address mapping, we restore
* original bEndpointAddress value.
*/
if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
ds->bEndpointAddress = bEndpointAddress;
/*
* Restore wMaxPacketSize which was potentially
* overwritten by autoconfig.
*/
ds->wMaxPacketSize = wMaxPacketSize;
}
ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
return 0;
}
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
struct usb_descriptor_header *desc,
void *priv)
{
struct ffs_function *func = priv;
unsigned idx;
u8 newValue;
switch (type) {
default:
case FFS_DESCRIPTOR:
/* Handled in previous pass by __ffs_func_bind_do_descs() */
return 0;
case FFS_INTERFACE:
idx = *valuep;
if (func->interfaces_nums[idx] < 0) {
int id = usb_interface_id(func->conf, &func->function);
if (id < 0)
return id;
func->interfaces_nums[idx] = id;
}
newValue = func->interfaces_nums[idx];
break;
case FFS_STRING:
/* String IDs are allocated when ffs_data is bound to cdev */
newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
break;
case FFS_ENDPOINT:
/*
* USB_DT_ENDPOINT are handled in
* __ffs_func_bind_do_descs().
*/
if (desc->bDescriptorType == USB_DT_ENDPOINT)
return 0;
idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
if (!func->eps[idx].ep)
return -EINVAL;
{
struct usb_endpoint_descriptor **descs;
descs = func->eps[idx].descs;
newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
}
break;
}
pr_vdebug("%02x -> %02x\n", *valuep, newValue);
*valuep = newValue;
return 0;
}
static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
struct usb_os_desc_header *h, void *data,
unsigned len, void *priv)
{
struct ffs_function *func = priv;
u8 length = 0;
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *desc = data;
struct usb_os_desc_table *t;
t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
ARRAY_SIZE(desc->CompatibleID) +
ARRAY_SIZE(desc->SubCompatibleID));
length = sizeof(*desc);
}
break;
case FFS_OS_DESC_EXT_PROP: {
struct usb_ext_prop_desc *desc = data;
struct usb_os_desc_table *t;
struct usb_os_desc_ext_prop *ext_prop;
char *ext_prop_name;
char *ext_prop_data;
t = &func->function.os_desc_table[h->interface];
t->if_id = func->interfaces_nums[h->interface];
ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
ext_prop->data_len = le32_to_cpu(*(__le32 *)
usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
length = ext_prop->name_len + ext_prop->data_len + 14;
ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
func->ffs->ms_os_descs_ext_prop_name_avail +=
ext_prop->name_len;
ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
func->ffs->ms_os_descs_ext_prop_data_avail +=
ext_prop->data_len;
memcpy(ext_prop_data,
usb_ext_prop_data_ptr(data, ext_prop->name_len),
ext_prop->data_len);
/* unicode data reported to the host as "WCHAR"s */
switch (ext_prop->type) {
case USB_EXT_PROP_UNICODE:
case USB_EXT_PROP_UNICODE_ENV:
case USB_EXT_PROP_UNICODE_LINK:
case USB_EXT_PROP_UNICODE_MULTI:
ext_prop->data_len *= 2;
break;
}
ext_prop->data = ext_prop_data;
memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
ext_prop->name_len);
/* property name reported to the host as "WCHAR"s */
ext_prop->name_len *= 2;
ext_prop->name = ext_prop_name;
t->os_desc->ext_prop_len +=
ext_prop->name_len + ext_prop->data_len + 14;
++t->os_desc->ext_prop_count;
list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
}
break;
default:
pr_vdebug("unknown descriptor: %d\n", type);
}
return length;
}
static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
struct usb_configuration *c)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct f_fs_opts *ffs_opts =
container_of(f->fi, struct f_fs_opts, func_inst);
struct ffs_data *ffs_data;
int ret;
/*
* Legacy gadget triggers binding in functionfs_ready_callback,
* which already uses locking; taking the same lock here would
* cause a deadlock.
*
* Configfs-enabled gadgets however do need ffs_dev_lock.
*/
if (!ffs_opts->no_configfs)
ffs_dev_lock();
ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
ffs_data = ffs_opts->dev->ffs_data;
if (!ffs_opts->no_configfs)
ffs_dev_unlock();
if (ret)
return ERR_PTR(ret);
func->ffs = ffs_data;
func->conf = c;
func->gadget = c->cdev->gadget;
/*
 * In drivers/usb/gadget/configfs.c:configfs_composite_bind()
 * configurations are bound in sequence with list_for_each_entry,
 * and within each configuration its functions are bound in
 * sequence with list_for_each_entry, so we assume no race
 * condition with regard to the ffs_opts->refcnt access below.
 */
if (!ffs_opts->refcnt) {
ret = functionfs_bind(func->ffs, c->cdev);
if (ret)
return ERR_PTR(ret);
}
ffs_opts->refcnt++;
func->function.strings = func->ffs->stringtabs;
return ffs_opts;
}
static int _ffs_func_bind(struct usb_configuration *c,
struct usb_function *f)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
const int full = !!func->ffs->fs_descs_count;
const int high = !!func->ffs->hs_descs_count;
const int super = !!func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
full ? ffs->fs_descs_count + 1 : 0);
vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
high ? ffs->hs_descs_count + 1 : 0);
vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
super ? ffs->ss_descs_count + 1 : 0);
vla_item_with_sz(d, short, inums, ffs->interfaces_count);
vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
c->cdev->use_os_string ? ffs->interfaces_count : 0);
vla_item_with_sz(d, char[16], ext_compat,
c->cdev->use_os_string ? ffs->interfaces_count : 0);
vla_item_with_sz(d, struct usb_os_desc, os_desc,
c->cdev->use_os_string ? ffs->interfaces_count : 0);
vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
ffs->ms_os_descs_ext_prop_count);
vla_item_with_sz(d, char, ext_prop_name,
ffs->ms_os_descs_ext_prop_name_len);
vla_item_with_sz(d, char, ext_prop_data,
ffs->ms_os_descs_ext_prop_data_len);
vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
char *vlabuf;
/* Has descriptors only for speeds the gadget does not support */
if (!(full | high | super))
return -ENOTSUPP;
/* Allocate a single chunk, less management later on */
vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
if (!vlabuf)
return -ENOMEM;
ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
ffs->ms_os_descs_ext_prop_name_avail =
vla_ptr(vlabuf, d, ext_prop_name);
ffs->ms_os_descs_ext_prop_data_avail =
vla_ptr(vlabuf, d, ext_prop_data);
/* Copy descriptors */
memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
eps_ptr = vla_ptr(vlabuf, d, eps);
for (i = 0; i < ffs->eps_count; i++)
eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
*/
func->eps = vla_ptr(vlabuf, d, eps);
func->interfaces_nums = vla_ptr(vlabuf, d, inums);
/*
* Go through all the endpoint descriptors and allocate
* endpoints first, so that later we can rewrite the endpoint
* numbers without worrying that it may be described later on.
*/
if (full) {
func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
fs_len = ffs_do_descs(ffs->fs_descs_count,
vla_ptr(vlabuf, d, raw_descs),
d_raw_descs__sz,
__ffs_func_bind_do_descs, func);
if (fs_len < 0) {
ret = fs_len;
goto error;
}
} else {
fs_len = 0;
}
if (high) {
func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
hs_len = ffs_do_descs(ffs->hs_descs_count,
vla_ptr(vlabuf, d, raw_descs) + fs_len,
d_raw_descs__sz - fs_len,
__ffs_func_bind_do_descs, func);
if (hs_len < 0) {
ret = hs_len;
goto error;
}
} else {
hs_len = 0;
}
if (super) {
func->function.ss_descriptors = func->function.ssp_descriptors =
vla_ptr(vlabuf, d, ss_descs);
ss_len = ffs_do_descs(ffs->ss_descs_count,
vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
d_raw_descs__sz - fs_len - hs_len,
__ffs_func_bind_do_descs, func);
if (ss_len < 0) {
ret = ss_len;
goto error;
}
} else {
ss_len = 0;
}
/*
* Now handle interface numbers allocation and interface and
* endpoint numbers rewriting. We can do that in one go
* now.
*/
ret = ffs_do_descs(ffs->fs_descs_count +
(high ? ffs->hs_descs_count : 0) +
(super ? ffs->ss_descs_count : 0),
vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
__ffs_func_bind_do_nums, func);
if (ret < 0)
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
desc = func->function.os_desc_table[i].os_desc =
vla_ptr(vlabuf, d, os_desc) +
i * sizeof(struct usb_os_desc);
desc->ext_compat_id =
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
ret = ffs_do_os_descs(ffs->ms_os_descs_count,
vla_ptr(vlabuf, d, raw_descs) +
fs_len + hs_len + ss_len,
d_raw_descs__sz - fs_len - hs_len -
ss_len,
__ffs_func_bind_do_os_desc, func);
if (ret < 0)
goto error;
}
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
/* And we're done */
ffs_event_add(ffs, FUNCTIONFS_BIND);
return 0;
error:
/* XXX Do we need to release all claimed endpoints here? */
return ret;
}
static int ffs_func_bind(struct usb_configuration *c,
struct usb_function *f)
{
struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
struct ffs_function *func = ffs_func_from_usb(f);
int ret;
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
ret = _ffs_func_bind(c, f);
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
return ret;
}
/* Other USB function hooks *************************************************/
static void ffs_reset_work(struct work_struct *work)
{
struct ffs_data *ffs = container_of(work,
struct ffs_data, reset_work);
ffs_data_reset(ffs);
}
static int ffs_func_set_alt(struct usb_function *f,
unsigned interface, unsigned alt)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (intf < 0)
return intf;
}
if (ffs->func)
ffs_func_eps_disable(ffs->func);
if (ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
INIT_WORK(&ffs->reset_work, ffs_reset_work);
schedule_work(&ffs->reset_work);
return -ENODEV;
}
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
if (alt == (unsigned)-1) {
ffs->func = NULL;
ffs_event_add(ffs, FUNCTIONFS_DISABLE);
return 0;
}
ffs->func = func;
ret = ffs_func_eps_enable(func);
if (ret >= 0)
ffs_event_add(ffs, FUNCTIONFS_ENABLE);
return ret;
}
static void ffs_func_disable(struct usb_function *f)
{
ffs_func_set_alt(f, 0, (unsigned)-1);
}
static int ffs_func_setup(struct usb_function *f,
const struct usb_ctrlrequest *creq)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
unsigned long flags;
int ret;
pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
/*
 * Most requests directed to an interface go through here
 * (notable exceptions are set/get interface), so we need to
 * handle them. All others are either handled by composite or
 * passed to usb_configuration->setup() (if one is set). Either
 * way, we also handle requests directed to an endpoint here
 * (as it's straightforward). Other request recipient types are
 * only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
 * is in use.
 */
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
switch (creq->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
if (ret < 0)
return ret;
break;
case USB_RECIP_ENDPOINT:
ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
if (ret < 0)
return ret;
if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
ret = func->ffs->eps_addrmap[ret];
break;
default:
if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
ret = le16_to_cpu(creq->wIndex);
else
return -EOPNOTSUPP;
}
spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
ffs->ev.setup = *creq;
ffs->ev.setup.wIndex = cpu_to_le16(ret);
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static bool ffs_func_req_match(struct usb_function *f,
const struct usb_ctrlrequest *creq,
bool config0)
{
struct ffs_function *func = ffs_func_from_usb(f);
if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
return false;
switch (creq->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
return (ffs_func_revmap_intf(func,
le16_to_cpu(creq->wIndex)) >= 0);
case USB_RECIP_ENDPOINT:
return (ffs_func_revmap_ep(func,
le16_to_cpu(creq->wIndex)) >= 0);
default:
return (bool) (func->ffs->user_flags &
FUNCTIONFS_ALL_CTRL_RECIP);
}
}
static void ffs_func_suspend(struct usb_function *f)
{
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
}
static void ffs_func_resume(struct usb_function *f)
{
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
}
/* Endpoint and interface numbers reverse mapping ***************************/
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
return num ? num : -EDOM;
}
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
short *nums = func->interfaces_nums;
unsigned count = func->ffs->interfaces_count;
for (; count; --count, ++nums) {
if (*nums >= 0 && *nums == intf)
return nums - func->interfaces_nums;
}
return -EDOM;
}
/* Devices management *******************************************************/
static LIST_HEAD(ffs_devices);
static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
if (!name)
return NULL;
list_for_each_entry(dev, &ffs_devices, entry) {
if (strcmp(dev->name, name) == 0)
return dev;
}
return NULL;
}
/*
* ffs_lock must be taken by the caller of this function
*/
static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
if (list_is_singular(&ffs_devices)) {
dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
if (dev->single)
return dev;
}
return NULL;
}
/*
* ffs_lock must be taken by the caller of this function
*/
static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
dev = _ffs_get_single_dev();
if (dev)
return dev;
return _ffs_do_find_dev(name);
}
/* Configfs support *********************************************************/
static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_fs_opts,
func_inst.group);
}
static void ffs_attr_release(struct config_item *item)
{
struct f_fs_opts *opts = to_ffs_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations ffs_item_ops = {
.release = ffs_attr_release,
};
static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
.ct_owner = THIS_MODULE,
};
/* Function registration interface ******************************************/
static void ffs_free_inst(struct usb_function_instance *f)
{
struct f_fs_opts *opts;
opts = to_f_fs_opts(f);
ffs_release_dev(opts->dev);
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
if (strlen(name) >= sizeof_field(struct ffs_dev, name))
return -ENAMETOOLONG;
return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
}
static struct usb_function_instance *ffs_alloc_inst(void)
{
struct f_fs_opts *opts;
struct ffs_dev *dev;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
opts->func_inst.set_inst_name = ffs_set_inst_name;
opts->func_inst.free_func_inst = ffs_free_inst;
ffs_dev_lock();
dev = _ffs_alloc_dev();
ffs_dev_unlock();
if (IS_ERR(dev)) {
kfree(opts);
return ERR_CAST(dev);
}
opts->dev = dev;
dev->opts = opts;
config_group_init_type_name(&opts->func_inst.group, "",
&ffs_func_type);
return &opts->func_inst;
}
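/*
 * Illustrative configfs flow for instantiating this function (paths
 * assume a typical libcomposite gadget layout):
 *
 *	mkdir functions/ffs.my_dev	# ends up in ffs_alloc_inst()
 *	mount -t functionfs my_dev /mnt/ffs
 *	# daemon writes descriptors and strings to /mnt/ffs/ep0
 *	ln -s functions/ffs.my_dev configs/c.1
 */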
static void ffs_free(struct usb_function *f)
{
kfree(ffs_func_from_usb(f));
}
static void ffs_func_unbind(struct usb_configuration *c,
struct usb_function *f)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
struct f_fs_opts *opts =
container_of(f->fi, struct f_fs_opts, func_inst);
struct ffs_ep *ep = func->eps;
unsigned count = ffs->eps_count;
unsigned long flags;
if (ffs->func == func) {
ffs_func_eps_disable(func);
ffs->func = NULL;
}
/* Drain any pending AIO completions */
drain_workqueue(ffs->io_completion_wq);
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
if (!--opts->refcnt)
functionfs_unbind(ffs);
/* cleanup after autoconfig */
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while (count--) {
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
++ep;
}
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
kfree(func->eps);
func->eps = NULL;
/*
* eps, descriptors and interfaces_nums are allocated in the
* same chunk so only one free is required.
*/
func->function.fs_descriptors = NULL;
func->function.hs_descriptors = NULL;
func->function.ss_descriptors = NULL;
func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL;
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
{
struct ffs_function *func;
func = kzalloc(sizeof(*func), GFP_KERNEL);
if (!func)
return ERR_PTR(-ENOMEM);
func->function.name = "Function FS Gadget";
func->function.bind = ffs_func_bind;
func->function.unbind = ffs_func_unbind;
func->function.set_alt = ffs_func_set_alt;
func->function.disable = ffs_func_disable;
func->function.setup = ffs_func_setup;
func->function.req_match = ffs_func_req_match;
func->function.suspend = ffs_func_suspend;
func->function.resume = ffs_func_resume;
func->function.free_func = ffs_free;
return &func->function;
}
/*
* ffs_lock must be taken by the caller of this function
*/
static struct ffs_dev *_ffs_alloc_dev(void)
{
struct ffs_dev *dev;
int ret;
if (_ffs_get_single_dev())
return ERR_PTR(-EBUSY);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
if (list_empty(&ffs_devices)) {
ret = functionfs_init();
if (ret) {
kfree(dev);
return ERR_PTR(ret);
}
}
list_add(&dev->entry, &ffs_devices);
return dev;
}
int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
int ret = 0;
ffs_dev_lock();
existing = _ffs_do_find_dev(name);
if (!existing)
strscpy(dev->name, name, ARRAY_SIZE(dev->name));
else if (existing != dev)
ret = -EBUSY;
ffs_dev_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
int ffs_single_dev(struct ffs_dev *dev)
{
int ret;
ret = 0;
ffs_dev_lock();
if (!list_is_singular(&ffs_devices))
ret = -EBUSY;
else
dev->single = true;
ffs_dev_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
/*
* ffs_lock must be taken by the caller of this function
*/
static void _ffs_free_dev(struct ffs_dev *dev)
{
list_del(&dev->entry);
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
}
static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
{
int ret = 0;
struct ffs_dev *ffs_dev;
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
if (!ffs_dev) {
ret = -ENOENT;
} else if (ffs_dev->mounted) {
ret = -EBUSY;
} else if (ffs_dev->ffs_acquire_dev_callback &&
ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
ret = -ENOENT;
} else {
ffs_dev->mounted = true;
ffs_dev->ffs_data = ffs_data;
ffs_data->private_data = ffs_dev;
}
ffs_dev_unlock();
return ret;
}
static void ffs_release_dev(struct ffs_dev *ffs_dev)
{
ffs_dev_lock();
if (ffs_dev && ffs_dev->mounted) {
ffs_dev->mounted = false;
if (ffs_dev->ffs_data) {
ffs_dev->ffs_data->private_data = NULL;
ffs_dev->ffs_data = NULL;
}
if (ffs_dev->ffs_release_dev_callback)
ffs_dev->ffs_release_dev_callback(ffs_dev);
}
ffs_dev_unlock();
}
static int ffs_ready(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
int ret = 0;
ffs_dev_lock();
ffs_obj = ffs->private_data;
if (!ffs_obj) {
ret = -EINVAL;
goto done;
}
if (WARN_ON(ffs_obj->desc_ready)) {
ret = -EBUSY;
goto done;
}
ffs_obj->desc_ready = true;
if (ffs_obj->ffs_ready_callback) {
ret = ffs_obj->ffs_ready_callback(ffs);
if (ret)
goto done;
}
set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
return ret;
}
static void ffs_closed(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
struct f_fs_opts *opts;
struct config_item *ci;
ffs_dev_lock();
ffs_obj = ffs->private_data;
if (!ffs_obj)
goto done;
ffs_obj->desc_ready = false;
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
if (ffs_obj->opts)
opts = ffs_obj->opts;
else
goto done;
if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
|| !kref_read(&opts->func_inst.group.cg_item.ci_kref))
goto done;
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
if (test_bit(FFS_FL_BOUND, &ffs->flags))
unregister_gadget_item(ci);
return;
done:
ffs_dev_unlock();
}
/* Misc helper functions ****************************************************/
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
{
return nonblock
? mutex_trylock(mutex) ? 0 : -EAGAIN
: mutex_lock_interruptible(mutex);
}
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
char *data;
if (!len)
return NULL;
data = memdup_user(buf, len);
if (IS_ERR(data))
return data;
pr_vdebug("Buffer from user space:\n");
ffs_dump_mem("", data, len);
return data;
}
DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
| linux-master | drivers/usb/gadget/function/f_fs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_loopback.c - USB peripheral loopback configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>
#include "g_zero.h"
#include "u_f.h"
/*
* LOOPBACK FUNCTION ... a testing vehicle for USB peripherals.
*
* This takes messages of various sizes written OUT to a device, and loops
* them back so they can be read IN from it. It has been used by certain
* test applications. It supports limited testing of data queueing logic.
*/
struct f_loopback {
struct usb_function function;
struct usb_ep *in_ep;
struct usb_ep *out_ep;
unsigned qlen;
unsigned buflen;
};
static inline struct f_loopback *func_to_loop(struct usb_function *f)
{
return container_of(f, struct f_loopback, function);
}
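/*
 * Host-side exercise of this function is commonly done with the usbtest
 * driver plus the testusb tool shipped in the kernel tree
 * (tools/usb/testusb.c); the test numbers below are illustrative of
 * simple bulk write/read passes:
 *
 *	testusb -a -t 1		# bulk OUT (write)
 *	testusb -a -t 2		# bulk IN (read)
 */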
/*-------------------------------------------------------------------------*/
static struct usb_interface_descriptor loopback_intf = {
.bLength = sizeof(loopback_intf),
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
/* .iInterface = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor fs_loop_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor fs_loop_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *fs_loopback_descs[] = {
(struct usb_descriptor_header *) &loopback_intf,
(struct usb_descriptor_header *) &fs_loop_sink_desc,
(struct usb_descriptor_header *) &fs_loop_source_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor hs_loop_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor hs_loop_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *hs_loopback_descs[] = {
(struct usb_descriptor_header *) &loopback_intf,
(struct usb_descriptor_header *) &hs_loop_source_desc,
(struct usb_descriptor_header *) &hs_loop_sink_desc,
NULL,
};
/* super speed support: */
static struct usb_endpoint_descriptor ss_loop_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
};
static struct usb_endpoint_descriptor ss_loop_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
};
static struct usb_descriptor_header *ss_loopback_descs[] = {
(struct usb_descriptor_header *) &loopback_intf,
(struct usb_descriptor_header *) &ss_loop_source_desc,
(struct usb_descriptor_header *) &ss_loop_source_comp_desc,
(struct usb_descriptor_header *) &ss_loop_sink_desc,
(struct usb_descriptor_header *) &ss_loop_sink_comp_desc,
NULL,
};
/* function-specific strings: */
static struct usb_string strings_loopback[] = {
[0].s = "loop input to output",
{ } /* end of list */
};
static struct usb_gadget_strings stringtab_loop = {
.language = 0x0409, /* en-us */
.strings = strings_loopback,
};
static struct usb_gadget_strings *loopback_strings[] = {
&stringtab_loop,
NULL,
};
/*-------------------------------------------------------------------------*/
static int loopback_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_loopback *loop = func_to_loop(f);
int id;
int ret;
/* allocate interface ID(s) */
id = usb_interface_id(c, f);
if (id < 0)
return id;
loopback_intf.bInterfaceNumber = id;
id = usb_string_id(cdev);
if (id < 0)
return id;
strings_loopback[0].id = id;
loopback_intf.iInterface = id;
/* allocate endpoints */
loop->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_source_desc);
if (!loop->in_ep) {
autoconf_fail:
ERROR(cdev, "%s: can't autoconfigure on %s\n",
f->name, cdev->gadget->name);
return -ENODEV;
}
loop->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_sink_desc);
if (!loop->out_ep)
goto autoconf_fail;
/* support high speed hardware */
hs_loop_source_desc.bEndpointAddress =
fs_loop_source_desc.bEndpointAddress;
hs_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
/* support super speed hardware */
ss_loop_source_desc.bEndpointAddress =
fs_loop_source_desc.bEndpointAddress;
ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
ss_loopback_descs, ss_loopback_descs);
if (ret)
return ret;
DBG(cdev, "%s: IN/%s, OUT/%s\n",
f->name, loop->in_ep->name, loop->out_ep->name);
return 0;
}
static void lb_free_func(struct usb_function *f)
{
struct f_lb_opts *opts;
opts = container_of(f->fi, struct f_lb_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt--;
mutex_unlock(&opts->lock);
usb_free_all_descriptors(f);
kfree(func_to_loop(f));
}
static void loopback_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_loopback *loop = ep->driver_data;
struct usb_composite_dev *cdev = loop->function.config->cdev;
int status = req->status;
switch (status) {
case 0: /* normal completion? */
if (ep == loop->out_ep) {
/*
 * We received some data from the host, so queue it
 * back so the host can read it from our IN endpoint.
 */
struct usb_request *in_req = req->context;
in_req->zero = (req->actual < req->length);
in_req->length = req->actual;
ep = loop->in_ep;
req = in_req;
} else {
/*
 * We have just looped back a bunch of data to the
 * host. Now let's wait for some more data.
 */
req = req->context;
ep = loop->out_ep;
}
/* queue the buffer back to the host, or for the next bunch of data */
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status == 0) {
return;
} else {
ERROR(cdev, "Unable to loop back buffer to %s: %d\n",
ep->name, status);
goto free_req;
}
/* "should never get here" */
default:
ERROR(cdev, "%s loop complete --> %d, %d/%d\n", ep->name,
status, req->actual, req->length);
fallthrough;
/* NOTE: since this driver doesn't maintain an explicit record
* of requests it submitted (just maintains qlen count), we
* rely on the hardware driver to clean up on disconnect or
* endpoint disable.
*/
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
free_req:
usb_ep_free_request(ep == loop->in_ep ?
loop->out_ep : loop->in_ep,
req->context);
free_ep_req(ep, req);
return;
}
}
static void disable_loopback(struct f_loopback *loop)
{
struct usb_composite_dev *cdev;
cdev = loop->function.config->cdev;
disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
VDBG(cdev, "%s disabled\n", loop->function.name);
}
static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
{
return alloc_ep_req(ep, len);
}
static int alloc_requests(struct usb_composite_dev *cdev,
struct f_loopback *loop)
{
struct usb_request *in_req, *out_req;
int i;
int result = 0;
/*
 * Allocate a bunch of read buffers and queue them all at once.
 * We buffer at most 'qlen' transfers; we allocate buffers only
 * for OUT transfers and reuse them for IN transfers to implement
 * our loopback functionality.
 */
for (i = 0; i < loop->qlen && result == 0; i++) {
result = -ENOMEM;
in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
if (!in_req)
goto fail;
out_req = lb_alloc_ep_req(loop->out_ep, loop->buflen);
if (!out_req)
goto fail_in;
in_req->complete = loopback_complete;
out_req->complete = loopback_complete;
in_req->buf = out_req->buf;
/* length will be set in complete routine */
in_req->context = out_req;
out_req->context = in_req;
result = usb_ep_queue(loop->out_ep, out_req, GFP_ATOMIC);
if (result) {
ERROR(cdev, "%s queue req --> %d\n",
loop->out_ep->name, result);
goto fail_out;
}
}
return 0;
fail_out:
free_ep_req(loop->out_ep, out_req);
fail_in:
usb_ep_free_request(loop->in_ep, in_req);
fail:
return result;
}
static int enable_endpoint(struct usb_composite_dev *cdev,
struct f_loopback *loop, struct usb_ep *ep)
{
int result;
result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
if (result)
goto out;
result = usb_ep_enable(ep);
if (result < 0)
goto out;
ep->driver_data = loop;
result = 0;
out:
return result;
}
static int
enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop)
{
int result = 0;
result = enable_endpoint(cdev, loop, loop->in_ep);
if (result)
goto out;
result = enable_endpoint(cdev, loop, loop->out_ep);
if (result)
goto disable_in;
result = alloc_requests(cdev, loop);
if (result)
goto disable_out;
DBG(cdev, "%s enabled\n", loop->function.name);
return 0;
disable_out:
usb_ep_disable(loop->out_ep);
disable_in:
usb_ep_disable(loop->in_ep);
out:
return result;
}
static int loopback_set_alt(struct usb_function *f,
unsigned intf, unsigned alt)
{
struct f_loopback *loop = func_to_loop(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* we know alt is zero */
disable_loopback(loop);
return enable_loopback(cdev, loop);
}
static void loopback_disable(struct usb_function *f)
{
struct f_loopback *loop = func_to_loop(f);
disable_loopback(loop);
}
static struct usb_function *loopback_alloc(struct usb_function_instance *fi)
{
struct f_loopback *loop;
struct f_lb_opts *lb_opts;
loop = kzalloc(sizeof *loop, GFP_KERNEL);
if (!loop)
return ERR_PTR(-ENOMEM);
lb_opts = container_of(fi, struct f_lb_opts, func_inst);
mutex_lock(&lb_opts->lock);
lb_opts->refcnt++;
mutex_unlock(&lb_opts->lock);
loop->buflen = lb_opts->bulk_buflen;
loop->qlen = lb_opts->qlen;
if (!loop->qlen)
loop->qlen = 32;
loop->function.name = "loopback";
loop->function.bind = loopback_bind;
loop->function.set_alt = loopback_set_alt;
loop->function.disable = loopback_disable;
loop->function.strings = loopback_strings;
loop->function.free_func = lb_free_func;
return &loop->function;
}
static inline struct f_lb_opts *to_f_lb_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_lb_opts,
func_inst.group);
}
static void lb_attr_release(struct config_item *item)
{
struct f_lb_opts *lb_opts = to_f_lb_opts(item);
usb_put_function_instance(&lb_opts->func_inst);
}
static struct configfs_item_operations lb_item_ops = {
.release = lb_attr_release,
};
static ssize_t f_lb_opts_qlen_show(struct config_item *item, char *page)
{
struct f_lb_opts *opts = to_f_lb_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%d\n", opts->qlen);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_lb_opts_qlen_store(struct config_item *item,
const char *page, size_t len)
{
struct f_lb_opts *opts = to_f_lb_opts(item);
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
opts->qlen = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_lb_opts_, qlen);
static ssize_t f_lb_opts_bulk_buflen_show(struct config_item *item, char *page)
{
struct f_lb_opts *opts = to_f_lb_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%d\n", opts->bulk_buflen);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_lb_opts_bulk_buflen_store(struct config_item *item,
const char *page, size_t len)
{
struct f_lb_opts *opts = to_f_lb_opts(item);
int ret;
u32 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou32(page, 0, &num);
if (ret)
goto end;
opts->bulk_buflen = num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_lb_opts_, bulk_buflen);
static struct configfs_attribute *lb_attrs[] = {
&f_lb_opts_attr_qlen,
&f_lb_opts_attr_bulk_buflen,
NULL,
};
static const struct config_item_type lb_func_type = {
.ct_item_ops = &lb_item_ops,
.ct_attrs = lb_attrs,
.ct_owner = THIS_MODULE,
};
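/*
 * Informal usage sketch (the configfs paths are illustrative, not
 * mandated by this file): the attributes above surface as configfs
 * files once a Loopback instance is created, e.g.
 *
 *   mkdir /sys/kernel/config/usb_gadget/g1/functions/Loopback.0
 *   echo 64 > .../functions/Loopback.0/qlen
 *   echo 16384 > .../functions/Loopback.0/bulk_buflen
 *
 * Writes fail with -EBUSY once the instance is bound (refcnt != 0),
 * as enforced in the _store() handlers above.
 */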
static void lb_free_instance(struct usb_function_instance *fi)
{
struct f_lb_opts *lb_opts;
lb_opts = container_of(fi, struct f_lb_opts, func_inst);
kfree(lb_opts);
}
static struct usb_function_instance *loopback_alloc_instance(void)
{
struct f_lb_opts *lb_opts;
lb_opts = kzalloc(sizeof(*lb_opts), GFP_KERNEL);
if (!lb_opts)
return ERR_PTR(-ENOMEM);
mutex_init(&lb_opts->lock);
lb_opts->func_inst.free_func_inst = lb_free_instance;
lb_opts->bulk_buflen = GZERO_BULK_BUFLEN;
lb_opts->qlen = GZERO_QLEN;
config_group_init_type_name(&lb_opts->func_inst.group, "",
&lb_func_type);
return &lb_opts->func_inst;
}
DECLARE_USB_FUNCTION(Loopback, loopback_alloc_instance, loopback_alloc);
int __init lb_modinit(void)
{
return usb_function_register(&Loopbackusb_func);
}
void __exit lb_modexit(void)
{
usb_function_unregister(&Loopbackusb_func);
}
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/gadget/function/f_loopback.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_ecm.c -- USB CDC Ethernet (ECM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_ecm.h"
/*
* This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
* Ethernet link. The data transfer model is simple (packets sent and
* received over bulk endpoints using normal short packet termination),
* and the control model exposes various data and optional notifications.
*
* ECM is well standardized and (except for Microsoft) supported by most
* operating systems with USB host support. It's the preferred interop
* solution for Ethernet over USB, at least for firmware based solutions.
* (Hardware solutions tend to be more minimalist.) A newer and simpler
* "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
*
* Note that ECM requires the use of "alternate settings" for its data
* interface. This means that the set_alt() method has real work to do,
* and also means that a get_alt() method is required.
*/
enum ecm_notify_state {
ECM_NOTIFY_NONE, /* don't notify */
ECM_NOTIFY_CONNECT, /* issue CONNECT next */
ECM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */
};
struct f_ecm {
struct gether port;
u8 ctrl_id, data_id;
char ethaddr[14];
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
atomic_t notify_count;
bool is_open;
/* FIXME is_open needs some irq-ish locking
* ... possibly the same as port.ioport
*/
};
static inline struct f_ecm *func_to_ecm(struct usb_function *f)
{
return container_of(f, struct f_ecm, port.func);
}
/*-------------------------------------------------------------------------*/
/*
* Include the status endpoint if we can, even though it's optional.
*
* Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
* packet, to simplify cancellation; and a big transfer interval, to
* waste less bandwidth.
*
* Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
* if they ignore the connect/disconnect notifications that real aether
* can provide. More advanced cdc configurations might want to support
* encapsulated commands (vendor-specific, using control-OUT).
*/
#define ECM_STATUS_INTERVAL_MS 32
#define ECM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
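/*
 * Informal layout of the largest notification this byte count must
 * hold, the SPEED_CHANGE event: an 8-byte usb_cdc_notification header
 * followed by two le32 link speeds,
 *
 *   bytes 0..7  : bmRequestType, bNotificationType, wValue, wIndex,
 *                 wLength (SPEED_CHANGE carries wLength = 8)
 *   bytes 8..11 : downlink bitrate (bits/sec, le32)
 *   bytes 12..15: uplink bitrate (bits/sec, le32)
 *
 * ecm_do_notify() below fills both bitrates with the same
 * gether_bitrate() value.
 */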
/* interface descriptor: */
static struct usb_interface_assoc_descriptor
ecm_iad_descriptor = {
.bLength = sizeof ecm_iad_descriptor,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
/* .bFirstInterface = DYNAMIC, */
.bInterfaceCount = 2, /* control + data */
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bFunctionProtocol = USB_CDC_PROTO_NONE,
/* .iFunction = DYNAMIC */
};
static struct usb_interface_descriptor ecm_control_intf = {
.bLength = sizeof ecm_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
/* status endpoint is optional; this could be patched later */
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
/* .iInterface = DYNAMIC */
};
static struct usb_cdc_header_desc ecm_header_desc = {
.bLength = sizeof ecm_header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
.bcdCDC = cpu_to_le16(0x0110),
};
static struct usb_cdc_union_desc ecm_union_desc = {
.bLength = sizeof(ecm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
/* .bMasterInterface0 = DYNAMIC */
/* .bSlaveInterface0 = DYNAMIC */
};
static struct usb_cdc_ether_desc ecm_desc = {
.bLength = sizeof ecm_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
/* this descriptor actually adds value, surprise! */
/* .iMACAddress = DYNAMIC */
.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
.wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
.wNumberMCFilters = cpu_to_le16(0),
.bNumberPowerFilters = 0,
};
/* the default data interface has no endpoints ... */
static struct usb_interface_descriptor ecm_data_nop_intf = {
.bLength = sizeof ecm_data_nop_intf,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 1,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
/* ... but the "real" data interface has two bulk endpoints */
static struct usb_interface_descriptor ecm_data_intf = {
.bLength = sizeof ecm_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 1,
.bAlternateSetting = 1,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
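/*
 * Informal host-side view of the two altsettings above: the host
 * selects alt 0 (no endpoints) to quiesce the link and alt 1 to carry
 * traffic, e.g. via SET_INTERFACE(wIndex = data interface, wValue = 1),
 * which ecm_set_alt() below turns into gether_connect().
 */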
/* full speed support: */
static struct usb_endpoint_descriptor fs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = ECM_STATUS_INTERVAL_MS,
};
static struct usb_endpoint_descriptor fs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor fs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *ecm_fs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
(struct usb_descriptor_header *) &ecm_desc,
/* NOTE: status endpoint might need to be removed */
(struct usb_descriptor_header *) &fs_ecm_notify_desc,
/* data interface, altsettings 0 and 1 */
(struct usb_descriptor_header *) &ecm_data_nop_intf,
(struct usb_descriptor_header *) &ecm_data_intf,
(struct usb_descriptor_header *) &fs_ecm_in_desc,
(struct usb_descriptor_header *) &fs_ecm_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor hs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = USB_MS_TO_HS_INTERVAL(ECM_STATUS_INTERVAL_MS),
};
static struct usb_endpoint_descriptor hs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor hs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *ecm_hs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
(struct usb_descriptor_header *) &ecm_desc,
/* NOTE: status endpoint might need to be removed */
(struct usb_descriptor_header *) &hs_ecm_notify_desc,
/* data interface, altsettings 0 and 1 */
(struct usb_descriptor_header *) &ecm_data_nop_intf,
(struct usb_descriptor_header *) &ecm_data_intf,
(struct usb_descriptor_header *) &hs_ecm_in_desc,
(struct usb_descriptor_header *) &hs_ecm_out_desc,
NULL,
};
/* super speed support: */
static struct usb_endpoint_descriptor ss_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = USB_MS_TO_HS_INTERVAL(ECM_STATUS_INTERVAL_MS),
};
static struct usb_ss_ep_comp_descriptor ss_ecm_intr_comp_desc = {
.bLength = sizeof ss_ecm_intr_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 3 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
.wBytesPerInterval = cpu_to_le16(ECM_STATUS_BYTECOUNT),
};
static struct usb_endpoint_descriptor ss_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_endpoint_descriptor ss_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_ecm_bulk_comp_desc = {
.bLength = sizeof ss_ecm_bulk_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
static struct usb_descriptor_header *ecm_ss_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
(struct usb_descriptor_header *) &ecm_desc,
/* NOTE: status endpoint might need to be removed */
(struct usb_descriptor_header *) &ss_ecm_notify_desc,
(struct usb_descriptor_header *) &ss_ecm_intr_comp_desc,
/* data interface, altsettings 0 and 1 */
(struct usb_descriptor_header *) &ecm_data_nop_intf,
(struct usb_descriptor_header *) &ecm_data_intf,
(struct usb_descriptor_header *) &ss_ecm_in_desc,
(struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
(struct usb_descriptor_header *) &ss_ecm_out_desc,
(struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
NULL,
};
/* string descriptors: */
static struct usb_string ecm_string_defs[] = {
[0].s = "CDC Ethernet Control Model (ECM)",
[1].s = "",
[2].s = "CDC Ethernet Data",
[3].s = "CDC ECM",
{ } /* end of list */
};
static struct usb_gadget_strings ecm_string_table = {
.language = 0x0409, /* en-us */
.strings = ecm_string_defs,
};
static struct usb_gadget_strings *ecm_strings[] = {
&ecm_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
static void ecm_do_notify(struct f_ecm *ecm)
{
struct usb_request *req = ecm->notify_req;
struct usb_cdc_notification *event;
struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
__le32 *data;
int status;
/* notification already in flight? */
if (atomic_read(&ecm->notify_count))
return;
event = req->buf;
switch (ecm->notify_state) {
case ECM_NOTIFY_NONE:
return;
case ECM_NOTIFY_CONNECT:
event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
if (ecm->is_open)
event->wValue = cpu_to_le16(1);
else
event->wValue = cpu_to_le16(0);
event->wLength = 0;
req->length = sizeof *event;
DBG(cdev, "notify connect %s\n",
ecm->is_open ? "true" : "false");
ecm->notify_state = ECM_NOTIFY_SPEED;
break;
case ECM_NOTIFY_SPEED:
event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
event->wValue = cpu_to_le16(0);
event->wLength = cpu_to_le16(8);
req->length = ECM_STATUS_BYTECOUNT;
/* SPEED_CHANGE data is up/down speeds in bits/sec */
data = req->buf + sizeof *event;
data[0] = cpu_to_le32(gether_bitrate(cdev->gadget));
data[1] = data[0];
DBG(cdev, "notify speed %d\n", gether_bitrate(cdev->gadget));
ecm->notify_state = ECM_NOTIFY_NONE;
break;
}
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ecm->ctrl_id);
atomic_inc(&ecm->notify_count);
status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
if (status < 0) {
atomic_dec(&ecm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
static void ecm_notify(struct f_ecm *ecm)
{
/* NOTE on most versions of Linux, host side cdc-ethernet
* won't listen for notifications until its netdevice opens.
* The first notification then sits in the FIFO for a long
* time, and the second one is queued.
*/
ecm->notify_state = ECM_NOTIFY_CONNECT;
ecm_do_notify(ecm);
}
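/*
 * Informal view of the notification sequencing implemented by
 * ecm_notify()/ecm_do_notify()/ecm_notify_complete():
 *
 *   NONE --ecm_notify()--> CONNECT --(request completes)--> SPEED
 *     ^                                                       |
 *     +----------------(request completes)--------------------+
 *
 * Each completion calls ecm_do_notify() again, so a NETWORK_CONNECTION
 * notification is always followed by a SPEED_CHANGE notification.
 */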
static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_ecm *ecm = req->context;
struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
struct usb_cdc_notification *event = req->buf;
switch (req->status) {
case 0:
/* no fault */
atomic_dec(&ecm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
atomic_set(&ecm->notify_count, 0);
ecm->notify_state = ECM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
atomic_dec(&ecm->notify_count);
break;
}
ecm_do_notify(ecm);
}
static int ecm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct f_ecm *ecm = func_to_ecm(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
*/
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_SET_ETHERNET_PACKET_FILTER:
/* see 6.2.30: no data, wIndex = interface,
* wValue = packet filter bitmap
*/
if (w_length != 0 || w_index != ecm->ctrl_id)
goto invalid;
DBG(cdev, "packet filter %02x\n", w_value);
/* REVISIT locking of cdc_filter. This assumes the UDC
* driver won't have a concurrent packet TX irq running on
* another CPU; or that if it does, this write is atomic...
*/
ecm->port.cdc_filter = w_value;
value = 0;
break;
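/*
 * Informal example of the request handled above: the host sends
 * bmRequestType 0x21 (class, interface, host-to-device), bRequest
 * USB_CDC_SET_ETHERNET_PACKET_FILTER, wValue = filter bitmap,
 * wIndex = ecm->ctrl_id, wLength = 0; the zero-length status phase
 * queued below acknowledges it.
 */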
/* and optionally:
* case USB_CDC_SEND_ENCAPSULATED_COMMAND:
* case USB_CDC_GET_ENCAPSULATED_RESPONSE:
* case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
* case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
* case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
* case USB_CDC_GET_ETHERNET_STATISTIC:
*/
default:
invalid:
DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (value >= 0) {
DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = 0;
req->length = value;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0)
ERROR(cdev, "ecm req %02x.%02x response err %d\n",
ctrl->bRequestType, ctrl->bRequest,
value);
}
/* device either stalls (value < 0) or reports success */
return value;
}
static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_ecm *ecm = func_to_ecm(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* Control interface has only altsetting 0 */
if (intf == ecm->ctrl_id) {
if (alt != 0)
goto fail;
VDBG(cdev, "reset ecm control %d\n", intf);
usb_ep_disable(ecm->notify);
if (!(ecm->notify->desc)) {
VDBG(cdev, "init ecm ctrl %d\n", intf);
if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
goto fail;
}
usb_ep_enable(ecm->notify);
/* Data interface has two altsettings, 0 and 1 */
} else if (intf == ecm->data_id) {
if (alt > 1)
goto fail;
if (ecm->port.in_ep->enabled) {
DBG(cdev, "reset ecm\n");
gether_disconnect(&ecm->port);
}
if (!ecm->port.in_ep->desc ||
!ecm->port.out_ep->desc) {
DBG(cdev, "init ecm\n");
if (config_ep_by_speed(cdev->gadget, f,
ecm->port.in_ep) ||
config_ep_by_speed(cdev->gadget, f,
ecm->port.out_ep)) {
ecm->port.in_ep->desc = NULL;
ecm->port.out_ep->desc = NULL;
goto fail;
}
}
/* CDC Ethernet only sends data in non-default altsettings.
* Changing altsettings resets filters, statistics, etc.
*/
if (alt == 1) {
struct net_device *net;
/* Enable zlps by default for ECM conformance;
* override for musb_hdrc (avoids txdma ovhead).
*/
ecm->port.is_zlp_ok =
gadget_is_zlp_supported(cdev->gadget);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
net = gether_connect(&ecm->port);
if (IS_ERR(net))
return PTR_ERR(net);
}
/* NOTE this can be a minor disagreement with the ECM spec,
* which says speed notifications will "always" follow
* connection notifications. But we allow one connect to
* follow another (if the first is in flight), and instead
* just guarantee that a speed notification is always sent.
*/
ecm_notify(ecm);
} else
goto fail;
return 0;
fail:
return -EINVAL;
}
/* Because the data interface supports multiple altsettings,
* this ECM function *MUST* implement a get_alt() method.
*/
static int ecm_get_alt(struct usb_function *f, unsigned intf)
{
struct f_ecm *ecm = func_to_ecm(f);
if (intf == ecm->ctrl_id)
return 0;
return ecm->port.in_ep->enabled ? 1 : 0;
}
static void ecm_disable(struct usb_function *f)
{
struct f_ecm *ecm = func_to_ecm(f);
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "ecm deactivated\n");
if (ecm->port.in_ep->enabled) {
gether_disconnect(&ecm->port);
} else {
ecm->port.in_ep->desc = NULL;
ecm->port.out_ep->desc = NULL;
}
usb_ep_disable(ecm->notify);
ecm->notify->desc = NULL;
}
/*-------------------------------------------------------------------------*/
/*
* Callbacks let us notify the host about connect/disconnect when the
* net device is opened or closed.
*
* For testing, note that link states on this side include both opened
* and closed variants of:
*
* - disconnected/unconfigured
* - configured but inactive (data alt 0)
* - configured and active (data alt 1)
*
* Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
* SET_INTERFACE (altsetting). Remember also that "configured" doesn't
* imply the host is actually polling the notification endpoint, and
* likewise that "active" doesn't imply it's actually using the data
* endpoints for traffic.
*/
static void ecm_open(struct gether *geth)
{
struct f_ecm *ecm = func_to_ecm(&geth->func);
DBG(ecm->port.func.config->cdev, "%s\n", __func__);
ecm->is_open = true;
ecm_notify(ecm);
}
static void ecm_close(struct gether *geth)
{
struct f_ecm *ecm = func_to_ecm(&geth->func);
DBG(ecm->port.func.config->cdev, "%s\n", __func__);
ecm->is_open = false;
ecm_notify(ecm);
}
/*-------------------------------------------------------------------------*/
/* ethernet function driver setup/binding */
static int
ecm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_ecm *ecm = func_to_ecm(f);
struct usb_string *us;
int status = 0;
struct usb_ep *ep;
struct f_ecm_opts *ecm_opts;
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst);
mutex_lock(&ecm_opts->lock);
gether_set_gadget(ecm_opts->net, cdev->gadget);
if (!ecm_opts->bound) {
status = gether_register_netdev(ecm_opts->net);
ecm_opts->bound = true;
}
mutex_unlock(&ecm_opts->lock);
if (status)
return status;
ecm_string_defs[1].s = ecm->ethaddr;
us = usb_gstrings_attach(cdev, ecm_strings,
ARRAY_SIZE(ecm_string_defs));
if (IS_ERR(us))
return PTR_ERR(us);
ecm_control_intf.iInterface = us[0].id;
ecm_data_intf.iInterface = us[2].id;
ecm_desc.iMACAddress = us[1].id;
ecm_iad_descriptor.iFunction = us[3].id;
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ecm->ctrl_id = status;
ecm_iad_descriptor.bFirstInterface = status;
ecm_control_intf.bInterfaceNumber = status;
ecm_union_desc.bMasterInterface0 = status;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ecm->data_id = status;
ecm_data_nop_intf.bInterfaceNumber = status;
ecm_data_intf.bInterfaceNumber = status;
ecm_union_desc.bSlaveInterface0 = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
if (!ep)
goto fail;
ecm->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
if (!ep)
goto fail;
ecm->port.out_ep = ep;
/* NOTE: a status/notification endpoint is *OPTIONAL* but we
* don't treat it that way. It's simpler, and some newer CDC
* profiles (wireless handsets) no longer treat it as optional.
*/
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
if (!ep)
goto fail;
ecm->notify = ep;
status = -ENOMEM;
/* allocate notification request and buffer */
ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!ecm->notify_req)
goto fail;
ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
if (!ecm->notify_req->buf)
goto fail;
ecm->notify_req->context = ecm;
ecm->notify_req->complete = ecm_notify_complete;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
hs_ecm_in_desc.bEndpointAddress = fs_ecm_in_desc.bEndpointAddress;
hs_ecm_out_desc.bEndpointAddress = fs_ecm_out_desc.bEndpointAddress;
hs_ecm_notify_desc.bEndpointAddress =
fs_ecm_notify_desc.bEndpointAddress;
ss_ecm_in_desc.bEndpointAddress = fs_ecm_in_desc.bEndpointAddress;
ss_ecm_out_desc.bEndpointAddress = fs_ecm_out_desc.bEndpointAddress;
ss_ecm_notify_desc.bEndpointAddress =
fs_ecm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
ecm_ss_function, ecm_ss_function);
if (status)
goto fail;
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
* until we're activated via set_alt().
*/
ecm->port.open = ecm_open;
ecm->port.close = ecm_close;
DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
ecm->port.in_ep->name, ecm->port.out_ep->name,
ecm->notify->name);
return 0;
fail:
if (ecm->notify_req) {
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_ecm_opts,
func_inst.group);
}
/* f_ecm_item_ops */
USB_ETHERNET_CONFIGFS_ITEM(ecm);
/* f_ecm_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ecm);
/* f_ecm_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ecm);
/* f_ecm_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ecm);
/* f_ecm_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ecm);
static struct configfs_attribute *ecm_attrs[] = {
&ecm_opts_attr_dev_addr,
&ecm_opts_attr_host_addr,
&ecm_opts_attr_qmult,
&ecm_opts_attr_ifname,
NULL,
};
static const struct config_item_type ecm_func_type = {
.ct_item_ops = &ecm_item_ops,
.ct_attrs = ecm_attrs,
.ct_owner = THIS_MODULE,
};
static void ecm_free_inst(struct usb_function_instance *f)
{
struct f_ecm_opts *opts;
opts = container_of(f, struct f_ecm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
else
free_netdev(opts->net);
kfree(opts);
}
static struct usb_function_instance *ecm_alloc_inst(void)
{
struct f_ecm_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ecm_free_inst;
opts->net = gether_setup_default();
if (IS_ERR(opts->net)) {
struct net_device *net = opts->net;
kfree(opts);
return ERR_CAST(net);
}
config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
return &opts->func_inst;
}
static void ecm_suspend(struct usb_function *f)
{
struct f_ecm *ecm = func_to_ecm(f);
struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
DBG(cdev, "ECM Suspend\n");
gether_suspend(&ecm->port);
}
static void ecm_resume(struct usb_function *f)
{
struct f_ecm *ecm = func_to_ecm(f);
struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
DBG(cdev, "ECM Resume\n");
gether_resume(&ecm->port);
}
static void ecm_free(struct usb_function *f)
{
struct f_ecm *ecm;
struct f_ecm_opts *opts;
ecm = func_to_ecm(f);
opts = container_of(f->fi, struct f_ecm_opts, func_inst);
kfree(ecm);
mutex_lock(&opts->lock);
opts->refcnt--;
mutex_unlock(&opts->lock);
}
static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_ecm *ecm = func_to_ecm(f);
DBG(c->cdev, "ecm unbind\n");
usb_free_all_descriptors(f);
if (atomic_read(&ecm->notify_count)) {
usb_ep_dequeue(ecm->notify, ecm->notify_req);
atomic_set(&ecm->notify_count, 0);
}
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
{
struct f_ecm *ecm;
struct f_ecm_opts *opts;
int status;
/* allocate and initialize one new instance */
ecm = kzalloc(sizeof(*ecm), GFP_KERNEL);
if (!ecm)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_ecm_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt++;
/* export host's Ethernet address in CDC format */
status = gether_get_host_addr_cdc(opts->net, ecm->ethaddr,
sizeof(ecm->ethaddr));
if (status < 12) {
kfree(ecm);
mutex_unlock(&opts->lock);
return ERR_PTR(-EINVAL);
}
ecm->port.ioport = netdev_priv(opts->net);
mutex_unlock(&opts->lock);
ecm->port.cdc_filter = DEFAULT_FILTER;
ecm->port.func.name = "cdc_ethernet";
/* descriptors are per-instance copies */
ecm->port.func.bind = ecm_bind;
ecm->port.func.unbind = ecm_unbind;
ecm->port.func.set_alt = ecm_set_alt;
ecm->port.func.get_alt = ecm_get_alt;
ecm->port.func.setup = ecm_setup;
ecm->port.func.disable = ecm_disable;
ecm->port.func.free_func = ecm_free;
ecm->port.func.suspend = ecm_suspend;
ecm->port.func.resume = ecm_resume;
return &ecm->port.func;
}
DECLARE_USB_FUNCTION_INIT(ecm, ecm_alloc_inst, ecm_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
| linux-master | drivers/usb/gadget/function/f_ecm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* u_uac1.c -- ALSA audio utilities for Gadget stack
*
* Copyright (C) 2008 Bryan Wu <[email protected]>
* Copyright (C) 2008 Analog Devices, Inc
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include "u_uac1_legacy.h"
/*
* This component encapsulates the ALSA devices for the USB audio gadget
*/
/*-------------------------------------------------------------------------*/
/*
* Some ALSA internal helper functions
*/
static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
{
struct snd_interval t;
t.empty = 0;
t.min = t.max = val;
t.openmin = t.openmax = 0;
t.integer = 1;
return snd_interval_refine(i, &t);
}
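/*
 * Informal example of the helper above: refining a rate interval of
 * [8000, 192000] with val = 48000 collapses it to the single point
 * [48000, 48000]; snd_interval_refine() reports whether the interval
 * changed and fails if the refinement would leave it empty.
 */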
static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, unsigned int val,
int dir)
{
int changed;
if (hw_is_mask(var)) {
struct snd_mask *m = hw_param_mask(params, var);
if (val == 0 && dir < 0) {
changed = -EINVAL;
snd_mask_none(m);
} else {
if (dir > 0)
val++;
else if (dir < 0)
val--;
changed = snd_mask_refine_set(
hw_param_mask(params, var), val);
}
} else if (hw_is_interval(var)) {
struct snd_interval *i = hw_param_interval(params, var);
if (val == 0 && dir < 0) {
changed = -EINVAL;
snd_interval_none(i);
} else if (dir == 0)
changed = snd_interval_refine_set(i, val);
else {
struct snd_interval t;
t.openmin = 1;
t.openmax = 1;
t.empty = 0;
t.integer = 0;
if (dir < 0) {
t.min = val - 1;
t.max = val;
} else {
t.min = val;
t.max = val+1;
}
changed = snd_interval_refine(i, &t);
}
} else
return -EINVAL;
if (changed) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
return changed;
}
/*-------------------------------------------------------------------------*/
/*
* Set default hardware params
*/
static int playback_default_hw_params(struct gaudio_snd_dev *snd)
{
struct snd_pcm_substream *substream = snd->substream;
struct snd_pcm_hw_params *params;
snd_pcm_sframes_t result;
/*
* SNDRV_PCM_ACCESS_RW_INTERLEAVED,
* SNDRV_PCM_FORMAT_S16_LE
* CHANNELS: 2
* RATE: 48000
*/
snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
snd->format = SNDRV_PCM_FORMAT_S16_LE;
snd->channels = 2;
snd->rate = 48000;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
_snd_pcm_hw_params_any(params);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
snd->access, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
snd->format, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
snd->channels, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
snd->rate, 0);
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
ERROR(snd->card,
"Preparing sound card failed: %d\n", (int)result);
kfree(params);
return result;
}
/* Store the hardware parameters */
snd->access = params_access(params);
snd->format = params_format(params);
snd->channels = params_channels(params);
snd->rate = params_rate(params);
kfree(params);
INFO(snd->card,
"Hardware params: access %x, format %x, channels %d, rate %d\n",
snd->access, snd->format, snd->channels, snd->rate);
return 0;
}
/*
* Playback audio buffer data by ALSA PCM device
*/
size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
{
struct gaudio_snd_dev *snd = &card->playback;
struct snd_pcm_substream *substream = snd->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
ssize_t result;
snd_pcm_sframes_t frames;
try_again:
if (runtime->state == SNDRV_PCM_STATE_XRUN ||
runtime->state == SNDRV_PCM_STATE_SUSPENDED) {
result = snd_pcm_kernel_ioctl(substream,
SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
ERROR(card, "Preparing sound card failed: %d\n",
(int)result);
return result;
}
}
frames = bytes_to_frames(runtime, count);
result = snd_pcm_kernel_write(snd->substream, buf, frames);
if (result != frames) {
ERROR(card, "Playback error: %d\n", (int)result);
goto try_again;
}
return 0;
}
int u_audio_get_playback_channels(struct gaudio *card)
{
return card->playback.channels;
}
int u_audio_get_playback_rate(struct gaudio *card)
{
return card->playback.rate;
}
/*
* Open ALSA PCM and control device files
* Initialize the PCM or control device
*/
static int gaudio_open_snd_dev(struct gaudio *card)
{
struct snd_pcm_file *pcm_file;
struct gaudio_snd_dev *snd;
struct f_uac1_legacy_opts *opts;
char *fn_play, *fn_cap, *fn_cntl;
opts = container_of(card->func.fi, struct f_uac1_legacy_opts,
func_inst);
fn_play = opts->fn_play;
fn_cap = opts->fn_cap;
fn_cntl = opts->fn_cntl;
/* Open control device */
snd = &card->control;
snd->filp = filp_open(fn_cntl, O_RDWR, 0);
if (IS_ERR(snd->filp)) {
int ret = PTR_ERR(snd->filp);
ERROR(card, "unable to open sound control device file: %s\n",
fn_cntl);
snd->filp = NULL;
return ret;
}
snd->card = card;
/* Open PCM playback device and setup substream */
snd = &card->playback;
snd->filp = filp_open(fn_play, O_WRONLY, 0);
if (IS_ERR(snd->filp)) {
int ret = PTR_ERR(snd->filp);
ERROR(card, "No such PCM playback device: %s\n", fn_play);
snd->filp = NULL;
return ret;
}
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
playback_default_hw_params(snd);
/* Open PCM capture device and setup substream */
snd = &card->capture;
snd->filp = filp_open(fn_cap, O_RDONLY, 0);
if (IS_ERR(snd->filp)) {
ERROR(card, "No such PCM capture device: %s\n", fn_cap);
snd->substream = NULL;
snd->card = NULL;
snd->filp = NULL;
} else {
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
}
return 0;
}
/*
* Close ALSA PCM and control device files
*/
static int gaudio_close_snd_dev(struct gaudio *gau)
{
struct gaudio_snd_dev *snd;
/* Close control device */
snd = &gau->control;
if (snd->filp)
filp_close(snd->filp, NULL);
/* Close PCM playback device */
snd = &gau->playback;
if (snd->filp)
filp_close(snd->filp, NULL);
/* Close PCM capture device */
snd = &gau->capture;
if (snd->filp)
filp_close(snd->filp, NULL);
return 0;
}
/*
* gaudio_setup - set up the ALSA interface and prepare for USB transfer
*
* This sets up the PCM, mixer or MIDI ALSA devices for use by the USB gadget.
*
* Returns negative errno, or zero on success
*/
int gaudio_setup(struct gaudio *card)
{
int ret;
ret = gaudio_open_snd_dev(card);
if (ret)
ERROR(card, "we need at least one control device\n");
return ret;
}
/*
* gaudio_cleanup - remove ALSA device interface
*
* This is called to free all resources allocated by @gaudio_setup().
*/
void gaudio_cleanup(struct gaudio *the_card)
{
if (the_card)
gaudio_close_snd_dev(the_card);
}
| linux-master | drivers/usb/gadget/function/u_uac1_legacy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_video.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart ([email protected])
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <asm/unaligned.h>
#include <media/v4l2-dev.h>
#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
/* --------------------------------------------------------------------------
* Video codecs
*/
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
u8 *data, int len)
{
struct uvc_device *uvc = container_of(video, struct uvc_device, video);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
int pos = 2;
data[1] = UVC_STREAM_EOH | video->fid;
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
data[1] |= UVC_STREAM_PTS;
put_unaligned_le32(pts, &data[pos]);
pos += 4;
}
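/*
 * Worked example (informal): a buffer timestamp of 1.000001 s gives
 * 1000001 us, so pts = 1000001 * 48 = 48000048 ticks of the 48 MHz
 * clock declared above.
 */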
if (cdev->gadget->ops->get_frame) {
u32 sof, stc;
sof = usb_gadget_frame_number(cdev->gadget);
ktime_get_ts64(&ts);
stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
data[1] |= UVC_STREAM_SCR;
put_unaligned_le32(stc, &data[pos]);
put_unaligned_le16(sof, &data[pos+4]);
pos += 6;
}
data[0] = pos;
if (buf->bytesused - video->queue.buf_used <= len - pos)
data[1] |= UVC_STREAM_EOF;
return pos;
}
static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
u8 *data, int len)
{
struct uvc_video_queue *queue = &video->queue;
unsigned int nbytes;
void *mem;
/* Copy video data to the USB buffer. */
mem = buf->mem + queue->buf_used;
nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
memcpy(data, mem, nbytes);
queue->buf_used += nbytes;
return nbytes;
}
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
struct uvc_buffer *buf)
{
void *mem = req->buf;
struct uvc_request *ureq = req->context;
int len = video->req_size;
int ret;
/* Add a header at the beginning of the payload. */
if (video->payload_size == 0) {
ret = uvc_video_encode_header(video, buf, mem, len);
video->payload_size += ret;
mem += ret;
len -= ret;
}
/* Process video data. */
len = min((int)(video->max_payload_size - video->payload_size), len);
ret = uvc_video_encode_data(video, buf, mem, len);
video->payload_size += ret;
len -= ret;
req->length = video->req_size - len;
req->zero = video->payload_size == video->max_payload_size;
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
list_del(&buf->queue);
video->fid ^= UVC_STREAM_FID;
ureq->last_buf = buf;
video->payload_size = 0;
}
if (video->payload_size == video->max_payload_size ||
video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
buf->bytesused == video->queue.buf_used)
video->payload_size = 0;
}
static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
struct uvc_buffer *buf)
{
unsigned int pending = buf->bytesused - video->queue.buf_used;
struct uvc_request *ureq = req->context;
struct scatterlist *sg, *iter;
unsigned int len = video->req_size;
unsigned int sg_left, part = 0;
unsigned int i;
int header_len;
sg = ureq->sgt.sgl;
sg_init_table(sg, ureq->sgt.nents);
/* Init the header. */
header_len = uvc_video_encode_header(video, buf, ureq->header,
video->req_size);
sg_set_buf(sg, ureq->header, header_len);
len -= header_len;
if (pending <= len)
len = pending;
req->length = (len == pending) ?
len + header_len : video->req_size;
/* Init the pending sgs with payload */
sg = sg_next(sg);
for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
if (!len || !buf->sg || !buf->sg->length)
break;
sg_left = buf->sg->length - buf->offset;
part = min_t(unsigned int, len, sg_left);
sg_set_page(iter, sg_page(buf->sg), part, buf->offset);
if (part == sg_left) {
buf->offset = 0;
buf->sg = sg_next(buf->sg);
} else {
buf->offset += part;
}
len -= part;
}
/* Assign the video data with header. */
req->buf = NULL;
req->sg = ureq->sgt.sgl;
req->num_sgs = i + 1;
req->length -= len;
video->queue.buf_used += req->length - header_len;
if (buf->bytesused == video->queue.buf_used || !buf->sg ||
video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
buf->offset = 0;
list_del(&buf->queue);
video->fid ^= UVC_STREAM_FID;
ureq->last_buf = buf;
}
}
static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
struct uvc_buffer *buf)
{
void *mem = req->buf;
struct uvc_request *ureq = req->context;
int len = video->req_size;
int ret;
/* Add the header. */
ret = uvc_video_encode_header(video, buf, mem, len);
mem += ret;
len -= ret;
/* Process video data. */
ret = uvc_video_encode_data(video, buf, mem, len);
len -= ret;
req->length = video->req_size - len;
if (buf->bytesused == video->queue.buf_used ||
video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
list_del(&buf->queue);
video->fid ^= UVC_STREAM_FID;
ureq->last_buf = buf;
}
}
/* --------------------------------------------------------------------------
* Request handling
*/
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
int ret;
ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
if (ret < 0) {
uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
ret);
/* If the endpoint is disabled the descriptor may be NULL. */
if (video->ep->desc) {
/* Isochronous endpoints can't be halted. */
if (usb_endpoint_xfer_bulk(video->ep->desc))
usb_ep_set_halt(video->ep);
}
}
return ret;
}
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_request *ureq = req->context;
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
struct uvc_device *uvc = video->uvc;
unsigned long flags;
switch (req->status) {
case 0:
break;
case -EXDEV:
uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
break;
case -ESHUTDOWN: /* disconnect from host. */
uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
break;
default:
uvcg_warn(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
}
if (ureq->last_buf) {
uvcg_complete_buffer(&video->queue, ureq->last_buf);
ureq->last_buf = NULL;
}
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
if (uvc->state == UVC_STATE_STREAMING)
queue_work(video->async_wq, &video->pump);
}
static int
uvc_video_free_requests(struct uvc_video *video)
{
unsigned int i;
if (video->ureq) {
for (i = 0; i < video->uvc_num_requests; ++i) {
sg_free_table(&video->ureq[i].sgt);
if (video->ureq[i].req) {
usb_ep_free_request(video->ep, video->ureq[i].req);
video->ureq[i].req = NULL;
}
if (video->ureq[i].req_buffer) {
kfree(video->ureq[i].req_buffer);
video->ureq[i].req_buffer = NULL;
}
}
kfree(video->ureq);
video->ureq = NULL;
}
INIT_LIST_HEAD(&video->req_free);
video->req_size = 0;
return 0;
}
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
unsigned int req_size;
unsigned int i;
int ret = -ENOMEM;
BUG_ON(video->req_size);
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
* (video->ep->mult);
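/*
 * Informal example: a SuperSpeed isoc endpoint with maxpacket = 1024,
 * maxburst = 16 and mult = 3 yields
 * req_size = 1024 * 16 * 3 = 49152 bytes per request.
 */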
video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
if (video->ureq == NULL)
return -ENOMEM;
for (i = 0; i < video->uvc_num_requests; ++i) {
video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
if (video->ureq[i].req_buffer == NULL)
goto error;
video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
if (video->ureq[i].req == NULL)
goto error;
video->ureq[i].req->buf = video->ureq[i].req_buffer;
video->ureq[i].req->length = 0;
video->ureq[i].req->complete = uvc_video_complete;
video->ureq[i].req->context = &video->ureq[i];
video->ureq[i].video = video;
video->ureq[i].last_buf = NULL;
list_add_tail(&video->ureq[i].req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
sg_alloc_table(&video->ureq[i].sgt,
DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
PAGE_SIZE) + 2, GFP_KERNEL);
}
video->req_size = req_size;
return 0;
error:
uvc_video_free_requests(video);
return ret;
}
/* --------------------------------------------------------------------------
* Video streaming
*/
/*
* uvcg_video_pump - Pump video data into the USB requests
*
* This function fills the available USB requests (listed in req_free) with
* video data from the queued buffers.
*/
static void uvcg_video_pump(struct work_struct *work)
{
struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
/* video->max_payload_size is only set when using bulk transfer */
bool is_bulk = video->max_payload_size;
struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
bool buf_done;
int ret;
while (video->ep->enabled) {
/*
* Retrieve the first available USB request, protected by the
* request lock.
*/
spin_lock_irqsave(&video->req_lock, flags);
if (list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
req = list_first_entry(&video->req_free, struct usb_request,
list);
list_del(&req->list);
spin_unlock_irqrestore(&video->req_lock, flags);
/*
* Retrieve the first available video buffer and fill the
* request, protected by the video queue irqlock.
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
if (buf != NULL) {
video->encode(req, video, buf);
buf_done = buf->state == UVC_BUF_STATE_DONE;
} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
/*
* No video buffer available; the queue is still connected and
* we're transferring over ISOC. Queue a 0 length request to
* prevent missed ISOC transfers.
*/
req->length = 0;
buf_done = false;
} else {
/*
* Either the queue has been disconnected or no video buffer
* available for bulk transfer. Either way, stop processing
* further.
*/
spin_unlock_irqrestore(&queue->irqlock, flags);
break;
}
/*
* With USB3 handling more requests at a higher speed, we can't
* afford to generate an interrupt for every request. Decide to
* interrupt:
*
* - When no more requests are available in the free queue, as
* this may be our last chance to refill the endpoint's
* request queue.
*
* - When this request is the last request for the video
* buffer, as we want to start sending the next video buffer
* ASAP in case it doesn't get started already in the next
* iteration of this loop.
*
* - Four times over the length of the requests queue (as
* indicated by video->uvc_num_requests), as a trade-off
* between latency and interrupt load.
*/
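/*
 * Worked example (informal): with uvc_num_requests = 16,
 * DIV_ROUND_UP(16, 4) = 4, so the modulo below clears no_interrupt
 * on every fourth request, i.e. roughly four completion interrupts
 * per traversal of the request queue.
 */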
if (list_empty(&video->req_free) || buf_done ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
req->no_interrupt = 0;
} else {
req->no_interrupt = 1;
}
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&queue->irqlock, flags);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
break;
}
/* Endpoint now owns the request */
req = NULL;
video->req_int_count++;
}
if (!req)
return;
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
/*
* Enable or disable the video stream.
*/
int uvcg_video_enable(struct uvc_video *video, int enable)
{
unsigned int i;
int ret;
if (video->ep == NULL) {
uvcg_info(&video->uvc->func,
"Video enable failed, device is uninitialized.\n");
return -ENODEV;
}
if (!enable) {
cancel_work_sync(&video->pump);
uvcg_queue_cancel(&video->queue, 0);
for (i = 0; i < video->uvc_num_requests; ++i)
if (video->ureq && video->ureq[i].req)
usb_ep_dequeue(video->ep, video->ureq[i].req);
uvc_video_free_requests(video);
uvcg_queue_enable(&video->queue, 0);
return 0;
}
if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
return ret;
if ((ret = uvc_video_alloc_requests(video)) < 0)
return ret;
if (video->max_payload_size) {
video->encode = uvc_video_encode_bulk;
video->payload_size = 0;
} else
video->encode = video->queue.use_sg ?
uvc_video_encode_isoc_sg : uvc_video_encode_isoc;
video->req_int_count = 0;
queue_work(video->async_wq, &video->pump);
return ret;
}
/*
* Initialize the UVC video stream.
*/
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);
/* Allocate a work queue for asynchronous video pump handler. */
video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!video->async_wq)
return -EINVAL;
video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
video->width = 320;
video->height = 240;
video->imagesize = 320 * 240 * 2;
/* Initialize the video buffers queue. */
uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
return 0;
}
| linux-master | drivers/usb/gadget/function/uvc_video.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* f_printer.c - USB printer function driver
*
* Copied from drivers/usb/gadget/legacy/printer.c,
* which was:
*
* printer.c -- Printer gadget driver
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/cdev.h>
#include <linux/kref.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/g_printer.h>
#include "u_printer.h"
#define PRINTER_MINORS 4
#define GET_DEVICE_ID 0
#define GET_PORT_STATUS 1
#define SOFT_RESET 2
#define DEFAULT_Q_LEN 10 /* same as legacy g_printer gadget */
static int major, minors;
static const struct class usb_gadget_class = {
.name = "usb_printer_gadget",
};
static DEFINE_IDA(printer_ida);
static DEFINE_MUTEX(printer_ida_lock); /* protects access to printer_ida */
/*-------------------------------------------------------------------------*/
struct printer_dev {
spinlock_t lock; /* lock this structure */
/* lock buffer lists during read/write calls */
struct mutex lock_printer_io;
struct usb_gadget *gadget;
s8 interface;
struct usb_ep *in_ep, *out_ep;
struct kref kref;
struct list_head rx_reqs; /* List of free RX structs */
struct list_head rx_reqs_active; /* List of Active RX xfers */
struct list_head rx_buffers; /* List of completed xfers */
/* wait until there is data to be read. */
wait_queue_head_t rx_wait;
struct list_head tx_reqs; /* List of free TX structs */
struct list_head tx_reqs_active; /* List of Active TX xfers */
/* Wait until there are write buffers available to use. */
wait_queue_head_t tx_wait;
/* Wait until all write buffers have been sent. */
wait_queue_head_t tx_flush_wait;
struct usb_request *current_rx_req;
size_t current_rx_bytes;
u8 *current_rx_buf;
u8 printer_status;
u8 reset_printer;
int minor;
struct cdev printer_cdev;
u8 printer_cdev_open;
wait_queue_head_t wait;
unsigned q_len;
char **pnp_string; /* We don't own memory! */
struct usb_function function;
};
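/*
 * Informal flow of the request lists above: free RX requests start on
 * rx_reqs, move to rx_reqs_active while queued on out_ep, and land on
 * rx_buffers once rx_complete() sees data, where read() consumes them
 * and returns them to rx_reqs. TX requests cycle between tx_reqs and
 * tx_reqs_active in the same way.
 */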
static inline struct printer_dev *func_to_printer(struct usb_function *f)
{
return container_of(f, struct printer_dev, function);
}
/*-------------------------------------------------------------------------*/
/*
* DESCRIPTORS ... most are static, but strings and (full) configuration
* descriptors are built on demand.
*/
/* holds our biggest descriptor */
#define USB_DESC_BUFSIZE 256
#define USB_BUFSIZE 8192
static struct usb_interface_descriptor intf_desc = {
.bLength = sizeof(intf_desc),
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_PRINTER,
.bInterfaceSubClass = 1, /* Printer Sub-Class */
.bInterfaceProtocol = 2, /* Bi-Directional */
.iInterface = 0
};
static struct usb_endpoint_descriptor fs_ep_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK
};
static struct usb_endpoint_descriptor fs_ep_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK
};
static struct usb_descriptor_header *fs_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &fs_ep_in_desc,
(struct usb_descriptor_header *) &fs_ep_out_desc,
NULL
};
/*
* usb 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*/
static struct usb_endpoint_descriptor hs_ep_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512)
};
static struct usb_endpoint_descriptor hs_ep_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512)
};
static struct usb_descriptor_header *hs_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_ep_in_desc,
(struct usb_descriptor_header *) &hs_ep_out_desc,
NULL
};
/*
* Added endpoint descriptors for 3.0 devices
*/
static struct usb_endpoint_descriptor ss_ep_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_ep_in_comp_desc = {
.bLength = sizeof(ss_ep_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_endpoint_descriptor ss_ep_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor ss_ep_out_comp_desc = {
.bLength = sizeof(ss_ep_out_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
static struct usb_descriptor_header *ss_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &ss_ep_in_desc,
(struct usb_descriptor_header *) &ss_ep_in_comp_desc,
(struct usb_descriptor_header *) &ss_ep_out_desc,
(struct usb_descriptor_header *) &ss_ep_out_comp_desc,
NULL
};
/* maxpacket and other transfer characteristics vary by speed. */
static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *fs,
struct usb_endpoint_descriptor *hs,
struct usb_endpoint_descriptor *ss)
{
switch (gadget->speed) {
case USB_SPEED_SUPER:
return ss;
case USB_SPEED_HIGH:
return hs;
default:
return fs;
}
}
/*-------------------------------------------------------------------------*/
static void printer_dev_free(struct kref *kref)
{
struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
kfree(dev);
}
static struct usb_request *
printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, gfp_flags);
if (req != NULL) {
req->length = len;
req->buf = kmalloc(len, gfp_flags);
if (req->buf == NULL) {
usb_ep_free_request(ep, req);
return NULL;
}
}
return req;
}
static void
printer_req_free(struct usb_ep *ep, struct usb_request *req)
{
if (ep != NULL && req != NULL) {
kfree(req->buf);
usb_ep_free_request(ep, req);
}
}
/*-------------------------------------------------------------------------*/
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct printer_dev *dev = ep->driver_data;
int status = req->status;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
list_del_init(&req->list); /* Remove from Active List */
switch (status) {
/* normal completion */
case 0:
if (req->actual > 0) {
list_add_tail(&req->list, &dev->rx_buffers);
DBG(dev, "G_Printer : rx length %d\n", req->actual);
} else {
list_add(&req->list, &dev->rx_reqs);
}
break;
/* software-driven interface shutdown */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
VDBG(dev, "rx shutdown, code %d\n", status);
list_add(&req->list, &dev->rx_reqs);
break;
/* for hardware automagic (such as pxa) */
case -ECONNABORTED: /* endpoint reset */
DBG(dev, "rx %s reset\n", ep->name);
list_add(&req->list, &dev->rx_reqs);
break;
/* data overrun */
case -EOVERFLOW:
fallthrough;
default:
DBG(dev, "rx status %d\n", status);
list_add(&req->list, &dev->rx_reqs);
break;
}
wake_up_interruptible(&dev->rx_wait);
spin_unlock_irqrestore(&dev->lock, flags);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct printer_dev *dev = ep->driver_data;
switch (req->status) {
default:
VDBG(dev, "tx err %d\n", req->status);
fallthrough;
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
break;
case 0:
break;
}
spin_lock(&dev->lock);
/* Take the request struct off the active list and put it on the
* free list.
*/
list_del_init(&req->list);
list_add(&req->list, &dev->tx_reqs);
wake_up_interruptible(&dev->tx_wait);
if (likely(list_empty(&dev->tx_reqs_active)))
wake_up_interruptible(&dev->tx_flush_wait);
spin_unlock(&dev->lock);
}
/*-------------------------------------------------------------------------*/
static int
printer_open(struct inode *inode, struct file *fd)
{
struct printer_dev *dev;
unsigned long flags;
int ret = -EBUSY;
dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev);
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENODEV;
}
if (!dev->printer_cdev_open) {
dev->printer_cdev_open = 1;
fd->private_data = dev;
ret = 0;
/* Change the printer status to show that it's on-line. */
dev->printer_status |= PRINTER_SELECTED;
}
spin_unlock_irqrestore(&dev->lock, flags);
kref_get(&dev->kref);
return ret;
}
static int
printer_close(struct inode *inode, struct file *fd)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
dev->printer_cdev_open = 0;
fd->private_data = NULL;
/* Change printer status to show that the printer is off-line. */
dev->printer_status &= ~PRINTER_SELECTED;
spin_unlock_irqrestore(&dev->lock, flags);
kref_put(&dev->kref, printer_dev_free);
return 0;
}
/* This function must be called with interrupts turned off. */
static void
setup_rx_reqs(struct printer_dev *dev)
{
struct usb_request *req;
while (likely(!list_empty(&dev->rx_reqs))) {
int error;
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
/* The USB Host sends us whatever amount of data it wants to
* so we always set the length field to the full USB_BUFSIZE.
* If the amount of data is more than the read() caller asked
* for it will be stored in the request buffer until it is
* asked for by read().
*/
req->length = USB_BUFSIZE;
req->complete = rx_complete;
/* Drop only the lock (irqs stay off): usb_ep_queue() may call the completion handler, which takes dev->lock. */
spin_unlock(&dev->lock);
error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (error) {
DBG(dev, "rx submit --> %d\n", error);
list_add(&req->list, &dev->rx_reqs);
break;
}
/* If the completion handler hasn't already requeued the req (its list head is still empty), track it as active. */
else if (list_empty(&req->list))
list_add(&req->list, &dev->rx_reqs_active);
}
}
static ssize_t
printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
size_t size;
size_t bytes_copied;
struct usb_request *req;
/* This is a pointer to the current USB rx request. */
struct usb_request *current_rx_req;
/* This is the number of bytes in the current rx buffer. */
size_t current_rx_bytes;
/* This is a pointer to the current rx buffer. */
u8 *current_rx_buf;
if (len == 0)
return -EINVAL;
DBG(dev, "printer_read trying to read %d bytes\n", (int)len);
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -ENODEV;
}
/* We will use this flag later to check if a printer reset happened
* after we turn interrupts back on.
*/
dev->reset_printer = 0;
setup_rx_reqs(dev);
bytes_copied = 0;
current_rx_req = dev->current_rx_req;
current_rx_bytes = dev->current_rx_bytes;
current_rx_buf = dev->current_rx_buf;
dev->current_rx_req = NULL;
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
/* Check if there is any data in the read buffers. Please note that
* current_rx_bytes is the number of bytes in the current rx buffer.
* If it is zero then check if there are any other rx_buffers that
* are on the completed list. We are only out of data if all rx
* buffers are empty.
*/
if ((current_rx_bytes == 0) &&
(likely(list_empty(&dev->rx_buffers)))) {
/* Turn interrupts back on before sleeping. */
spin_unlock_irqrestore(&dev->lock, flags);
/*
* If no data is available check if this is a NON-Blocking
* call or not.
*/
if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* Sleep until data is available */
wait_event_interruptible(dev->rx_wait,
(likely(!list_empty(&dev->rx_buffers))));
spin_lock_irqsave(&dev->lock, flags);
}
/* If we have data to return, copy it to the caller's buffer. */
while ((current_rx_bytes || likely(!list_empty(&dev->rx_buffers)))
&& len) {
if (current_rx_bytes == 0) {
req = container_of(dev->rx_buffers.next,
struct usb_request, list);
list_del_init(&req->list);
if (req->actual && req->buf) {
current_rx_req = req;
current_rx_bytes = req->actual;
current_rx_buf = req->buf;
} else {
list_add(&req->list, &dev->rx_reqs);
continue;
}
}
/* Don't leave irqs off while doing memory copies */
spin_unlock_irqrestore(&dev->lock, flags);
if (len > current_rx_bytes)
size = current_rx_bytes;
else
size = len;
size -= copy_to_user(buf, current_rx_buf, size);
bytes_copied += size;
len -= size;
buf += size;
spin_lock_irqsave(&dev->lock, flags);
/* We've disconnected or reset so return. */
if (dev->reset_printer) {
list_add(¤t_rx_req->list, &dev->rx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* If we are not returning all the data left in this RX request
 * buffer, adjust the amount of data left in the buffer.
 * Otherwise, if we are done with this RX request buffer,
 * requeue it to get any incoming data from the USB host.
 */
if (size < current_rx_bytes) {
current_rx_bytes -= size;
current_rx_buf += size;
} else {
list_add(¤t_rx_req->list, &dev->rx_reqs);
current_rx_bytes = 0;
current_rx_buf = NULL;
current_rx_req = NULL;
}
}
dev->current_rx_req = current_rx_req;
dev->current_rx_bytes = current_rx_bytes;
dev->current_rx_buf = current_rx_buf;
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied);
if (bytes_copied)
return bytes_copied;
else
return -EAGAIN;
}
static ssize_t
printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
size_t size; /* Amount of data in a TX request. */
size_t bytes_copied = 0;
struct usb_request *req;
int value;
DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
if (len == 0)
return -EINVAL;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -ENODEV;
}
/* Check if a printer reset happens while we have interrupts on */
dev->reset_printer = 0;
/* Check if there are any available write buffers */
if (likely(list_empty(&dev->tx_reqs))) {
/* Turn interrupts back on before sleeping. */
spin_unlock_irqrestore(&dev->lock, flags);
/*
 * If no write buffers are available, check whether this is
 * a NON-Blocking call or not.
 */
if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* Sleep until a write buffer is available */
wait_event_interruptible(dev->tx_wait,
(likely(!list_empty(&dev->tx_reqs))));
spin_lock_irqsave(&dev->lock, flags);
}
while (likely(!list_empty(&dev->tx_reqs)) && len) {
if (len > USB_BUFSIZE)
size = USB_BUFSIZE;
else
size = len;
req = container_of(dev->tx_reqs.next, struct usb_request,
list);
list_del_init(&req->list);
req->complete = tx_complete;
req->length = size;
/* Check if we need to send a zero length packet. */
if (len > size)
/* There will be more TX requests so not yet. */
req->zero = 0;
else
/* If the data amount is not a multiple of the
* maxpacket size then send a zero length packet.
*/
req->zero = ((len % dev->in_ep->maxpacket) == 0);
/* Don't leave irqs off while doing memory copies */
spin_unlock_irqrestore(&dev->lock, flags);
if (copy_from_user(req->buf, buf, size)) {
list_add(&req->list, &dev->tx_reqs);
mutex_unlock(&dev->lock_printer_io);
return bytes_copied;
}
bytes_copied += size;
len -= size;
buf += size;
spin_lock_irqsave(&dev->lock, flags);
/* We've disconnected or reset so free the req and buffer */
if (dev->reset_printer) {
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
list_add(&req->list, &dev->tx_reqs_active);
/* Drop only the lock (irqs stay off): usb_ep_queue() may call the completion handler, which takes dev->lock. */
spin_unlock(&dev->lock);
value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (value) {
list_move(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
}
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied);
if (bytes_copied)
return bytes_copied;
else
return -EAGAIN;
}
static int
printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
{
struct printer_dev *dev = fd->private_data;
struct inode *inode = file_inode(fd);
unsigned long flags;
int tx_list_empty;
inode_lock(inode);
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
inode_unlock(inode);
return -ENODEV;
}
tx_list_empty = (likely(list_empty(&dev->tx_reqs)));
spin_unlock_irqrestore(&dev->lock, flags);
if (!tx_list_empty) {
/* Sleep until all data has been sent */
wait_event_interruptible(dev->tx_flush_wait,
(likely(list_empty(&dev->tx_reqs_active))));
}
inode_unlock(inode);
return 0;
}
static __poll_t
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
__poll_t status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return EPOLLERR | EPOLLHUP;
}
setup_rx_reqs(dev);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
poll_wait(fd, &dev->rx_wait, wait);
poll_wait(fd, &dev->tx_wait, wait);
spin_lock_irqsave(&dev->lock, flags);
if (likely(!list_empty(&dev->tx_reqs)))
status |= EPOLLOUT | EPOLLWRNORM;
if (likely(dev->current_rx_bytes) ||
likely(!list_empty(&dev->rx_buffers)))
status |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
static long
printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
int status = 0;
DBG(dev, "printer_ioctl: cmd=0x%4.4x, arg=%lu\n", code, arg);
/* handle ioctls */
spin_lock_irqsave(&dev->lock, flags);
if (dev->interface < 0) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENODEV;
}
switch (code) {
case GADGET_GET_PRINTER_STATUS:
status = (int)dev->printer_status;
break;
case GADGET_SET_PRINTER_STATUS:
dev->printer_status = (u8)arg;
break;
default:
/* could not handle ioctl */
DBG(dev, "printer_ioctl: ERROR cmd=0x%4.4xis not supported\n",
code);
status = -ENOTTY;
}
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
/* used after endpoint configuration */
static const struct file_operations printer_io_operations = {
.owner = THIS_MODULE,
.open = printer_open,
.read = printer_read,
.write = printer_write,
.fsync = printer_fsync,
.poll = printer_poll,
.unlocked_ioctl = printer_ioctl,
.release = printer_close,
.llseek = noop_llseek,
};
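/*
 * Illustrative user-space sketch (assumes the "g_printer0" device node
 * created in printer_func_bind() below; error handling elided):
 *
 *	int fd = open("/dev/g_printer0", O_RDWR);
 *	ssize_t n = read(fd, buf, sizeof(buf));	// data sent by the host
 *	write(fd, reply, reply_len);		// data returned to the host
 */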
/*-------------------------------------------------------------------------*/
static int
set_printer_interface(struct printer_dev *dev)
{
int result = 0;
dev->in_ep->desc = ep_desc(dev->gadget, &fs_ep_in_desc, &hs_ep_in_desc,
&ss_ep_in_desc);
dev->in_ep->driver_data = dev;
dev->out_ep->desc = ep_desc(dev->gadget, &fs_ep_out_desc,
&hs_ep_out_desc, &ss_ep_out_desc);
dev->out_ep->driver_data = dev;
result = usb_ep_enable(dev->in_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
goto done;
}
result = usb_ep_enable(dev->out_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->out_ep->name, result);
goto done;
}
done:
/* on error, disable any endpoints */
if (result != 0) {
(void) usb_ep_disable(dev->in_ep);
(void) usb_ep_disable(dev->out_ep);
dev->in_ep->desc = NULL;
dev->out_ep->desc = NULL;
}
/* caller is responsible for cleanup on error */
return result;
}
static void printer_reset_interface(struct printer_dev *dev)
{
unsigned long flags;
if (dev->interface < 0)
return;
if (dev->in_ep->desc)
usb_ep_disable(dev->in_ep);
if (dev->out_ep->desc)
usb_ep_disable(dev->out_ep);
spin_lock_irqsave(&dev->lock, flags);
dev->in_ep->desc = NULL;
dev->out_ep->desc = NULL;
dev->interface = -1;
spin_unlock_irqrestore(&dev->lock, flags);
}
/* Change our operational Interface. */
static int set_interface(struct printer_dev *dev, unsigned number)
{
int result = 0;
/* Free the current interface */
printer_reset_interface(dev);
result = set_printer_interface(dev);
if (result)
printer_reset_interface(dev);
else
dev->interface = number;
if (!result)
INFO(dev, "Using interface %x\n", number);
return result;
}
static void printer_soft_reset(struct printer_dev *dev)
{
struct usb_request *req;
if (usb_ep_disable(dev->in_ep))
DBG(dev, "Failed to disable USB in_ep\n");
if (usb_ep_disable(dev->out_ep))
DBG(dev, "Failed to disable USB out_ep\n");
if (dev->current_rx_req != NULL) {
list_add(&dev->current_rx_req->list, &dev->rx_reqs);
dev->current_rx_req = NULL;
}
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
dev->reset_printer = 1;
while (likely(!(list_empty(&dev->rx_buffers)))) {
req = container_of(dev->rx_buffers.next, struct usb_request,
list);
list_del_init(&req->list);
list_add(&req->list, &dev->rx_reqs);
}
while (likely(!(list_empty(&dev->rx_reqs_active)))) {
req = container_of(dev->rx_reqs_active.next, struct usb_request,
list);
list_del_init(&req->list);
list_add(&req->list, &dev->rx_reqs);
}
while (likely(!(list_empty(&dev->tx_reqs_active)))) {
req = container_of(dev->tx_reqs_active.next,
struct usb_request, list);
list_del_init(&req->list);
list_add(&req->list, &dev->tx_reqs);
}
if (usb_ep_enable(dev->in_ep))
DBG(dev, "Failed to enable USB in_ep\n");
if (usb_ep_enable(dev->out_ep))
DBG(dev, "Failed to enable USB out_ep\n");
wake_up_interruptible(&dev->rx_wait);
wake_up_interruptible(&dev->tx_wait);
wake_up_interruptible(&dev->tx_flush_wait);
}
/*-------------------------------------------------------------------------*/
static bool gprinter_req_match(struct usb_function *f,
const struct usb_ctrlrequest *ctrl,
bool config0)
{
struct printer_dev *dev = func_to_printer(f);
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
if (config0)
return false;
if ((ctrl->bRequestType & USB_RECIP_MASK) != USB_RECIP_INTERFACE ||
(ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS)
return false;
switch (ctrl->bRequest) {
case GET_DEVICE_ID:
w_index >>= 8;
if (USB_DIR_IN & ctrl->bRequestType)
break;
return false;
case GET_PORT_STATUS:
if (!w_value && w_length == 1 &&
(USB_DIR_IN & ctrl->bRequestType))
break;
return false;
case SOFT_RESET:
if (!w_value && !w_length &&
!(USB_DIR_IN & ctrl->bRequestType))
break;
fallthrough;
default:
return false;
}
return w_index == dev->interface;
}
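/*
 * For reference, the class requests matched above are defined by the
 * USB Printer Class specification: GET_DEVICE_ID (0), GET_PORT_STATUS
 * (1) and SOFT_RESET (2), all addressed to the printer interface.
 */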
/*
* The setup() callback implements all the ep0 functionality that's not
* handled lower down.
*/
static int printer_func_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct printer_dev *dev = func_to_printer(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
u8 *buf = req->buf;
int value = -EOPNOTSUPP;
u16 wIndex = le16_to_cpu(ctrl->wIndex);
u16 wValue = le16_to_cpu(ctrl->wValue);
u16 wLength = le16_to_cpu(ctrl->wLength);
DBG(dev, "ctrl req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest, wValue, wIndex, wLength);
switch (ctrl->bRequestType&USB_TYPE_MASK) {
case USB_TYPE_CLASS:
switch (ctrl->bRequest) {
case GET_DEVICE_ID: /* Get the IEEE-1284 PNP String */
/* Only one printer interface is supported. */
if ((wIndex>>8) != dev->interface)
break;
if (!*dev->pnp_string) {
value = 0;
break;
}
value = strlen(*dev->pnp_string);
buf[0] = (value >> 8) & 0xFF;
buf[1] = value & 0xFF;
memcpy(buf + 2, *dev->pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
*dev->pnp_string);
break;
case GET_PORT_STATUS: /* Get Port Status */
/* Only one printer interface is supported. */
if (wIndex != dev->interface)
break;
buf[0] = dev->printer_status;
value = min_t(u16, wLength, 1);
break;
case SOFT_RESET: /* Soft Reset */
/* Only one printer interface is supported. */
if (wIndex != dev->interface)
break;
printer_soft_reset(dev);
value = 0;
break;
default:
goto unknown;
}
break;
default:
unknown:
VDBG(dev,
"unknown ctrl req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
wValue, wIndex, wLength);
break;
}
/* host either stalls (value < 0) or reports success */
if (value >= 0) {
req->length = value;
req->zero = value < wLength;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
req->status = 0;
}
}
return value;
}
static int printer_func_bind(struct usb_configuration *c,
struct usb_function *f)
{
struct usb_gadget *gadget = c->cdev->gadget;
struct printer_dev *dev = func_to_printer(f);
struct device *pdev;
struct usb_composite_dev *cdev = c->cdev;
struct usb_ep *in_ep;
struct usb_ep *out_ep = NULL;
struct usb_request *req;
dev_t devt;
int id;
int ret;
u32 i;
id = usb_interface_id(c, f);
if (id < 0)
return id;
intf_desc.bInterfaceNumber = id;
/* finish hookup to lower layer ... */
dev->gadget = gadget;
/* all we really need is bulk IN/OUT */
in_ep = usb_ep_autoconfig(cdev->gadget, &fs_ep_in_desc);
if (!in_ep) {
autoconf_fail:
dev_err(&cdev->gadget->dev, "can't autoconfigure on %s\n",
cdev->gadget->name);
return -ENODEV;
}
out_ep = usb_ep_autoconfig(cdev->gadget, &fs_ep_out_desc);
if (!out_ep)
goto autoconf_fail;
/* assumes that all endpoints are dual-speed */
hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ss_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
hs_printer_function, ss_printer_function,
ss_printer_function);
if (ret)
return ret;
dev->in_ep = in_ep;
dev->out_ep = out_ep;
ret = -ENOMEM;
for (i = 0; i < dev->q_len; i++) {
req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL);
if (!req)
goto fail_tx_reqs;
list_add(&req->list, &dev->tx_reqs);
}
for (i = 0; i < dev->q_len; i++) {
req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL);
if (!req)
goto fail_rx_reqs;
list_add(&req->list, &dev->rx_reqs);
}
/* Setup the sysfs files for the printer gadget. */
devt = MKDEV(major, dev->minor);
pdev = device_create(&usb_gadget_class, NULL, devt,
NULL, "g_printer%d", dev->minor);
if (IS_ERR(pdev)) {
ERROR(dev, "Failed to create device: g_printer\n");
ret = PTR_ERR(pdev);
goto fail_rx_reqs;
}
/*
* Register a character device as an interface to a user mode
* program that handles the printer specific functionality.
*/
cdev_init(&dev->printer_cdev, &printer_io_operations);
dev->printer_cdev.owner = THIS_MODULE;
ret = cdev_add(&dev->printer_cdev, devt, 1);
if (ret) {
ERROR(dev, "Failed to open char device\n");
goto fail_cdev_add;
}
return 0;
fail_cdev_add:
device_destroy(&usb_gadget_class, devt);
fail_rx_reqs:
while (!list_empty(&dev->rx_reqs)) {
req = container_of(dev->rx_reqs.next, struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
fail_tx_reqs:
while (!list_empty(&dev->tx_reqs)) {
req = container_of(dev->tx_reqs.next, struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->in_ep, req);
}
usb_free_all_descriptors(f);
return ret;
}
static int printer_func_set_alt(struct usb_function *f,
unsigned intf, unsigned alt)
{
struct printer_dev *dev = func_to_printer(f);
int ret = -ENOTSUPP;
if (!alt)
ret = set_interface(dev, intf);
return ret;
}
static void printer_func_disable(struct usb_function *f)
{
struct printer_dev *dev = func_to_printer(f);
printer_reset_interface(dev);
}
static inline struct f_printer_opts
*to_f_printer_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_printer_opts,
func_inst.group);
}
static void printer_attr_release(struct config_item *item)
{
struct f_printer_opts *opts = to_f_printer_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations printer_item_ops = {
.release = printer_attr_release,
};
static ssize_t f_printer_opts_pnp_string_show(struct config_item *item,
char *page)
{
struct f_printer_opts *opts = to_f_printer_opts(item);
int result = 0;
mutex_lock(&opts->lock);
if (!opts->pnp_string)
goto unlock;
result = strscpy(page, opts->pnp_string, PAGE_SIZE);
if (result < 1) {
result = PAGE_SIZE;
} else if (page[result - 1] != '\n' && result + 1 < PAGE_SIZE) {
page[result++] = '\n';
page[result] = '\0';
}
unlock:
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_printer_opts_pnp_string_store(struct config_item *item,
const char *page, size_t len)
{
struct f_printer_opts *opts = to_f_printer_opts(item);
char *new_pnp;
int result;
mutex_lock(&opts->lock);
new_pnp = kstrndup(page, len, GFP_KERNEL);
if (!new_pnp) {
result = -ENOMEM;
goto unlock;
}
if (opts->pnp_string_allocated)
kfree(opts->pnp_string);
opts->pnp_string_allocated = true;
opts->pnp_string = new_pnp;
result = len;
unlock:
mutex_unlock(&opts->lock);
return result;
}
CONFIGFS_ATTR(f_printer_opts_, pnp_string);
static ssize_t f_printer_opts_q_len_show(struct config_item *item,
char *page)
{
struct f_printer_opts *opts = to_f_printer_opts(item);
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%d\n", opts->q_len);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_printer_opts_q_len_store(struct config_item *item,
const char *page, size_t len)
{
struct f_printer_opts *opts = to_f_printer_opts(item);
int ret;
u16 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou16(page, 0, &num);
if (ret)
goto end;
opts->q_len = (unsigned)num;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_printer_opts_, q_len);
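/*
 * Hedged configfs sketch (shell, not C; paths assume the usual gadget
 * configfs layout and the values are only examples):
 *
 *	mkdir functions/printer.0
 *	echo "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;" > \
 *		functions/printer.0/pnp_string
 *	echo 10 > functions/printer.0/q_len
 */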
static struct configfs_attribute *printer_attrs[] = {
&f_printer_opts_attr_pnp_string,
&f_printer_opts_attr_q_len,
NULL,
};
static const struct config_item_type printer_func_type = {
.ct_item_ops = &printer_item_ops,
.ct_attrs = printer_attrs,
.ct_owner = THIS_MODULE,
};
static inline int gprinter_get_minor(void)
{
int ret;
ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
if (ret >= PRINTER_MINORS) {
ida_simple_remove(&printer_ida, ret);
ret = -ENODEV;
}
return ret;
}
static inline void gprinter_put_minor(int minor)
{
ida_simple_remove(&printer_ida, minor);
}
static int gprinter_setup(int);
static void gprinter_cleanup(void);
static void gprinter_free_inst(struct usb_function_instance *f)
{
struct f_printer_opts *opts;
opts = container_of(f, struct f_printer_opts, func_inst);
mutex_lock(&printer_ida_lock);
gprinter_put_minor(opts->minor);
if (ida_is_empty(&printer_ida))
gprinter_cleanup();
mutex_unlock(&printer_ida_lock);
if (opts->pnp_string_allocated)
kfree(opts->pnp_string);
kfree(opts);
}
static struct usb_function_instance *gprinter_alloc_inst(void)
{
struct f_printer_opts *opts;
struct usb_function_instance *ret;
int status = 0;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = gprinter_free_inst;
ret = &opts->func_inst;
/*
 * Make sure q_len is initialized, otherwise the bound device
 * can't support read/write!
 */
opts->q_len = DEFAULT_Q_LEN;
mutex_lock(&printer_ida_lock);
if (ida_is_empty(&printer_ida)) {
status = gprinter_setup(PRINTER_MINORS);
if (status) {
ret = ERR_PTR(status);
kfree(opts);
goto unlock;
}
}
opts->minor = gprinter_get_minor();
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
if (ida_is_empty(&printer_ida))
gprinter_cleanup();
goto unlock;
}
config_group_init_type_name(&opts->func_inst.group, "",
&printer_func_type);
unlock:
mutex_unlock(&printer_ida_lock);
return ret;
}
static void gprinter_free(struct usb_function *f)
{
struct printer_dev *dev = func_to_printer(f);
struct f_printer_opts *opts;
opts = container_of(f->fi, struct f_printer_opts, func_inst);
kref_put(&dev->kref, printer_dev_free);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
}
static void printer_func_unbind(struct usb_configuration *c,
struct usb_function *f)
{
struct printer_dev *dev;
struct usb_request *req;
dev = func_to_printer(f);
device_destroy(&usb_gadget_class, MKDEV(major, dev->minor));
/* Remove Character Device */
cdev_del(&dev->printer_cdev);
/* we must already have been disconnected ... no i/o may be active */
WARN_ON(!list_empty(&dev->tx_reqs_active));
WARN_ON(!list_empty(&dev->rx_reqs_active));
/* Free all memory for this driver. */
while (!list_empty(&dev->tx_reqs)) {
req = container_of(dev->tx_reqs.next, struct usb_request,
list);
list_del(&req->list);
printer_req_free(dev->in_ep, req);
}
if (dev->current_rx_req != NULL)
printer_req_free(dev->out_ep, dev->current_rx_req);
while (!list_empty(&dev->rx_reqs)) {
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
while (!list_empty(&dev->rx_buffers)) {
req = container_of(dev->rx_buffers.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
usb_free_all_descriptors(f);
}
static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
{
struct printer_dev *dev;
struct f_printer_opts *opts;
opts = container_of(fi, struct f_printer_opts, func_inst);
mutex_lock(&opts->lock);
if (opts->minor >= minors) {
mutex_unlock(&opts->lock);
return ERR_PTR(-ENOENT);
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
mutex_unlock(&opts->lock);
return ERR_PTR(-ENOMEM);
}
kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
dev->pnp_string = &opts->pnp_string;
dev->q_len = opts->q_len;
mutex_unlock(&opts->lock);
dev->function.name = "printer";
dev->function.bind = printer_func_bind;
dev->function.setup = printer_func_setup;
dev->function.unbind = printer_func_unbind;
dev->function.set_alt = printer_func_set_alt;
dev->function.disable = printer_func_disable;
dev->function.req_match = gprinter_req_match;
dev->function.free_func = gprinter_free;
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
INIT_LIST_HEAD(&dev->rx_buffers);
INIT_LIST_HEAD(&dev->tx_reqs_active);
INIT_LIST_HEAD(&dev->rx_reqs_active);
spin_lock_init(&dev->lock);
mutex_init(&dev->lock_printer_io);
init_waitqueue_head(&dev->rx_wait);
init_waitqueue_head(&dev->tx_wait);
init_waitqueue_head(&dev->tx_flush_wait);
dev->interface = -1;
dev->printer_cdev_open = 0;
dev->printer_status = PRINTER_NOT_ERROR;
dev->current_rx_req = NULL;
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
return &dev->function;
}
DECLARE_USB_FUNCTION_INIT(printer, gprinter_alloc_inst, gprinter_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Craig Nadler");
static int gprinter_setup(int count)
{
int status;
dev_t devt;
status = class_register(&usb_gadget_class);
if (status)
return status;
status = alloc_chrdev_region(&devt, 0, count, "USB printer gadget");
if (status) {
pr_err("alloc_chrdev_region %d\n", status);
class_unregister(&usb_gadget_class);
return status;
}
major = MAJOR(devt);
minors = count;
return status;
}
static void gprinter_cleanup(void)
{
if (major) {
unregister_chrdev_region(MKDEV(major, 0), minors);
major = minors = 0;
}
class_unregister(&usb_gadget_class);
}
| linux-master | drivers/usb/gadget/function/f_printer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
*
* Based on dummy_hcd.c, which is:
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
*/
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include "vudc.h"
#define DEV_REQUEST (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST (DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST (INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST (EP_REQUEST | USB_DIR_IN)
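/*
 * For reference: USB_TYPE_STANDARD and USB_RECIP_DEVICE are both zero,
 * so DEV_REQUEST is 0x00 and DEV_INREQUEST is 0x80; likewise
 * INTF_INREQUEST is 0x81 and EP_INREQUEST is 0x82.
 */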
static int get_frame_limit(enum usb_device_speed speed)
{
switch (speed) {
case USB_SPEED_LOW:
return 8 /*bytes*/ * 12 /*packets*/;
case USB_SPEED_FULL:
return 64 /*bytes*/ * 19 /*packets*/;
case USB_SPEED_HIGH:
return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
case USB_SPEED_SUPER:
/* Bus speed is 500000 bytes/ms, so use a little less */
return 490000;
default:
/* error */
return -1;
}
}
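/*
 * Worked example: the high-speed budget above is 512 bytes * 13
 * packets * 8 microframes = 53248 bytes per 1 ms frame, roughly the
 * practical bulk throughput ceiling of a high-speed link.
 */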
/*
* handle_control_request() - handles all control transfers
* @udc: pointer to vudc
* @urb: the urb request to handle
* @setup: pointer to the setup data for a USB device control
* request
* @status: pointer to request handling status
*
* Return 0 - if the request was handled
 * 1 - if the request wasn't handled
* error code on error
*
* Adapted from drivers/usb/gadget/udc/dummy_hcd.c
*/
static int handle_control_request(struct vudc *udc, struct urb *urb,
struct usb_ctrlrequest *setup,
int *status)
{
struct vep *ep2;
int ret_val = 1;
unsigned int w_index;
unsigned int w_value;
w_index = le16_to_cpu(setup->wIndex);
w_value = le16_to_cpu(setup->wValue);
switch (setup->bRequest) {
case USB_REQ_SET_ADDRESS:
if (setup->bRequestType != DEV_REQUEST)
break;
udc->address = w_value;
ret_val = 0;
*status = 0;
break;
case USB_REQ_SET_FEATURE:
if (setup->bRequestType == DEV_REQUEST) {
ret_val = 0;
switch (w_value) {
case USB_DEVICE_REMOTE_WAKEUP:
break;
case USB_DEVICE_B_HNP_ENABLE:
udc->gadget.b_hnp_enable = 1;
break;
case USB_DEVICE_A_HNP_SUPPORT:
udc->gadget.a_hnp_support = 1;
break;
case USB_DEVICE_A_ALT_HNP_SUPPORT:
udc->gadget.a_alt_hnp_support = 1;
break;
default:
ret_val = -EOPNOTSUPP;
}
if (ret_val == 0) {
udc->devstatus |= (1 << w_value);
*status = 0;
}
} else if (setup->bRequestType == EP_REQUEST) {
/* endpoint halt */
ep2 = vudc_find_endpoint(udc, w_index);
if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
ret_val = -EOPNOTSUPP;
break;
}
ep2->halted = 1;
ret_val = 0;
*status = 0;
}
break;
case USB_REQ_CLEAR_FEATURE:
if (setup->bRequestType == DEV_REQUEST) {
ret_val = 0;
switch (w_value) {
case USB_DEVICE_REMOTE_WAKEUP:
w_value = USB_DEVICE_REMOTE_WAKEUP;
break;
case USB_DEVICE_U1_ENABLE:
case USB_DEVICE_U2_ENABLE:
case USB_DEVICE_LTM_ENABLE:
ret_val = -EOPNOTSUPP;
break;
default:
ret_val = -EOPNOTSUPP;
break;
}
if (ret_val == 0) {
udc->devstatus &= ~(1 << w_value);
*status = 0;
}
} else if (setup->bRequestType == EP_REQUEST) {
/* endpoint halt */
ep2 = vudc_find_endpoint(udc, w_index);
if (!ep2) {
ret_val = -EOPNOTSUPP;
break;
}
if (!ep2->wedged)
ep2->halted = 0;
ret_val = 0;
*status = 0;
}
break;
case USB_REQ_GET_STATUS:
if (setup->bRequestType == DEV_INREQUEST
|| setup->bRequestType == INTF_INREQUEST
|| setup->bRequestType == EP_INREQUEST) {
char *buf;
/*
* device: remote wakeup, selfpowered
* interface: nothing
* endpoint: halt
*/
buf = (char *)urb->transfer_buffer;
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType == EP_INREQUEST) {
ep2 = vudc_find_endpoint(udc, w_index);
if (!ep2) {
ret_val = -EOPNOTSUPP;
break;
}
buf[0] = ep2->halted;
} else if (setup->bRequestType ==
DEV_INREQUEST) {
buf[0] = (u8)udc->devstatus;
} else
buf[0] = 0;
}
if (urb->transfer_buffer_length > 1)
buf[1] = 0;
urb->actual_length = min_t(u32, 2,
urb->transfer_buffer_length);
ret_val = 0;
*status = 0;
}
break;
}
return ret_val;
}
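/*
 * Usage note: v_timer() below treats a return value of 1 as "not
 * handled here" and forwards the setup packet to the gadget driver's
 * ->setup() callback; 0 means the request was consumed by the UDC.
 */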
/* Adapted from dummy_hcd.c ; caller must hold lock */
static int transfer(struct vudc *udc,
struct urb *urb, struct vep *ep, int limit)
{
struct vrequest *req;
int sent = 0;
top:
/* if there's no request queued, the device is NAKing; nothing to transfer */
list_for_each_entry(req, &ep->req_queue, req_entry) {
unsigned int host_len, dev_len, len;
void *ubuf_pos, *rbuf_pos;
int is_short, to_host;
int rescan = 0;
/*
* 1..N packets of ep->ep.maxpacket each ... the last one
* may be short (including zero length).
*
* writer can send a zlp explicitly (length 0) or implicitly
* (length mod maxpacket zero, and 'zero' flag); they always
* terminate reads.
*/
host_len = urb->transfer_buffer_length - urb->actual_length;
dev_len = req->req.length - req->req.actual;
len = min(host_len, dev_len);
to_host = usb_pipein(urb->pipe);
if (unlikely(len == 0))
is_short = 1;
else {
/* send multiple of maxpacket first, then remainder */
if (len >= ep->ep.maxpacket) {
is_short = 0;
if (len % ep->ep.maxpacket > 0)
rescan = 1;
len -= len % ep->ep.maxpacket;
} else {
is_short = 1;
}
ubuf_pos = urb->transfer_buffer + urb->actual_length;
rbuf_pos = req->req.buf + req->req.actual;
if (urb->pipe & USB_DIR_IN)
memcpy(ubuf_pos, rbuf_pos, len);
else
memcpy(rbuf_pos, ubuf_pos, len);
urb->actual_length += len;
req->req.actual += len;
sent += len;
}
/*
* short packets terminate, maybe with overflow/underflow.
* it's only really an error to write too much.
*
* partially filling a buffer optionally blocks queue advances
* (so completion handlers can clean up the queue) but we don't
* need to emulate such data-in-flight.
*/
if (is_short) {
if (host_len == dev_len) {
req->req.status = 0;
urb->status = 0;
} else if (to_host) {
req->req.status = 0;
if (dev_len > host_len)
urb->status = -EOVERFLOW;
else
urb->status = 0;
} else {
urb->status = 0;
if (host_len > dev_len)
req->req.status = -EOVERFLOW;
else
req->req.status = 0;
}
/* many requests terminate without a short packet */
/* also check if we need to send zlp */
} else {
if (req->req.length == req->req.actual) {
if (req->req.zero && to_host)
rescan = 1;
else
req->req.status = 0;
}
if (urb->transfer_buffer_length == urb->actual_length) {
if (urb->transfer_flags & URB_ZERO_PACKET &&
!to_host)
rescan = 1;
else
urb->status = 0;
}
}
/* device side completion --> continuable */
if (req->req.status != -EINPROGRESS) {
list_del_init(&req->req_entry);
spin_unlock(&udc->lock);
usb_gadget_giveback_request(&ep->ep, &req->req);
spin_lock(&udc->lock);
/* requests might have been unlinked... */
rescan = 1;
}
/* host side completion --> terminate */
if (urb->status != -EINPROGRESS)
break;
/* rescan to continue with any other queued i/o */
if (rescan)
goto top;
}
return sent;
}
static void v_timer(struct timer_list *t)
{
struct vudc *udc = from_timer(udc, t, tr_timer.timer);
struct transfer_timer *timer = &udc->tr_timer;
struct urbp *urb_p, *tmp;
unsigned long flags;
struct usb_ep *_ep;
struct vep *ep;
int ret = 0;
int total, limit;
spin_lock_irqsave(&udc->lock, flags);
total = get_frame_limit(udc->gadget.speed);
if (total < 0) { /* unknown speed, or not set yet */
timer->state = VUDC_TR_IDLE;
spin_unlock_irqrestore(&udc->lock, flags);
return;
}
/* is it next frame now? */
if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
timer->frame_limit = total;
/* FIXME: how to make it accurate? */
timer->frame_start = jiffies;
} else {
total = timer->frame_limit;
}
/* We have to clear ep0 flags separately as it's not on the list */
udc->ep[0].already_seen = 0;
list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
ep = to_vep(_ep);
ep->already_seen = 0;
}
restart:
list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
struct urb *urb = urb_p->urb;
ep = urb_p->ep;
if (urb->unlinked)
goto return_urb;
if (timer->state != VUDC_TR_RUNNING)
continue;
if (!ep) {
urb->status = -EPROTO;
goto return_urb;
}
/* Used up bandwidth? */
if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
continue;
if (ep->already_seen)
continue;
ep->already_seen = 1;
if (ep == &udc->ep[0] && urb_p->new) {
ep->setup_stage = 1;
urb_p->new = 0;
}
if (ep->halted && !ep->setup_stage) {
urb->status = -EPIPE;
goto return_urb;
}
if (ep == &udc->ep[0] && ep->setup_stage) {
/* TODO - flush any stale requests */
ep->setup_stage = 0;
ep->halted = 0;
ret = handle_control_request(udc, urb,
(struct usb_ctrlrequest *) urb->setup_packet,
(&urb->status));
if (ret > 0) {
spin_unlock(&udc->lock);
ret = udc->driver->setup(&udc->gadget,
(struct usb_ctrlrequest *)
urb->setup_packet);
spin_lock(&udc->lock);
}
if (ret >= 0) {
/* no delays (max 64kb data stage) */
limit = 64 * 1024;
goto treat_control_like_bulk;
} else {
urb->status = -EPIPE;
urb->actual_length = 0;
goto return_urb;
}
}
limit = total;
switch (ep->type) {
case USB_ENDPOINT_XFER_ISOC:
/* TODO: support */
urb->status = -EXDEV;
break;
case USB_ENDPOINT_XFER_INT:
/*
* TODO: figure out bandwidth guarantees
* for now, give unlimited bandwidth
*/
limit += urb->transfer_buffer_length;
fallthrough;
default:
treat_control_like_bulk:
total -= transfer(udc, urb, ep, limit);
}
if (urb->status == -EINPROGRESS)
continue;
return_urb:
if (ep)
ep->already_seen = ep->setup_stage = 0;
spin_lock(&udc->lock_tx);
list_del(&urb_p->urb_entry);
if (!urb->unlinked) {
v_enqueue_ret_submit(udc, urb_p);
} else {
v_enqueue_ret_unlink(udc, urb_p->seqnum,
urb->unlinked);
free_urbp_and_urb(urb_p);
}
wake_up(&udc->tx_waitq);
spin_unlock(&udc->lock_tx);
goto restart;
}
/* TODO - also wait on empty usb_request queues? */
if (list_empty(&udc->urb_queue))
timer->state = VUDC_TR_IDLE;
else
mod_timer(&timer->timer,
timer->frame_start + msecs_to_jiffies(1));
spin_unlock_irqrestore(&udc->lock, flags);
}
/* All timer functions are run with udc->lock held */
void v_init_timer(struct vudc *udc)
{
struct transfer_timer *t = &udc->tr_timer;
timer_setup(&t->timer, v_timer, 0);
t->state = VUDC_TR_STOPPED;
}
void v_start_timer(struct vudc *udc)
{
struct transfer_timer *t = &udc->tr_timer;
dev_dbg(&udc->pdev->dev, "timer start");
switch (t->state) {
case VUDC_TR_RUNNING:
return;
case VUDC_TR_IDLE:
return v_kick_timer(udc, jiffies);
case VUDC_TR_STOPPED:
t->state = VUDC_TR_IDLE;
t->frame_start = jiffies;
t->frame_limit = get_frame_limit(udc->gadget.speed);
return v_kick_timer(udc, jiffies);
}
}
void v_kick_timer(struct vudc *udc, unsigned long time)
{
struct transfer_timer *t = &udc->tr_timer;
dev_dbg(&udc->pdev->dev, "timer kick");
switch (t->state) {
case VUDC_TR_RUNNING:
return;
case VUDC_TR_IDLE:
t->state = VUDC_TR_RUNNING;
fallthrough;
case VUDC_TR_STOPPED:
/* we may want to kick timer to unqueue urbs */
mod_timer(&t->timer, time);
}
}
void v_stop_timer(struct vudc *udc)
{
struct transfer_timer *t = &udc->tr_timer;
/* timer itself will take care of stopping */
dev_dbg(&udc->pdev->dev, "timer stop");
t->state = VUDC_TR_STOPPED;
}
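/*
 * Informal state summary: STOPPED -> IDLE in v_start_timer() (which
 * then kicks the timer), IDLE -> RUNNING in v_kick_timer(), RUNNING ->
 * IDLE in v_timer() once the urb queue drains, and any state ->
 * STOPPED in v_stop_timer().
 */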
| linux-master | drivers/usb/usbip/vudc_transfer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
static int is_clear_halt_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
(req->bRequestType == USB_RECIP_ENDPOINT) &&
(req->wValue == USB_ENDPOINT_HALT);
}
static int is_set_interface_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_SET_INTERFACE) &&
(req->bRequestType == USB_RECIP_INTERFACE);
}
static int is_set_configuration_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *) urb->setup_packet;
return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
(req->bRequestType == USB_RECIP_DEVICE);
}
static int is_reset_device_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
__u16 value;
__u16 index;
req = (struct usb_ctrlrequest *) urb->setup_packet;
value = le16_to_cpu(req->wValue);
index = le16_to_cpu(req->wIndex);
if ((req->bRequest == USB_REQ_SET_FEATURE) &&
(req->bRequestType == USB_RT_PORT) &&
(value == USB_PORT_FEAT_RESET)) {
usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
return 1;
} else
return 0;
}
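/*
 * For reference, a matching reset request has bRequestType ==
 * USB_RT_PORT (0x23), bRequest == USB_REQ_SET_FEATURE (0x03) and
 * wValue == USB_PORT_FEAT_RESET (4).
 */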
static int tweak_clear_halt_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
int target_endp;
int target_dir;
int target_pipe;
int ret;
req = (struct usb_ctrlrequest *) urb->setup_packet;
/*
* The stalled endpoint is specified in the wIndex value. The endpoint
* of the urb is the target of this clear_halt request (i.e., control
* endpoint).
*/
target_endp = le16_to_cpu(req->wIndex) & 0x000f;
/* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
target_dir = le16_to_cpu(req->wIndex) & 0x0080;
if (target_dir)
target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
else
target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
ret = usb_clear_halt(urb->dev, target_pipe);
if (ret < 0)
dev_err(&urb->dev->dev,
"usb_clear_halt error: devnum %d endp %d ret %d\n",
urb->dev->devnum, target_endp, ret);
else
dev_info(&urb->dev->dev,
"usb_clear_halt done: devnum %d endp %d\n",
urb->dev->devnum, target_endp);
return ret;
}
static int tweak_set_interface_cmd(struct urb *urb)
{
struct usb_ctrlrequest *req;
__u16 alternate;
__u16 interface;
int ret;
req = (struct usb_ctrlrequest *) urb->setup_packet;
alternate = le16_to_cpu(req->wValue);
interface = le16_to_cpu(req->wIndex);
usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
interface, alternate);
ret = usb_set_interface(urb->dev, interface, alternate);
if (ret < 0)
dev_err(&urb->dev->dev,
"usb_set_interface error: inf %u alt %u ret %d\n",
interface, alternate, ret);
else
dev_info(&urb->dev->dev,
"usb_set_interface done: inf %u alt %u\n",
interface, alternate);
return ret;
}
static int tweak_set_configuration_cmd(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
struct usb_ctrlrequest *req;
__u16 config;
int err;
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
usb_lock_device(sdev->udev);
err = usb_set_configuration(sdev->udev, config);
usb_unlock_device(sdev->udev);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
return 0;
}
static int tweak_reset_device_cmd(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
return 0;
}
usb_reset_device(sdev->udev);
usb_unlock_device(sdev->udev);
return 0;
}
/*
* clear_halt, set_interface, and set_configuration require special tricks.
*/
static void tweak_special_requests(struct urb *urb)
{
if (!urb || !urb->setup_packet)
return;
if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
return;
if (is_clear_halt_cmd(urb))
/* tweak clear_halt */
tweak_clear_halt_cmd(urb);
else if (is_set_interface_cmd(urb))
/* tweak set_interface */
tweak_set_interface_cmd(urb);
else if (is_set_configuration_cmd(urb))
/* tweak set_configuration */
tweak_set_configuration_cmd(urb);
else if (is_reset_device_cmd(urb))
tweak_reset_device_cmd(urb);
else
usbip_dbg_stub_rx("no need to tweak\n");
}
/*
* stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
* By unlinking the urb asynchronously, stub_rx can continuously
* process coming urbs. Even if the urb is unlinked, its completion
* handler will be called and stub_tx will send a return pdu.
*
* See also comments about unlinking strategy in vhci_hcd.c.
*/
static int stub_recv_cmd_unlink(struct stub_device *sdev,
struct usbip_header *pdu)
{
int ret, i;
unsigned long flags;
struct stub_priv *priv;
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry(priv, &sdev->priv_init, list) {
if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
/*
 * This matched urb is not completed yet (i.e., it is still
 * in flight in the usb hcd hardware/driver). Now we are
 * cancelling it. The unlinking flag means that we are
 * now not going to return the normal result pdu of a
 * submission request, but the result pdu of the unlink
 * request instead.
 */
priv->unlinking = 1;
/*
 * When the unlinking flag is on, priv->seqnum is
 * changed from the seqnum of the urb being cancelled to
 * the seqnum of the unlink request. This will be used
 * to build the result pdu of the unlink request.
 */
priv->seqnum = pdu->base.seqnum;
spin_unlock_irqrestore(&sdev->priv_lock, flags);
/*
 * usb_unlink_urb() is called outside the spinlock to
 * avoid spinlock recursion, since stub_complete() is
 * sometimes called in this context rather than in
 * interrupt context. If stub_complete() runs before we
 * call usb_unlink_urb(), usb_unlink_urb() will return an
 * error value. In this case, stub_tx will still return
 * the result pdu of this unlink request, even though the
 * submission completed and no actual unlinking was
 * performed.
 */
/* In that case urb->status is not -ECONNRESET, so the
 * driver on the client host can detect that the unlink
 * request failed.
 */
for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
ret = usb_unlink_urb(priv->urbs[i]);
if (ret != -EINPROGRESS)
dev_err(&priv->urbs[i]->dev->dev,
"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
i + 1, priv->num_urbs,
priv->seqnum, ret);
}
return 0;
}
usbip_dbg_stub_rx("seqnum %d is not pending\n",
pdu->u.cmd_unlink.seqnum);
/*
 * The urb of the unlink target is not found in the priv_init queue. It
 * was already completed and its result was (or is about to be) sent by
 * a CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
 * report the completion of this unlink request to vhci_hcd.
 */
stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return 0;
}
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usbip_device *ud = &sdev->ud;
int valid = 0;
if (pdu->base.devid == sdev->devid) {
spin_lock_irq(&ud->lock);
if (ud->status == SDEV_ST_USED) {
/* A request is valid. */
valid = 1;
}
spin_unlock_irq(&ud->lock);
}
return valid;
}
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
struct usbip_header *pdu)
{
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
unsigned long flags;
spin_lock_irqsave(&sdev->priv_lock, flags);
priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
if (!priv) {
dev_err(&sdev->udev->dev, "alloc stub_priv\n");
spin_unlock_irqrestore(&sdev->priv_lock, flags);
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return NULL;
}
priv->seqnum = pdu->base.seqnum;
priv->sdev = sdev;
/*
* After a stub_priv is linked to a list_head,
* our error handler can free allocated data.
*/
list_add_tail(&priv->list, &sdev->priv_init);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return priv;
}
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
int epnum = pdu->base.ep;
int dir = pdu->base.direction;
if (epnum < 0 || epnum > 15)
goto err_ret;
if (dir == USBIP_DIR_IN)
ep = udev->ep_in[epnum & 0x7f];
else
ep = udev->ep_out[epnum & 0x7f];
if (!ep)
goto err_ret;
epd = &ep->desc;
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
else
return usb_rcvctrlpipe(udev, epnum);
}
if (usb_endpoint_xfer_bulk(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndbulkpipe(udev, epnum);
else
return usb_rcvbulkpipe(udev, epnum);
}
if (usb_endpoint_xfer_int(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndintpipe(udev, epnum);
else
return usb_rcvintpipe(udev, epnum);
}
if (usb_endpoint_xfer_isoc(epd)) {
/* validate number of packets */
if (pdu->u.cmd_submit.number_of_packets < 0 ||
pdu->u.cmd_submit.number_of_packets >
USBIP_MAX_ISO_PACKETS) {
dev_err(&sdev->udev->dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);
return -1;
}
if (dir == USBIP_DIR_OUT)
return usb_sndisocpipe(udev, epnum);
else
return usb_rcvisocpipe(udev, epnum);
}
err_ret:
/* invalid endpoint number or unconfigured endpoint */
dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
return -1;
}
static void masking_bogus_flags(struct urb *urb)
{
int xfertype;
struct usb_device *dev;
struct usb_host_endpoint *ep;
int is_out;
unsigned int allowed;
if (!urb || urb->hcpriv || !urb->complete)
return;
dev = urb->dev;
if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
return;
ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
[usb_pipeendpoint(urb->pipe)];
if (!ep)
return;
xfertype = usb_endpoint_type(&ep->desc);
if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *) urb->setup_packet;
if (!setup)
return;
is_out = !(setup->bRequestType & USB_DIR_IN) ||
!setup->wLength;
} else {
is_out = usb_endpoint_dir_out(&ep->desc);
}
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
URB_DIR_MASK | URB_FREE_BUFFER);
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
if (is_out)
allowed |= URB_ZERO_PACKET;
fallthrough;
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
break;
case USB_ENDPOINT_XFER_ISOC:
allowed |= URB_ISO_ASAP;
break;
}
urb->transfer_flags &= allowed;
}
static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
{
int ret;
int i;
for (i = 0; i < priv->num_urbs; i++) {
ret = usbip_recv_xbuff(ud, priv->urbs[i]);
if (ret < 0)
break;
}
return ret;
}
static void stub_recv_cmd_submit(struct stub_device *sdev,
struct usbip_header *pdu)
{
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
struct scatterlist *sgl = NULL, *sg;
void *buffer = NULL;
unsigned long long buf_len;
int nents;
int num_urbs = 1;
int pipe = get_pipe(sdev, pdu);
int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG;
int support_sg = 1;
int np = 0;
int ret, i;
if (pipe == -1)
return;
/*
 * Smatch reported the error case where use_sg is true and buf_len is 0.
 * In this case, we add SDEV_EVENT_ERROR_MALLOC; stub_priv will be
 * released by the stub event handler and the connection will be shut
 * down.
 */
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
if (use_sg && !buf_len) {
dev_err(&udev->dev, "sg buffer with zero length\n");
goto err_malloc;
}
/* allocate urb transfer buffer, if needed */
if (buf_len) {
if (use_sg) {
sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
if (!sgl)
goto err_malloc;
/* Check if the server's HCD supports SG */
if (!udev->bus->sg_tablesize) {
/*
 * If the server's HCD doesn't support SG, break
 * a single SG request into several URBs and map
 * each SG list entry to a corresponding URB
 * buffer. The previously allocated SG list is
 * stored in priv->sgl (if the server's HCD
 * supports SG, the SG list is stored only in
 * urb->sg) and is used as an indicator that
 * the server split a single SG request into
 * several URBs. Later, priv->sgl is used by
 * stub_complete() and stub_send_ret_submit() to
 * reassemble the divided URBs.
 */
support_sg = 0;
num_urbs = nents;
priv->completed_urbs = 0;
pdu->u.cmd_submit.transfer_flags &=
~USBIP_URB_DMA_MAP_SG;
}
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
if (!buffer)
goto err_malloc;
}
}
/* allocate urb array */
priv->num_urbs = num_urbs;
priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
if (!priv->urbs)
goto err_urbs;
/* setup a urb */
if (support_sg) {
if (usb_pipeisoc(pipe))
np = pdu->u.cmd_submit.number_of_packets;
priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
if (!priv->urbs[0])
goto err_urb;
if (buf_len) {
if (use_sg) {
priv->urbs[0]->sg = sgl;
priv->urbs[0]->num_sgs = nents;
priv->urbs[0]->transfer_buffer = NULL;
} else {
priv->urbs[0]->transfer_buffer = buffer;
}
}
/* copy urb setup packet */
priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
8, GFP_KERNEL);
if (!priv->urbs[0]->setup_packet) {
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
} else {
for_each_sg(sgl, sg, nents, i) {
priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
/* The URBs previously allocated will be freed
 * in stub_device_cleanup_urbs() if an error occurs.
 */
if (!priv->urbs[i])
goto err_urb;
usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
priv->urbs[i]->transfer_buffer = sg_virt(sg);
priv->urbs[i]->transfer_buffer_length = sg->length;
}
priv->sgl = sgl;
}
for (i = 0; i < num_urbs; i++) {
/* set other members from the base header of pdu */
priv->urbs[i]->context = (void *) priv;
priv->urbs[i]->dev = udev;
priv->urbs[i]->pipe = pipe;
priv->urbs[i]->complete = stub_complete;
/* no need to submit an intercepted request, but harmless? */
tweak_special_requests(priv->urbs[i]);
masking_bogus_flags(priv->urbs[i]);
}
if (stub_recv_xbuff(ud, priv) < 0)
return;
if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
return;
/* urb is now ready to submit */
for (i = 0; i < priv->num_urbs; i++) {
ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
if (ret == 0)
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
dev_err(&udev->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urbs[i]);
/*
* Pessimistic.
* This connection will be discarded.
*/
usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
break;
}
}
usbip_dbg_stub_rx("Leave\n");
return;
err_urb:
kfree(priv->urbs);
err_urbs:
kfree(buffer);
sgl_free(sgl);
err_malloc:
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}
/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
int ret;
struct usbip_header pdu;
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
struct device *dev = &sdev->udev->dev;
usbip_dbg_stub_rx("Enter\n");
memset(&pdu, 0, sizeof(pdu));
/* receive a pdu header */
ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
dev_err(dev, "recv a header, %d\n", ret);
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
return;
}
usbip_header_correct_endian(&pdu, 0);
if (usbip_dbg_flag_stub_rx)
usbip_dump_header(&pdu);
if (!valid_request(sdev, &pdu)) {
dev_err(dev, "recv invalid request\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
return;
}
switch (pdu.base.command) {
case USBIP_CMD_UNLINK:
stub_recv_cmd_unlink(sdev, &pdu);
break;
case USBIP_CMD_SUBMIT:
stub_recv_cmd_submit(sdev, &pdu);
break;
default:
/* unexpected command: shut the connection down */
dev_err(dev, "unknown pdu\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
break;
}
}
int stub_rx_loop(void *data)
{
struct usbip_device *ud = data;
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
stub_rx_pdu(ud);
}
return 0;
}
| linux-master | drivers/usb/usbip/stub_rx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015 Nobuo Iwata
*/
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "usbip_common.h"
struct usbip_event {
struct list_head node;
struct usbip_device *ud;
};
static DEFINE_SPINLOCK(event_lock);
static LIST_HEAD(event_list);
static void set_event(struct usbip_device *ud, unsigned long event)
{
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
ud->event |= event;
spin_unlock_irqrestore(&ud->lock, flags);
}
static void unset_event(struct usbip_device *ud, unsigned long event)
{
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
ud->event &= ~event;
spin_unlock_irqrestore(&ud->lock, flags);
}
static struct usbip_device *get_event(void)
{
struct usbip_event *ue = NULL;
struct usbip_device *ud = NULL;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
if (!list_empty(&event_list)) {
ue = list_first_entry(&event_list, struct usbip_event, node);
list_del(&ue->node);
}
spin_unlock_irqrestore(&event_lock, flags);
if (ue) {
ud = ue->ud;
kfree(ue);
}
return ud;
}
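/*
 * Events are queued on a single global list and drained by a
 * single-threaded workqueue (see usbip_init_eh() below), so the
 * eh_ops callbacks never run concurrently with each other.
 */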
static struct task_struct *worker_context;
static void event_handler(struct work_struct *work)
{
struct usbip_device *ud;
if (worker_context == NULL)
worker_context = current;
while ((ud = get_event()) != NULL) {
usbip_dbg_eh("pending event %lx\n", ud->event);
mutex_lock(&ud->sysfs_lock);
/*
* NOTE: shutdown must come first.
* Shutdown the device.
*/
if (ud->event & USBIP_EH_SHUTDOWN) {
ud->eh_ops.shutdown(ud);
unset_event(ud, USBIP_EH_SHUTDOWN);
}
/* Reset the device. */
if (ud->event & USBIP_EH_RESET) {
ud->eh_ops.reset(ud);
unset_event(ud, USBIP_EH_RESET);
}
/* Mark the device as unusable. */
if (ud->event & USBIP_EH_UNUSABLE) {
ud->eh_ops.unusable(ud);
unset_event(ud, USBIP_EH_UNUSABLE);
}
mutex_unlock(&ud->sysfs_lock);
wake_up(&ud->eh_waitq);
}
}
int usbip_start_eh(struct usbip_device *ud)
{
init_waitqueue_head(&ud->eh_waitq);
ud->event = 0;
return 0;
}
EXPORT_SYMBOL_GPL(usbip_start_eh);
void usbip_stop_eh(struct usbip_device *ud)
{
unsigned long pending = ud->event & ~USBIP_EH_BYE;
if (!(ud->event & USBIP_EH_BYE))
usbip_dbg_eh("usbip_eh stopping but not removed\n");
if (pending)
usbip_dbg_eh("usbip_eh waiting completion %lx\n", pending);
wait_event_interruptible(ud->eh_waitq, !(ud->event & ~USBIP_EH_BYE));
usbip_dbg_eh("usbip_eh has stopped\n");
}
EXPORT_SYMBOL_GPL(usbip_stop_eh);
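/*
 * USBIP_EH_BYE marks a device that is going away. usbip_stop_eh()
 * returns only once every other pending event bit has been handled,
 * so the caller can safely free the device afterwards.
 */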
#define WORK_QUEUE_NAME "usbip_event"
static struct workqueue_struct *usbip_queue;
static DECLARE_WORK(usbip_work, event_handler);
int usbip_init_eh(void)
{
usbip_queue = create_singlethread_workqueue(WORK_QUEUE_NAME);
if (usbip_queue == NULL) {
pr_err("failed to create usbip_event\n");
return -ENOMEM;
}
return 0;
}
void usbip_finish_eh(void)
{
destroy_workqueue(usbip_queue);
usbip_queue = NULL;
}
void usbip_event_add(struct usbip_device *ud, unsigned long event)
{
struct usbip_event *ue;
unsigned long flags;
if (ud->event & USBIP_EH_BYE)
return;
set_event(ud, event);
spin_lock_irqsave(&event_lock, flags);
list_for_each_entry_reverse(ue, &event_list, node) {
if (ue->ud == ud)
goto out;
}
ue = kmalloc(sizeof(struct usbip_event), GFP_ATOMIC);
if (ue == NULL)
goto out;
ue->ud = ud;
list_add_tail(&ue->node, &event_list);
queue_work(usbip_queue, &usbip_work);
out:
spin_unlock_irqrestore(&event_lock, flags);
}
EXPORT_SYMBOL_GPL(usbip_event_add);
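/*
 * usbip_event_add() scans event_list in reverse: if the device already
 * has a node queued, only its ud->event bitmask is updated, so each
 * device appears on the list at most once.
 */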
int usbip_event_happened(struct usbip_device *ud)
{
int happened = 0;
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
if (ud->event != 0)
happened = 1;
spin_unlock_irqrestore(&ud->lock, flags);
return happened;
}
EXPORT_SYMBOL_GPL(usbip_event_happened);
int usbip_in_eh(struct task_struct *task)
{
if (task == worker_context)
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(usbip_in_eh);
| linux-master | drivers/usb/usbip/usbip_event.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
* Krzysztof Opasiak <[email protected]>
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/usb/gadget.h>
#include <linux/usb/ch9.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/byteorder/generic.h>
#include "usbip_common.h"
#include "vudc.h"
#include <net/sock.h>
/* called with udc->lock held */
int get_gadget_descs(struct vudc *udc)
{
struct vrequest *usb_req;
struct vep *ep0 = to_vep(udc->gadget.ep0);
struct usb_device_descriptor *ddesc = &udc->dev_desc;
struct usb_ctrlrequest req;
int ret;
if (!udc->driver || !udc->pullup)
return -EINVAL;
req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
req.wIndex = cpu_to_le16(0);
req.wLength = cpu_to_le16(sizeof(*ddesc));
spin_unlock(&udc->lock);
ret = udc->driver->setup(&(udc->gadget), &req);
spin_lock(&udc->lock);
if (ret < 0)
goto out;
/* assuming request queue is empty; request is now on top */
usb_req = list_last_entry(&ep0->req_queue, struct vrequest, req_entry);
list_del(&usb_req->req_entry);
if (usb_req->req.length > sizeof(*ddesc)) {
ret = -EOVERFLOW;
goto giveback_req;
}
memcpy(ddesc, usb_req->req.buf, sizeof(*ddesc));
udc->desc_cached = 1;
ret = 0;
giveback_req:
usb_req->req.status = 0;
usb_req->req.actual = usb_req->req.length;
usb_gadget_giveback_request(&(ep0->ep), &(usb_req->req));
out:
return ret;
}
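/*
 * get_gadget_descs() above fakes a GET_DESCRIPTOR control request
 * toward the bound gadget driver and takes the reply off ep0's request
 * queue, caching the device descriptor for the sysfs file below.
 */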
/*
* Exposes device descriptor from the gadget driver.
*/
static ssize_t dev_desc_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *out,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct vudc *udc = (struct vudc *)dev_get_drvdata(dev);
char *desc_ptr = (char *) &udc->dev_desc;
unsigned long flags;
int ret;
spin_lock_irqsave(&udc->lock, flags);
if (!udc->desc_cached) {
ret = -ENODEV;
goto unlock;
}
memcpy(out, desc_ptr + off, count);
ret = count;
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
static ssize_t usbip_sockfd_store(struct device *dev,
struct device_attribute *attr,
const char *in, size_t count)
{
struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
int rv;
int sockfd = 0;
int err;
struct socket *socket;
unsigned long flags;
int ret;
struct task_struct *tcp_rx = NULL;
struct task_struct *tcp_tx = NULL;
rv = kstrtoint(in, 0, &sockfd);
if (rv != 0)
return -EINVAL;
if (!udc) {
dev_err(dev, "no device");
return -ENODEV;
}
mutex_lock(&udc->ud.sysfs_lock);
spin_lock_irqsave(&udc->lock, flags);
/* Don't export what we don't have */
if (!udc->driver || !udc->pullup) {
dev_err(dev, "gadget not bound");
ret = -ENODEV;
goto unlock;
}
if (sockfd != -1) {
if (udc->connected) {
dev_err(dev, "Device already connected");
ret = -EBUSY;
goto unlock;
}
spin_lock(&udc->ud.lock);
if (udc->ud.status != SDEV_ST_AVAILABLE) {
ret = -EINVAL;
goto unlock_ud;
}
socket = sockfd_lookup(sockfd, &err);
if (!socket) {
dev_err(dev, "failed to lookup sock");
ret = -EINVAL;
goto unlock_ud;
}
if (socket->type != SOCK_STREAM) {
dev_err(dev, "Expecting SOCK_STREAM - found %d",
socket->type);
ret = -EINVAL;
goto sock_err;
}
/* unlock and create threads and get tasks */
spin_unlock(&udc->ud.lock);
spin_unlock_irqrestore(&udc->lock, flags);
tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
if (IS_ERR(tcp_rx)) {
sockfd_put(socket);
mutex_unlock(&udc->ud.sysfs_lock);
return -EINVAL;
}
tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
if (IS_ERR(tcp_tx)) {
kthread_stop(tcp_rx);
sockfd_put(socket);
mutex_unlock(&udc->ud.sysfs_lock);
return -EINVAL;
}
/* get task structs now */
get_task_struct(tcp_rx);
get_task_struct(tcp_tx);
/* lock and update udc->ud state */
spin_lock_irqsave(&udc->lock, flags);
spin_lock(&udc->ud.lock);
udc->ud.tcp_socket = socket;
udc->ud.tcp_rx = tcp_rx;
udc->ud.tcp_tx = tcp_tx;
udc->ud.status = SDEV_ST_USED;
spin_unlock(&udc->ud.lock);
ktime_get_ts64(&udc->start_time);
v_start_timer(udc);
udc->connected = 1;
spin_unlock_irqrestore(&udc->lock, flags);
wake_up_process(udc->ud.tcp_rx);
wake_up_process(udc->ud.tcp_tx);
mutex_unlock(&udc->ud.sysfs_lock);
return count;
} else {
if (!udc->connected) {
dev_err(dev, "Device not connected");
ret = -EINVAL;
goto unlock;
}
spin_lock(&udc->ud.lock);
if (udc->ud.status != SDEV_ST_USED) {
ret = -EINVAL;
goto unlock_ud;
}
spin_unlock(&udc->ud.lock);
usbip_event_add(&udc->ud, VUDC_EVENT_DOWN);
}
spin_unlock_irqrestore(&udc->lock, flags);
mutex_unlock(&udc->ud.sysfs_lock);
return count;
sock_err:
sockfd_put(socket);
unlock_ud:
spin_unlock(&udc->ud.lock);
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
mutex_unlock(&udc->ud.sysfs_lock);
return ret;
}
static DEVICE_ATTR_WO(usbip_sockfd);
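/*
 * Usage sketch (hypothetical sysfs path and fd number): after a
 * userspace tool accepts a TCP connection from a vhci client, it
 * writes the socket's fd here, e.g.
 *   echo 7 > /sys/devices/platform/usbip-vudc.0/usbip_sockfd
 * and writes -1 later to tear the connection down.
 */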
static ssize_t usbip_status_show(struct device *dev,
struct device_attribute *attr, char *out)
{
struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
int status;
if (!udc) {
dev_err(dev, "no device");
return -ENODEV;
}
spin_lock_irq(&udc->ud.lock);
status = udc->ud.status;
spin_unlock_irq(&udc->ud.lock);
return sysfs_emit(out, "%d\n", status);
}
static DEVICE_ATTR_RO(usbip_status);
static struct attribute *dev_attrs[] = {
&dev_attr_usbip_sockfd.attr,
&dev_attr_usbip_status.attr,
NULL,
};
static struct bin_attribute *dev_bin_attrs[] = {
&bin_attr_dev_desc,
NULL,
};
static const struct attribute_group vudc_attr_group = {
.attrs = dev_attrs,
.bin_attrs = dev_bin_attrs,
};
const struct attribute_group *vudc_groups[] = {
&vudc_attr_group,
NULL,
};
| linux-master | drivers/usb/usbip/vudc_sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
*/
#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include "usbip_common.h"
#include "vudc.h"
static inline void setup_base_pdu(struct usbip_header_basic *base,
__u32 command, __u32 seqnum)
{
base->command = command;
base->seqnum = seqnum;
base->devid = 0;
base->ep = 0;
base->direction = 0;
}
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
struct v_unlink *unlink)
{
setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
rpdu->u.ret_unlink.status = unlink->status;
}
static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
struct msghdr msg;
struct kvec iov[1];
size_t txsize;
int ret;
struct usbip_header pdu_header;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
/* 1. setup usbip_header */
setup_ret_unlink_pdu(&pdu_header, unlink);
usbip_header_correct_endian(&pdu_header, 1);
iov[0].iov_base = &pdu_header;
iov[0].iov_len = sizeof(pdu_header);
txsize += sizeof(pdu_header);
ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
1, txsize);
if (ret != txsize) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
if (ret >= 0)
return -EPIPE;
return ret;
}
kfree(unlink);
return txsize;
}
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
struct urb *urb = urb_p->urb;
struct usbip_header pdu_header;
struct usbip_iso_packet_descriptor *iso_buffer = NULL;
struct kvec *iov = NULL;
int iovnum = 0;
int ret = 0;
size_t txsize;
struct msghdr msg;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
if (urb->actual_length > 0 && !urb->transfer_buffer) {
dev_err(&udc->gadget.dev,
"urb: actual_length %d transfer_buffer null\n",
urb->actual_length);
return -1;
}
if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
iovnum = 2 + urb->number_of_packets;
else
iovnum = 2;
iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
if (!iov) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
ret = -ENOMEM;
goto out;
}
iovnum = 0;
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
iov[iovnum].iov_len = sizeof(pdu_header);
iovnum++;
txsize += sizeof(pdu_header);
/* 2. setup transfer buffer */
if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
usb_pipein(urb->pipe) && urb->actual_length > 0) {
iov[iovnum].iov_base = urb->transfer_buffer;
iov[iovnum].iov_len = urb->actual_length;
iovnum++;
txsize += urb->actual_length;
} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
usb_pipein(urb->pipe)) {
/* FIXME - copypasted from stub_tx, refactor */
int i;
for (i = 0; i < urb->number_of_packets; i++) {
iov[iovnum].iov_base = urb->transfer_buffer +
urb->iso_frame_desc[i].offset;
iov[iovnum].iov_len =
urb->iso_frame_desc[i].actual_length;
iovnum++;
txsize += urb->iso_frame_desc[i].actual_length;
}
if (txsize != sizeof(pdu_header) + urb->actual_length) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
ret = -EPIPE;
goto out;
}
}
/* else - no buffer to send */
/* 3. setup iso_packet_descriptor */
if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
ssize_t len = 0;
iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
if (!iso_buffer) {
usbip_event_add(&udc->ud,
VUDC_EVENT_ERROR_MALLOC);
ret = -ENOMEM;
goto out;
}
iov[iovnum].iov_base = iso_buffer;
iov[iovnum].iov_len = len;
txsize += len;
iovnum++;
}
ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
iov, iovnum, txsize);
if (ret != txsize) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
if (ret >= 0)
ret = -EPIPE;
goto out;
}
out:
kfree(iov);
kfree(iso_buffer);
free_urbp_and_urb(urb_p);
if (ret < 0)
return ret;
return txsize;
}
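/*
 * v_send_ret_submit() assembles a single kernel_sendmsg() call from up
 * to three parts: the usbip header, the IN payload (one kvec per packet
 * for isochronous URBs), and the iso_packet_descriptor array.
 */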
static int v_send_ret(struct vudc *udc)
{
unsigned long flags;
struct tx_item *txi;
size_t total_size = 0;
int ret = 0;
spin_lock_irqsave(&udc->lock_tx, flags);
while (!list_empty(&udc->tx_queue)) {
txi = list_first_entry(&udc->tx_queue, struct tx_item,
tx_entry);
list_del(&txi->tx_entry);
spin_unlock_irqrestore(&udc->lock_tx, flags);
switch (txi->type) {
case TX_SUBMIT:
ret = v_send_ret_submit(udc, txi->s);
break;
case TX_UNLINK:
ret = v_send_ret_unlink(udc, txi->u);
break;
}
kfree(txi);
if (ret < 0)
return ret;
total_size += ret;
spin_lock_irqsave(&udc->lock_tx, flags);
}
spin_unlock_irqrestore(&udc->lock_tx, flags);
return total_size;
}
int v_tx_loop(void *data)
{
struct usbip_device *ud = (struct usbip_device *) data;
struct vudc *udc = container_of(ud, struct vudc, ud);
int ret;
while (!kthread_should_stop()) {
if (usbip_event_happened(&udc->ud))
break;
ret = v_send_ret(udc);
if (ret < 0) {
pr_warn("v_tx exit with error %d", ret);
break;
}
wait_event_interruptible(udc->tx_waitq,
(!list_empty(&udc->tx_queue) ||
kthread_should_stop()));
}
return 0;
}
/* called with spinlocks held */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
struct tx_item *txi;
struct v_unlink *unlink;
txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
if (!txi) {
usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
if (!unlink) {
kfree(txi);
usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
unlink->seqnum = seqnum;
unlink->status = status;
txi->type = TX_UNLINK;
txi->u = unlink;
list_add_tail(&txi->tx_entry, &udc->tx_queue);
}
/* called with spinlocks held */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
struct tx_item *txi;
txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
if (!txi) {
usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
txi->type = TX_SUBMIT;
txi->s = urb_p;
list_add_tail(&txi->tx_entry, &udc->tx_queue);
}
| linux-master | drivers/usb/usbip/vudc_tx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include "usbip_common.h"
#include "vhci.h"
/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
{
struct vhci_priv *priv, *tmp;
struct urb *urb = NULL;
int status;
list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
if (priv->seqnum != seqnum)
continue;
urb = priv->urb;
status = urb->status;
usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
switch (status) {
case -ENOENT:
fallthrough;
case -ECONNRESET:
dev_dbg(&urb->dev->dev,
"urb seq# %u was unlinked %ssynchronously\n",
seqnum, status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
/* no info output */
break;
default:
dev_dbg(&urb->dev->dev,
"urb seq# %u may be in a error, status %d\n",
seqnum, status);
}
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
break;
}
return urb;
}
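/*
 * Matching is purely by sequence number: the tx path moved each
 * submitted URB's vhci_priv onto priv_rx, so a RET_SUBMIT carrying the
 * same seqnum uniquely identifies the URB to give back.
 */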
static void vhci_recv_ret_submit(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
struct vhci *vhci = vhci_hcd->vhci;
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
unsigned long flags;
spin_lock_irqsave(&vdev->priv_lock, flags);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
/* unpack the pdu to a urb */
usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
/* recv transfer buffer */
if (usbip_recv_xbuff(ud, urb) < 0) {
urb->status = -EPROTO;
goto error;
}
/* recv iso_packet_descriptor */
if (usbip_recv_iso(ud, urb) < 0) {
urb->status = -EPROTO;
goto error;
}
/* restore the padding in iso packets */
usbip_pad_iso(ud, urb);
error:
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
if (urb->num_sgs)
urb->transfer_flags &= ~URB_DMA_MAP_SG;
usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
spin_lock_irqsave(&vhci->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb, urb->status);
usbip_dbg_vhci_rx("Leave\n");
}
static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_unlink *unlink, *tmp;
unsigned long flags;
spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
pr_info("unlink->seqnum %lu\n", unlink->seqnum);
if (unlink->seqnum == pdu->base.seqnum) {
usbip_dbg_vhci_rx("found pending unlink, %lu\n",
unlink->seqnum);
list_del(&unlink->list);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return unlink;
}
}
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
static void vhci_recv_ret_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
struct vhci *vhci = vhci_hcd->vhci;
struct vhci_unlink *unlink;
struct urb *urb;
unsigned long flags;
usbip_dump_header(pdu);
unlink = dequeue_pending_unlink(vdev, pdu);
if (!unlink) {
pr_info("cannot find the pending unlink %u\n",
pdu->base.seqnum);
return;
}
spin_lock_irqsave(&vdev->priv_lock, flags);
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
/*
 * We got the result of an unlink request, but it seems we
 * already received the result of the corresponding submit
 * and gave the URB back.
 */
pr_info("the urb (seqnum %d) was already given back\n",
pdu->base.seqnum);
} else {
usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
spin_lock_irqsave(&vhci->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb, urb->status);
}
kfree(unlink);
}
static int vhci_priv_tx_empty(struct vhci_device *vdev)
{
int empty = 0;
unsigned long flags;
spin_lock_irqsave(&vdev->priv_lock, flags);
empty = list_empty(&vdev->priv_rx);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return empty;
}
/* recv a pdu */
static void vhci_rx_pdu(struct usbip_device *ud)
{
int ret;
struct usbip_header pdu;
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
usbip_dbg_vhci_rx("Enter\n");
memset(&pdu, 0, sizeof(pdu));
/* receive a pdu header */
ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret < 0) {
if (ret == -ECONNRESET)
pr_info("connection reset by peer\n");
else if (ret == -EAGAIN) {
/* ignore if connection was idle */
if (vhci_priv_tx_empty(vdev))
return;
pr_info("connection timed out with pending urbs\n");
} else if (ret != -ERESTARTSYS)
pr_info("xmit failed %d\n", ret);
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
if (ret == 0) {
pr_info("connection closed");
usbip_event_add(ud, VDEV_EVENT_DOWN);
return;
}
if (ret != sizeof(pdu)) {
pr_err("received pdu size is %d, should be %d\n", ret,
(unsigned int)sizeof(pdu));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
usbip_header_correct_endian(&pdu, 0);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_header(&pdu);
switch (pdu.base.command) {
case USBIP_RET_SUBMIT:
vhci_recv_ret_submit(vdev, &pdu);
break;
case USBIP_RET_UNLINK:
vhci_recv_ret_unlink(vdev, &pdu);
break;
default:
/* NOT REACHED */
pr_err("unknown pdu %u\n", pdu.base.command);
usbip_dump_header(&pdu);
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
break;
}
}
int vhci_rx_loop(void *data)
{
struct usbip_device *ud = data;
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
usbip_kcov_remote_start(ud);
vhci_rx_pdu(ud);
usbip_kcov_remote_stop();
}
return 0;
}
| linux-master | drivers/usb/usbip/vhci_rx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
* Krzysztof Opasiak <[email protected]>
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include "vudc.h"
static unsigned int vudc_number = 1;
module_param_named(num, vudc_number, uint, S_IRUGO);
MODULE_PARM_DESC(num, "number of emulated controllers");
static struct platform_driver vudc_driver = {
.probe = vudc_probe,
.remove = vudc_remove,
.driver = {
.name = GADGET_NAME,
.dev_groups = vudc_groups,
},
};
static LIST_HEAD(vudc_devices);
static int __init vudc_init(void)
{
int retval = -ENOMEM;
int i;
struct vudc_device *udc_dev = NULL, *udc_dev2 = NULL;
if (usb_disabled())
return -ENODEV;
if (vudc_number < 1) {
pr_err("Number of emulated UDC must be no less than 1");
return -EINVAL;
}
retval = platform_driver_register(&vudc_driver);
if (retval < 0)
goto out;
for (i = 0; i < vudc_number; i++) {
udc_dev = alloc_vudc_device(i);
if (!udc_dev) {
retval = -ENOMEM;
goto cleanup;
}
retval = platform_device_add(udc_dev->pdev);
if (retval < 0) {
put_vudc_device(udc_dev);
goto cleanup;
}
list_add_tail(&udc_dev->dev_entry, &vudc_devices);
if (!platform_get_drvdata(udc_dev->pdev)) {
/*
* The udc was added successfully but its probe
* function failed for some reason.
*/
retval = -EINVAL;
goto cleanup;
}
}
goto out;
cleanup:
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
/*
 * Only platform_device_del() here; put_vudc_device()
 * does the platform_device_put().
 */
platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
platform_driver_unregister(&vudc_driver);
out:
return retval;
}
module_init(vudc_init);
static void __exit vudc_cleanup(void)
{
struct vudc_device *udc_dev = NULL, *udc_dev2 = NULL;
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
/*
 * Only platform_device_del() here; put_vudc_device()
 * does the platform_device_put().
 */
platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
platform_driver_unregister(&vudc_driver);
}
module_exit(vudc_cleanup);
MODULE_DESCRIPTION("USB over IP Device Controller");
MODULE_AUTHOR("Krzysztof Opasiak, Karol Kosik, Igor Kotrasinski");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/usbip/vudc_main.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
*/
#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include "usbip_common.h"
#include "vudc.h"
static int alloc_urb_from_cmd(struct urb **urbp,
struct usbip_header *pdu, u8 type)
{
struct urb *urb;
if (type == USB_ENDPOINT_XFER_ISOC)
urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
GFP_KERNEL);
else
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto err;
usbip_pack_pdu(pdu, urb, USBIP_CMD_SUBMIT, 0);
if (urb->transfer_buffer_length > 0) {
urb->transfer_buffer = kzalloc(urb->transfer_buffer_length,
GFP_KERNEL);
if (!urb->transfer_buffer)
goto free_urb;
}
urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
GFP_KERNEL);
if (!urb->setup_packet)
goto free_buffer;
/*
* FIXME - we only setup pipe enough for usbip functions
* to behave nicely
*/
urb->pipe |= pdu->base.direction == USBIP_DIR_IN ?
USB_DIR_IN : USB_DIR_OUT;
*urbp = urb;
return 0;
free_buffer:
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
free_urb:
usb_free_urb(urb);
err:
return -ENOMEM;
}
static int v_recv_cmd_unlink(struct vudc *udc,
struct usbip_header *pdu)
{
unsigned long flags;
struct urbp *urb_p;
spin_lock_irqsave(&udc->lock, flags);
list_for_each_entry(urb_p, &udc->urb_queue, urb_entry) {
if (urb_p->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
urb_p->urb->unlinked = -ECONNRESET;
urb_p->seqnum = pdu->base.seqnum;
v_kick_timer(udc, jiffies);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
/* Not found, completed / not queued */
spin_lock(&udc->lock_tx);
v_enqueue_ret_unlink(udc, pdu->base.seqnum, 0);
wake_up(&udc->tx_waitq);
spin_unlock(&udc->lock_tx);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
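/*
 * If the URB was still queued, it is marked unlinked and left for the
 * device machinery to complete; otherwise a RET_UNLINK with status 0
 * is sent right away, meaning the URB had already completed (or was
 * never queued in the first place).
 */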
static int v_recv_cmd_submit(struct vudc *udc,
struct usbip_header *pdu)
{
int ret = 0;
struct urbp *urb_p;
u8 address;
unsigned long flags;
urb_p = alloc_urbp();
if (!urb_p) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
/* base.ep is pipeendpoint(pipe) */
address = pdu->base.ep;
if (pdu->base.direction == USBIP_DIR_IN)
address |= USB_DIR_IN;
spin_lock_irqsave(&udc->lock, flags);
urb_p->ep = vudc_find_endpoint(udc, address);
if (!urb_p->ep) {
/* we don't know the type, there may be isoc data! */
dev_err(&udc->pdev->dev, "request to nonexistent endpoint");
spin_unlock_irqrestore(&udc->lock, flags);
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
ret = -EPIPE;
goto free_urbp;
}
urb_p->type = urb_p->ep->type;
spin_unlock_irqrestore(&udc->lock, flags);
urb_p->new = 1;
urb_p->seqnum = pdu->base.seqnum;
if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
/* validate packet size and number of packets */
unsigned int maxp, packets, bytes;
maxp = usb_endpoint_maxp(urb_p->ep->desc);
maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
bytes = pdu->u.cmd_submit.transfer_buffer_length;
packets = DIV_ROUND_UP(bytes, maxp);
if (pdu->u.cmd_submit.number_of_packets < 0 ||
pdu->u.cmd_submit.number_of_packets > packets) {
dev_err(&udc->gadget.dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);
ret = -EMSGSIZE;
goto free_urbp;
}
}
ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
if (ret) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
ret = -ENOMEM;
goto free_urbp;
}
urb_p->urb->status = -EINPROGRESS;
/* FIXME: more pipe setup to please usbip_common */
BUILD_BUG_ON_MSG(PIPE_BULK != 3, "PIPE_* doesn't range from 0 to 3");
urb_p->urb->pipe &= ~(PIPE_BULK << 30);
switch (urb_p->ep->type) {
case USB_ENDPOINT_XFER_BULK:
urb_p->urb->pipe |= (PIPE_BULK << 30);
break;
case USB_ENDPOINT_XFER_INT:
urb_p->urb->pipe |= (PIPE_INTERRUPT << 30);
break;
case USB_ENDPOINT_XFER_CONTROL:
urb_p->urb->pipe |= (PIPE_CONTROL << 30);
break;
case USB_ENDPOINT_XFER_ISOC:
urb_p->urb->pipe |= (PIPE_ISOCHRONOUS << 30);
break;
}
ret = usbip_recv_xbuff(&udc->ud, urb_p->urb);
if (ret < 0)
goto free_urbp;
ret = usbip_recv_iso(&udc->ud, urb_p->urb);
if (ret < 0)
goto free_urbp;
spin_lock_irqsave(&udc->lock, flags);
v_kick_timer(udc, jiffies);
list_add_tail(&urb_p->urb_entry, &udc->urb_queue);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
free_urbp:
free_urbp_and_urb(urb_p);
return ret;
}
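/*
 * Note the isochronous sanity check above: number_of_packets from the
 * wire is bounded by how many max-size packets transfer_buffer_length
 * could hold, so a malicious peer cannot force an oversized iso
 * descriptor allocation.
 */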
static int v_rx_pdu(struct usbip_device *ud)
{
int ret;
struct usbip_header pdu;
struct vudc *udc = container_of(ud, struct vudc, ud);
memset(&pdu, 0, sizeof(pdu));
ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
if (ret >= 0)
return -EPIPE;
return ret;
}
usbip_header_correct_endian(&pdu, 0);
spin_lock_irq(&ud->lock);
ret = (ud->status == SDEV_ST_USED);
spin_unlock_irq(&ud->lock);
if (!ret) {
usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
return -EBUSY;
}
switch (pdu.base.command) {
case USBIP_CMD_UNLINK:
ret = v_recv_cmd_unlink(udc, &pdu);
break;
case USBIP_CMD_SUBMIT:
ret = v_recv_cmd_submit(udc, &pdu);
break;
default:
ret = -EPIPE;
pr_err("rx: unknown command");
break;
}
return ret;
}
int v_rx_loop(void *data)
{
struct usbip_device *ud = data;
int ret = 0;
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
ret = v_rx_pdu(ud);
if (ret < 0) {
pr_warn("v_rx exit with error %d", ret);
break;
}
}
return ret;
}
| linux-master | drivers/usb/usbip/vudc_rx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
*/
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Hardening for Spectre-v1 */
#include <linux/nospec.h>
#include "usbip_common.h"
#include "vhci.h"
/* TODO: refine locking? */
/*
* output example:
* hub port sta spd dev sockfd local_busid
* hs 0000 004 000 00000000 000003 1-2.3
* ................................................
* ss 0008 004 000 00000000 000004 2-3.4
* ................................................
*
* Output includes socket fd instead of socket pointer address to avoid
* leaking kernel memory address in:
* /sys/devices/platform/vhci_hcd.0/status and in debug output.
* The socket pointer address is not used at the moment and it was made
* visible as a convenient way to find IP address from socket pointer
* address by looking up /proc/net/{tcp,tcp6}. As this opens a security
* hole, the change is made to use sockfd instead.
*
*/
static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
{
if (hub == HUB_SPEED_HIGH)
*out += sprintf(*out, "hs %04u %03u ",
port, vdev->ud.status);
else /* hub == HUB_SPEED_SUPER */
*out += sprintf(*out, "ss %04u %03u ",
port, vdev->ud.status);
if (vdev->ud.status == VDEV_ST_USED) {
*out += sprintf(*out, "%03u %08x ",
vdev->speed, vdev->devid);
*out += sprintf(*out, "%06u %s",
vdev->ud.sockfd,
dev_name(&vdev->udev->dev));
} else {
*out += sprintf(*out, "000 00000000 ");
*out += sprintf(*out, "000000 0-0");
}
*out += sprintf(*out, "\n");
}
/* Sysfs entry to show port status */
static ssize_t status_show_vhci(int pdev_nr, char *out)
{
struct platform_device *pdev = vhcis[pdev_nr].pdev;
struct vhci *vhci;
struct usb_hcd *hcd;
struct vhci_hcd *vhci_hcd;
char *s = out;
int i;
unsigned long flags;
if (!pdev || !out) {
usbip_dbg_vhci_sysfs("show status error\n");
return 0;
}
hcd = platform_get_drvdata(pdev);
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
spin_lock_irqsave(&vhci->lock, flags);
for (i = 0; i < VHCI_HC_PORTS; i++) {
struct vhci_device *vdev = &vhci->vhci_hcd_hs->vdev[i];
spin_lock(&vdev->ud.lock);
port_show_vhci(&out, HUB_SPEED_HIGH,
pdev_nr * VHCI_PORTS + i, vdev);
spin_unlock(&vdev->ud.lock);
}
for (i = 0; i < VHCI_HC_PORTS; i++) {
struct vhci_device *vdev = &vhci->vhci_hcd_ss->vdev[i];
spin_lock(&vdev->ud.lock);
port_show_vhci(&out, HUB_SPEED_SUPER,
pdev_nr * VHCI_PORTS + VHCI_HC_PORTS + i, vdev);
spin_unlock(&vdev->ud.lock);
}
spin_unlock_irqrestore(&vhci->lock, flags);
return out - s;
}
static ssize_t status_show_not_ready(int pdev_nr, char *out)
{
char *s = out;
int i = 0;
for (i = 0; i < VHCI_HC_PORTS; i++) {
out += sprintf(out, "hs %04u %03u ",
(pdev_nr * VHCI_PORTS) + i,
VDEV_ST_NOTASSIGNED);
out += sprintf(out, "000 00000000 0000000000000000 0-0");
out += sprintf(out, "\n");
}
for (i = 0; i < VHCI_HC_PORTS; i++) {
out += sprintf(out, "ss %04u %03u ",
(pdev_nr * VHCI_PORTS) + VHCI_HC_PORTS + i,
VDEV_ST_NOTASSIGNED);
out += sprintf(out, "000 00000000 0000000000000000 0-0");
out += sprintf(out, "\n");
}
return out - s;
}
static int status_name_to_id(const char *name)
{
char *c;
long val;
int ret;
c = strchr(name, '.');
if (c == NULL)
return 0;
ret = kstrtol(c+1, 10, &val);
if (ret < 0)
return ret;
return val;
}
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *out)
{
char *s = out;
int pdev_nr;
out += sprintf(out,
"hub port sta spd dev sockfd local_busid\n");
pdev_nr = status_name_to_id(attr->attr.name);
if (pdev_nr < 0)
out += status_show_not_ready(pdev_nr, out);
else
out += status_show_vhci(pdev_nr, out);
return out - s;
}
static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
char *out)
{
char *s = out;
/*
 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER;
 * VHCI_PORTS already includes the * 2.
 */
out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
return out - s;
}
static DEVICE_ATTR_RO(nports);
/* Sysfs entry to shutdown a virtual connection */
static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
{
struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
struct vhci *vhci = vhci_hcd->vhci;
unsigned long flags;
usbip_dbg_vhci_sysfs("enter\n");
mutex_lock(&vdev->ud.sysfs_lock);
/* lock */
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
if (vdev->ud.status == VDEV_ST_NULL) {
pr_err("not connected %d\n", vdev->ud.status);
/* unlock */
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
mutex_unlock(&vdev->ud.sysfs_lock);
return -EINVAL;
}
/* unlock */
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
mutex_unlock(&vdev->ud.sysfs_lock);
return 0;
}
static int valid_port(__u32 *pdev_nr, __u32 *rhport)
{
if (*pdev_nr >= vhci_num_controllers) {
pr_err("pdev %u\n", *pdev_nr);
return 0;
}
*pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
if (*rhport >= VHCI_HC_PORTS) {
pr_err("rhport %u\n", *rhport);
return 0;
}
*rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
return 1;
}
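/*
 * array_index_nospec() clamps the user-controlled sysfs indices before
 * they are used to index vhcis[] and vdev[], closing the Spectre-v1
 * speculation window mentioned at the top of this file.
 */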
static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
__u32 port = 0, pdev_nr = 0, rhport = 0;
struct usb_hcd *hcd;
struct vhci_hcd *vhci_hcd;
int ret;
if (kstrtoint(buf, 10, &port) < 0)
return -EINVAL;
pdev_nr = port_to_pdev_nr(port);
rhport = port_to_rhport(port);
if (!valid_port(&pdev_nr, &rhport))
return -EINVAL;
hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
if (hcd == NULL) {
dev_err(dev, "port is not ready %u\n", port);
return -EAGAIN;
}
usbip_dbg_vhci_sysfs("rhport %d\n", rhport);
if ((port / VHCI_HC_PORTS) % 2)
vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_ss;
else
vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_hs;
ret = vhci_port_disconnect(vhci_hcd, rhport);
if (ret < 0)
return -EINVAL;
usbip_dbg_vhci_sysfs("Leave\n");
return count;
}
static DEVICE_ATTR_WO(detach);
static int valid_args(__u32 *pdev_nr, __u32 *rhport,
enum usb_device_speed speed)
{
if (!valid_port(pdev_nr, rhport))
return 0;
switch (speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
break;
default:
pr_err("Failed attach request for unsupported USB speed: %s\n",
usb_speed_string(speed));
return 0;
}
return 1;
}
/* Sysfs entry to establish a virtual connection */
/*
* To start a new USB/IP attachment, a userland program needs to setup a TCP
* connection and then write its socket descriptor with remote device
* information into this sysfs file.
*
* A remote device is virtually attached to the root-hub port of @rhport with
* @speed. @devid is embedded into a request to specify the remote device in a
* server host.
*
 * write() returns the written byte count on success, else a negative errno.
*/
static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct socket *socket;
int sockfd = 0;
__u32 port = 0, pdev_nr = 0, rhport = 0, devid = 0, speed = 0;
struct usb_hcd *hcd;
struct vhci_hcd *vhci_hcd;
struct vhci_device *vdev;
struct vhci *vhci;
int err;
unsigned long flags;
struct task_struct *tcp_rx = NULL;
struct task_struct *tcp_tx = NULL;
/*
* @rhport: port number of vhci_hcd
* @sockfd: socket descriptor of an established TCP connection
* @devid: unique device identifier in a remote host
* @speed: usb device speed in a remote host
*/
if (sscanf(buf, "%u %u %u %u", &port, &sockfd, &devid, &speed) != 4)
return -EINVAL;
pdev_nr = port_to_pdev_nr(port);
rhport = port_to_rhport(port);
usbip_dbg_vhci_sysfs("port(%u) pdev(%d) rhport(%u)\n",
port, pdev_nr, rhport);
usbip_dbg_vhci_sysfs("sockfd(%u) devid(%u) speed(%u)\n",
sockfd, devid, speed);
/* check received parameters */
if (!valid_args(&pdev_nr, &rhport, speed))
return -EINVAL;
hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
if (hcd == NULL) {
dev_err(dev, "port %d is not ready\n", port);
return -EAGAIN;
}
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
if (speed == USB_SPEED_SUPER)
vdev = &vhci->vhci_hcd_ss->vdev[rhport];
else
vdev = &vhci->vhci_hcd_hs->vdev[rhport];
mutex_lock(&vdev->ud.sysfs_lock);
/* Extract socket from fd. */
socket = sockfd_lookup(sockfd, &err);
if (!socket) {
dev_err(dev, "failed to lookup sock");
err = -EINVAL;
goto unlock_mutex;
}
if (socket->type != SOCK_STREAM) {
dev_err(dev, "Expecting SOCK_STREAM - found %d",
socket->type);
sockfd_put(socket);
err = -EINVAL;
goto unlock_mutex;
}
/* create threads before locking */
tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
if (IS_ERR(tcp_rx)) {
sockfd_put(socket);
err = -EINVAL;
goto unlock_mutex;
}
tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
if (IS_ERR(tcp_tx)) {
kthread_stop(tcp_rx);
sockfd_put(socket);
err = -EINVAL;
goto unlock_mutex;
}
/* get task structs now */
get_task_struct(tcp_rx);
get_task_struct(tcp_tx);
/* now begin lock until setting vdev status set */
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
if (vdev->ud.status != VDEV_ST_NULL) {
/* end of the lock */
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
sockfd_put(socket);
kthread_stop_put(tcp_rx);
kthread_stop_put(tcp_tx);
dev_err(dev, "port %d already used\n", rhport);
/*
* Will be retried from userspace
* if there's another free port.
*/
err = -EBUSY;
goto unlock_mutex;
}
dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
pdev_nr, rhport, sockfd);
dev_info(dev, "devid(%u) speed(%u) speed_str(%s)\n",
devid, speed, usb_speed_string(speed));
vdev->devid = devid;
vdev->speed = speed;
vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.tcp_rx = tcp_rx;
vdev->ud.tcp_tx = tcp_tx;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
usbip_kcov_handle_init(&vdev->ud);
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
/* end the lock */
wake_up_process(vdev->ud.tcp_rx);
wake_up_process(vdev->ud.tcp_tx);
rh_port_connect(vdev, speed);
dev_info(dev, "Device attached\n");
mutex_unlock(&vdev->ud.sysfs_lock);
return count;
unlock_mutex:
mutex_unlock(&vdev->ud.sysfs_lock);
return err;
}
static DEVICE_ATTR_WO(attach);
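/*
 * Usage sketch (hypothetical values): userspace writes four
 * space-separated integers, e.g.
 *   echo "0 7 65538 3" > /sys/devices/platform/vhci_hcd.0/attach
 * for port 0, sockfd 7, devid 65538 ((1 << 16) | 2, i.e. bus 1
 * device 2) and speed 3 (USB_SPEED_HIGH).
 */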
#define MAX_STATUS_NAME 16
struct status_attr {
struct device_attribute attr;
char name[MAX_STATUS_NAME+1];
};
static struct status_attr *status_attrs;
static void set_status_attr(int id)
{
struct status_attr *status;
status = status_attrs + id;
if (id == 0)
strcpy(status->name, "status");
else
snprintf(status->name, MAX_STATUS_NAME+1, "status.%d", id);
status->attr.attr.name = status->name;
status->attr.attr.mode = S_IRUGO;
status->attr.show = status_show;
sysfs_attr_init(&status->attr.attr);
}
static int init_status_attrs(void)
{
int id;
status_attrs = kcalloc(vhci_num_controllers, sizeof(struct status_attr),
GFP_KERNEL);
if (status_attrs == NULL)
return -ENOMEM;
for (id = 0; id < vhci_num_controllers; id++)
set_status_attr(id);
return 0;
}
static void finish_status_attrs(void)
{
kfree(status_attrs);
}
struct attribute_group vhci_attr_group = {
.attrs = NULL,
};
int vhci_init_attr_group(void)
{
struct attribute **attrs;
int ret, i;
attrs = kcalloc((vhci_num_controllers + 5), sizeof(struct attribute *),
GFP_KERNEL);
if (attrs == NULL)
return -ENOMEM;
ret = init_status_attrs();
if (ret) {
kfree(attrs);
return ret;
}
*attrs = &dev_attr_nports.attr;
*(attrs + 1) = &dev_attr_detach.attr;
*(attrs + 2) = &dev_attr_attach.attr;
*(attrs + 3) = &dev_attr_usbip_debug.attr;
for (i = 0; i < vhci_num_controllers; i++)
*(attrs + i + 4) = &((status_attrs + i)->attr.attr);
vhci_attr_group.attrs = attrs;
return 0;
}
void vhci_finish_attr_group(void)
{
finish_status_attrs();
kfree(vhci_attr_group.attrs);
}
| linux-master | drivers/usb/usbip/vhci_sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "vhci.h"
static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
{
struct vhci_priv *priv = ((struct vhci_priv *)urb->hcpriv);
struct vhci_device *vdev = priv->vdev;
usbip_dbg_vhci_tx("URB, local devnum %u, remote devid %u\n",
usb_pipedevice(urb->pipe), vdev->devid);
pdup->base.command = USBIP_CMD_SUBMIT;
pdup->base.seqnum = priv->seqnum;
pdup->base.devid = vdev->devid;
pdup->base.direction = usb_pipein(urb->pipe) ?
USBIP_DIR_IN : USBIP_DIR_OUT;
pdup->base.ep = usb_pipeendpoint(urb->pipe);
usbip_pack_pdu(pdup, urb, USBIP_CMD_SUBMIT, 1);
if (urb->setup_packet)
memcpy(pdup->u.cmd_submit.setup, urb->setup_packet, 8);
}
static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
{
struct vhci_priv *priv, *tmp;
unsigned long flags;
spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
list_move_tail(&priv->list, &vdev->priv_rx);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return priv;
}
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
static int vhci_send_cmd_submit(struct vhci_device *vdev)
{
struct usbip_iso_packet_descriptor *iso_buffer = NULL;
struct vhci_priv *priv = NULL;
struct scatterlist *sg;
struct msghdr msg;
struct kvec *iov;
size_t txsize;
size_t total_size = 0;
int iovnum;
int err = -ENOMEM;
int i;
while ((priv = dequeue_from_priv_tx(vdev)) != NULL) {
int ret;
struct urb *urb = priv->urb;
struct usbip_header pdu_header;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
priv->seqnum);
if (urb->num_sgs && usb_pipeout(urb->pipe))
iovnum = 2 + urb->num_sgs;
else
iovnum = 3;
iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
if (!iov) {
usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
if (urb->num_sgs)
urb->transfer_flags |= URB_DMA_MAP_SG;
/* 1. setup usbip_header */
setup_cmd_submit_pdu(&pdu_header, urb);
usbip_header_correct_endian(&pdu_header, 1);
iovnum = 0;
iov[iovnum].iov_base = &pdu_header;
iov[iovnum].iov_len = sizeof(pdu_header);
txsize += sizeof(pdu_header);
iovnum++;
/* 2. setup transfer buffer */
if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) {
if (urb->num_sgs &&
!usb_endpoint_xfer_isoc(&urb->ep->desc)) {
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
iov[iovnum].iov_base = sg_virt(sg);
iov[iovnum].iov_len = sg->length;
iovnum++;
}
} else {
iov[iovnum].iov_base = urb->transfer_buffer;
iov[iovnum].iov_len =
urb->transfer_buffer_length;
iovnum++;
}
txsize += urb->transfer_buffer_length;
}
/* 3. setup iso_packet_descriptor */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
ssize_t len = 0;
iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
if (!iso_buffer) {
usbip_event_add(&vdev->ud,
SDEV_EVENT_ERROR_MALLOC);
goto err_iso_buffer;
}
iov[iovnum].iov_base = iso_buffer;
iov[iovnum].iov_len = len;
iovnum++;
txsize += len;
}
ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum,
txsize);
if (ret != txsize) {
pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
txsize);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
err = -EPIPE;
goto err_tx;
}
kfree(iov);
/* This is only for isochronous case */
kfree(iso_buffer);
iso_buffer = NULL;
usbip_dbg_vhci_tx("send txdata\n");
total_size += txsize;
}
return total_size;
err_tx:
kfree(iso_buffer);
err_iso_buffer:
kfree(iov);
return err;
}
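/*
 * As on the device side, each CMD_SUBMIT goes out as one
 * kernel_sendmsg(): header, then the OUT payload (flat buffer or one
 * kvec per scatterlist entry), then the iso descriptor array if any.
 */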
static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
unsigned long flags;
spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
list_move_tail(&unlink->list, &vdev->unlink_rx);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return unlink;
}
spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
static int vhci_send_cmd_unlink(struct vhci_device *vdev)
{
struct vhci_unlink *unlink = NULL;
struct msghdr msg;
struct kvec iov;
size_t txsize;
size_t total_size = 0;
while ((unlink = dequeue_from_unlink_tx(vdev)) != NULL) {
int ret;
struct usbip_header pdu_header;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
usbip_dbg_vhci_tx("setup cmd unlink, %lu\n", unlink->seqnum);
/* 1. setup usbip_header */
pdu_header.base.command = USBIP_CMD_UNLINK;
pdu_header.base.seqnum = unlink->seqnum;
pdu_header.base.devid = vdev->devid;
pdu_header.base.ep = 0;
pdu_header.u.cmd_unlink.seqnum = unlink->unlink_seqnum;
usbip_header_correct_endian(&pdu_header, 1);
iov.iov_base = &pdu_header;
iov.iov_len = sizeof(pdu_header);
txsize = sizeof(pdu_header);
ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, &iov, 1, txsize);
if (ret != txsize) {
pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
txsize);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
return -1;
}
usbip_dbg_vhci_tx("send txdata\n");
total_size += txsize;
}
return total_size;
}
int vhci_tx_loop(void *data)
{
struct usbip_device *ud = data;
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
while (!kthread_should_stop()) {
if (vhci_send_cmd_submit(vdev) < 0)
break;
if (vhci_send_cmd_unlink(vdev) < 0)
break;
wait_event_interruptible(vdev->waitq_tx,
(!list_empty(&vdev->priv_tx) ||
!list_empty(&vdev->unlink_tx) ||
kthread_should_stop()));
usbip_dbg_vhci_tx("pending urbs ?, now wake up\n");
}
return 0;
}
| linux-master | drivers/usb/usbip/vhci_tx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <linux/device.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include "usbip_common.h"
#include "stub.h"
/*
* usbip_status shows the status of usbip-host as long as this driver is bound
* to the target device.
*/
static ssize_t usbip_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stub_device *sdev = dev_get_drvdata(dev);
int status;
if (!sdev) {
dev_err(dev, "sdev is null\n");
return -ENODEV;
}
spin_lock_irq(&sdev->ud.lock);
status = sdev->ud.status;
spin_unlock_irq(&sdev->ud.lock);
return sysfs_emit(buf, "%d\n", status);
}
static DEVICE_ATTR_RO(usbip_status);
/*
 * usbip_sockfd gets a socket descriptor of an established TCP connection
 * that is used to transfer usbip requests by kernel threads. -1 is a magic
 * value that tears the usbip connection down.
 */
static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct stub_device *sdev = dev_get_drvdata(dev);
int sockfd = 0;
struct socket *socket;
int rv;
struct task_struct *tcp_rx = NULL;
struct task_struct *tcp_tx = NULL;
if (!sdev) {
dev_err(dev, "sdev is null\n");
return -ENODEV;
}
rv = sscanf(buf, "%d", &sockfd);
if (rv != 1)
return -EINVAL;
if (sockfd != -1) {
int err;
dev_info(dev, "stub up\n");
mutex_lock(&sdev->ud.sysfs_lock);
spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_AVAILABLE) {
dev_err(dev, "not ready\n");
goto err;
}
socket = sockfd_lookup(sockfd, &err);
if (!socket) {
dev_err(dev, "failed to lookup sock");
goto err;
}
if (socket->type != SOCK_STREAM) {
dev_err(dev, "Expecting SOCK_STREAM - found %d",
socket->type);
goto sock_err;
}
/* unlock and create threads and get tasks */
spin_unlock_irq(&sdev->ud.lock);
tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
if (IS_ERR(tcp_rx)) {
sockfd_put(socket);
goto unlock_mutex;
}
tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
if (IS_ERR(tcp_tx)) {
kthread_stop(tcp_rx);
sockfd_put(socket);
goto unlock_mutex;
}
/* get task structs now */
get_task_struct(tcp_rx);
get_task_struct(tcp_tx);
/* lock and update sdev->ud state */
spin_lock_irq(&sdev->ud.lock);
sdev->ud.tcp_socket = socket;
sdev->ud.sockfd = sockfd;
sdev->ud.tcp_rx = tcp_rx;
sdev->ud.tcp_tx = tcp_tx;
sdev->ud.status = SDEV_ST_USED;
spin_unlock_irq(&sdev->ud.lock);
wake_up_process(sdev->ud.tcp_rx);
wake_up_process(sdev->ud.tcp_tx);
mutex_unlock(&sdev->ud.sysfs_lock);
} else {
dev_info(dev, "stub down\n");
mutex_lock(&sdev->ud.sysfs_lock);
spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_USED)
goto err;
spin_unlock_irq(&sdev->ud.lock);
usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
mutex_unlock(&sdev->ud.sysfs_lock);
}
return count;
sock_err:
sockfd_put(socket);
err:
spin_unlock_irq(&sdev->ud.lock);
unlock_mutex:
mutex_unlock(&sdev->ud.sysfs_lock);
return -EINVAL;
}
static DEVICE_ATTR_WO(usbip_sockfd);
static struct attribute *usbip_attrs[] = {
&dev_attr_usbip_status.attr,
&dev_attr_usbip_sockfd.attr,
&dev_attr_usbip_debug.attr,
NULL,
};
ATTRIBUTE_GROUPS(usbip);
static void stub_shutdown_connection(struct usbip_device *ud)
{
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
/*
 * When removing an exported device, a kernel panic sometimes occurred,
 * with EIP in sk_wait_data of the stub_rx thread. Is this because
 * sk_wait_data returned even though the stub_rx thread had already
 * finished in step 1?
 */
if (ud->tcp_socket) {
dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
/* 1. stop threads */
if (ud->tcp_rx) {
kthread_stop_put(ud->tcp_rx);
ud->tcp_rx = NULL;
}
if (ud->tcp_tx) {
kthread_stop_put(ud->tcp_tx);
ud->tcp_tx = NULL;
}
/*
* 2. close the socket
*
* tcp_socket is freed after threads are killed so that usbip_xmit does
* not touch NULL socket.
*/
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
ud->sockfd = -1;
}
/* 3. free used data */
stub_device_cleanup_urbs(sdev);
/* 4. free stub_unlink */
{
unsigned long flags;
struct stub_unlink *unlink, *tmp;
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
list_del(&unlink->list);
kfree(unlink);
}
list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free,
list) {
list_del(&unlink->list);
kfree(unlink);
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
}
}
static void stub_device_reset(struct usbip_device *ud)
{
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
struct usb_device *udev = sdev->udev;
int ret;
dev_dbg(&udev->dev, "device reset");
ret = usb_lock_device_for_reset(udev, NULL);
if (ret < 0) {
dev_err(&udev->dev, "lock for reset\n");
spin_lock_irq(&ud->lock);
ud->status = SDEV_ST_ERROR;
spin_unlock_irq(&ud->lock);
return;
}
/* try to reset the device */
ret = usb_reset_device(udev);
usb_unlock_device(udev);
spin_lock_irq(&ud->lock);
if (ret) {
dev_err(&udev->dev, "device reset\n");
ud->status = SDEV_ST_ERROR;
} else {
dev_info(&udev->dev, "device reset\n");
ud->status = SDEV_ST_AVAILABLE;
}
spin_unlock_irq(&ud->lock);
}
static void stub_device_unusable(struct usbip_device *ud)
{
spin_lock_irq(&ud->lock);
ud->status = SDEV_ST_ERROR;
spin_unlock_irq(&ud->lock);
}
/**
* stub_device_alloc - allocate a new stub_device struct
* @udev: usb_device of a new device
*
* Allocates and initializes a new stub_device struct.
*/
static struct stub_device *stub_device_alloc(struct usb_device *udev)
{
struct stub_device *sdev;
int busnum = udev->bus->busnum;
int devnum = udev->devnum;
dev_dbg(&udev->dev, "allocating stub device");
/* yes, it's a new device */
sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL);
if (!sdev)
return NULL;
sdev->udev = usb_get_dev(udev);
/*
* devid is defined with devnum when this driver is first allocated.
* devnum may change later if a device is reset. However, devid never
* changes during a usbip connection.
*/
sdev->devid = (busnum << 16) | devnum;
sdev->ud.side = USBIP_STUB;
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
mutex_init(&sdev->ud.sysfs_lock);
sdev->ud.tcp_socket = NULL;
sdev->ud.sockfd = -1;
INIT_LIST_HEAD(&sdev->priv_init);
INIT_LIST_HEAD(&sdev->priv_tx);
INIT_LIST_HEAD(&sdev->priv_free);
INIT_LIST_HEAD(&sdev->unlink_free);
INIT_LIST_HEAD(&sdev->unlink_tx);
spin_lock_init(&sdev->priv_lock);
init_waitqueue_head(&sdev->tx_waitq);
sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
sdev->ud.eh_ops.reset = stub_device_reset;
sdev->ud.eh_ops.unusable = stub_device_unusable;
usbip_start_eh(&sdev->ud);
dev_dbg(&udev->dev, "register new device\n");
return sdev;
}
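/*
 * Example devid encoding: busnum 3, devnum 5 gives
 * (3 << 16) | 5 == 0x00030005.
 */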
static void stub_device_free(struct stub_device *sdev)
{
kfree(sdev);
}
static int stub_probe(struct usb_device *udev)
{
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
int rc = 0;
char save_status;
dev_dbg(&udev->dev, "Enter probe\n");
/* Not sure if this is our device. Allocate here to avoid
* calling alloc while holding busid_table lock.
*/
sdev = stub_device_alloc(udev);
if (!sdev)
return -ENOMEM;
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
(busid_priv->status == STUB_BUSID_OTHER)) {
dev_info(&udev->dev,
"%s is not in match_busid table... skip!\n",
udev_busid);
/*
* Return value should be ENODEV or ENOXIO to continue trying
* other matched drivers by the driver core.
* See driver_probe_device() in driver/base/dd.c
*/
rc = -ENODEV;
if (!busid_priv)
goto sdev_free;
goto call_put_busid_priv;
}
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
udev_busid);
rc = -ENODEV;
goto call_put_busid_priv;
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
dev_dbg(&udev->dev,
"%s is attached on vhci_hcd... skip!\n",
udev_busid);
rc = -ENODEV;
goto call_put_busid_priv;
}
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
udev->bus->busnum, udev->devnum);
busid_priv->shutdown_busid = 0;
/* set private data to usb_device */
dev_set_drvdata(&udev->dev, sdev);
busid_priv->sdev = sdev;
busid_priv->udev = udev;
save_status = busid_priv->status;
busid_priv->status = STUB_BUSID_ALLOC;
/* release the busid_lock */
put_busid_priv(busid_priv);
/*
* Claim this hub port.
* It doesn't matter what value we pass as owner
* (struct dev_state) as long as it is unique.
*/
rc = usb_hub_claim_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to claim port\n");
goto err_port;
}
return 0;
err_port:
dev_set_drvdata(&udev->dev, NULL);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
busid_priv->sdev = NULL;
busid_priv->status = save_status;
spin_unlock(&busid_priv->busid_lock);
/* lock is released - go to free */
goto sdev_free;
call_put_busid_priv:
/* release the busid_lock */
put_busid_priv(busid_priv);
sdev_free:
usb_put_dev(udev);
stub_device_free(sdev);
return rc;
}
static void shutdown_busid(struct bus_id_priv *busid_priv)
{
usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED);
/* wait for the stop of the event handler */
usbip_stop_eh(&busid_priv->sdev->ud);
}
/*
* called in usb_disconnect() or usb_deregister()
* but only if actconfig(active configuration) exists
*/
static void stub_disconnect(struct usb_device *udev)
{
struct stub_device *sdev;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
int rc;
dev_dbg(&udev->dev, "Enter disconnect\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
BUG();
return;
}
sdev = dev_get_drvdata(&udev->dev);
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
/* release busid_lock */
put_busid_priv(busid_priv);
return;
}
dev_set_drvdata(&udev->dev, NULL);
/* release busid_lock before call to remove device files */
put_busid_priv(busid_priv);
/*
* NOTE: rx/tx threads are invoked for each usb_device.
*/
/* release port */
rc = usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
return;
}
/* If usb reset is called from event handler */
if (usbip_in_eh(current))
return;
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
if (!busid_priv->shutdown_busid)
busid_priv->shutdown_busid = 1;
/* release busid_lock */
spin_unlock(&busid_priv->busid_lock);
/* shutdown the current connection */
shutdown_busid(busid_priv);
usb_put_dev(sdev->udev);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
/* free sdev */
busid_priv->sdev = NULL;
stub_device_free(sdev);
if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
/* release busid_lock */
spin_unlock(&busid_priv->busid_lock);
}
#ifdef CONFIG_PM
/* These functions need usb_port_suspend and usb_port_resume,
* which reside in drivers/usb/core/usb.h. Skip for now. */
static int stub_suspend(struct usb_device *udev, pm_message_t message)
{
dev_dbg(&udev->dev, "stub_suspend\n");
return 0;
}
static int stub_resume(struct usb_device *udev, pm_message_t message)
{
dev_dbg(&udev->dev, "stub_resume\n");
return 0;
}
#endif /* CONFIG_PM */
struct usb_device_driver stub_driver = {
.name = "usbip-host",
.probe = stub_probe,
.disconnect = stub_disconnect,
#ifdef CONFIG_PM
.suspend = stub_suspend,
.resume = stub_resume,
#endif
.supports_autosuspend = 0,
.dev_groups = usbip_groups,
};
| linux-master | drivers/usb/usbip/stub_dev.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <[email protected]>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <[email protected]>
* Krzysztof Opasiak <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/byteorder/generic.h>
#include "usbip_common.h"
#include "vudc.h"
#define VIRTUAL_ENDPOINTS (1 /* ep0 */ + 15 /* in eps */ + 15 /* out eps */)
/* urb-related structures alloc / free */
static void free_urb(struct urb *urb)
{
if (!urb)
return;
kfree(urb->setup_packet);
urb->setup_packet = NULL;
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
usb_free_urb(urb);
}
struct urbp *alloc_urbp(void)
{
struct urbp *urb_p;
urb_p = kzalloc(sizeof(*urb_p), GFP_KERNEL);
if (!urb_p)
return urb_p;
urb_p->urb = NULL;
urb_p->ep = NULL;
INIT_LIST_HEAD(&urb_p->urb_entry);
return urb_p;
}
static void free_urbp(struct urbp *urb_p)
{
kfree(urb_p);
}
void free_urbp_and_urb(struct urbp *urb_p)
{
if (!urb_p)
return;
free_urb(urb_p->urb);
free_urbp(urb_p);
}
/* utilities; almost verbatim from dummy_hcd.c */
/* called with spinlock held */
static void nuke(struct vudc *udc, struct vep *ep)
{
struct vrequest *req;
while (!list_empty(&ep->req_queue)) {
req = list_first_entry(&ep->req_queue, struct vrequest,
req_entry);
list_del_init(&req->req_entry);
req->req.status = -ESHUTDOWN;
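/*
 * Drop udc->lock around the giveback: the completion callback
 * may re-enter the UDC and try to take the lock again.
 */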
spin_unlock(&udc->lock);
usb_gadget_giveback_request(&ep->ep, &req->req);
spin_lock(&udc->lock);
}
}
/* caller must hold lock */
static void stop_activity(struct vudc *udc)
{
int i;
struct urbp *urb_p, *tmp;
udc->address = 0;
for (i = 0; i < VIRTUAL_ENDPOINTS; i++)
nuke(udc, &udc->ep[i]);
list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
list_del(&urb_p->urb_entry);
free_urbp_and_urb(urb_p);
}
}
struct vep *vudc_find_endpoint(struct vudc *udc, u8 address)
{
int i;
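/* ep0 is bidirectional: both address 0x00 and 0x80 resolve to it */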
if ((address & ~USB_DIR_IN) == 0)
return &udc->ep[0];
for (i = 1; i < VIRTUAL_ENDPOINTS; i++) {
struct vep *ep = &udc->ep[i];
if (!ep->desc)
continue;
if (ep->desc->bEndpointAddress == address)
return ep;
}
return NULL;
}
/* gadget ops */
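/*
 * Emulate the 11-bit USB frame counter from the time elapsed since the
 * controller was started (one frame per millisecond).
 */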
static int vgadget_get_frame(struct usb_gadget *_gadget)
{
struct timespec64 now;
struct vudc *udc = usb_gadget_to_vudc(_gadget);
ktime_get_ts64(&now);
return ((now.tv_sec - udc->start_time.tv_sec) * 1000 +
(now.tv_nsec - udc->start_time.tv_nsec) / NSEC_PER_MSEC)
& 0x7FF;
}
static int vgadget_set_selfpowered(struct usb_gadget *_gadget, int value)
{
struct vudc *udc = usb_gadget_to_vudc(_gadget);
if (value)
udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
else
udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
return 0;
}
static int vgadget_pullup(struct usb_gadget *_gadget, int value)
{
struct vudc *udc = usb_gadget_to_vudc(_gadget);
unsigned long flags;
int ret;
spin_lock_irqsave(&udc->lock, flags);
value = !!value;
if (value == udc->pullup)
goto unlock;
udc->pullup = value;
if (value) {
udc->gadget.speed = min_t(u8, USB_SPEED_HIGH,
udc->driver->max_speed);
udc->ep[0].ep.maxpacket = 64;
/*
* This is the first place where we can ask our
* gadget driver for descriptors.
*/
ret = get_gadget_descs(udc);
if (ret) {
dev_err(&udc->gadget.dev, "Unable go get desc: %d", ret);
goto unlock;
}
spin_unlock_irqrestore(&udc->lock, flags);
usbip_start_eh(&udc->ud);
} else {
/* Invalidate descriptors */
udc->desc_cached = 0;
spin_unlock_irqrestore(&udc->lock, flags);
usbip_event_add(&udc->ud, VUDC_EVENT_REMOVED);
usbip_stop_eh(&udc->ud); /* Wait for eh completion */
}
return 0;
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int vgadget_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct vudc *udc = usb_gadget_to_vudc(g);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
udc->driver = driver;
udc->pullup = udc->connected = udc->desc_cached = 0;
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int vgadget_udc_stop(struct usb_gadget *g)
{
struct vudc *udc = usb_gadget_to_vudc(g);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
udc->driver = NULL;
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static const struct usb_gadget_ops vgadget_ops = {
.get_frame = vgadget_get_frame,
.set_selfpowered = vgadget_set_selfpowered,
.pullup = vgadget_pullup,
.udc_start = vgadget_udc_start,
.udc_stop = vgadget_udc_stop,
};
/* endpoint ops */
static int vep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct vep *ep;
struct vudc *udc;
unsigned int maxp;
unsigned long flags;
ep = to_vep(_ep);
udc = ep_to_vudc(ep);
if (!_ep || !desc || ep->desc || _ep->caps.type_control
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
if (!udc->driver)
return -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags);
maxp = usb_endpoint_maxp(desc);
_ep->maxpacket = maxp;
ep->desc = desc;
ep->type = usb_endpoint_type(desc);
ep->halted = ep->wedged = 0;
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int vep_disable(struct usb_ep *_ep)
{
struct vep *ep;
struct vudc *udc;
unsigned long flags;
ep = to_vep(_ep);
udc = ep_to_vudc(ep);
if (!_ep || !ep->desc || _ep->caps.type_control)
return -EINVAL;
spin_lock_irqsave(&udc->lock, flags);
ep->desc = NULL;
nuke(udc, ep);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static struct usb_request *vep_alloc_request(struct usb_ep *_ep,
gfp_t mem_flags)
{
struct vrequest *req;
if (!_ep)
return NULL;
req = kzalloc(sizeof(*req), mem_flags);
if (!req)
return NULL;
INIT_LIST_HEAD(&req->req_entry);
return &req->req;
}
static void vep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct vrequest *req;
/* ep is always valid here - see usb_ep_free_request() */
if (!_req)
return;
req = to_vrequest(_req);
kfree(req);
}
static int vep_queue(struct usb_ep *_ep, struct usb_request *_req,
gfp_t mem_flags)
{
struct vep *ep;
struct vrequest *req;
struct vudc *udc;
unsigned long flags;
if (!_ep || !_req)
return -EINVAL;
ep = to_vep(_ep);
req = to_vrequest(_req);
udc = ep_to_vudc(ep);
spin_lock_irqsave(&udc->lock, flags);
_req->actual = 0;
_req->status = -EINPROGRESS;
list_add_tail(&req->req_entry, &ep->req_queue);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int vep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct vep *ep;
struct vrequest *req;
struct vudc *udc;
struct vrequest *lst;
unsigned long flags;
int ret = -EINVAL;
if (!_ep || !_req)
return ret;
ep = to_vep(_ep);
req = to_vrequest(_req);
udc = req->udc;
if (!udc->driver)
return -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags);
list_for_each_entry(lst, &ep->req_queue, req_entry) {
if (&lst->req == _req) {
list_del_init(&lst->req_entry);
_req->status = -ECONNRESET;
ret = 0;
break;
}
}
spin_unlock_irqrestore(&udc->lock, flags);
if (ret == 0)
usb_gadget_giveback_request(_ep, _req);
return ret;
}
static int
vep_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
struct vep *ep;
struct vudc *udc;
unsigned long flags;
int ret = 0;
ep = to_vep(_ep);
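/* to_vep() is just container_of(), so it is safe before the NULL check */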
if (!_ep)
return -EINVAL;
udc = ep_to_vudc(ep);
if (!udc->driver)
return -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags);
if (!value)
ep->halted = ep->wedged = 0;
else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
!list_empty(&ep->req_queue))
ret = -EAGAIN;
else {
ep->halted = 1;
if (wedged)
ep->wedged = 1;
}
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static int
vep_set_halt(struct usb_ep *_ep, int value)
{
return vep_set_halt_and_wedge(_ep, value, 0);
}
static int vep_set_wedge(struct usb_ep *_ep)
{
return vep_set_halt_and_wedge(_ep, 1, 1);
}
static const struct usb_ep_ops vep_ops = {
.enable = vep_enable,
.disable = vep_disable,
.alloc_request = vep_alloc_request,
.free_request = vep_free_request,
.queue = vep_queue,
.dequeue = vep_dequeue,
.set_halt = vep_set_halt,
.set_wedge = vep_set_wedge,
};
/* shutdown / reset / error handlers */
static void vudc_shutdown(struct usbip_device *ud)
{
struct vudc *udc = container_of(ud, struct vudc, ud);
int call_disconnect = 0;
unsigned long flags;
dev_dbg(&udc->pdev->dev, "device shutdown");
if (ud->tcp_socket)
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
if (ud->tcp_rx) {
kthread_stop_put(ud->tcp_rx);
ud->tcp_rx = NULL;
}
if (ud->tcp_tx) {
kthread_stop_put(ud->tcp_tx);
ud->tcp_tx = NULL;
}
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
}
spin_lock_irqsave(&udc->lock, flags);
stop_activity(udc);
if (udc->connected && udc->driver->disconnect)
call_disconnect = 1;
udc->connected = 0;
spin_unlock_irqrestore(&udc->lock, flags);
if (call_disconnect)
udc->driver->disconnect(&udc->gadget);
}
static void vudc_device_reset(struct usbip_device *ud)
{
struct vudc *udc = container_of(ud, struct vudc, ud);
unsigned long flags;
dev_dbg(&udc->pdev->dev, "device reset");
spin_lock_irqsave(&udc->lock, flags);
stop_activity(udc);
spin_unlock_irqrestore(&udc->lock, flags);
if (udc->driver)
usb_gadget_udc_reset(&udc->gadget, udc->driver);
spin_lock_irqsave(&ud->lock, flags);
ud->status = SDEV_ST_AVAILABLE;
spin_unlock_irqrestore(&ud->lock, flags);
}
static void vudc_device_unusable(struct usbip_device *ud)
{
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
ud->status = SDEV_ST_ERROR;
spin_unlock_irqrestore(&ud->lock, flags);
}
/* device setup / cleanup */
struct vudc_device *alloc_vudc_device(int devid)
{
struct vudc_device *udc_dev;
udc_dev = kzalloc(sizeof(*udc_dev), GFP_KERNEL);
if (!udc_dev)
return NULL;
INIT_LIST_HEAD(&udc_dev->dev_entry);
udc_dev->pdev = platform_device_alloc(GADGET_NAME, devid);
if (!udc_dev->pdev) {
kfree(udc_dev);
udc_dev = NULL;
}
return udc_dev;
}
void put_vudc_device(struct vudc_device *udc_dev)
{
platform_device_put(udc_dev->pdev);
kfree(udc_dev);
}
static int init_vudc_hw(struct vudc *udc)
{
int i;
struct usbip_device *ud = &udc->ud;
struct vep *ep;
udc->ep = kcalloc(VIRTUAL_ENDPOINTS, sizeof(*udc->ep), GFP_KERNEL);
if (!udc->ep)
goto nomem_ep;
INIT_LIST_HEAD(&udc->gadget.ep_list);
/* create ep0 and 15 in, 15 out general purpose eps */
for (i = 0; i < VIRTUAL_ENDPOINTS; ++i) {
int is_out = i % 2;
int num = (i + 1) / 2;
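/* odd slots are OUT endpoints, even slots are IN; slot 0 is ep0 */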
ep = &udc->ep[i];
sprintf(ep->name, "ep%d%s", num,
i ? (is_out ? "out" : "in") : "");
ep->ep.name = ep->name;
ep->ep.ops = &vep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, ~0);
ep->ep.max_streams = 16;
ep->gadget = &udc->gadget;
INIT_LIST_HEAD(&ep->req_queue);
if (i == 0) {
/* ep0 */
ep->ep.caps.type_control = true;
ep->ep.caps.dir_out = true;
ep->ep.caps.dir_in = true;
udc->gadget.ep0 = &ep->ep;
} else {
/* All other eps */
ep->ep.caps.type_iso = true;
ep->ep.caps.type_int = true;
ep->ep.caps.type_bulk = true;
if (is_out)
ep->ep.caps.dir_out = true;
else
ep->ep.caps.dir_in = true;
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
}
}
spin_lock_init(&udc->lock);
spin_lock_init(&udc->lock_tx);
INIT_LIST_HEAD(&udc->urb_queue);
INIT_LIST_HEAD(&udc->tx_queue);
init_waitqueue_head(&udc->tx_waitq);
spin_lock_init(&ud->lock);
mutex_init(&ud->sysfs_lock);
ud->status = SDEV_ST_AVAILABLE;
ud->side = USBIP_VUDC;
ud->eh_ops.shutdown = vudc_shutdown;
ud->eh_ops.reset = vudc_device_reset;
ud->eh_ops.unusable = vudc_device_unusable;
v_init_timer(udc);
return 0;
nomem_ep:
return -ENOMEM;
}
static void cleanup_vudc_hw(struct vudc *udc)
{
kfree(udc->ep);
}
/* platform driver ops */
int vudc_probe(struct platform_device *pdev)
{
struct vudc *udc;
int ret = -ENOMEM;
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
goto out;
udc->gadget.name = GADGET_NAME;
udc->gadget.ops = &vgadget_ops;
udc->gadget.max_speed = USB_SPEED_HIGH;
udc->gadget.dev.parent = &pdev->dev;
udc->pdev = pdev;
ret = init_vudc_hw(udc);
if (ret)
goto err_init_vudc_hw;
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (ret < 0)
goto err_add_udc;
platform_set_drvdata(pdev, udc);
return ret;
err_add_udc:
cleanup_vudc_hw(udc);
err_init_vudc_hw:
kfree(udc);
out:
return ret;
}
int vudc_remove(struct platform_device *pdev)
{
struct vudc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
cleanup_vudc_hw(udc);
kfree(udc);
return 0;
}
| linux-master | drivers/usb/usbip/vudc_dev.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <linux/kthread.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
/* be in spin_lock_irqsave(&sdev->priv_lock, flags) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
__u32 status)
{
struct stub_unlink *unlink;
unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
if (!unlink) {
usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
unlink->seqnum = seqnum;
unlink->status = status;
list_add_tail(&unlink->list, &sdev->unlink_tx);
}
/**
* stub_complete - completion handler of a usbip urb
* @urb: pointer to the urb completed
*
* When a urb has completed, the USB core calls this function, usually in
* interrupt context. To return the result of the urb, the completed urb is
* linked to the pending tx list and the tx thread is woken up.
*
*/
void stub_complete(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
unsigned long flags;
usbip_dbg_stub_tx("complete! status %d\n", urb->status);
switch (urb->status) {
case 0:
/* OK */
break;
case -ENOENT:
dev_info(&urb->dev->dev,
"stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
return;
case -ECONNRESET:
dev_info(&urb->dev->dev,
"unlinked by a call to usb_unlink_urb()\n");
break;
case -EPIPE:
dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
usb_pipeendpoint(urb->pipe));
break;
case -ESHUTDOWN:
dev_info(&urb->dev->dev, "device removed?\n");
break;
default:
dev_info(&urb->dev->dev,
"urb completion with non-zero status %d\n",
urb->status);
break;
}
/*
* If the server breaks a single SG request into several URBs, the
* URBs must be reassembled before the completed URB is sent back to
* the vhci. Don't wake up the tx thread until all the URBs have
* completed.
*/
if (priv->sgl) {
priv->completed_urbs++;
/* Only save the first error status */
if (urb->status && !priv->urb_status)
priv->urb_status = urb->status;
if (priv->completed_urbs < priv->num_urbs)
return;
}
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
if (sdev->ud.tcp_socket == NULL) {
usbip_dbg_stub_tx("ignore urb for closed connection\n");
/* It will be freed in stub_device_cleanup_urbs(). */
} else if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
stub_free_priv_and_urb(priv);
} else {
list_move_tail(&priv->list, &sdev->priv_tx);
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
/* wake up tx_thread */
wake_up(&sdev->tx_waitq);
}
static inline void setup_base_pdu(struct usbip_header_basic *base,
__u32 command, __u32 seqnum)
{
base->command = command;
base->seqnum = seqnum;
base->devid = 0;
base->ep = 0;
base->direction = 0;
}
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
struct stub_unlink *unlink)
{
setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
rpdu->u.ret_unlink.status = unlink->status;
}
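/*
 * Pop the oldest completed priv off priv_tx, moving it to priv_free so
 * it can be released after its result has been sent.
 */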
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
unsigned long flags;
struct stub_priv *priv, *tmp;
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
list_move_tail(&priv->list, &sdev->priv_free);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return priv;
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return NULL;
}
static int stub_send_ret_submit(struct stub_device *sdev)
{
unsigned long flags;
struct stub_priv *priv, *tmp;
struct msghdr msg;
size_t txsize;
size_t total_size = 0;
while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
struct urb *urb = priv->urbs[0];
struct usbip_header pdu_header;
struct usbip_iso_packet_descriptor *iso_buffer = NULL;
struct kvec *iov = NULL;
struct scatterlist *sg;
u32 actual_length = 0;
int iovnum = 0;
int ret;
int i;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
if (urb->actual_length > 0 && !urb->transfer_buffer &&
!urb->num_sgs) {
dev_err(&sdev->udev->dev,
"urb: actual_length %d transfer_buffer null\n",
urb->actual_length);
return -1;
}
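/*
 * Count the iovecs needed: one for the usbip header, plus the
 * transfer data (one iovec per ISO packet, per SG entry, or per
 * split URB) and, for ISO pipes, one more for the packet
 * descriptors.
 */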
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
iovnum = 2 + urb->number_of_packets;
else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
urb->num_sgs)
iovnum = 1 + urb->num_sgs;
else if (usb_pipein(urb->pipe) && priv->sgl)
iovnum = 1 + priv->num_urbs;
else
iovnum = 2;
iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);
if (!iov) {
usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
return -1;
}
iovnum = 0;
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
pdu_header.base.seqnum);
if (priv->sgl) {
for (i = 0; i < priv->num_urbs; i++)
actual_length += priv->urbs[i]->actual_length;
pdu_header.u.ret_submit.status = priv->urb_status;
pdu_header.u.ret_submit.actual_length = actual_length;
}
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
iov[iovnum].iov_len = sizeof(pdu_header);
iovnum++;
txsize += sizeof(pdu_header);
/* 2. setup transfer buffer */
if (usb_pipein(urb->pipe) && priv->sgl) {
/* If the server split a single SG request into several
* URBs because the server's HCD doesn't support SG,
* reassemble the split URB buffers into a single
* return command.
*/
for (i = 0; i < priv->num_urbs; i++) {
iov[iovnum].iov_base =
priv->urbs[i]->transfer_buffer;
iov[iovnum].iov_len =
priv->urbs[i]->actual_length;
iovnum++;
}
txsize += actual_length;
} else if (usb_pipein(urb->pipe) &&
usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
urb->actual_length > 0) {
if (urb->num_sgs) {
unsigned int copy = urb->actual_length;
int size;
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
if (copy == 0)
break;
if (copy < sg->length)
size = copy;
else
size = sg->length;
iov[iovnum].iov_base = sg_virt(sg);
iov[iovnum].iov_len = size;
iovnum++;
copy -= size;
}
} else {
iov[iovnum].iov_base = urb->transfer_buffer;
iov[iovnum].iov_len = urb->actual_length;
iovnum++;
}
txsize += urb->actual_length;
} else if (usb_pipein(urb->pipe) &&
usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
/*
* For isochronous packets: actual length is the sum of
* the actual lengths of the individual packets, but as
* the packet offsets are not changed there will be
* padding between the packets. To use the bandwidth
* optimally, the padding is not transmitted.
*/
int i;
for (i = 0; i < urb->number_of_packets; i++) {
iov[iovnum].iov_base = urb->transfer_buffer +
urb->iso_frame_desc[i].offset;
iov[iovnum].iov_len =
urb->iso_frame_desc[i].actual_length;
iovnum++;
txsize += urb->iso_frame_desc[i].actual_length;
}
if (txsize != sizeof(pdu_header) + urb->actual_length) {
dev_err(&sdev->udev->dev,
"actual length of urb %d does not match iso packet sizes %zu\n",
urb->actual_length,
txsize-sizeof(pdu_header));
kfree(iov);
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_TCP);
return -1;
}
}
/* 3. setup iso_packet_descriptor */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
ssize_t len = 0;
iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
if (!iso_buffer) {
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_MALLOC);
kfree(iov);
return -1;
}
iov[iovnum].iov_base = iso_buffer;
iov[iovnum].iov_len = len;
txsize += len;
iovnum++;
}
ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
iov, iovnum, txsize);
if (ret != txsize) {
dev_err(&sdev->udev->dev,
"sendmsg failed!, retval %d for %zd\n",
ret, txsize);
kfree(iov);
kfree(iso_buffer);
usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
return -1;
}
kfree(iov);
kfree(iso_buffer);
total_size += txsize;
}
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
stub_free_priv_and_urb(priv);
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return total_size;
}
static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
unsigned long flags;
struct stub_unlink *unlink, *tmp;
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
list_move_tail(&unlink->list, &sdev->unlink_free);
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return unlink;
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return NULL;
}
static int stub_send_ret_unlink(struct stub_device *sdev)
{
unsigned long flags;
struct stub_unlink *unlink, *tmp;
struct msghdr msg;
struct kvec iov[1];
size_t txsize;
size_t total_size = 0;
while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
int ret;
struct usbip_header pdu_header;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);
/* 1. setup usbip_header */
setup_ret_unlink_pdu(&pdu_header, unlink);
usbip_header_correct_endian(&pdu_header, 1);
iov[0].iov_base = &pdu_header;
iov[0].iov_len = sizeof(pdu_header);
txsize += sizeof(pdu_header);
ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
1, txsize);
if (ret != txsize) {
dev_err(&sdev->udev->dev,
"sendmsg failed!, retval %d for %zd\n",
ret, txsize);
usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
return -1;
}
usbip_dbg_stub_tx("send txdata\n");
total_size += txsize;
}
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
list_del(&unlink->list);
kfree(unlink);
}
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return total_size;
}
int stub_tx_loop(void *data)
{
struct usbip_device *ud = data;
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
/*
* send_ret_submit comes before send_ret_unlink. stub_rx looks
* only at the priv_init queue. If a URB completes before
* CMD_UNLINK is received, the priv is moved to the priv_tx
* queue and stub_rx does not find the target priv. In that
* case, vhci_rx receives the result of the submit request
* first and then the result of the unlink request. The
* result of the submit is given back to the usbcore as the
* completion of the unlinked URB, and the result of the
* unlink is ignored. This is fine because a driver that calls
* usb_unlink_urb() learns the unlink was too late from the
* status of the given-back URB, which carries the status of
* the completed submit.
*/
if (stub_send_ret_submit(sdev) < 0)
break;
if (stub_send_ret_unlink(sdev) < 0)
break;
wait_event_interruptible(sdev->tx_waitq,
(!list_empty(&sdev->priv_tx) ||
!list_empty(&sdev->unlink_tx) ||
kthread_should_stop()));
}
return 0;
}
| linux-master | drivers/usb/usbip/stub_tx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
#define DRIVER_AUTHOR "Takahiro Hirofuchi"
#define DRIVER_DESC "USB/IP Host Driver"
struct kmem_cache *stub_priv_cache;
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
* remote host.
*/
#define MAX_BUSID 16
static struct bus_id_priv busid_table[MAX_BUSID];
static DEFINE_SPINLOCK(busid_table_lock);
static void init_busid_table(void)
{
int i;
/*
* This also sets the busid_table[i].status to
* STUB_BUSID_OTHER, which is 0.
*/
memset(busid_table, 0, sizeof(busid_table));
for (i = 0; i < MAX_BUSID; i++)
spin_lock_init(&busid_table[i].busid_lock);
}
/*
* Find the index of the busid by name.
* Must be called with busid_table_lock held.
*/
static int get_busid_idx(const char *busid)
{
int i;
int idx = -1;
for (i = 0; i < MAX_BUSID; i++) {
spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0] &&
!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
idx = i;
spin_unlock(&busid_table[i].busid_lock);
break;
}
spin_unlock(&busid_table[i].busid_lock);
}
return idx;
}
/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
struct bus_id_priv *get_busid_priv(const char *busid)
{
int idx;
struct bus_id_priv *bid = NULL;
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
if (idx >= 0) {
bid = &(busid_table[idx]);
/* get busid_lock before returning */
spin_lock(&bid->busid_lock);
}
spin_unlock(&busid_table_lock);
return bid;
}
void put_busid_priv(struct bus_id_priv *bid)
{
if (bid)
spin_unlock(&bid->busid_lock);
}
static int add_match_busid(char *busid)
{
int i;
int ret = -1;
spin_lock(&busid_table_lock);
/* already registered? */
if (get_busid_idx(busid) >= 0) {
ret = 0;
goto out;
}
for (i = 0; i < MAX_BUSID; i++) {
spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
strscpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
ret = 0;
spin_unlock(&busid_table[i].busid_lock);
break;
}
spin_unlock(&busid_table[i].busid_lock);
}
out:
spin_unlock(&busid_table_lock);
return ret;
}
int del_match_busid(char *busid)
{
int idx;
int ret = -1;
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
if (idx < 0)
goto out;
/* found */
ret = 0;
spin_lock(&busid_table[idx].busid_lock);
if (busid_table[idx].status == STUB_BUSID_OTHER)
memset(busid_table[idx].name, 0, BUSID_SIZE);
if ((busid_table[idx].status != STUB_BUSID_OTHER) &&
(busid_table[idx].status != STUB_BUSID_ADDED))
busid_table[idx].status = STUB_BUSID_REMOV;
spin_unlock(&busid_table[idx].busid_lock);
out:
spin_unlock(&busid_table_lock);
return ret;
}
static ssize_t match_busid_show(struct device_driver *drv, char *buf)
{
int i;
char *out = buf;
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++) {
spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
out += sprintf(out, "%s ", busid_table[i].name);
spin_unlock(&busid_table[i].busid_lock);
}
spin_unlock(&busid_table_lock);
out += sprintf(out, "\n");
return out - buf;
}
static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
size_t count)
{
char busid[BUSID_SIZE];
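/* expected input: "add <busid>" or "del <busid>" */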
if (count < 5)
return -EINVAL;
/* busid needs to include \0 termination */
if (strscpy(busid, buf + 4, BUSID_SIZE) < 0)
return -EINVAL;
if (!strncmp(buf, "add ", 4)) {
if (add_match_busid(busid) < 0)
return -ENOMEM;
pr_debug("add busid %s\n", busid);
return count;
}
if (!strncmp(buf, "del ", 4)) {
if (del_match_busid(busid) < 0)
return -ENODEV;
pr_debug("del busid %s\n", busid);
return count;
}
return -EINVAL;
}
static DRIVER_ATTR_RW(match_busid);
static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
{
int ret = 0;
/* device_attach() callers should hold parent lock for USB */
if (busid_priv->udev->dev.parent)
device_lock(busid_priv->udev->dev.parent);
ret = device_attach(&busid_priv->udev->dev);
if (busid_priv->udev->dev.parent)
device_unlock(busid_priv->udev->dev.parent);
if (ret < 0)
dev_err(&busid_priv->udev->dev, "rebind failed\n");
return ret;
}
static void stub_device_rebind(void)
{
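/*
 * Only reachable on module unload (usbip_host_exit()); when built in,
 * the driver is never removed, so rebinding is not needed.
 */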
#if IS_MODULE(CONFIG_USBIP_HOST)
struct bus_id_priv *busid_priv;
int i;
/* update status to STUB_BUSID_OTHER so probe ignores the device */
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++) {
if (busid_table[i].name[0] &&
busid_table[i].shutdown_busid) {
busid_priv = &(busid_table[i]);
busid_priv->status = STUB_BUSID_OTHER;
}
}
spin_unlock(&busid_table_lock);
/* now run rebind - no need to hold locks. driver files are removed */
for (i = 0; i < MAX_BUSID; i++) {
if (busid_table[i].name[0] &&
busid_table[i].shutdown_busid) {
busid_priv = &(busid_table[i]);
do_rebind(busid_table[i].name, busid_priv);
}
}
#endif
}
static ssize_t rebind_store(struct device_driver *dev, const char *buf,
size_t count)
{
int ret;
int len;
struct bus_id_priv *bid;
/* buf length should be less than BUSID_SIZE */
len = strnlen(buf, BUSID_SIZE);
if (!(len < BUSID_SIZE))
return -EINVAL;
bid = get_busid_priv(buf);
if (!bid)
return -ENODEV;
/* mark the device for deletion so probe ignores it during rescan */
bid->status = STUB_BUSID_OTHER;
/* release the busid lock */
put_busid_priv(bid);
ret = do_rebind((char *) buf, bid);
if (ret < 0)
return ret;
/* delete device from busid_table */
del_match_busid((char *) buf);
return count;
}
static DRIVER_ATTR_WO(rebind);
static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead)
{
struct stub_priv *priv, *tmp;
list_for_each_entry_safe(priv, tmp, listhead, list) {
list_del_init(&priv->list);
return priv;
}
return NULL;
}
void stub_free_priv_and_urb(struct stub_priv *priv)
{
struct urb *urb;
int i;
for (i = 0; i < priv->num_urbs; i++) {
urb = priv->urbs[i];
if (!urb)
return;
kfree(urb->setup_packet);
urb->setup_packet = NULL;
if (urb->transfer_buffer && !priv->sgl) {
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
}
if (urb->num_sgs) {
sgl_free(urb->sg);
urb->sg = NULL;
urb->num_sgs = 0;
}
usb_free_urb(urb);
}
if (!list_empty(&priv->list))
list_del(&priv->list);
if (priv->sgl)
sgl_free(priv->sgl);
kfree(priv->urbs);
kmem_cache_free(stub_priv_cache, priv);
}
static struct stub_priv *stub_priv_pop(struct stub_device *sdev)
{
unsigned long flags;
struct stub_priv *priv;
spin_lock_irqsave(&sdev->priv_lock, flags);
priv = stub_priv_pop_from_listhead(&sdev->priv_init);
if (priv)
goto done;
priv = stub_priv_pop_from_listhead(&sdev->priv_tx);
if (priv)
goto done;
priv = stub_priv_pop_from_listhead(&sdev->priv_free);
done:
spin_unlock_irqrestore(&sdev->priv_lock, flags);
return priv;
}
void stub_device_cleanup_urbs(struct stub_device *sdev)
{
struct stub_priv *priv;
int i;
dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
while ((priv = stub_priv_pop(sdev))) {
for (i = 0; i < priv->num_urbs; i++)
usb_kill_urb(priv->urbs[i]);
stub_free_priv_and_urb(priv);
}
}
static int __init usbip_host_init(void)
{
int ret;
init_busid_table();
stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN);
if (!stub_priv_cache) {
pr_err("kmem_cache_create failed\n");
return -ENOMEM;
}
ret = usb_register_device_driver(&stub_driver, THIS_MODULE);
if (ret) {
pr_err("usb_register failed %d\n", ret);
goto err_usb_register;
}
ret = driver_create_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
if (ret) {
pr_err("driver_create_file failed\n");
goto err_create_file;
}
ret = driver_create_file(&stub_driver.drvwrap.driver,
&driver_attr_rebind);
if (ret) {
pr_err("driver_create_file failed\n");
goto err_create_file;
}
return ret;
err_create_file:
usb_deregister_device_driver(&stub_driver);
err_usb_register:
kmem_cache_destroy(stub_priv_cache);
return ret;
}
static void __exit usbip_host_exit(void)
{
driver_remove_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
driver_remove_file(&stub_driver.drvwrap.driver,
&driver_attr_rebind);
/*
* deregister() calls stub_disconnect() for all devices. Device
* specific data is cleared in stub_disconnect().
*/
usb_deregister_device_driver(&stub_driver);
/* initiate scan to attach devices */
stub_device_rebind();
kmem_cache_destroy(stub_priv_cache);
}
module_init(usbip_host_init);
module_exit(usbip_host_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/usbip/stub_main.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Krzysztof Opasiak <[email protected]>
*/
#include <asm/byteorder.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <net/sock.h>
#include "usbip_common.h"
#define DRIVER_AUTHOR "Takahiro Hirofuchi <[email protected]>"
#define DRIVER_DESC "USB/IP Core"
#ifdef CONFIG_USBIP_DEBUG
unsigned long usbip_debug_flag = 0xffffffff;
#else
unsigned long usbip_debug_flag;
#endif
EXPORT_SYMBOL_GPL(usbip_debug_flag);
module_param(usbip_debug_flag, ulong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(usbip_debug_flag, "debug flags (defined in usbip_common.h)");
/* FIXME */
struct device_attribute dev_attr_usbip_debug;
EXPORT_SYMBOL_GPL(dev_attr_usbip_debug);
static ssize_t usbip_debug_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", usbip_debug_flag);
}
static ssize_t usbip_debug_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
if (sscanf(buf, "%lx", &usbip_debug_flag) != 1)
return -EINVAL;
return count;
}
DEVICE_ATTR_RW(usbip_debug);
static void usbip_dump_buffer(char *buff, int bufflen)
{
print_hex_dump(KERN_DEBUG, "usbip-core", DUMP_PREFIX_OFFSET, 16, 4,
buff, bufflen, false);
}
static void usbip_dump_pipe(unsigned int p)
{
unsigned char type = usb_pipetype(p);
unsigned char ep = usb_pipeendpoint(p);
unsigned char dev = usb_pipedevice(p);
unsigned char dir = usb_pipein(p);
pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT");
switch (type) {
case PIPE_ISOCHRONOUS:
pr_debug("ISO\n");
break;
case PIPE_INTERRUPT:
pr_debug("INT\n");
break;
case PIPE_CONTROL:
pr_debug("CTRL\n");
break;
case PIPE_BULK:
pr_debug("BULK\n");
break;
default:
pr_debug("ERR\n");
break;
}
}
static void usbip_dump_usb_device(struct usb_device *udev)
{
struct device *dev = &udev->dev;
int i;
dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
udev->devnum, udev->devpath, usb_speed_string(udev->speed));
pr_debug("tt hub ttport %d\n", udev->ttport);
dev_dbg(dev, " ");
for (i = 0; i < 16; i++)
pr_debug(" %2u", i);
pr_debug("\n");
dev_dbg(dev, " toggle0(IN) :");
for (i = 0; i < 16; i++)
pr_debug(" %2u", (udev->toggle[0] & (1 << i)) ? 1 : 0);
pr_debug("\n");
dev_dbg(dev, " toggle1(OUT):");
for (i = 0; i < 16; i++)
pr_debug(" %2u", (udev->toggle[1] & (1 << i)) ? 1 : 0);
pr_debug("\n");
dev_dbg(dev, " epmaxp_in :");
for (i = 0; i < 16; i++) {
if (udev->ep_in[i])
pr_debug(" %2u",
le16_to_cpu(udev->ep_in[i]->desc.wMaxPacketSize));
}
pr_debug("\n");
dev_dbg(dev, " epmaxp_out :");
for (i = 0; i < 16; i++) {
if (udev->ep_out[i])
pr_debug(" %2u",
le16_to_cpu(udev->ep_out[i]->desc.wMaxPacketSize));
}
pr_debug("\n");
dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
udev->bus->bus_name);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
udev->have_langid, udev->string_langid);
dev_dbg(dev, "maxchild %d\n", udev->maxchild);
}
static void usbip_dump_request_type(__u8 rt)
{
switch (rt & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
pr_debug("DEVICE");
break;
case USB_RECIP_INTERFACE:
pr_debug("INTERF");
break;
case USB_RECIP_ENDPOINT:
pr_debug("ENDPOI");
break;
case USB_RECIP_OTHER:
pr_debug("OTHER ");
break;
default:
pr_debug("------");
break;
}
}
static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
{
if (!cmd) {
pr_debug(" : null pointer\n");
return;
}
pr_debug(" ");
pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) wLength(%04X) ",
cmd->bRequestType, cmd->bRequest,
cmd->wValue, cmd->wIndex, cmd->wLength);
pr_debug("\n ");
if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
pr_debug("STANDARD ");
switch (cmd->bRequest) {
case USB_REQ_GET_STATUS:
pr_debug("GET_STATUS\n");
break;
case USB_REQ_CLEAR_FEATURE:
pr_debug("CLEAR_FEAT\n");
break;
case USB_REQ_SET_FEATURE:
pr_debug("SET_FEAT\n");
break;
case USB_REQ_SET_ADDRESS:
pr_debug("SET_ADDRRS\n");
break;
case USB_REQ_GET_DESCRIPTOR:
pr_debug("GET_DESCRI\n");
break;
case USB_REQ_SET_DESCRIPTOR:
pr_debug("SET_DESCRI\n");
break;
case USB_REQ_GET_CONFIGURATION:
pr_debug("GET_CONFIG\n");
break;
case USB_REQ_SET_CONFIGURATION:
pr_debug("SET_CONFIG\n");
break;
case USB_REQ_GET_INTERFACE:
pr_debug("GET_INTERF\n");
break;
case USB_REQ_SET_INTERFACE:
pr_debug("SET_INTERF\n");
break;
case USB_REQ_SYNCH_FRAME:
pr_debug("SYNC_FRAME\n");
break;
default:
pr_debug("REQ(%02X)\n", cmd->bRequest);
break;
}
usbip_dump_request_type(cmd->bRequestType);
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
pr_debug("CLASS\n");
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
pr_debug("VENDOR\n");
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_RESERVED) {
pr_debug("RESERVED\n");
}
}
void usbip_dump_urb(struct urb *urb)
{
struct device *dev;
if (!urb) {
pr_debug("urb: null pointer!!\n");
return;
}
if (!urb->dev) {
pr_debug("urb->dev: null pointer!!\n");
return;
}
dev = &urb->dev->dev;
usbip_dump_usb_device(urb->dev);
dev_dbg(dev, " pipe :%08x ", urb->pipe);
usbip_dump_pipe(urb->pipe);
dev_dbg(dev, " status :%d\n", urb->status);
dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
dev_dbg(dev, " transfer_buffer_length:%d\n",
urb->transfer_buffer_length);
dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
usbip_dump_usb_ctrlrequest(
(struct usb_ctrlrequest *)urb->setup_packet);
dev_dbg(dev, " start_frame :%d\n", urb->start_frame);
dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
dev_dbg(dev, " interval :%d\n", urb->interval);
dev_dbg(dev, " error_count :%d\n", urb->error_count);
}
EXPORT_SYMBOL_GPL(usbip_dump_urb);
void usbip_dump_header(struct usbip_header *pdu)
{
pr_debug("BASE: cmd %u seq %u devid %u dir %u ep %u\n",
pdu->base.command,
pdu->base.seqnum,
pdu->base.devid,
pdu->base.direction,
pdu->base.ep);
switch (pdu->base.command) {
case USBIP_CMD_SUBMIT:
pr_debug("USBIP_CMD_SUBMIT: x_flags %u x_len %u sf %u #p %d iv %d\n",
pdu->u.cmd_submit.transfer_flags,
pdu->u.cmd_submit.transfer_buffer_length,
pdu->u.cmd_submit.start_frame,
pdu->u.cmd_submit.number_of_packets,
pdu->u.cmd_submit.interval);
break;
case USBIP_CMD_UNLINK:
pr_debug("USBIP_CMD_UNLINK: seq %u\n",
pdu->u.cmd_unlink.seqnum);
break;
case USBIP_RET_SUBMIT:
pr_debug("USBIP_RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n",
pdu->u.ret_submit.status,
pdu->u.ret_submit.actual_length,
pdu->u.ret_submit.start_frame,
pdu->u.ret_submit.number_of_packets,
pdu->u.ret_submit.error_count);
break;
case USBIP_RET_UNLINK:
pr_debug("USBIP_RET_UNLINK: status %d\n",
pdu->u.ret_unlink.status);
break;
default:
/* NOT REACHED */
pr_err("unknown command\n");
break;
}
}
EXPORT_SYMBOL_GPL(usbip_dump_header);
/* Receive data over TCP/IP. */
int usbip_recv(struct socket *sock, void *buf, int size)
{
int result;
struct kvec iov = {.iov_base = buf, .iov_len = size};
struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
int total = 0;
if (!sock || !buf || !size)
return -EINVAL;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
usbip_dbg_xmit("enter\n");
do {
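/*
 * Use GFP_NOIO for socket allocations so they cannot trigger
 * further I/O (and deadlock) under memory pressure while we are
 * servicing USB traffic.
 */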
sock->sk->sk_allocation = GFP_NOIO;
sock->sk->sk_use_task_frag = false;
result = sock_recvmsg(sock, &msg, MSG_WAITALL);
if (result <= 0)
goto err;
total += result;
} while (msg_data_left(&msg));
if (usbip_dbg_flag_xmit) {
pr_debug("receiving....\n");
usbip_dump_buffer(buf, size);
pr_debug("received, osize %d ret %d size %zd total %d\n",
size, result, msg_data_left(&msg), total);
}
return total;
err:
return result;
}
EXPORT_SYMBOL_GPL(usbip_recv);
/* there may be more cases to tweak the flags. */
static unsigned int tweak_transfer_flags(unsigned int flags)
{
flags &= ~URB_NO_TRANSFER_DMA_MAP;
return flags;
}
/*
* USBIP driver packs URB transfer flags in PDUs that are exchanged
* between Server (usbip_host) and Client (vhci_hcd). URB_* flags
* are internal to the kernel and could change, whereas the USBIP URB
* flags exchanged in PDUs are part of the USBIP user API and must not
* change.
*
* USBIP_URB* flags are exported as explicit API and client and server
* do mapping from kernel flags to USBIP_URB*. Details as follows:
*
* Client tx path (USBIP_CMD_SUBMIT):
* - Maps URB_* to USBIP_URB_* when it sends USBIP_CMD_SUBMIT packet.
*
* Server rx path (USBIP_CMD_SUBMIT):
* - Maps USBIP_URB_* to URB_* when it receives USBIP_CMD_SUBMIT packet.
*
* Flags aren't included in USBIP_CMD_UNLINK and USBIP_RET_SUBMIT packets
* and no special handling is needed for them in the following cases:
* - Server rx path (USBIP_CMD_UNLINK)
* - Client rx path & Server tx path (USBIP_RET_SUBMIT)
*
* Code paths:
* usbip_pack_pdu() is the common routine that handles packing pdu from
* urb and unpack pdu to an urb.
*
* usbip_pack_cmd_submit() and usbip_pack_ret_submit() handle
* USBIP_CMD_SUBMIT and USBIP_RET_SUBMIT respectively.
*
* usbip_map_urb_to_usbip() and usbip_map_usbip_to_urb() are used
* by usbip_pack_cmd_submit() and usbip_pack_ret_submit() to map
* flags.
*/
struct urb_to_usbip_flags {
u32 urb_flag;
u32 usbip_flag;
};
#define NUM_USBIP_FLAGS 17
static const struct urb_to_usbip_flags flag_map[NUM_USBIP_FLAGS] = {
{URB_SHORT_NOT_OK, USBIP_URB_SHORT_NOT_OK},
{URB_ISO_ASAP, USBIP_URB_ISO_ASAP},
{URB_NO_TRANSFER_DMA_MAP, USBIP_URB_NO_TRANSFER_DMA_MAP},
{URB_ZERO_PACKET, USBIP_URB_ZERO_PACKET},
{URB_NO_INTERRUPT, USBIP_URB_NO_INTERRUPT},
{URB_FREE_BUFFER, USBIP_URB_FREE_BUFFER},
{URB_DIR_IN, USBIP_URB_DIR_IN},
{URB_DIR_OUT, USBIP_URB_DIR_OUT},
{URB_DIR_MASK, USBIP_URB_DIR_MASK},
{URB_DMA_MAP_SINGLE, USBIP_URB_DMA_MAP_SINGLE},
{URB_DMA_MAP_PAGE, USBIP_URB_DMA_MAP_PAGE},
{URB_DMA_MAP_SG, USBIP_URB_DMA_MAP_SG},
{URB_MAP_LOCAL, USBIP_URB_MAP_LOCAL},
{URB_SETUP_MAP_SINGLE, USBIP_URB_SETUP_MAP_SINGLE},
{URB_SETUP_MAP_LOCAL, USBIP_URB_SETUP_MAP_LOCAL},
{URB_DMA_SG_COMBINED, USBIP_URB_DMA_SG_COMBINED},
{URB_ALIGNED_TEMP_BUFFER, USBIP_URB_ALIGNED_TEMP_BUFFER},
};
static unsigned int urb_to_usbip(unsigned int flags)
{
unsigned int map_flags = 0;
int loop;
for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
if (flags & flag_map[loop].urb_flag)
map_flags |= flag_map[loop].usbip_flag;
}
return map_flags;
}
static unsigned int usbip_to_urb(unsigned int flags)
{
unsigned int map_flags = 0;
int loop;
for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
if (flags & flag_map[loop].usbip_flag)
map_flags |= flag_map[loop].urb_flag;
}
return map_flags;
}
static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
int pack)
{
struct usbip_header_cmd_submit *spdu = &pdu->u.cmd_submit;
/*
* Some members are still not implemented in usbip. Hopefully this issue
* will be discussed when usbip is ported to other operating systems.
*/
if (pack) {
/* map after tweaking the urb flags */
spdu->transfer_flags = urb_to_usbip(tweak_transfer_flags(urb->transfer_flags));
spdu->transfer_buffer_length = urb->transfer_buffer_length;
spdu->start_frame = urb->start_frame;
spdu->number_of_packets = urb->number_of_packets;
spdu->interval = urb->interval;
} else {
urb->transfer_flags = usbip_to_urb(spdu->transfer_flags);
urb->transfer_buffer_length = spdu->transfer_buffer_length;
urb->start_frame = spdu->start_frame;
urb->number_of_packets = spdu->number_of_packets;
urb->interval = spdu->interval;
}
}
static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
int pack)
{
struct usbip_header_ret_submit *rpdu = &pdu->u.ret_submit;
if (pack) {
rpdu->status = urb->status;
rpdu->actual_length = urb->actual_length;
rpdu->start_frame = urb->start_frame;
rpdu->number_of_packets = urb->number_of_packets;
rpdu->error_count = urb->error_count;
} else {
urb->status = rpdu->status;
urb->actual_length = rpdu->actual_length;
urb->start_frame = rpdu->start_frame;
urb->number_of_packets = rpdu->number_of_packets;
urb->error_count = rpdu->error_count;
}
}
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
int pack)
{
switch (cmd) {
case USBIP_CMD_SUBMIT:
usbip_pack_cmd_submit(pdu, urb, pack);
break;
case USBIP_RET_SUBMIT:
usbip_pack_ret_submit(pdu, urb, pack);
break;
default:
/* NOT REACHED */
pr_err("unknown command\n");
break;
}
}
EXPORT_SYMBOL_GPL(usbip_pack_pdu);
static void correct_endian_basic(struct usbip_header_basic *base, int send)
{
if (send) {
base->command = cpu_to_be32(base->command);
base->seqnum = cpu_to_be32(base->seqnum);
base->devid = cpu_to_be32(base->devid);
base->direction = cpu_to_be32(base->direction);
base->ep = cpu_to_be32(base->ep);
} else {
base->command = be32_to_cpu(base->command);
base->seqnum = be32_to_cpu(base->seqnum);
base->devid = be32_to_cpu(base->devid);
base->direction = be32_to_cpu(base->direction);
base->ep = be32_to_cpu(base->ep);
}
}
static void correct_endian_cmd_submit(struct usbip_header_cmd_submit *pdu,
int send)
{
if (send) {
pdu->transfer_flags = cpu_to_be32(pdu->transfer_flags);
cpu_to_be32s(&pdu->transfer_buffer_length);
cpu_to_be32s(&pdu->start_frame);
cpu_to_be32s(&pdu->number_of_packets);
cpu_to_be32s(&pdu->interval);
} else {
pdu->transfer_flags = be32_to_cpu(pdu->transfer_flags);
be32_to_cpus(&pdu->transfer_buffer_length);
be32_to_cpus(&pdu->start_frame);
be32_to_cpus(&pdu->number_of_packets);
be32_to_cpus(&pdu->interval);
}
}
static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
int send)
{
if (send) {
cpu_to_be32s(&pdu->status);
cpu_to_be32s(&pdu->actual_length);
cpu_to_be32s(&pdu->start_frame);
cpu_to_be32s(&pdu->number_of_packets);
cpu_to_be32s(&pdu->error_count);
} else {
be32_to_cpus(&pdu->status);
be32_to_cpus(&pdu->actual_length);
be32_to_cpus(&pdu->start_frame);
be32_to_cpus(&pdu->number_of_packets);
be32_to_cpus(&pdu->error_count);
}
}
static void correct_endian_cmd_unlink(struct usbip_header_cmd_unlink *pdu,
int send)
{
if (send)
pdu->seqnum = cpu_to_be32(pdu->seqnum);
else
pdu->seqnum = be32_to_cpu(pdu->seqnum);
}
static void correct_endian_ret_unlink(struct usbip_header_ret_unlink *pdu,
int send)
{
if (send)
cpu_to_be32s(&pdu->status);
else
be32_to_cpus(&pdu->status);
}
void usbip_header_correct_endian(struct usbip_header *pdu, int send)
{
__u32 cmd = 0;
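/*
 * base.command is in CPU order before sending and only becomes
 * readable after correct_endian_basic() when receiving, so capture
 * it on the correct side of the conversion.
 */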
if (send)
cmd = pdu->base.command;
correct_endian_basic(&pdu->base, send);
if (!send)
cmd = pdu->base.command;
switch (cmd) {
case USBIP_CMD_SUBMIT:
correct_endian_cmd_submit(&pdu->u.cmd_submit, send);
break;
case USBIP_RET_SUBMIT:
correct_endian_ret_submit(&pdu->u.ret_submit, send);
break;
case USBIP_CMD_UNLINK:
correct_endian_cmd_unlink(&pdu->u.cmd_unlink, send);
break;
case USBIP_RET_UNLINK:
correct_endian_ret_unlink(&pdu->u.ret_unlink, send);
break;
default:
/* NOT REACHED */
pr_err("unknown command\n");
break;
}
}
EXPORT_SYMBOL_GPL(usbip_header_correct_endian);
static void usbip_iso_packet_correct_endian(
struct usbip_iso_packet_descriptor *iso, int send)
{
/* not all members are needed, but simply copy them all */
if (send) {
iso->offset = cpu_to_be32(iso->offset);
iso->length = cpu_to_be32(iso->length);
iso->status = cpu_to_be32(iso->status);
iso->actual_length = cpu_to_be32(iso->actual_length);
} else {
iso->offset = be32_to_cpu(iso->offset);
iso->length = be32_to_cpu(iso->length);
iso->status = be32_to_cpu(iso->status);
iso->actual_length = be32_to_cpu(iso->actual_length);
}
}
static void usbip_pack_iso(struct usbip_iso_packet_descriptor *iso,
struct usb_iso_packet_descriptor *uiso, int pack)
{
if (pack) {
iso->offset = uiso->offset;
iso->length = uiso->length;
iso->status = uiso->status;
iso->actual_length = uiso->actual_length;
} else {
uiso->offset = iso->offset;
uiso->length = iso->length;
uiso->status = iso->status;
uiso->actual_length = iso->actual_length;
}
}
/* the caller must free the returned buffer */
struct usbip_iso_packet_descriptor*
usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen)
{
struct usbip_iso_packet_descriptor *iso;
int np = urb->number_of_packets;
ssize_t size = np * sizeof(*iso);
int i;
iso = kzalloc(size, GFP_KERNEL);
if (!iso)
return NULL;
for (i = 0; i < np; i++) {
usbip_pack_iso(&iso[i], &urb->iso_frame_desc[i], 1);
usbip_iso_packet_correct_endian(&iso[i], 1);
}
*bufflen = size;
return iso;
}
EXPORT_SYMBOL_GPL(usbip_alloc_iso_desc_pdu);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
{
void *buff;
struct usbip_iso_packet_descriptor *iso;
int np = urb->number_of_packets;
int size = np * sizeof(*iso);
int i;
int ret;
int total_length = 0;
if (!usb_pipeisoc(urb->pipe))
return 0;
/* some devices (e.g. a Bluetooth dongle) submit ISO URBs with np == 0 */
if (np == 0)
return 0;
buff = kzalloc(size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
ret = usbip_recv(ud->tcp_socket, buff, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n",
ret);
kfree(buff);
if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
else
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return -EPIPE;
}
iso = (struct usbip_iso_packet_descriptor *) buff;
for (i = 0; i < np; i++) {
usbip_iso_packet_correct_endian(&iso[i], 0);
usbip_pack_iso(&iso[i], &urb->iso_frame_desc[i], 0);
total_length += urb->iso_frame_desc[i].actual_length;
}
kfree(buff);
if (total_length != urb->actual_length) {
dev_err(&urb->dev->dev,
"total length of iso packets %d not equal to actual length of buffer %d\n",
total_length, urb->actual_length);
if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
else
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return -EPIPE;
}
return ret;
}
EXPORT_SYMBOL_GPL(usbip_recv_iso);
/*
* This function restores the padding which was removed to optimize
* bandwidth during transfer over TCP/IP.
*
* The buffer and the iso packets need to be stored in the urb, in proper
* endianness, before calling this function.
*/
void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
int np = urb->number_of_packets;
int i;
int actualoffset = urb->actual_length;
if (!usb_pipeisoc(urb->pipe))
return;
/* if no packets or length of data is 0, then nothing to unpack */
if (np == 0 || urb->actual_length == 0)
return;
/*
* if actual_length is transfer_buffer_length then no padding is
* present.
*/
if (urb->actual_length == urb->transfer_buffer_length)
return;
/*
* loop over all packets from last to first (to prevent overwriting
* memory when padding) and move them into the proper place
*/
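/* packet 0 is never moved; its data is assumed to already sit at the buffer start */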
for (i = np-1; i > 0; i--) {
actualoffset -= urb->iso_frame_desc[i].actual_length;
memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
urb->transfer_buffer + actualoffset,
urb->iso_frame_desc[i].actual_length);
}
}
EXPORT_SYMBOL_GPL(usbip_pad_iso);
/* some members of urb must be substituted before. */
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
{
struct scatterlist *sg;
int ret = 0;
int recv;
int size;
int copy;
int i;
if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
/* the direction of urb must be OUT. */
if (usb_pipein(urb->pipe))
return 0;
size = urb->transfer_buffer_length;
} else {
/* the direction of urb must be IN. */
if (usb_pipeout(urb->pipe))
return 0;
size = urb->actual_length;
}
/* no need to recv xbuff */
if (!(size > 0))
return 0;
if (size > urb->transfer_buffer_length)
/* should not happen, probably malicious packet */
goto error;
if (urb->num_sgs) {
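/*
 * Scattered transfer buffer: receive into each SG element in
 * turn, never reading more than 'size' bytes in total.
 */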
copy = size;
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
int recv_size;
if (copy < sg->length)
recv_size = copy;
else
recv_size = sg->length;
recv = usbip_recv(ud->tcp_socket, sg_virt(sg),
recv_size);
if (recv != recv_size)
goto error;
copy -= recv;
ret += recv;
if (!copy)
break;
}
if (ret != size)
goto error;
} else {
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size)
goto error;
}
return ret;
error:
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
else
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return -EPIPE;
}
EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
static int __init usbip_core_init(void)
{
return usbip_init_eh();
}
static void __exit usbip_core_exit(void)
{
usbip_finish_eh();
return;
}
module_init(usbip_core_init);
module_exit(usbip_core_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/usbip/usbip_common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
*/
#include <linux/init.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "usbip_common.h"
#include "vhci.h"
#define DRIVER_AUTHOR "Takahiro Hirofuchi"
#define DRIVER_DESC "USB/IP 'Virtual' Host Controller (VHCI) Driver"
/*
* TODO
* - update root hub emulation
* - move the emulation code to userland ?
* porting to other operating systems
* minimize kernel code
* - add suspend/resume code
* - clean up everything
*/
/* See usb gadget dummy hcd */
static int vhci_hub_status(struct usb_hcd *hcd, char *buff);
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buff, u16 wLength);
static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags);
static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
static int vhci_start(struct usb_hcd *vhci_hcd);
static void vhci_stop(struct usb_hcd *hcd);
static int vhci_get_frame_number(struct usb_hcd *hcd);
static const char driver_name[] = "vhci_hcd";
static const char driver_desc[] = "USB/IP Virtual Host Controller";
int vhci_num_controllers = VHCI_NR_HCS;
struct vhci *vhcis;
static const char * const bit_desc[] = {
"CONNECTION", /*0*/
"ENABLE", /*1*/
"SUSPEND", /*2*/
"OVER_CURRENT", /*3*/
"RESET", /*4*/
"L1", /*5*/
"R6", /*6*/
"R7", /*7*/
"POWER", /*8*/
"LOWSPEED", /*9*/
"HIGHSPEED", /*10*/
"PORT_TEST", /*11*/
"INDICATOR", /*12*/
"R13", /*13*/
"R14", /*14*/
"R15", /*15*/
"C_CONNECTION", /*16*/
"C_ENABLE", /*17*/
"C_SUSPEND", /*18*/
"C_OVER_CURRENT", /*19*/
"C_RESET", /*20*/
"C_L1", /*21*/
"R22", /*22*/
"R23", /*23*/
"R24", /*24*/
"R25", /*25*/
"R26", /*26*/
"R27", /*27*/
"R28", /*28*/
"R29", /*29*/
"R30", /*30*/
"R31", /*31*/
};
static const char * const bit_desc_ss[] = {
"CONNECTION", /*0*/
"ENABLE", /*1*/
"SUSPEND", /*2*/
"OVER_CURRENT", /*3*/
"RESET", /*4*/
"L1", /*5*/
"R6", /*6*/
"R7", /*7*/
"R8", /*8*/
"POWER", /*9*/
"HIGHSPEED", /*10*/
"PORT_TEST", /*11*/
"INDICATOR", /*12*/
"R13", /*13*/
"R14", /*14*/
"R15", /*15*/
"C_CONNECTION", /*16*/
"C_ENABLE", /*17*/
"C_SUSPEND", /*18*/
"C_OVER_CURRENT", /*19*/
"C_RESET", /*20*/
"C_BH_RESET", /*21*/
"C_LINK_STATE", /*22*/
"C_CONFIG_ERROR", /*23*/
"R24", /*24*/
"R25", /*25*/
"R26", /*26*/
"R27", /*27*/
"R28", /*28*/
"R29", /*29*/
"R30", /*30*/
"R31", /*31*/
};
static void dump_port_status_diff(u32 prev_status, u32 new_status, bool usb3)
{
int i = 0;
u32 bit = 1;
const char * const *desc = bit_desc;
if (usb3)
desc = bit_desc_ss;
pr_debug("status prev -> new: %08x -> %08x\n", prev_status, new_status);
while (bit) {
u32 prev = prev_status & bit;
u32 new = new_status & bit;
char change;
if (!prev && new)
change = '+';
else if (prev && !new)
change = '-';
else
change = ' ';
if (prev || new) {
pr_debug(" %c%s\n", change, desc[i]);
if (bit == 1) /* USB_PORT_STAT_CONNECTION */
pr_debug(" %c%s\n", change, "USB_PORT_STAT_SPEED_5GBPS");
}
bit <<= 1;
i++;
}
pr_debug("\n");
}
void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed)
{
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
struct vhci *vhci = vhci_hcd->vhci;
int rhport = vdev->rhport;
u32 status;
unsigned long flags;
usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
spin_lock_irqsave(&vhci->lock, flags);
status = vhci_hcd->port_status[rhport];
status |= USB_PORT_STAT_CONNECTION | (1 << USB_PORT_FEAT_C_CONNECTION);
switch (speed) {
case USB_SPEED_HIGH:
status |= USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_LOW:
status |= USB_PORT_STAT_LOW_SPEED;
break;
default:
break;
}
vhci_hcd->port_status[rhport] = status;
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_poll_rh_status(vhci_hcd_to_hcd(vhci_hcd));
}
static void rh_port_disconnect(struct vhci_device *vdev)
{
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
struct vhci *vhci = vhci_hcd->vhci;
int rhport = vdev->rhport;
u32 status;
unsigned long flags;
usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
spin_lock_irqsave(&vhci->lock, flags);
status = vhci_hcd->port_status[rhport];
status &= ~USB_PORT_STAT_CONNECTION;
status |= (1 << USB_PORT_FEAT_C_CONNECTION);
vhci_hcd->port_status[rhport] = status;
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_poll_rh_status(vhci_hcd_to_hcd(vhci_hcd));
}
#define PORT_C_MASK \
((USB_PORT_STAT_C_CONNECTION \
| USB_PORT_STAT_C_ENABLE \
| USB_PORT_STAT_C_SUSPEND \
| USB_PORT_STAT_C_OVERCURRENT \
| USB_PORT_STAT_C_RESET) << 16)
/*
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
* Ports are 0-indexed from the HCD point of view,
 * and 1-indexed from the USB core point of view.
*
* @buf: a bitmap to show which port status has been changed.
* bit 0: reserved
* bit 1: the status of port 0 has been changed.
* bit 2: the status of port 1 has been changed.
* ...
*/
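/*
 * Illustrative example (hypothetical numbers): with VHCI_HC_PORTS = 8,
 * a change on rhport 2 sets bit 3 of buf[0], i.e. buf[0] |= 1 << 3,
 * and the function returns DIV_ROUND_UP(8 + 1, 8) = 2 bytes.
 */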
static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
{
struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
struct vhci *vhci = vhci_hcd->vhci;
int retval = DIV_ROUND_UP(VHCI_HC_PORTS + 1, 8);
int rhport;
int changed = 0;
unsigned long flags;
memset(buf, 0, retval);
spin_lock_irqsave(&vhci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd)) {
usbip_dbg_vhci_rh("hw accessible flag not on?\n");
goto done;
}
/* check pseudo status register for each port */
for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
if ((vhci_hcd->port_status[rhport] & PORT_C_MASK)) {
/* The status of a port has been changed, */
usbip_dbg_vhci_rh("port %d status changed\n", rhport);
buf[(rhport + 1) / 8] |= 1 << (rhport + 1) % 8;
changed = 1;
}
}
if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
usb_hcd_resume_root_hub(hcd);
done:
spin_unlock_irqrestore(&vhci->lock, flags);
return changed ? retval : 0;
}
/* USB 3.0 root hub BOS descriptor */
static struct {
struct usb_bos_descriptor bos;
struct usb_ss_cap_descriptor ss_cap;
} __packed usb3_bos_desc = {
.bos = {
.bLength = USB_DT_BOS_SIZE,
.bDescriptorType = USB_DT_BOS,
.wTotalLength = cpu_to_le16(sizeof(usb3_bos_desc)),
.bNumDeviceCaps = 1,
},
.ss_cap = {
.bLength = USB_DT_USB_SS_CAP_SIZE,
.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
.bDevCapabilityType = USB_SS_CAP_TYPE,
.wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION),
.bFunctionalitySupport = ilog2(USB_5GBPS_OPERATION),
},
};
static inline void
ss_hub_descriptor(struct usb_hub_descriptor *desc)
{
memset(desc, 0, sizeof *desc);
desc->bDescriptorType = USB_DT_SS_HUB;
desc->bDescLength = 12;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = VHCI_HC_PORTS;
	desc->u.ss.bHubHdrDecLat = 0x04; /* worst case: 0.4 microseconds */
desc->u.ss.DeviceRemovable = 0xffff;
}
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
int width;
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = VHCI_HC_PORTS;
BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
width = desc->bNbrPorts / 8 + 1;
desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
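	/* DeviceRemovable bitmap (all removable), then PortPwrCtrlMask (all 1s) */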
memset(&desc->u.hs.DeviceRemovable[0], 0, width);
memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct vhci_hcd *vhci_hcd;
struct vhci *vhci;
int retval = 0;
int rhport = -1;
unsigned long flags;
bool invalid_rhport = false;
u32 prev_port_status[VHCI_HC_PORTS];
if (!HCD_HW_ACCESSIBLE(hcd))
return -ETIMEDOUT;
/*
* NOTE:
	 * wIndex (bits 0-7) carries the port number, which begins at 1.
*/
wIndex = ((__u8)(wIndex & 0x00ff));
usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
wIndex);
/*
* wIndex can be 0 for some request types (typeReq). rhport is
	 * in valid range when wIndex >= 1 and <= VHCI_HC_PORTS.
	 *
	 * Only dereference port_status[] with a valid rhport, i.e. when
	 * invalid_rhport is false.
*/
if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
invalid_rhport = true;
if (wIndex > VHCI_HC_PORTS)
pr_err("invalid port number %d\n", wIndex);
} else
rhport = wIndex - 1;
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
spin_lock_irqsave(&vhci->lock, flags);
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
if (!invalid_rhport)
memcpy(prev_port_status, vhci_hcd->port_status,
sizeof(prev_port_status));
}
switch (typeReq) {
case ClearHubFeature:
usbip_dbg_vhci_rh(" ClearHubFeature\n");
break;
case ClearPortFeature:
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed == HCD_USB3) {
pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
}
usbip_dbg_vhci_rh(
" ClearPortFeature: USB_PORT_FEAT_SUSPEND\n");
if (vhci_hcd->port_status[rhport] & USB_PORT_STAT_SUSPEND) {
/* 20msec signaling */
vhci_hcd->resuming = 1;
vhci_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" ClearPortFeature: USB_PORT_FEAT_POWER\n");
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] &= ~USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] &= ~USB_PORT_STAT_POWER;
break;
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
if (wValue >= 32)
goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
break;
case GetHubDescriptor:
usbip_dbg_vhci_rh(" GetHubDescriptor\n");
if (hcd->speed == HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n");
goto error;
}
if (hcd->speed == HCD_USB3)
ss_hub_descriptor((struct usb_hub_descriptor *) buf);
else
hub_descriptor((struct usb_hub_descriptor *) buf);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
if (hcd->speed != HCD_USB3)
goto error;
if ((wValue >> 8) != USB_DT_BOS)
goto error;
memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc));
retval = sizeof(usb3_bos_desc);
break;
case GetHubStatus:
usbip_dbg_vhci_rh(" GetHubStatus\n");
*(__le32 *) buf = cpu_to_le32(0);
break;
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
retval = -EPIPE;
goto error;
}
		/*
		 * We do not care about resume; whoever resets or resumes
		 * must issue GetPortStatus to complete it.
		 */
if (vhci_hcd->resuming && time_after(jiffies, vhci_hcd->re_timeout)) {
vhci_hcd->port_status[rhport] |= (1 << USB_PORT_FEAT_C_SUSPEND);
vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_SUSPEND);
vhci_hcd->resuming = 0;
vhci_hcd->re_timeout = 0;
}
if ((vhci_hcd->port_status[rhport] & (1 << USB_PORT_FEAT_RESET)) !=
0 && time_after(jiffies, vhci_hcd->re_timeout)) {
vhci_hcd->port_status[rhport] |= (1 << USB_PORT_FEAT_C_RESET);
vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
vhci_hcd->re_timeout = 0;
/*
* A few drivers do usb reset during probe when
* the device could be in VDEV_ST_USED state
*/
if (vhci_hcd->vdev[rhport].ud.status ==
VDEV_ST_NOTASSIGNED ||
vhci_hcd->vdev[rhport].ud.status ==
VDEV_ST_USED) {
usbip_dbg_vhci_rh(
" enable rhport %d (status %u)\n",
rhport,
vhci_hcd->vdev[rhport].ud.status);
vhci_hcd->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
if (hcd->speed < HCD_USB3) {
switch (vhci_hcd->vdev[rhport].speed) {
case USB_SPEED_HIGH:
vhci_hcd->port_status[rhport] |=
USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_LOW:
vhci_hcd->port_status[rhport] |=
USB_PORT_STAT_LOW_SPEED;
break;
default:
pr_err("vhci_device speed not set\n");
break;
}
}
}
((__le16 *) buf)[0] = cpu_to_le16(vhci_hcd->port_status[rhport]);
((__le16 *) buf)[1] =
cpu_to_le16(vhci_hcd->port_status[rhport] >> 16);
usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
((u16 *)buf)[1]);
break;
case SetHubFeature:
usbip_dbg_vhci_rh(" SetHubFeature\n");
retval = -EPIPE;
break;
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_LINK_STATE:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_LINK_STATE\n");
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_LINK_STATE req not "
"supported for USB 2.0 roothub\n");
goto error;
}
/*
* Since this is dummy we don't have an actual link so
* there is nothing to do for the SET_LINK_STATE cmd
*/
break;
case USB_PORT_FEAT_U1_TIMEOUT:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U1_TIMEOUT\n");
fallthrough;
case USB_PORT_FEAT_U2_TIMEOUT:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
/* TODO: add suspend/resume support! */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not "
"supported for USB 2.0 roothub\n");
goto error;
}
break;
case USB_PORT_FEAT_SUSPEND:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
/* Applicable only for USB2.0 hub */
if (hcd->speed == HCD_USB3) {
pr_err("USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
}
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_BH_PORT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
/* Applicable only for USB3.0 hub */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
"supported for USB 2.0 roothub\n");
goto error;
}
fallthrough;
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
				vhci_hcd->port_status[rhport] =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
} else if (vhci_hcd->port_status[rhport] & USB_PORT_STAT_ENABLE) {
vhci_hcd->port_status[rhport] &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
}
/* 50msec reset signaling */
vhci_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
fallthrough;
default:
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
if (wValue >= 32)
goto error;
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
vhci_hcd->port_status[rhport] |= (1 << wValue);
}
			} else if ((vhci_hcd->port_status[rhport] &
				    USB_PORT_STAT_POWER) != 0) {
				vhci_hcd->port_status[rhport] |= (1 << wValue);
			}
}
break;
case GetPortErrorCount:
usbip_dbg_vhci_rh(" GetPortErrorCount\n");
if (hcd->speed != HCD_USB3) {
pr_err("GetPortErrorCount req not "
"supported for USB 2.0 roothub\n");
goto error;
}
/* We'll always return 0 since this is a dummy hub */
*(__le32 *) buf = cpu_to_le32(0);
break;
case SetHubDepth:
usbip_dbg_vhci_rh(" SetHubDepth\n");
if (hcd->speed != HCD_USB3) {
pr_err("SetHubDepth req not supported for "
"USB 2.0 roothub\n");
goto error;
}
break;
default:
pr_err("default hub control req: %04x v%04x i%04x l%d\n",
typeReq, wValue, wIndex, wLength);
error:
/* "protocol stall" on error */
retval = -EPIPE;
}
if (usbip_dbg_flag_vhci_rh) {
pr_debug("port %d\n", rhport);
/* Only dump valid port status */
if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
hcd->speed == HCD_USB3);
}
}
usbip_dbg_vhci_rh(" bye\n");
spin_unlock_irqrestore(&vhci->lock, flags);
if (!invalid_rhport &&
(vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
usb_hcd_poll_rh_status(hcd);
}
return retval;
}
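/*
 * Queue the urb on the per-device priv_tx list and wake up the tx
 * thread, which (in this driver's structure) wraps it in a CMD_SUBMIT
 * pdu and sends it to the peer.
 */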
static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
{
struct vhci_priv *priv;
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
unsigned long flags;
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
if (!priv) {
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
spin_lock_irqsave(&vdev->priv_lock, flags);
priv->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
if (priv->seqnum == 0xffff)
dev_info(&urb->dev->dev, "seqnum max\n");
priv->vdev = vdev;
priv->urb = urb;
urb->hcpriv = (void *) priv;
list_add_tail(&priv->list, &vdev->priv_tx);
wake_up(&vdev->waitq_tx);
spin_unlock_irqrestore(&vdev->priv_lock, flags);
}
static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
struct vhci *vhci = vhci_hcd->vhci;
struct device *dev = &urb->dev->dev;
u8 portnum = urb->dev->portnum;
int ret = 0;
struct vhci_device *vdev;
unsigned long flags;
if (portnum > VHCI_HC_PORTS) {
pr_err("invalid port number %d\n", portnum);
return -ENODEV;
}
vdev = &vhci_hcd->vdev[portnum-1];
if (!urb->transfer_buffer && !urb->num_sgs &&
urb->transfer_buffer_length) {
dev_dbg(dev, "Null URB transfer buffer\n");
return -EINVAL;
}
spin_lock_irqsave(&vhci->lock, flags);
if (urb->status != -EINPROGRESS) {
dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
spin_unlock_irqrestore(&vhci->lock, flags);
return urb->status;
}
/* refuse enqueue for dead connection */
spin_lock(&vdev->ud.lock);
if (vdev->ud.status == VDEV_ST_NULL ||
vdev->ud.status == VDEV_ST_ERROR) {
dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
return -ENODEV;
}
spin_unlock(&vdev->ud.lock);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto no_need_unlink;
/*
	 * The enumeration process is as follows:
*
* 1. Get_Descriptor request to DevAddrs(0) EndPoint(0)
* to get max packet length of default pipe
*
* 2. Set_Address request to DevAddr(0) EndPoint(0)
*
*/
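	/*
	 * Requests to device address 0 are handled locally below:
	 * Set_Address is completed here (the vdev takes a reference on
	 * the udev), while Get_Descriptor is forwarded to the peer like
	 * any other urb.
	 */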
if (usb_pipedevice(urb->pipe) == 0) {
__u8 type = usb_pipetype(urb->pipe);
struct usb_ctrlrequest *ctrlreq =
(struct usb_ctrlrequest *) urb->setup_packet;
if (type != PIPE_CONTROL || !ctrlreq) {
dev_err(dev, "invalid request to devnum 0\n");
ret = -EINVAL;
goto no_need_xmit;
}
switch (ctrlreq->bRequest) {
case USB_REQ_SET_ADDRESS:
/* set_address may come when a device is reset */
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);
usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
spin_lock(&vdev->ud.lock);
vdev->ud.status = VDEV_ST_USED;
spin_unlock(&vdev->ud.lock);
if (urb->status == -EINPROGRESS) {
/* This request is successfully completed. */
/* If not -EINPROGRESS, possibly unlinked. */
urb->status = 0;
}
goto no_need_xmit;
case USB_REQ_GET_DESCRIPTOR:
if (ctrlreq->wValue == cpu_to_le16(USB_DT_DEVICE << 8))
usbip_dbg_vhci_hc(
"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
goto out;
default:
/* NOT REACHED */
dev_err(dev,
"invalid request to devnum 0 bRequest %u, wValue %u\n",
ctrlreq->bRequest,
ctrlreq->wValue);
ret = -EINVAL;
goto no_need_xmit;
}
}
out:
vhci_tx_urb(urb, vdev);
spin_unlock_irqrestore(&vhci->lock, flags);
return 0;
no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
spin_unlock_irqrestore(&vhci->lock, flags);
if (!ret) {
/* usb_hcd_giveback_urb() should be called with
* irqs disabled
*/
local_irq_disable();
usb_hcd_giveback_urb(hcd, urb, urb->status);
local_irq_enable();
}
return ret;
}
/*
 * vhci_rx gives back the urb after receiving the reply for that urb. Whether
 * or not an unlink pdu has been sent, vhci_rx receives a normal return pdu
 * and gives back its urb. For the driver unlinking the urb, the content of
 * the urb is not important, but the call to its completion handler is; the
 * completion of the unlink is signalled through that handler.
*
*
* CLIENT SIDE
*
* - When vhci_hcd receives RET_SUBMIT,
*
* - case 1a). the urb of the pdu is not unlinking.
* - normal case
* => just give back the urb
*
* - case 1b). the urb of the pdu is unlinking.
* - usbip.ko will return a reply of the unlinking request.
* => give back the urb now and go to case 2b).
*
* - When vhci_hcd receives RET_UNLINK,
*
* - case 2a). a submit request is still pending in vhci_hcd.
 *      - urb was really pending in usbip.ko and usb_unlink_urb() was
 *        completed there.
 *      => free the pending submit request
 *      => notify unlink completion by giving back the urb
*
* - case 2b). a submit request is *not* pending in vhci_hcd.
* - urb was already given back to the core driver.
* => do not give back the urb
*
*
* SERVER SIDE
*
* - When usbip receives CMD_UNLINK,
*
* - case 3a). the urb of the unlink request is now in submission.
* => do usb_unlink_urb().
* => after the unlink is completed, send RET_UNLINK.
*
* - case 3b). the urb of the unlink request is not in submission.
 *      - it may already have completed, or may never have been received
* => send RET_UNLINK
*
*/
static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
struct vhci *vhci = vhci_hcd->vhci;
struct vhci_priv *priv;
struct vhci_device *vdev;
unsigned long flags;
spin_lock_irqsave(&vhci->lock, flags);
priv = urb->hcpriv;
if (!priv) {
		/* URB was never linked, or will soon be given back by
		 * vhci_rx. */
spin_unlock_irqrestore(&vhci->lock, flags);
return -EIDRM;
}
	{
		int ret = usb_hcd_check_unlink_urb(hcd, urb, status);

		if (ret) {
			spin_unlock_irqrestore(&vhci->lock, flags);
			return ret;
		}
	}
/* send unlink request here? */
vdev = priv->vdev;
if (!vdev->ud.tcp_socket) {
/* tcp connection is closed */
spin_lock(&vdev->priv_lock);
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
spin_unlock(&vdev->priv_lock);
/*
* If tcp connection is alive, we have sent CMD_UNLINK.
* vhci_rx will receive RET_UNLINK and give back the URB.
		 * Otherwise, we give it back here.
*/
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, urb->status);
spin_lock_irqsave(&vhci->lock, flags);
} else {
/* tcp connection is alive */
struct vhci_unlink *unlink;
spin_lock(&vdev->priv_lock);
/* setup CMD_UNLINK pdu */
unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
if (!unlink) {
spin_unlock(&vdev->priv_lock);
spin_unlock_irqrestore(&vhci->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
unlink->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
if (unlink->seqnum == 0xffff)
pr_info("seqnum max\n");
unlink->unlink_seqnum = priv->seqnum;
/* send cmd_unlink and try to cancel the pending URB in the
* peer */
list_add_tail(&unlink->list, &vdev->unlink_tx);
wake_up(&vdev->waitq_tx);
spin_unlock(&vdev->priv_lock);
}
spin_unlock_irqrestore(&vhci->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
}
static void vhci_cleanup_unlink_list(struct vhci_device *vdev,
struct list_head *unlink_list)
{
struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
struct usb_hcd *hcd = vhci_hcd_to_hcd(vhci_hcd);
struct vhci *vhci = vhci_hcd->vhci;
struct vhci_unlink *unlink, *tmp;
unsigned long flags;
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, unlink_list, list) {
struct urb *urb;
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
if (!urb) {
list_del(&unlink->list);
kfree(unlink);
continue;
}
urb->status = -ENODEV;
usb_hcd_unlink_urb_from_ep(hcd, urb);
list_del(&unlink->list);
spin_unlock(&vdev->priv_lock);
spin_unlock_irqrestore(&vhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, urb->status);
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->priv_lock);
kfree(unlink);
}
spin_unlock(&vdev->priv_lock);
spin_unlock_irqrestore(&vhci->lock, flags);
}
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
/* give back URB of unsent unlink request */
vhci_cleanup_unlink_list(vdev, &vdev->unlink_tx);
/* give back URB of unanswered unlink request */
vhci_cleanup_unlink_list(vdev, &vdev->unlink_rx);
}
/*
 * The important thing is that only one context begins the cleanup.
 * This keeps error handling and cleanup simple; we do not want to
 * have to reason about races here.
*/
static void vhci_shutdown_connection(struct usbip_device *ud)
{
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
/* need this? see stub_dev.c */
if (ud->tcp_socket) {
pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
	/* kill threads related to this vdev */
if (vdev->ud.tcp_rx) {
kthread_stop_put(vdev->ud.tcp_rx);
vdev->ud.tcp_rx = NULL;
}
if (vdev->ud.tcp_tx) {
kthread_stop_put(vdev->ud.tcp_tx);
vdev->ud.tcp_tx = NULL;
}
pr_info("stop threads\n");
/* active connection is closed */
if (vdev->ud.tcp_socket) {
sockfd_put(vdev->ud.tcp_socket);
vdev->ud.tcp_socket = NULL;
vdev->ud.sockfd = -1;
}
pr_info("release socket\n");
vhci_device_unlink_cleanup(vdev);
/*
* rh_port_disconnect() is a trigger of ...
* usb_disable_device():
* disable all the endpoints for a USB device.
* usb_disable_endpoint():
* disable endpoints. pending urbs are unlinked(dequeued).
*
* NOTE: After calling rh_port_disconnect(), the USB device drivers of a
* detached device should release used urbs in a cleanup function (i.e.
* xxx_disconnect()). Therefore, vhci_hcd does not need to release
* pushed urbs and their private data in this function.
*
* NOTE: vhci_dequeue() must be considered carefully. When shutting down
* a connection, vhci_shutdown_connection() expects vhci_dequeue()
* gives back pushed urbs and frees their private data by request of
* the cleanup function of a USB driver. When unlinking a urb with an
* active connection, vhci_dequeue() does not give back the urb which
* is actually given back by vhci_rx after receiving its return pdu.
*
*/
rh_port_disconnect(vdev);
pr_info("disconnect device\n");
}
static void vhci_device_reset(struct usbip_device *ud)
{
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
vdev->speed = 0;
vdev->devid = 0;
usb_put_dev(vdev->udev);
vdev->udev = NULL;
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
ud->sockfd = -1;
}
ud->status = VDEV_ST_NULL;
spin_unlock_irqrestore(&ud->lock, flags);
}
static void vhci_device_unusable(struct usbip_device *ud)
{
unsigned long flags;
spin_lock_irqsave(&ud->lock, flags);
ud->status = VDEV_ST_ERROR;
spin_unlock_irqrestore(&ud->lock, flags);
}
static void vhci_device_init(struct vhci_device *vdev)
{
memset(vdev, 0, sizeof(struct vhci_device));
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
spin_lock_init(&vdev->ud.lock);
mutex_init(&vdev->ud.sysfs_lock);
INIT_LIST_HEAD(&vdev->priv_rx);
INIT_LIST_HEAD(&vdev->priv_tx);
INIT_LIST_HEAD(&vdev->unlink_tx);
INIT_LIST_HEAD(&vdev->unlink_rx);
spin_lock_init(&vdev->priv_lock);
init_waitqueue_head(&vdev->waitq_tx);
vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
vdev->ud.eh_ops.reset = vhci_device_reset;
vdev->ud.eh_ops.unusable = vhci_device_unusable;
usbip_start_eh(&vdev->ud);
}
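/*
 * Parse the controller index from an hcd name of the form
 * "<name>.<id>", e.g. "vhci_hcd.3" -> 3; a name without a '.'
 * maps to id 0.
 */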
static int hcd_name_to_id(const char *name)
{
char *c;
long val;
int ret;
c = strchr(name, '.');
if (c == NULL)
return 0;
ret = kstrtol(c+1, 10, &val);
if (ret < 0)
return ret;
return val;
}
static int vhci_setup(struct usb_hcd *hcd)
{
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
if (usb_hcd_is_primary_hcd(hcd)) {
vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_hs->vhci = vhci;
/*
* Mark the first roothub as being USB 2.0.
* The USB 3.0 roothub will be registered later by
* vhci_hcd_probe()
*/
hcd->speed = HCD_USB2;
hcd->self.root_hub->speed = USB_SPEED_HIGH;
} else {
vhci->vhci_hcd_ss = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_ss->vhci = vhci;
hcd->speed = HCD_USB3;
hcd->self.root_hub->speed = USB_SPEED_SUPER;
}
/*
* Support SG.
* sg_tablesize is an arbitrary value to alleviate memory pressure
* on the host.
*/
hcd->self.sg_tablesize = 32;
hcd->self.no_sg_constraint = 1;
return 0;
}
static int vhci_start(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
int id, rhport;
int err;
usbip_dbg_vhci_hc("enter vhci_start\n");
if (usb_hcd_is_primary_hcd(hcd))
spin_lock_init(&vhci_hcd->vhci->lock);
/* initialize private data of usb_hcd */
for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
vhci_device_init(vdev);
vdev->rhport = rhport;
}
atomic_set(&vhci_hcd->seqnum, 0);
hcd->power_budget = 0; /* no limit */
hcd->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
hcd->self.otg_port = 1;
#endif
id = hcd_name_to_id(hcd_name(hcd));
if (id < 0) {
pr_err("invalid vhci name %s\n", hcd_name(hcd));
return -EINVAL;
}
/* vhci_hcd is now ready to be controlled through sysfs */
if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
err = vhci_init_attr_group();
if (err) {
dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err);
return err;
}
err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
if (err) {
dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err);
vhci_finish_attr_group();
return err;
}
pr_info("created sysfs %s\n", hcd_name(hcd));
}
return 0;
}
static void vhci_stop(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
int id, rhport;
usbip_dbg_vhci_hc("stop VHCI controller\n");
/* 1. remove the userland interface of vhci_hcd */
id = hcd_name_to_id(hcd_name(hcd));
if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
sysfs_remove_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
vhci_finish_attr_group();
}
/* 2. shutdown all the ports of vhci_hcd */
for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
usbip_event_add(&vdev->ud, VDEV_EVENT_REMOVED);
usbip_stop_eh(&vdev->ud);
}
}
static int vhci_get_frame_number(struct usb_hcd *hcd)
{
dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
return 0;
}
#ifdef CONFIG_PM
/* FIXME: suspend/resume */
static int vhci_bus_suspend(struct usb_hcd *hcd)
{
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
unsigned long flags;
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irqsave(&vhci->lock, flags);
hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irqrestore(&vhci->lock, flags);
return 0;
}
static int vhci_bus_resume(struct usb_hcd *hcd)
{
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
int rc = 0;
unsigned long flags;
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irqsave(&vhci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else
hcd->state = HC_STATE_RUNNING;
spin_unlock_irqrestore(&vhci->lock, flags);
return rc;
}
#else
#define vhci_bus_suspend NULL
#define vhci_bus_resume NULL
#endif
/* Change a group of bulk endpoints to support multiple stream IDs */
static int vhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags)
{
dev_dbg(&hcd->self.root_hub->dev, "vhci_alloc_streams not implemented\n");
return 0;
}
/* Reverts a group of bulk endpoints back to not using stream IDs. */
static int vhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
dev_dbg(&hcd->self.root_hub->dev, "vhci_free_streams not implemented\n");
return 0;
}
static const struct hc_driver vhci_hc_driver = {
.description = driver_name,
.product_desc = driver_desc,
.hcd_priv_size = sizeof(struct vhci_hcd),
.flags = HCD_USB3 | HCD_SHARED,
.reset = vhci_setup,
.start = vhci_start,
.stop = vhci_stop,
.urb_enqueue = vhci_urb_enqueue,
.urb_dequeue = vhci_urb_dequeue,
.get_frame_number = vhci_get_frame_number,
.hub_status_data = vhci_hub_status,
.hub_control = vhci_hub_control,
.bus_suspend = vhci_bus_suspend,
.bus_resume = vhci_bus_resume,
.alloc_streams = vhci_alloc_streams,
.free_streams = vhci_free_streams,
};
static int vhci_hcd_probe(struct platform_device *pdev)
{
struct vhci *vhci = *((void **)dev_get_platdata(&pdev->dev));
struct usb_hcd *hcd_hs;
struct usb_hcd *hcd_ss;
int ret;
usbip_dbg_vhci_hc("name %s id %d\n", pdev->name, pdev->id);
/*
* Allocate and initialize hcd.
* Our private data is also allocated automatically.
*/
hcd_hs = usb_create_hcd(&vhci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd_hs) {
pr_err("create primary hcd failed\n");
return -ENOMEM;
}
hcd_hs->has_tt = 1;
/*
* Finish generic HCD structure initialization and register.
* Call the driver's reset() and start() routines.
*/
ret = usb_add_hcd(hcd_hs, 0, 0);
if (ret != 0) {
pr_err("usb_add_hcd hs failed %d\n", ret);
goto put_usb2_hcd;
}
hcd_ss = usb_create_shared_hcd(&vhci_hc_driver, &pdev->dev,
dev_name(&pdev->dev), hcd_hs);
if (!hcd_ss) {
ret = -ENOMEM;
pr_err("create shared hcd failed\n");
goto remove_usb2_hcd;
}
ret = usb_add_hcd(hcd_ss, 0, 0);
if (ret) {
pr_err("usb_add_hcd ss failed %d\n", ret);
goto put_usb3_hcd;
}
usbip_dbg_vhci_hc("bye\n");
return 0;
put_usb3_hcd:
usb_put_hcd(hcd_ss);
remove_usb2_hcd:
usb_remove_hcd(hcd_hs);
put_usb2_hcd:
usb_put_hcd(hcd_hs);
vhci->vhci_hcd_hs = NULL;
vhci->vhci_hcd_ss = NULL;
return ret;
}
static void vhci_hcd_remove(struct platform_device *pdev)
{
struct vhci *vhci = *((void **)dev_get_platdata(&pdev->dev));
/*
* Disconnects the root hub,
* then reverses the effects of usb_add_hcd(),
* invoking the HCD's stop() methods.
*/
usb_remove_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_ss));
usb_put_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_ss));
usb_remove_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_hs));
usb_put_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_hs));
vhci->vhci_hcd_hs = NULL;
vhci->vhci_hcd_ss = NULL;
}
#ifdef CONFIG_PM
/* what should happen for USB/IP under suspend/resume? */
static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd;
struct vhci *vhci;
int rhport;
int connected = 0;
int ret = 0;
unsigned long flags;
dev_dbg(&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata(pdev);
if (!hcd)
return 0;
vhci = *((void **)dev_get_platdata(hcd->self.controller));
spin_lock_irqsave(&vhci->lock, flags);
for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
if (vhci->vhci_hcd_hs->port_status[rhport] &
USB_PORT_STAT_CONNECTION)
connected += 1;
if (vhci->vhci_hcd_ss->port_status[rhport] &
USB_PORT_STAT_CONNECTION)
connected += 1;
}
spin_unlock_irqrestore(&vhci->lock, flags);
if (connected > 0) {
dev_info(&pdev->dev,
"We have %d active connection%s. Do not suspend.\n",
connected, (connected == 1 ? "" : "s"));
ret = -EBUSY;
} else {
dev_info(&pdev->dev, "suspend vhci_hcd");
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
}
return ret;
}
static int vhci_hcd_resume(struct platform_device *pdev)
{
struct usb_hcd *hcd;
dev_dbg(&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata(pdev);
if (!hcd)
return 0;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
return 0;
}
#else
#define vhci_hcd_suspend NULL
#define vhci_hcd_resume NULL
#endif
static struct platform_driver vhci_driver = {
.probe = vhci_hcd_probe,
.remove_new = vhci_hcd_remove,
.suspend = vhci_hcd_suspend,
.resume = vhci_hcd_resume,
.driver = {
.name = driver_name,
},
};
static void del_platform_devices(void)
{
struct platform_device *pdev;
int i;
for (i = 0; i < vhci_num_controllers; i++) {
pdev = vhcis[i].pdev;
if (pdev != NULL)
platform_device_unregister(pdev);
vhcis[i].pdev = NULL;
}
sysfs_remove_link(&platform_bus.kobj, driver_name);
}
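/*
 * Module init: allocate one platform device per controller, attach a
 * pointer to its struct vhci as platform data, register the driver and
 * finally add the devices, unwinding each stage on failure.
 */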
static int __init vhci_hcd_init(void)
{
int i, ret;
if (usb_disabled())
return -ENODEV;
if (vhci_num_controllers < 1)
vhci_num_controllers = 1;
vhcis = kcalloc(vhci_num_controllers, sizeof(struct vhci), GFP_KERNEL);
if (vhcis == NULL)
return -ENOMEM;
for (i = 0; i < vhci_num_controllers; i++) {
vhcis[i].pdev = platform_device_alloc(driver_name, i);
if (!vhcis[i].pdev) {
i--;
while (i >= 0)
platform_device_put(vhcis[i--].pdev);
ret = -ENOMEM;
goto err_device_alloc;
}
}
for (i = 0; i < vhci_num_controllers; i++) {
void *vhci = &vhcis[i];
ret = platform_device_add_data(vhcis[i].pdev, &vhci, sizeof(void *));
if (ret)
goto err_driver_register;
}
ret = platform_driver_register(&vhci_driver);
if (ret)
goto err_driver_register;
for (i = 0; i < vhci_num_controllers; i++) {
ret = platform_device_add(vhcis[i].pdev);
if (ret < 0) {
i--;
while (i >= 0)
platform_device_del(vhcis[i--].pdev);
goto err_add_hcd;
}
}
return ret;
err_add_hcd:
platform_driver_unregister(&vhci_driver);
err_driver_register:
for (i = 0; i < vhci_num_controllers; i++)
platform_device_put(vhcis[i].pdev);
err_device_alloc:
kfree(vhcis);
return ret;
}
static void __exit vhci_hcd_exit(void)
{
del_platform_devices();
platform_driver_unregister(&vhci_driver);
kfree(vhcis);
}
module_init(vhci_hcd_init);
module_exit(vhci_hcd_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/usbip/vhci_hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/reset.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
/* u2-port0 should be powered on and enabled; */
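/*
 * Poll (every 100us) until the required status bits come up: up to 20ms
 * for the sys/ref clocks in IP_PW_STS1 and up to 10ms for the u2 MAC
 * clock in IP_PW_STS2.
 */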
int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
{
void __iomem *ibase = ssusb->ippc_base;
u32 value, check_val;
int ret;
check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
SSUSB_REF_RST_B_STS;
ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
(check_val == (value & check_val)), 100, 20000);
if (ret) {
dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
return ret;
}
ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
(value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
if (ret) {
dev_err(ssusb->dev, "mac2 clock is not stable\n");
return ret;
}
return 0;
}
static int wait_for_ip_sleep(struct ssusb_mtk *ssusb)
{
bool sleep_check = true;
u32 value;
int ret;
if (!ssusb->is_host)
sleep_check = ssusb_gadget_ip_sleep_check(ssusb);
if (!sleep_check)
return 0;
	/* wait for the ip to enter sleep mode */
ret = readl_poll_timeout(ssusb->ippc_base + U3D_SSUSB_IP_PW_STS1, value,
(value & SSUSB_IP_SLEEP_STS), 100, 100000);
if (ret) {
dev_err(ssusb->dev, "ip sleep failed!!!\n");
ret = -EBUSY;
} else {
		/* workaround: avoid latching a spurious wakeup signal on some SoCs */
usleep_range(100, 200);
}
return ret;
}
static int ssusb_phy_init(struct ssusb_mtk *ssusb)
{
int i;
int ret;
for (i = 0; i < ssusb->num_phys; i++) {
ret = phy_init(ssusb->phys[i]);
if (ret)
goto exit_phy;
}
return 0;
exit_phy:
for (; i > 0; i--)
phy_exit(ssusb->phys[i - 1]);
return ret;
}
static int ssusb_phy_exit(struct ssusb_mtk *ssusb)
{
int i;
for (i = 0; i < ssusb->num_phys; i++)
phy_exit(ssusb->phys[i]);
return 0;
}
static int ssusb_phy_power_on(struct ssusb_mtk *ssusb)
{
int i;
int ret;
for (i = 0; i < ssusb->num_phys; i++) {
ret = phy_power_on(ssusb->phys[i]);
if (ret)
goto power_off_phy;
}
return 0;
power_off_phy:
for (; i > 0; i--)
phy_power_off(ssusb->phys[i - 1]);
return ret;
}
static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
{
unsigned int i;
for (i = 0; i < ssusb->num_phys; i++)
phy_power_off(ssusb->phys[i]);
}
static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
{
int ret = 0;
ret = regulator_enable(ssusb->vusb33);
if (ret) {
dev_err(ssusb->dev, "failed to enable vusb33\n");
goto vusb33_err;
}
ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
if (ret)
goto clks_err;
ret = ssusb_phy_init(ssusb);
if (ret) {
dev_err(ssusb->dev, "failed to init phy\n");
goto phy_init_err;
}
ret = ssusb_phy_power_on(ssusb);
if (ret) {
dev_err(ssusb->dev, "failed to power on phy\n");
goto phy_err;
}
return 0;
phy_err:
ssusb_phy_exit(ssusb);
phy_init_err:
clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
regulator_disable(ssusb->vusb33);
vusb33_err:
return ret;
}
static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
{
clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
regulator_disable(ssusb->vusb33);
ssusb_phy_power_off(ssusb);
ssusb_phy_exit(ssusb);
}
static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
{
/* reset whole ip (xhci & u3d) */
mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
udelay(1);
mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
/*
	 * The device IP may have been powered on in the firmware/BROM stage
	 * before the kernel starts; power it down here, otherwise ip-sleep
	 * will fail when working in host-only mode.
*/
mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
static void ssusb_u3_drd_check(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
u32 dev_u3p_num;
u32 host_u3p_num;
u32 value;
/* u3 port0 is disabled */
if (ssusb->u3p_dis_msk & BIT(0)) {
otg_sx->is_u3_drd = false;
goto out;
}
value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_DEV_CAP);
dev_u3p_num = SSUSB_IP_DEV_U3_PORT_NUM(value);
value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
host_u3p_num = SSUSB_IP_XHCI_U3_PORT_NUM(value);
otg_sx->is_u3_drd = !!(dev_u3p_num && host_u3p_num);
out:
dev_info(ssusb->dev, "usb3-drd: %d\n", otg_sx->is_u3_drd);
}
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
{
struct device_node *node = pdev->dev.of_node;
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
struct clk_bulk_data *clks = ssusb->clks;
struct device *dev = &pdev->dev;
int i;
int ret;
ssusb->vusb33 = devm_regulator_get(dev, "vusb33");
if (IS_ERR(ssusb->vusb33)) {
dev_err(dev, "failed to get vusb33\n");
return PTR_ERR(ssusb->vusb33);
}
clks[0].id = "sys_ck";
clks[1].id = "ref_ck";
clks[2].id = "mcu_ck";
clks[3].id = "dma_ck";
clks[4].id = "xhci_ck";
clks[5].id = "frmcnt_ck";
ret = devm_clk_bulk_get_optional(dev, BULK_CLKS_CNT, clks);
if (ret)
return ret;
ssusb->num_phys = of_count_phandle_with_args(node,
"phys", "#phy-cells");
if (ssusb->num_phys > 0) {
ssusb->phys = devm_kcalloc(dev, ssusb->num_phys,
sizeof(*ssusb->phys), GFP_KERNEL);
if (!ssusb->phys)
return -ENOMEM;
} else {
ssusb->num_phys = 0;
}
for (i = 0; i < ssusb->num_phys; i++) {
ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
if (IS_ERR(ssusb->phys[i])) {
dev_err(dev, "failed to get phy-%d\n", i);
return PTR_ERR(ssusb->phys[i]);
}
}
ssusb->ippc_base = devm_platform_ioremap_resource_byname(pdev, "ippc");
if (IS_ERR(ssusb->ippc_base))
return PTR_ERR(ssusb->ippc_base);
ssusb->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
if (ssusb->wakeup_irq == -EPROBE_DEFER)
return ssusb->wakeup_irq;
ssusb->dr_mode = usb_get_dr_mode(dev);
if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
ssusb->dr_mode = USB_DR_MODE_OTG;
of_property_read_u32(node, "mediatek,u3p-dis-msk", &ssusb->u3p_dis_msk);
if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
goto out;
/* if host role is supported */
ret = ssusb_wakeup_of_property_parse(ssusb, node);
if (ret) {
dev_err(dev, "failed to parse uwk property\n");
return ret;
}
/* optional property, ignore the error if it does not exist */
of_property_read_u32(node, "mediatek,u2p-dis-msk",
&ssusb->u2p_dis_msk);
otg_sx->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(otg_sx->vbus)) {
dev_err(dev, "failed to get vbus\n");
return PTR_ERR(otg_sx->vbus);
}
if (ssusb->dr_mode == USB_DR_MODE_HOST)
goto out;
/* if dual-role mode is supported */
otg_sx->manual_drd_enabled =
of_property_read_bool(node, "enable-manual-drd");
otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
	/* can't disable port0 when using dual-role mode */
ssusb->u2p_dis_msk &= ~0x1;
if (otg_sx->role_sw_used || otg_sx->manual_drd_enabled)
goto out;
if (of_property_read_bool(node, "extcon")) {
otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
if (IS_ERR(otg_sx->edev)) {
return dev_err_probe(dev, PTR_ERR(otg_sx->edev),
"couldn't get extcon device\n");
}
}
out:
dev_info(dev, "dr_mode: %d, drd: %s\n", ssusb->dr_mode,
otg_sx->manual_drd_enabled ? "manual" : "auto");
dev_info(dev, "u2p_dis_msk: %x, u3p_dis_msk: %x\n",
ssusb->u2p_dis_msk, ssusb->u3p_dis_msk);
return 0;
}
static int mtu3_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct ssusb_mtk *ssusb;
int ret = -ENOMEM;
/* all elements are set to ZERO as default value */
ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
if (!ssusb)
return -ENOMEM;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "No suitable DMA config available\n");
return -ENOTSUPP;
}
platform_set_drvdata(pdev, ssusb);
ssusb->dev = dev;
ret = get_ssusb_rscs(pdev, ssusb);
if (ret)
return ret;
ssusb_debugfs_create_root(ssusb);
/* enable power domain */
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 4000);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
device_init_wakeup(dev, true);
ret = ssusb_rscs_init(ssusb);
if (ret)
goto comm_init_err;
if (ssusb->wakeup_irq > 0) {
ret = dev_pm_set_dedicated_wake_irq_reverse(dev, ssusb->wakeup_irq);
if (ret) {
dev_err(dev, "failed to set wakeup irq %d\n", ssusb->wakeup_irq);
goto comm_exit;
}
dev_info(dev, "wakeup irq %d\n", ssusb->wakeup_irq);
}
ret = device_reset_optional(dev);
if (ret) {
dev_err_probe(dev, ret, "failed to reset controller\n");
goto comm_exit;
}
ssusb_ip_sw_reset(ssusb);
ssusb_u3_drd_check(ssusb);
if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
ssusb->dr_mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET))
ssusb->dr_mode = USB_DR_MODE_PERIPHERAL;
/* default as host */
ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL);
switch (ssusb->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
ret = ssusb_gadget_init(ssusb);
if (ret) {
dev_err(dev, "failed to initialize gadget\n");
goto comm_exit;
}
break;
case USB_DR_MODE_HOST:
ret = ssusb_host_init(ssusb, node);
if (ret) {
dev_err(dev, "failed to initialize host\n");
goto comm_exit;
}
break;
case USB_DR_MODE_OTG:
ret = ssusb_gadget_init(ssusb);
if (ret) {
dev_err(dev, "failed to initialize gadget\n");
goto comm_exit;
}
ret = ssusb_host_init(ssusb, node);
if (ret) {
dev_err(dev, "failed to initialize host\n");
goto gadget_exit;
}
ret = ssusb_otg_switch_init(ssusb);
if (ret) {
dev_err(dev, "failed to initialize switch\n");
goto host_exit;
}
break;
default:
dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
ret = -EINVAL;
goto comm_exit;
}
device_enable_async_suspend(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
pm_runtime_forbid(dev);
return 0;
host_exit:
ssusb_host_exit(ssusb);
gadget_exit:
ssusb_gadget_exit(ssusb);
comm_exit:
ssusb_rscs_exit(ssusb);
comm_init_err:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
ssusb_debugfs_remove_root(ssusb);
return ret;
}
static int mtu3_remove(struct platform_device *pdev)
{
struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
pm_runtime_get_sync(&pdev->dev);
switch (ssusb->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
ssusb_gadget_exit(ssusb);
break;
case USB_DR_MODE_HOST:
ssusb_host_exit(ssusb);
break;
case USB_DR_MODE_OTG:
ssusb_otg_switch_exit(ssusb);
ssusb_gadget_exit(ssusb);
ssusb_host_exit(ssusb);
break;
default:
return -EINVAL;
}
ssusb_rscs_exit(ssusb);
ssusb_debugfs_remove_root(ssusb);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
return 0;
}
static int resume_ip_and_ports(struct ssusb_mtk *ssusb, pm_message_t msg)
{
switch (ssusb->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
ssusb_gadget_resume(ssusb, msg);
break;
case USB_DR_MODE_HOST:
ssusb_host_resume(ssusb, false);
break;
case USB_DR_MODE_OTG:
ssusb_host_resume(ssusb, !ssusb->is_host);
if (!ssusb->is_host)
ssusb_gadget_resume(ssusb, msg);
break;
default:
return -EINVAL;
}
return 0;
}
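/*
 * Common suspend path: quiesce the gadget and/or host side first, wait
 * for the IP to enter sleep, then power off the PHYs, gate the clocks
 * and arm the wakeup source; resume_ip_and_ports() undoes the
 * role-specific part if ip-sleep never happens.
 */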
static int mtu3_suspend_common(struct device *dev, pm_message_t msg)
{
struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
switch (ssusb->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
ret = ssusb_gadget_suspend(ssusb, msg);
if (ret)
goto err;
break;
case USB_DR_MODE_HOST:
ssusb_host_suspend(ssusb);
break;
case USB_DR_MODE_OTG:
if (!ssusb->is_host) {
ret = ssusb_gadget_suspend(ssusb, msg);
if (ret)
goto err;
}
ssusb_host_suspend(ssusb);
break;
default:
return -EINVAL;
}
ret = wait_for_ip_sleep(ssusb);
if (ret)
goto sleep_err;
ssusb_phy_power_off(ssusb);
clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
ssusb_wakeup_set(ssusb, true);
return 0;
sleep_err:
resume_ip_and_ports(ssusb, msg);
err:
return ret;
}
static int mtu3_resume_common(struct device *dev, pm_message_t msg)
{
struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
int ret;
dev_dbg(dev, "%s\n", __func__);
ssusb_wakeup_set(ssusb, false);
ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
if (ret)
goto clks_err;
ret = ssusb_phy_power_on(ssusb);
if (ret)
goto phy_err;
return resume_ip_and_ports(ssusb, msg);
phy_err:
clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
return ret;
}
static int __maybe_unused mtu3_suspend(struct device *dev)
{
return mtu3_suspend_common(dev, PMSG_SUSPEND);
}
static int __maybe_unused mtu3_resume(struct device *dev)
{
return mtu3_resume_common(dev, PMSG_SUSPEND);
}
static int __maybe_unused mtu3_runtime_suspend(struct device *dev)
{
if (!device_may_wakeup(dev))
return 0;
return mtu3_suspend_common(dev, PMSG_AUTO_SUSPEND);
}
static int __maybe_unused mtu3_runtime_resume(struct device *dev)
{
if (!device_may_wakeup(dev))
return 0;
return mtu3_resume_common(dev, PMSG_AUTO_SUSPEND);
}
static const struct dev_pm_ops mtu3_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume)
SET_RUNTIME_PM_OPS(mtu3_runtime_suspend,
mtu3_runtime_resume, NULL)
};
#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL)
static const struct of_device_id mtu3_of_match[] = {
{.compatible = "mediatek,mt8173-mtu3",},
{.compatible = "mediatek,mtu3",},
{},
};
MODULE_DEVICE_TABLE(of, mtu3_of_match);
static struct platform_driver mtu3_driver = {
.probe = mtu3_probe,
.remove = mtu3_remove,
.driver = {
.name = MTU3_DRIVER_NAME,
.pm = DEV_PM_OPS,
.of_match_table = mtu3_of_match,
},
};
module_platform_driver(mtu3_driver);
MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
| linux-master | drivers/usb/mtu3/mtu3_plat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_debugfs.c - debugfs interface
*
* Copyright (C) 2019 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include <linux/uaccess.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
#define dump_register(nm) \
{ \
.name = __stringify(nm), \
.offset = U3D_ ##nm, \
}
#define dump_prb_reg(nm, os) \
{ \
.name = nm, \
.offset = os, \
}
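/*
 * For example, dump_register(LV1ISR) expands to
 * { .name = "LV1ISR", .offset = U3D_LV1ISR }.
 */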
static const struct debugfs_reg32 mtu3_ippc_regs[] = {
dump_register(SSUSB_IP_PW_CTRL0),
dump_register(SSUSB_IP_PW_CTRL1),
dump_register(SSUSB_IP_PW_CTRL2),
dump_register(SSUSB_IP_PW_CTRL3),
dump_register(SSUSB_IP_PW_STS1),
dump_register(SSUSB_OTG_STS),
dump_register(SSUSB_IP_XHCI_CAP),
dump_register(SSUSB_IP_DEV_CAP),
dump_register(SSUSB_U3_CTRL_0P),
dump_register(SSUSB_U2_CTRL_0P),
dump_register(SSUSB_HW_ID),
dump_register(SSUSB_HW_SUB_ID),
dump_register(SSUSB_IP_SPARE0),
};
static const struct debugfs_reg32 mtu3_dev_regs[] = {
dump_register(LV1ISR),
dump_register(LV1IER),
dump_register(EPISR),
dump_register(EPIER),
dump_register(EP0CSR),
dump_register(RXCOUNT0),
dump_register(QISAR0),
dump_register(QIER0),
dump_register(QISAR1),
dump_register(QIER1),
dump_register(CAP_EPNTXFFSZ),
dump_register(CAP_EPNRXFFSZ),
dump_register(CAP_EPINFO),
dump_register(MISC_CTRL),
};
static const struct debugfs_reg32 mtu3_csr_regs[] = {
dump_register(DEVICE_CONF),
dump_register(DEV_LINK_INTR_ENABLE),
dump_register(DEV_LINK_INTR),
dump_register(LTSSM_CTRL),
dump_register(USB3_CONFIG),
dump_register(LINK_STATE_MACHINE),
dump_register(LTSSM_INTR_ENABLE),
dump_register(LTSSM_INTR),
dump_register(U3U2_SWITCH_CTRL),
dump_register(POWER_MANAGEMENT),
dump_register(DEVICE_CONTROL),
dump_register(COMMON_USB_INTR_ENABLE),
dump_register(COMMON_USB_INTR),
dump_register(USB20_MISC_CONTROL),
dump_register(USB20_OPSTATE),
};
static int mtu3_link_state_show(struct seq_file *sf, void *unused)
{
struct mtu3 *mtu = sf->private;
void __iomem *mbase = mtu->mac_base;
seq_printf(sf, "opstate: %#x, ltssm: %#x\n",
mtu3_readl(mbase, U3D_USB20_OPSTATE),
LTSSM_STATE(mtu3_readl(mbase, U3D_LINK_STATE_MACHINE)));
return 0;
}
static int mtu3_ep_used_show(struct seq_file *sf, void *unused)
{
struct mtu3 *mtu = sf->private;
struct mtu3_ep *mep;
unsigned long flags;
int used = 0;
int i;
spin_lock_irqsave(&mtu->lock, flags);
for (i = 0; i < mtu->num_eps; i++) {
mep = mtu->in_eps + i;
if (mep->flags & MTU3_EP_ENABLED) {
seq_printf(sf, "%s - type: %s\n", mep->name, usb_ep_type_string(mep->type));
used++;
}
mep = mtu->out_eps + i;
if (mep->flags & MTU3_EP_ENABLED) {
seq_printf(sf, "%s - type: %s\n", mep->name, usb_ep_type_string(mep->type));
used++;
}
}
seq_printf(sf, "total used: %d eps\n", used);
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mtu3_link_state);
DEFINE_SHOW_ATTRIBUTE(mtu3_ep_used);
static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,
const struct debugfs_reg32 *regs, size_t nregs,
const char *name, struct dentry *parent)
{
struct debugfs_regset32 *regset;
struct mtu3_regset *mregs;
mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);
if (!mregs)
return;
sprintf(mregs->name, "%s", name);
regset = &mregs->regset;
regset->regs = regs;
regset->nregs = nregs;
regset->base = base;
debugfs_create_regset32(mregs->name, 0444, parent, regset);
}
static void mtu3_debugfs_ep_regset(struct mtu3 *mtu, struct mtu3_ep *mep,
struct dentry *parent)
{
struct debugfs_reg32 *regs;
int epnum = mep->epnum;
int in = mep->is_in;
regs = devm_kcalloc(mtu->dev, 7, sizeof(*regs), GFP_KERNEL);
if (!regs)
return;
regs[0].name = in ? "TCR0" : "RCR0";
regs[0].offset = in ? MU3D_EP_TXCR0(epnum) : MU3D_EP_RXCR0(epnum);
regs[1].name = in ? "TCR1" : "RCR1";
regs[1].offset = in ? MU3D_EP_TXCR1(epnum) : MU3D_EP_RXCR1(epnum);
regs[2].name = in ? "TCR2" : "RCR2";
regs[2].offset = in ? MU3D_EP_TXCR2(epnum) : MU3D_EP_RXCR2(epnum);
regs[3].name = in ? "TQHIAR" : "RQHIAR";
regs[3].offset = in ? USB_QMU_TQHIAR(epnum) : USB_QMU_RQHIAR(epnum);
regs[4].name = in ? "TQCSR" : "RQCSR";
regs[4].offset = in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
regs[5].name = in ? "TQSAR" : "RQSAR";
regs[5].offset = in ? USB_QMU_TQSAR(epnum) : USB_QMU_RQSAR(epnum);
regs[6].name = in ? "TQCPR" : "RQCPR";
regs[6].offset = in ? USB_QMU_TQCPR(epnum) : USB_QMU_RQCPR(epnum);
mtu3_debugfs_regset(mtu, mtu->mac_base, regs, 7, "ep-regs", parent);
}
static int mtu3_ep_info_show(struct seq_file *sf, void *unused)
{
struct mtu3_ep *mep = sf->private;
struct mtu3 *mtu = mep->mtu;
unsigned long flags;
spin_lock_irqsave(&mtu->lock, flags);
seq_printf(sf, "ep - type:%s, maxp:%d, slot:%d, flags:%x\n",
usb_ep_type_string(mep->type), mep->maxp, mep->slot, mep->flags);
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
static int mtu3_fifo_show(struct seq_file *sf, void *unused)
{
struct mtu3_ep *mep = sf->private;
struct mtu3 *mtu = mep->mtu;
unsigned long flags;
spin_lock_irqsave(&mtu->lock, flags);
seq_printf(sf, "fifo - seg_size:%d, addr:%d, size:%d\n",
mep->fifo_seg_size, mep->fifo_addr, mep->fifo_size);
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
static int mtu3_qmu_ring_show(struct seq_file *sf, void *unused)
{
struct mtu3_ep *mep = sf->private;
struct mtu3 *mtu = mep->mtu;
struct mtu3_gpd_ring *ring;
unsigned long flags;
ring = &mep->gpd_ring;
spin_lock_irqsave(&mtu->lock, flags);
seq_printf(sf,
"qmu-ring - dma:%pad, start:%p, end:%p, enq:%p, dep:%p\n",
&ring->dma, ring->start, ring->end,
ring->enqueue, ring->dequeue);
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
static int mtu3_qmu_gpd_show(struct seq_file *sf, void *unused)
{
struct mtu3_ep *mep = sf->private;
struct mtu3 *mtu = mep->mtu;
struct mtu3_gpd_ring *ring;
struct qmu_gpd *gpd;
dma_addr_t dma;
unsigned long flags;
int i;
spin_lock_irqsave(&mtu->lock, flags);
ring = &mep->gpd_ring;
gpd = ring->start;
if (!gpd || !(mep->flags & MTU3_EP_ENABLED)) {
seq_puts(sf, "empty!\n");
goto out;
}
for (i = 0; i < MAX_GPD_NUM; i++, gpd++) {
dma = ring->dma + i * sizeof(*gpd);
seq_printf(sf, "gpd.%03d -> %pad, %p: %08x %08x %08x %08x\n",
i, &dma, gpd, gpd->dw0_info, gpd->next_gpd,
gpd->buffer, gpd->dw3_info);
}
out:
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
static const struct mtu3_file_map mtu3_ep_files[] = {
{"ep-info", mtu3_ep_info_show, },
{"fifo", mtu3_fifo_show, },
{"qmu-ring", mtu3_qmu_ring_show, },
{"qmu-gpd", mtu3_qmu_gpd_show, },
};
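/*
 * All per-endpoint debugfs files share one set of fops; the show()
 * callback is looked up from the file's dentry name in mtu3_ep_files[].
 */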
static int mtu3_ep_open(struct inode *inode, struct file *file)
{
const char *file_name = file_dentry(file)->d_iname;
const struct mtu3_file_map *f_map;
int i;
for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
f_map = &mtu3_ep_files[i];
if (strcmp(f_map->name, file_name) == 0)
break;
}
return single_open(file, f_map->show, inode->i_private);
}
static const struct file_operations mtu3_ep_fops = {
.open = mtu3_ep_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct debugfs_reg32 mtu3_prb_regs[] = {
dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
dump_prb_reg("module-sel", U3D_SSUSB_PRB_CTRL3),
dump_prb_reg("sw-out", U3D_SSUSB_PRB_CTRL4),
dump_prb_reg("data", U3D_SSUSB_PRB_CTRL5),
};
static int mtu3_probe_show(struct seq_file *sf, void *unused)
{
const char *file_name = file_dentry(sf->file)->d_iname;
struct mtu3 *mtu = sf->private;
const struct debugfs_reg32 *regs;
int i;
for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
regs = &mtu3_prb_regs[i];
if (strcmp(regs->name, file_name) == 0)
break;
}
seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset,
mtu3_readl(mtu->ippc_base, (u32)regs->offset));
return 0;
}
static int mtu3_probe_open(struct inode *inode, struct file *file)
{
return single_open(file, mtu3_probe_show, inode->i_private);
}
static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
const char *file_name = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtu3 *mtu = sf->private;
const struct debugfs_reg32 *regs;
	char buf[32] = {0};	/* zero-init keeps the user string NUL-terminated */
u32 val;
int i;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
if (kstrtou32(buf, 0, &val))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
regs = &mtu3_prb_regs[i];
if (strcmp(regs->name, file_name) == 0)
break;
}
mtu3_writel(mtu->ippc_base, (u32)regs->offset, val);
return count;
}
static const struct file_operations mtu3_probe_fops = {
.open = mtu3_probe_open,
.write = mtu3_probe_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
{
struct ssusb_mtk *ssusb = mtu->ssusb;
const struct debugfs_reg32 *regs;
struct dentry *dir_prb;
int i;
dir_prb = debugfs_create_dir("probe", ssusb->dbgfs_root);
for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
regs = &mtu3_prb_regs[i];
debugfs_create_file(regs->name, 0644, dir_prb,
mtu, &mtu3_probe_fops);
}
mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs,
ARRAY_SIZE(mtu3_prb_regs), "regs", dir_prb);
}
static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep,
struct dentry *parent)
{
const struct mtu3_file_map *files;
struct dentry *dir_ep;
int i;
dir_ep = debugfs_create_dir(mep->name, parent);
mtu3_debugfs_ep_regset(mep->mtu, mep, dir_ep);
for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
files = &mtu3_ep_files[i];
debugfs_create_file(files->name, 0444, dir_ep,
mep, &mtu3_ep_fops);
}
}
static void mtu3_debugfs_create_ep_dirs(struct mtu3 *mtu)
{
struct ssusb_mtk *ssusb = mtu->ssusb;
struct dentry *dir_eps;
int i;
dir_eps = debugfs_create_dir("eps", ssusb->dbgfs_root);
for (i = 1; i < mtu->num_eps; i++) {
mtu3_debugfs_create_ep_dir(mtu->in_eps + i, dir_eps);
mtu3_debugfs_create_ep_dir(mtu->out_eps + i, dir_eps);
}
}
void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb)
{
struct mtu3 *mtu = ssusb->u3d;
struct dentry *dir_regs;
dir_regs = debugfs_create_dir("regs", ssusb->dbgfs_root);
mtu3_debugfs_regset(mtu, mtu->ippc_base,
mtu3_ippc_regs, ARRAY_SIZE(mtu3_ippc_regs),
"reg-ippc", dir_regs);
mtu3_debugfs_regset(mtu, mtu->mac_base,
mtu3_dev_regs, ARRAY_SIZE(mtu3_dev_regs),
"reg-dev", dir_regs);
mtu3_debugfs_regset(mtu, mtu->mac_base,
mtu3_csr_regs, ARRAY_SIZE(mtu3_csr_regs),
"reg-csr", dir_regs);
mtu3_debugfs_create_ep_dirs(mtu);
mtu3_debugfs_create_prb_files(mtu);
debugfs_create_file("link-state", 0444, ssusb->dbgfs_root,
mtu, &mtu3_link_state_fops);
debugfs_create_file("ep-used", 0444, ssusb->dbgfs_root,
mtu, &mtu3_ep_used_fops);
}
static int ssusb_mode_show(struct seq_file *sf, void *unused)
{
struct ssusb_mtk *ssusb = sf->private;
seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
ssusb->is_host ? "host" : "device",
ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
return 0;
}
static int ssusb_mode_open(struct inode *inode, struct file *file)
{
return single_open(file, ssusb_mode_show, inode->i_private);
}
static ssize_t ssusb_mode_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *sf = file->private_data;
struct ssusb_mtk *ssusb = sf->private;
	char buf[16] = {0};	/* zero-init keeps the user string NUL-terminated */
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
ssusb_mode_switch(ssusb, 1);
} else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
ssusb_mode_switch(ssusb, 0);
} else {
dev_err(ssusb->dev, "wrong or duplicated setting\n");
return -EINVAL;
}
return count;
}
static const struct file_operations ssusb_mode_fops = {
.open = ssusb_mode_open,
.write = ssusb_mode_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int ssusb_vbus_show(struct seq_file *sf, void *unused)
{
struct ssusb_mtk *ssusb = sf->private;
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
return 0;
}
static int ssusb_vbus_open(struct inode *inode, struct file *file)
{
return single_open(file, ssusb_vbus_show, inode->i_private);
}
static ssize_t ssusb_vbus_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *sf = file->private_data;
struct ssusb_mtk *ssusb = sf->private;
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
	char buf[16];
	size_t len;
	bool enable;
	len = min_t(size_t, sizeof(buf) - 1, count);
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	buf[len] = '\0';	/* kstrtobool() expects a NUL-terminated string */
if (kstrtobool(buf, &enable)) {
dev_err(ssusb->dev, "wrong setting\n");
return -EINVAL;
}
ssusb_set_vbus(otg_sx, enable);
return count;
}
static const struct file_operations ssusb_vbus_fops = {
.open = ssusb_vbus_open,
.write = ssusb_vbus_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
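/*
 * Example usage from userspace (the directory name depends on the platform
 * device, see ssusb_debugfs_create_root() below):
 *   echo host > /sys/kernel/debug/usb/<dev-name>/mode
 *   echo on   > /sys/kernel/debug/usb/<dev-name>/vbus
 */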
void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb)
{
struct dentry *root = ssusb->dbgfs_root;
debugfs_create_file("mode", 0644, root, ssusb, &ssusb_mode_fops);
debugfs_create_file("vbus", 0644, root, ssusb, &ssusb_vbus_fops);
}
void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb)
{
ssusb->dbgfs_root =
debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
}
void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb)
{
debugfs_remove_recursive(ssusb->dbgfs_root);
ssusb->dbgfs_root = NULL;
}
| linux-master | drivers/usb/mtu3/mtu3_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
*
* Copyright (c) 2016 MediaTek Inc.
*
* Author: Chunfeng.Yun <[email protected]>
*/
#include <linux/iopoll.h>
#include <linux/usb/composite.h>
#include "mtu3.h"
#include "mtu3_debug.h"
#include "mtu3_trace.h"
/* ep0 is always mtu3->in_eps[0] */
#define next_ep0_request(mtu) next_request((mtu)->ep0)
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 mtu3_test_packet[53] = {
/* implicit SYNC then DATA0 to start */
/* JKJKJKJK x9 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* JJKKJJKK x8 */
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
/* JJJJKKKK x8 */
0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
/* JJJJJJJKKKKKKK x8 */
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
/* JJJJJJJK x8 */
0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
/* JKKKKKKK x10, JK */
0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e,
/* implicit CRC16 then EOP to end */
};
static char *decode_ep0_state(struct mtu3 *mtu)
{
switch (mtu->ep0_state) {
case MU3D_EP0_STATE_SETUP:
return "SETUP";
case MU3D_EP0_STATE_TX:
return "IN";
case MU3D_EP0_STATE_RX:
return "OUT";
case MU3D_EP0_STATE_TX_END:
return "TX-END";
case MU3D_EP0_STATE_STALL:
return "STALL";
default:
return "??";
}
}
static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
{
mtu3_req_complete(mtu->ep0, req, 0);
}
static int
forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
__releases(mtu->lock)
__acquires(mtu->lock)
{
int ret;
if (!mtu->gadget_driver || !mtu->async_callbacks)
return -EOPNOTSUPP;
spin_unlock(&mtu->lock);
ret = mtu->gadget_driver->setup(&mtu->g, setup);
spin_lock(&mtu->lock);
dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret);
return ret;
}
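/*
 * Write @len bytes from @src into the ep0 FIFO: 32-bit chunks first,
 * then the remaining 16-bit and/or 8-bit tail.
 */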
static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len)
{
void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
u16 index = 0;
dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n",
__func__, mep->epnum, len, src);
if (len >= 4) {
iowrite32_rep(fifo, src, len >> 2);
index = len & ~0x03;
}
if (len & 0x02) {
writew(*(u16 *)&src[index], fifo);
index += 2;
}
if (len & 0x01)
writeb(src[index], fifo);
}
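/*
 * Read @len bytes from the ep0 FIFO into @dst: 32-bit chunks first,
 * then copy out the 1~3 byte remainder from a final 32-bit read.
 */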
static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len)
{
void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
u32 value;
u16 index = 0;
dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n",
__func__, mep->epnum, len, dst);
if (len >= 4) {
ioread32_rep(fifo, dst, len >> 2);
index = len & ~0x03;
}
if (len & 0x3) {
value = readl(fifo);
memcpy(&dst[index], &value, len & 0x3);
}
}
static void ep0_load_test_packet(struct mtu3 *mtu)
{
	/*
	 * Because the test packet is shorter than the HS ep0 max packet
	 * size, write it into the FIFO directly.
	 */
ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet));
}
/*
* A. send STALL for setup transfer without data stage:
* set SENDSTALL and SETUPPKTRDY at the same time;
* B. send STALL for other cases:
* set SENDSTALL only.
*/
static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
{
struct mtu3 *mtu = mep0->mtu;
void __iomem *mbase = mtu->mac_base;
u32 csr;
/* EP0_SENTSTALL is W1C */
csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
if (set)
csr |= EP0_SENDSTALL | pktrdy;
else
csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
mtu->delayed_status = false;
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n",
set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}
static void ep0_do_status_stage(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
u32 value;
value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
}
static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
{}
static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
struct mtu3_request *mreq;
struct mtu3 *mtu;
struct usb_set_sel_req sel;
memcpy(&sel, req->buf, sizeof(sel));
mreq = to_mtu3_request(req);
mtu = mreq->mtu;
dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n",
sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel);
}
/* queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
int ret;
u16 length = le16_to_cpu(setup->wLength);
if (unlikely(length != 6)) {
dev_err(mtu->dev, "%s wrong wLength:%d\n",
__func__, length);
return -EINVAL;
}
mtu->ep0_req.mep = mtu->ep0;
mtu->ep0_req.request.length = 6;
mtu->ep0_req.request.buf = mtu->setup_buf;
mtu->ep0_req.request.complete = ep0_set_sel_complete;
ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
return ret < 0 ? ret : 1;
}
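/*
 * Handle GET_STATUS for the device, interface and endpoint recipients;
 * interface requests (function remote wakeup) and class/vendor requests
 * are delegated to the gadget driver by returning 0.
 */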
static int
ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
{
struct mtu3_ep *mep = NULL;
int handled = 1;
u8 result[2] = {0, 0};
u8 epnum = 0;
int is_in;
switch (setup->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
if (mtu->g.speed >= USB_SPEED_SUPER) {
result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
}
dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__,
result[0], mtu->u1_enable, mtu->u2_enable);
break;
case USB_RECIP_INTERFACE:
/* status of function remote wakeup, forward request */
handled = 0;
break;
case USB_RECIP_ENDPOINT:
epnum = (u8) le16_to_cpu(setup->wIndex);
is_in = epnum & USB_DIR_IN;
epnum &= USB_ENDPOINT_NUMBER_MASK;
if (epnum >= mtu->num_eps) {
handled = -EINVAL;
break;
}
if (!epnum)
break;
mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
if (!mep->desc) {
handled = -EINVAL;
break;
}
if (mep->flags & MTU3_EP_STALL)
result[0] |= 1 << USB_ENDPOINT_HALT;
break;
default:
/* class, vendor, etc ... delegate */
handled = 0;
break;
}
if (handled > 0) {
int ret;
/* prepare a data stage for GET_STATUS */
dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result);
memcpy(mtu->setup_buf, result, sizeof(result));
mtu->ep0_req.mep = mtu->ep0;
mtu->ep0_req.request.length = 2;
mtu->ep0_req.request.buf = &mtu->setup_buf;
mtu->ep0_req.request.complete = ep0_dummy_complete;
ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
if (ret < 0)
handled = ret;
}
return handled;
}
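/*
 * Select one of the USB 2.0 test modes (see USB 2.0 spec 7.1.20):
 * preload the test packet if needed, complete the status stage, then
 * write the test mode to the controller once the host has acked.
 */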
static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
void __iomem *mbase = mtu->mac_base;
int handled = 1;
u32 value;
switch (le16_to_cpu(setup->wIndex) >> 8) {
case USB_TEST_J:
dev_dbg(mtu->dev, "USB_TEST_J\n");
mtu->test_mode_nr = TEST_J_MODE;
break;
case USB_TEST_K:
dev_dbg(mtu->dev, "USB_TEST_K\n");
mtu->test_mode_nr = TEST_K_MODE;
break;
case USB_TEST_SE0_NAK:
dev_dbg(mtu->dev, "USB_TEST_SE0_NAK\n");
mtu->test_mode_nr = TEST_SE0_NAK_MODE;
break;
case USB_TEST_PACKET:
dev_dbg(mtu->dev, "USB_TEST_PACKET\n");
mtu->test_mode_nr = TEST_PACKET_MODE;
break;
default:
handled = -EINVAL;
goto out;
}
mtu->test_mode = true;
	/* no TX completion interrupt; the platform must be restarted after the test */
if (mtu->test_mode_nr == TEST_PACKET_MODE)
ep0_load_test_packet(mtu);
/* send status before entering test mode. */
ep0_do_status_stage(mtu);
/* wait for ACK status sent by host */
readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
!(value & EP0_DATAEND), 100, 5000);
mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr);
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
out:
return handled;
}
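/*
 * Handle {SET,CLEAR}_FEATURE with the device as recipient: remote wakeup,
 * high-speed test modes, and the U1/U2 low-power link states.
 */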
static int ep0_handle_feature_dev(struct mtu3 *mtu,
struct usb_ctrlrequest *setup, bool set)
{
void __iomem *mbase = mtu->mac_base;
int handled = -EINVAL;
u32 lpc;
switch (le16_to_cpu(setup->wValue)) {
case USB_DEVICE_REMOTE_WAKEUP:
mtu->may_wakeup = !!set;
handled = 1;
break;
case USB_DEVICE_TEST_MODE:
if (!set || (mtu->g.speed != USB_SPEED_HIGH) ||
(le16_to_cpu(setup->wIndex) & 0xff))
break;
handled = handle_test_mode(mtu, setup);
break;
case USB_DEVICE_U1_ENABLE:
if (mtu->g.speed < USB_SPEED_SUPER ||
mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
if (set)
lpc |= SW_U1_REQUEST_ENABLE;
else
lpc &= ~SW_U1_REQUEST_ENABLE;
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
mtu->u1_enable = !!set;
handled = 1;
break;
case USB_DEVICE_U2_ENABLE:
if (mtu->g.speed < USB_SPEED_SUPER ||
mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
if (set)
lpc |= SW_U2_REQUEST_ENABLE;
else
lpc &= ~SW_U2_REQUEST_ENABLE;
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
mtu->u2_enable = !!set;
handled = 1;
break;
default:
handled = -EINVAL;
break;
}
return handled;
}
static int ep0_handle_feature(struct mtu3 *mtu,
struct usb_ctrlrequest *setup, bool set)
{
struct mtu3_ep *mep;
int handled = -EINVAL;
int is_in;
u16 value;
u16 index;
u8 epnum;
value = le16_to_cpu(setup->wValue);
index = le16_to_cpu(setup->wIndex);
switch (setup->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
handled = ep0_handle_feature_dev(mtu, setup, set);
break;
case USB_RECIP_INTERFACE:
/* superspeed only */
if (value == USB_INTRF_FUNC_SUSPEND &&
mtu->g.speed >= USB_SPEED_SUPER) {
/* forward the request for function suspend */
mtu->may_wakeup = !!(index & USB_INTRF_FUNC_SUSPEND_RW);
handled = 0;
}
break;
case USB_RECIP_ENDPOINT:
epnum = index & USB_ENDPOINT_NUMBER_MASK;
if (epnum == 0 || epnum >= mtu->num_eps ||
value != USB_ENDPOINT_HALT)
break;
is_in = index & USB_DIR_IN;
mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
if (!mep->desc)
break;
handled = 1;
/* ignore request if endpoint is wedged */
if (mep->flags & MTU3_EP_WEDGE)
break;
mtu3_ep_stall_set(mep, set);
break;
default:
/* class, vendor, etc ... delegate */
handled = 0;
break;
}
return handled;
}
/*
 * Handle all control requests that can be handled here.
 * Returns:
 *	negative errno - an error happened
 *	zero - the SETUP must be delegated to the gadget driver
 *	positive - already handled
 */
static int handle_standard_request(struct mtu3 *mtu,
struct usb_ctrlrequest *setup)
{
void __iomem *mbase = mtu->mac_base;
enum usb_device_state state = mtu->g.state;
int handled = -EINVAL;
u32 dev_conf;
u16 value;
value = le16_to_cpu(setup->wValue);
/* the gadget driver handles everything except what we must handle */
switch (setup->bRequest) {
case USB_REQ_SET_ADDRESS:
/* change it after the status stage */
mtu->address = (u8) (value & 0x7f);
dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);
dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
dev_conf &= ~DEV_ADDR_MSK;
dev_conf |= DEV_ADDR(mtu->address);
mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);
if (mtu->address)
usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
else
usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);
handled = 1;
break;
case USB_REQ_SET_CONFIGURATION:
if (state == USB_STATE_ADDRESS) {
usb_gadget_set_state(&mtu->g,
USB_STATE_CONFIGURED);
} else if (state == USB_STATE_CONFIGURED) {
/*
* USB2 spec sec 9.4.7, if wValue is 0 then dev
* is moved to addressed state
*/
if (!value)
usb_gadget_set_state(&mtu->g,
USB_STATE_ADDRESS);
}
handled = 0;
break;
case USB_REQ_CLEAR_FEATURE:
handled = ep0_handle_feature(mtu, setup, 0);
break;
case USB_REQ_SET_FEATURE:
handled = ep0_handle_feature(mtu, setup, 1);
break;
case USB_REQ_GET_STATUS:
handled = ep0_get_status(mtu, setup);
break;
case USB_REQ_SET_SEL:
handled = ep0_set_sel(mtu, setup);
break;
case USB_REQ_SET_ISOCH_DELAY:
handled = 1;
break;
default:
/* delegate SET_CONFIGURATION, etc */
handled = 0;
}
return handled;
}
/* receive a data packet (OUT) */
static void ep0_rx_state(struct mtu3 *mtu)
{
struct mtu3_request *mreq;
struct usb_request *req;
void __iomem *mbase = mtu->mac_base;
u32 maxp;
u32 csr;
u16 count = 0;
dev_dbg(mtu->dev, "%s\n", __func__);
csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
mreq = next_ep0_request(mtu);
req = &mreq->request;
/* read packet and ack; or stall because of gadget driver bug */
if (req) {
void *buf = req->buf + req->actual;
unsigned int len = req->length - req->actual;
/* read the buffer */
count = mtu3_readl(mbase, U3D_RXCOUNT0);
if (count > len) {
req->status = -EOVERFLOW;
count = len;
}
ep0_read_fifo(mtu->ep0, buf, count);
req->actual += count;
csr |= EP0_RXPKTRDY;
maxp = mtu->g.ep0->maxpacket;
if (count < maxp || req->actual == req->length) {
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
dev_dbg(mtu->dev, "ep0 state: %s\n",
decode_ep0_state(mtu));
csr |= EP0_DATAEND;
} else {
req = NULL;
}
} else {
csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
}
mtu3_writel(mbase, U3D_EP0CSR, csr);
	/* give back the request if all data has been received */
if (req)
ep0_req_giveback(mtu, req);
}
/* transmitting to the host (IN) */
static void ep0_tx_state(struct mtu3 *mtu)
{
struct mtu3_request *mreq = next_ep0_request(mtu);
struct usb_request *req;
u32 csr;
u8 *src;
u32 count;
u32 maxp;
dev_dbg(mtu->dev, "%s\n", __func__);
if (!mreq)
return;
maxp = mtu->g.ep0->maxpacket;
req = &mreq->request;
/* load the data */
src = (u8 *)req->buf + req->actual;
count = min(maxp, req->length - req->actual);
if (count)
ep0_write_fifo(mtu->ep0, src, count);
dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n",
__func__, req->actual, req->length, count, maxp, req->zero);
req->actual += count;
if ((count < maxp)
|| ((req->actual == req->length) && !req->zero))
mtu->ep0_state = MU3D_EP0_STATE_TX_END;
/* send it out, triggering a "txpktrdy cleared" irq */
csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY);
dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__,
mtu3_readl(mtu->mac_base, U3D_EP0CSR));
}
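/*
 * Read the 8-byte SETUP packet from the FIFO, give back any leftover
 * request, then ack the packet and, when a data stage follows, set the
 * data-phase direction and move ep0_state to TX (IN) or RX (OUT).
 */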
static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
{
struct mtu3_request *mreq;
u32 count;
u32 csr;
csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0);
ep0_read_fifo(mtu->ep0, (u8 *)setup, count);
dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n",
setup->bRequestType, setup->bRequest,
le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex),
le16_to_cpu(setup->wLength));
/* clean up any leftover transfers */
mreq = next_ep0_request(mtu);
if (mreq)
ep0_req_giveback(mtu, &mreq->request);
if (le16_to_cpu(setup->wLength) == 0) {
; /* no data stage, nothing to do */
} else if (setup->bRequestType & USB_DIR_IN) {
mtu3_writel(mtu->mac_base, U3D_EP0CSR,
csr | EP0_SETUPPKTRDY | EP0_DPHTX);
mtu->ep0_state = MU3D_EP0_STATE_TX;
} else {
mtu3_writel(mtu->mac_base, U3D_EP0CSR,
(csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX));
mtu->ep0_state = MU3D_EP0_STATE_RX;
}
}
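/*
 * Decode a SETUP packet: try the standard-request handlers first and
 * forward anything unhandled to the gadget driver; stall ep0 on error,
 * otherwise run (or defer) the status stage when there is no data stage.
 */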
static int ep0_handle_setup(struct mtu3 *mtu)
__releases(mtu->lock)
__acquires(mtu->lock)
{
struct usb_ctrlrequest setup;
struct mtu3_request *mreq;
int handled = 0;
ep0_read_setup(mtu, &setup);
trace_mtu3_handle_setup(&setup);
if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
handled = handle_standard_request(mtu, &setup);
dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n",
handled, decode_ep0_state(mtu));
if (handled < 0)
goto stall;
else if (handled > 0)
goto finish;
handled = forward_to_driver(mtu, &setup);
if (handled < 0) {
stall:
dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled);
ep0_stall_set(mtu->ep0, true,
le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY);
return 0;
}
finish:
if (mtu->test_mode) {
; /* nothing to do */
} else if (handled == USB_GADGET_DELAYED_STATUS) {
mreq = next_ep0_request(mtu);
if (mreq) {
/* already asked us to continue delayed status */
ep0_do_status_stage(mtu);
ep0_req_giveback(mtu, &mreq->request);
} else {
			/* defer the STATUS stage until ep0_queue() is called */
mtu->delayed_status = true;
}
} else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
ep0_do_status_stage(mtu);
/* complete zlp request directly */
mreq = next_ep0_request(mtu);
if (mreq && !mreq->request.length)
ep0_req_giveback(mtu, &mreq->request);
}
return 0;
}
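/*
 * ep0 interrupt handler: clears a sent STALL if needed, then dispatches
 * on the current ep0_state (SETUP/TX/RX/TX-END).
 */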
irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
struct mtu3_request *mreq;
u32 int_status;
irqreturn_t ret = IRQ_NONE;
u32 csr;
u32 len;
int_status = mtu3_readl(mbase, U3D_EPISR);
int_status &= mtu3_readl(mbase, U3D_EPIER);
mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */
	/* only handle ep0 interrupts */
if (!(int_status & (EP0ISR | SETUPENDISR)))
return IRQ_NONE;
/* abort current SETUP, and process new one */
if (int_status & SETUPENDISR)
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
csr = mtu3_readl(mbase, U3D_EP0CSR);
dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);
	/* we sent a stall; need to clear it now */
if (csr & EP0_SENTSTALL) {
ep0_stall_set(mtu->ep0, false, 0);
csr = mtu3_readl(mbase, U3D_EP0CSR);
ret = IRQ_HANDLED;
}
dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
mtu3_dbg_trace(mtu->dev, "ep0_state %s", decode_ep0_state(mtu));
switch (mtu->ep0_state) {
case MU3D_EP0_STATE_TX:
/* irq on clearing txpktrdy */
if ((csr & EP0_FIFOFULL) == 0) {
ep0_tx_state(mtu);
ret = IRQ_HANDLED;
}
break;
case MU3D_EP0_STATE_RX:
/* irq on set rxpktrdy */
if (csr & EP0_RXPKTRDY) {
ep0_rx_state(mtu);
ret = IRQ_HANDLED;
}
break;
case MU3D_EP0_STATE_TX_END:
mtu3_writel(mbase, U3D_EP0CSR,
(csr & EP0_W1C_BITS) | EP0_DATAEND);
mreq = next_ep0_request(mtu);
if (mreq)
ep0_req_giveback(mtu, &mreq->request);
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
ret = IRQ_HANDLED;
dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
break;
case MU3D_EP0_STATE_SETUP:
if (!(csr & EP0_SETUPPKTRDY))
break;
len = mtu3_readl(mbase, U3D_RXCOUNT0);
if (len != 8) {
dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len);
break;
}
ep0_handle_setup(mtu);
ret = IRQ_HANDLED;
break;
default:
/* can't happen */
ep0_stall_set(mtu->ep0, true, 0);
WARN_ON(1);
break;
}
return ret;
}
static int mtu3_ep0_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
/* always enabled */
return -EINVAL;
}
static int mtu3_ep0_disable(struct usb_ep *ep)
{
/* always enabled */
return -EINVAL;
}
static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
struct mtu3 *mtu = mep->mtu;
mreq->mtu = mtu;
mreq->request.actual = 0;
mreq->request.status = -EINPROGRESS;
dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__,
mep->name, decode_ep0_state(mtu), mreq->request.length);
switch (mtu->ep0_state) {
case MU3D_EP0_STATE_SETUP:
case MU3D_EP0_STATE_RX: /* control-OUT data */
case MU3D_EP0_STATE_TX: /* control-IN data */
break;
default:
dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__,
decode_ep0_state(mtu));
return -EINVAL;
}
if (mtu->delayed_status) {
mtu->delayed_status = false;
ep0_do_status_stage(mtu);
		/* no need to give back the request used for the delayed STATUS */
return 0;
}
if (!list_empty(&mep->req_list))
return -EBUSY;
list_add_tail(&mreq->list, &mep->req_list);
/* sequence #1, IN ... start writing the data */
if (mtu->ep0_state == MU3D_EP0_STATE_TX)
ep0_tx_state(mtu);
return 0;
}
static int mtu3_ep0_queue(struct usb_ep *ep,
struct usb_request *req, gfp_t gfp)
{
struct mtu3_ep *mep;
struct mtu3_request *mreq;
struct mtu3 *mtu;
unsigned long flags;
int ret = 0;
if (!ep || !req)
return -EINVAL;
mep = to_mtu3_ep(ep);
mtu = mep->mtu;
mreq = to_mtu3_request(req);
spin_lock_irqsave(&mtu->lock, flags);
ret = ep0_queue(mep, mreq);
spin_unlock_irqrestore(&mtu->lock, flags);
return ret;
}
static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
/* we just won't support this */
return -EINVAL;
}
static int mtu3_ep0_halt(struct usb_ep *ep, int value)
{
struct mtu3_ep *mep;
struct mtu3 *mtu;
unsigned long flags;
int ret = 0;
if (!ep || !value)
return -EINVAL;
mep = to_mtu3_ep(ep);
mtu = mep->mtu;
dev_dbg(mtu->dev, "%s\n", __func__);
spin_lock_irqsave(&mtu->lock, flags);
if (!list_empty(&mep->req_list)) {
ret = -EBUSY;
goto cleanup;
}
switch (mtu->ep0_state) {
/*
	 * stalls are usually issued after parsing the SETUP packet, either
* directly in irq context from setup() or else later.
*/
case MU3D_EP0_STATE_TX:
case MU3D_EP0_STATE_TX_END:
case MU3D_EP0_STATE_RX:
case MU3D_EP0_STATE_SETUP:
ep0_stall_set(mtu->ep0, true, 0);
break;
default:
dev_dbg(mtu->dev, "ep0 can't halt in state %s\n",
decode_ep0_state(mtu));
ret = -EINVAL;
}
cleanup:
spin_unlock_irqrestore(&mtu->lock, flags);
return ret;
}
const struct usb_ep_ops mtu3_ep0_ops = {
.enable = mtu3_ep0_enable,
.disable = mtu3_ep0_disable,
.alloc_request = mtu3_alloc_request,
.free_request = mtu3_free_request,
.queue = mtu3_ep0_queue,
.dequeue = mtu3_ep0_dequeue,
.set_halt = mtu3_ep0_halt,
};
| linux-master | drivers/usb/mtu3/mtu3_gadget_ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_core.c - hardware access layer and gadget init/exit of
* MediaTek usb3 Dual-Role Controller Driver
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
#include "mtu3_trace.h"
static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
{
struct mtu3_fifo_info *fifo = mep->fifo;
u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
u32 start_bit;
	/* ensure that @mep->fifo_seg_size is a power of two */
num_bits = roundup_pow_of_two(num_bits);
if (num_bits > fifo->limit)
return -EINVAL;
mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
num_bits = num_bits * (mep->slot + 1);
start_bit = bitmap_find_next_zero_area(fifo->bitmap,
fifo->limit, 0, num_bits, 0);
if (start_bit >= fifo->limit)
return -EOVERFLOW;
bitmap_set(fifo->bitmap, start_bit, num_bits);
mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;
dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
__func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
return mep->fifo_addr;
}
static void ep_fifo_free(struct mtu3_ep *mep)
{
struct mtu3_fifo_info *fifo = mep->fifo;
u32 addr = mep->fifo_addr;
u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT;
u32 start_bit;
if (unlikely(addr < fifo->base || bits > fifo->limit))
return;
start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT;
bitmap_clear(fifo->bitmap, start_bit, bits);
mep->fifo_size = 0;
mep->fifo_seg_size = 0;
dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",
__func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
}
/* enable/disable U3D SS function */
static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)
{
/* If usb3_en==0, LTSSM will go to SS.Disable state */
if (enable)
mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
else
mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);
}
/* set/clear U3D HS device soft connect */
static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)
{
if (enable) {
mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
SOFT_CONN | SUSPENDM_ENABLE);
} else {
mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
SOFT_CONN | SUSPENDM_ENABLE);
}
dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);
}
/* only port0 of U2/U3 supports device mode */
static int mtu3_device_enable(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
u32 check_clk = 0;
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
if (mtu->u3_capable) {
check_clk = SSUSB_U3_MAC_RST_B_STS;
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
SSUSB_U3_PORT_HOST_SEL));
}
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
SSUSB_U2_PORT_HOST_SEL));
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_DUAL_MODE);
}
return ssusb_check_clocks(mtu->ssusb, check_clk);
}
static void mtu3_device_disable(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
if (mtu->u3_capable)
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_DUAL_MODE);
}
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
static void mtu3_dev_power_on(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
if (mtu->u3_capable)
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
}
static void mtu3_dev_power_down(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
/* reset U3D's device module. */
static void mtu3_device_reset(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
udelay(1);
mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
}
static void mtu3_intr_status_clear(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
	/* Clear EP0 and Tx/Rx EPn interrupt status */
	mtu3_writel(mbase, U3D_EPISR, ~0x0);
	/* Clear U2 USB common interrupt status */
	mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0);
	/* Clear U3 LTSSM interrupt status */
mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0);
/* Clear speed change interrupt status */
mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0);
/* Clear QMU interrupt status */
mtu3_writel(mbase, U3D_QISAR0, ~0x0);
}
/* disable all interrupts */
static void mtu3_intr_disable(struct mtu3 *mtu)
{
/* Disable level 1 interrupts */
mtu3_writel(mtu->mac_base, U3D_LV1IECR, ~0x0);
/* Disable endpoint interrupts */
mtu3_writel(mtu->mac_base, U3D_EPIECR, ~0x0);
mtu3_intr_status_clear(mtu);
}
/* enable system global interrupt */
static void mtu3_intr_enable(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
u32 value;
	/* Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */
value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
mtu3_writel(mbase, U3D_LV1IESR, value);
/* Enable U2 common USB interrupts */
value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
if (mtu->u3_capable) {
/* Enable U3 LTSSM interrupts */
value = HOT_RST_INTR | WARM_RST_INTR |
ENTER_U3_INTR | EXIT_U3_INTR;
mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
}
/* Enable QMU interrupts. */
value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
RXQ_LENERR_INT | RXQ_ZLPERR_INT;
mtu3_writel(mbase, U3D_QIESR1, value);
/* Enable speed change interrupt */
mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
}
static void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed)
{
void __iomem *mbase = mtu->mac_base;
if (speed > mtu->max_speed)
speed = mtu->max_speed;
switch (speed) {
case USB_SPEED_FULL:
/* disable U3 SS function */
mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
/* disable HS function */
mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
break;
case USB_SPEED_HIGH:
mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
/* HS/FS detected by HW */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
break;
case USB_SPEED_SUPER:
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_SSP_SPEED);
break;
case USB_SPEED_SUPER_PLUS:
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
mtu3_setbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_SSP_SPEED);
break;
default:
dev_err(mtu->dev, "invalid speed: %s\n",
usb_speed_string(speed));
return;
}
mtu->speed = speed;
dev_dbg(mtu->dev, "set speed: %s\n", usb_speed_string(speed));
}
/* CSR registers will be reset to default value if port is disabled */
static void mtu3_csr_init(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
if (mtu->u3_capable) {
/* disable LGO_U1/U2 by default */
mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
		/* accept LGO_U1/U2 link commands from the host */
		mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
				SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
		/* the device responds to u3_exit from the host automatically */
		mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
		/* automatically build a U2 link when U3 detection fails */
		mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
		/* auto-clear SOFT_CONN when USB3_EN is cleared while working as HS */
		mtu3_setbits(mbase, U3D_U3U2_SWITCH_CTRL, SOFTCON_CLR_AUTO_EN);
	}
	/* delay about 0.1us from detecting reset to sending chirp-K */
	mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
	/* enable automatic hardware remote wakeup (HWRW) from L1 */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
}
/* reset: u2 - data toggle, u3 - SeqN, flow control status etc */
static void mtu3_ep_reset(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
u32 rst_bit = EP_RST(mep->is_in, mep->epnum);
mtu3_setbits(mtu->mac_base, U3D_EP_RST, rst_bit);
mtu3_clrbits(mtu->mac_base, U3D_EP_RST, rst_bit);
}
/* set/clear the stall and toggle bits for non-ep0 */
void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
{
struct mtu3 *mtu = mep->mtu;
void __iomem *mbase = mtu->mac_base;
u8 epnum = mep->epnum;
u32 csr;
if (mep->is_in) { /* TX */
csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS;
if (set)
csr |= TX_SENDSTALL;
else
csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL;
mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr);
} else { /* RX */
csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS;
if (set)
csr |= RX_SENDSTALL;
else
csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL;
mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr);
}
if (!set) {
mtu3_ep_reset(mep);
mep->flags &= ~MTU3_EP_STALL;
} else {
mep->flags |= MTU3_EP_STALL;
}
dev_dbg(mtu->dev, "%s: %s\n", mep->name,
set ? "SEND STALL" : "CLEAR STALL, with EP RESET");
}
void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
{
if (mtu->u3_capable && mtu->speed >= USB_SPEED_SUPER)
mtu3_ss_func_set(mtu, is_on);
else
mtu3_hs_softconn_set(mtu, is_on);
dev_info(mtu->dev, "gadget (%s) pullup D%s\n",
usb_speed_string(mtu->speed), is_on ? "+" : "-");
}
void mtu3_start(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__,
mtu3_readl(mbase, U3D_DEVICE_CONTROL));
mtu3_dev_power_on(mtu);
mtu3_csr_init(mtu);
mtu3_set_speed(mtu, mtu->speed);
/* Initialize the default interrupts */
mtu3_intr_enable(mtu);
mtu->is_active = 1;
if (mtu->softconnect)
mtu3_dev_on_off(mtu, 1);
}
void mtu3_stop(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "%s\n", __func__);
mtu3_intr_disable(mtu);
if (mtu->softconnect)
mtu3_dev_on_off(mtu, 0);
mtu->is_active = 0;
mtu3_dev_power_down(mtu);
}
static void mtu3_dev_suspend(struct mtu3 *mtu)
{
if (!mtu->is_active)
return;
mtu3_intr_disable(mtu);
mtu3_dev_power_down(mtu);
}
static void mtu3_dev_resume(struct mtu3 *mtu)
{
if (!mtu->is_active)
return;
mtu3_dev_power_on(mtu);
mtu3_intr_enable(mtu);
}
/* for non-ep0 */
int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
int interval, int burst, int mult)
{
void __iomem *mbase = mtu->mac_base;
bool gen2cp = mtu->gen2cp;
int epnum = mep->epnum;
u32 csr0, csr1, csr2;
int fifo_sgsz, fifo_addr;
int num_pkts;
fifo_addr = ep_fifo_alloc(mep, mep->maxp);
if (fifo_addr < 0) {
dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp);
return -ENOMEM;
}
fifo_sgsz = ilog2(mep->fifo_seg_size);
dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz,
mep->fifo_seg_size, mep->fifo_size);
if (mep->is_in) {
csr0 = TX_TXMAXPKTSZ(mep->maxp);
csr0 |= TX_DMAREQEN;
num_pkts = (burst + 1) * (mult + 1) - 1;
csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
csr1 |= TX_MAX_PKT(gen2cp, num_pkts) | TX_MULT(gen2cp, mult);
csr2 = TX_FIFOADDR(fifo_addr >> 4);
csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
switch (mep->type) {
case USB_ENDPOINT_XFER_BULK:
csr1 |= TX_TYPE(TYPE_BULK);
break;
case USB_ENDPOINT_XFER_ISOC:
csr1 |= TX_TYPE(TYPE_ISO);
csr2 |= TX_BINTERVAL(interval);
break;
case USB_ENDPOINT_XFER_INT:
csr1 |= TX_TYPE(TYPE_INT);
csr2 |= TX_BINTERVAL(interval);
break;
}
/* Enable QMU Done interrupt */
mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0);
mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1);
mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2);
dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)),
mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)),
mtu3_readl(mbase, MU3D_EP_TXCR2(epnum)));
} else {
csr0 = RX_RXMAXPKTSZ(mep->maxp);
csr0 |= RX_DMAREQEN;
num_pkts = (burst + 1) * (mult + 1) - 1;
csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
csr1 |= RX_MAX_PKT(gen2cp, num_pkts) | RX_MULT(gen2cp, mult);
csr2 = RX_FIFOADDR(fifo_addr >> 4);
csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
switch (mep->type) {
case USB_ENDPOINT_XFER_BULK:
csr1 |= RX_TYPE(TYPE_BULK);
break;
case USB_ENDPOINT_XFER_ISOC:
csr1 |= RX_TYPE(TYPE_ISO);
csr2 |= RX_BINTERVAL(interval);
break;
case USB_ENDPOINT_XFER_INT:
csr1 |= RX_TYPE(TYPE_INT);
csr2 |= RX_BINTERVAL(interval);
break;
}
		/* Enable QMU Done interrupt */
mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0);
mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1);
mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2);
dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)),
mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)),
mtu3_readl(mbase, MU3D_EP_RXCR2(epnum)));
}
dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2);
dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n",
__func__, mep->name, mep->fifo_addr, mep->fifo_size,
fifo_sgsz, mep->fifo_seg_size);
return 0;
}
/* for non-ep0 */
void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep)
{
void __iomem *mbase = mtu->mac_base;
int epnum = mep->epnum;
if (mep->is_in) {
mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0);
mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0);
mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0);
mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum));
} else {
mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0);
mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0);
mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0);
mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum));
}
mtu3_ep_reset(mep);
ep_fifo_free(mep);
dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name);
}
/*
 * Two scenarios:
 * 1. when the device IP supports SS, the FIFOs of EP0, TX EPs and
 *	RX EPs are separate;
 * 2. when it supports only HS, the FIFO is shared by all EPs; the
 *	capability registers @EPNTXFFSZ and @EPNRXFFSZ give the total
 *	FIFO size of the non-ep0 EPs, and ep0's is fixed to 64B, so
 *	the total FIFO size is 64B + @EPNTXFFSZ.
 *	Because the first 64B are reserved for EP0, the non-ep0 FIFO
 *	starts at offset 64 and, for simplicity, is divided into two
 *	equal parts for the TX and RX EPs.
 */
static void get_ep_fifo_config(struct mtu3 *mtu)
{
struct mtu3_fifo_info *tx_fifo;
struct mtu3_fifo_info *rx_fifo;
u32 fifosize;
if (mtu->separate_fifo) {
fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
tx_fifo = &mtu->tx_fifo;
tx_fifo->base = 0;
tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ);
rx_fifo = &mtu->rx_fifo;
rx_fifo->base = 0;
rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
mtu->slot = MTU3_U3_IP_SLOT_DEFAULT;
} else {
fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
tx_fifo = &mtu->tx_fifo;
tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE;
tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1;
bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
rx_fifo = &mtu->rx_fifo;
rx_fifo->base =
tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT;
rx_fifo->limit = tx_fifo->limit;
bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
mtu->slot = MTU3_U2_IP_SLOT_DEFAULT;
}
dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n",
__func__, tx_fifo->base, tx_fifo->limit,
rx_fifo->base, rx_fifo->limit);
}
static void mtu3_ep0_setup(struct mtu3 *mtu)
{
u32 maxpacket = mtu->g.ep0->maxpacket;
u32 csr;
dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket);
csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR);
csr &= ~EP0_MAXPKTSZ_MSK;
csr |= EP0_MAXPKTSZ(maxpacket);
csr &= EP0_W1C_BITS;
mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
/* Enable EP0 interrupt */
mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR | SETUPENDISR);
}
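/*
 * Read the endpoint capabilities, allocate the in/out endpoint arrays
 * (in_eps[0] is ep0, out_eps[0] is reserved), bind each endpoint to its
 * TX or RX FIFO, and initialize the QMU.
 */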
static int mtu3_mem_alloc(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
struct mtu3_ep *ep_array;
int in_ep_num, out_ep_num;
u32 cap_epinfo;
int ret;
int i;
cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO);
in_ep_num = CAP_TX_EP_NUM(cap_epinfo);
out_ep_num = CAP_RX_EP_NUM(cap_epinfo);
dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n",
mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num,
mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num);
	/* slot 0 holds ep0 in in_eps[] and is reserved in out_eps[] */
mtu->num_eps = min(in_ep_num, out_ep_num) + 1;
ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL);
if (ep_array == NULL)
return -ENOMEM;
mtu->ep_array = ep_array;
mtu->in_eps = ep_array;
mtu->out_eps = &ep_array[mtu->num_eps];
/* ep0 uses in_eps[0], out_eps[0] is reserved */
mtu->ep0 = mtu->in_eps;
mtu->ep0->mtu = mtu;
mtu->ep0->epnum = 0;
for (i = 1; i < mtu->num_eps; i++) {
struct mtu3_ep *mep = mtu->in_eps + i;
mep->fifo = &mtu->tx_fifo;
mep = mtu->out_eps + i;
mep->fifo = &mtu->rx_fifo;
}
get_ep_fifo_config(mtu);
ret = mtu3_qmu_init(mtu);
if (ret)
kfree(mtu->ep_array);
return ret;
}
static void mtu3_mem_free(struct mtu3 *mtu)
{
mtu3_qmu_exit(mtu);
kfree(mtu->ep_array);
}
static void mtu3_regs_init(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
/* be sure interrupts are disabled before registration of ISR */
mtu3_intr_disable(mtu);
mtu3_csr_init(mtu);
/* U2/U3 detected by HW */
mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
/* vbus detected by HW */
mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
/* use new QMU format when HW version >= 0x1003 */
if (mtu->gen2cp)
mtu3_writel(mbase, U3D_QFCR, ~0x0);
}
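/*
 * Handle the device speed-change interrupt: record the negotiated speed,
 * set ep0's max packet size (64B for FS/HS, 512B for SS/SSP), and either
 * set up ep0 or report a disconnect when the speed is unknown.
 */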
static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
enum usb_device_speed udev_speed;
u32 maxpkt = 64;
u32 link;
u32 speed;
link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link);
if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
return IRQ_NONE;
speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
switch (speed) {
case MTU3_SPEED_FULL:
udev_speed = USB_SPEED_FULL;
		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
| LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
LPM_BESL_STALL | LPM_BESLD_STALL);
break;
case MTU3_SPEED_HIGH:
udev_speed = USB_SPEED_HIGH;
		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
| LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
LPM_BESL_STALL | LPM_BESLD_STALL);
break;
case MTU3_SPEED_SUPER:
udev_speed = USB_SPEED_SUPER;
maxpkt = 512;
break;
case MTU3_SPEED_SUPER_PLUS:
udev_speed = USB_SPEED_SUPER_PLUS;
maxpkt = 512;
break;
default:
udev_speed = USB_SPEED_UNKNOWN;
break;
}
dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
mtu3_dbg_trace(mtu->dev, "link speed %s",
usb_speed_string(udev_speed));
mtu->g.speed = udev_speed;
mtu->g.ep0->maxpacket = maxpkt;
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
mtu->connected = !!(udev_speed != USB_SPEED_UNKNOWN);
if (udev_speed == USB_SPEED_UNKNOWN) {
mtu3_gadget_disconnect(mtu);
pm_runtime_put(mtu->dev);
} else {
pm_runtime_get(mtu->dev);
mtu3_ep0_setup(mtu);
}
return IRQ_HANDLED;
}
static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
u32 ltssm;
ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
trace_mtu3_u3_ltssm_isr(ltssm);
if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
mtu3_gadget_reset(mtu);
if (ltssm & VBUS_FALL_INTR) {
mtu3_ss_func_set(mtu, false);
mtu3_gadget_reset(mtu);
}
if (ltssm & VBUS_RISE_INTR)
mtu3_ss_func_set(mtu, true);
if (ltssm & EXIT_U3_INTR)
mtu3_gadget_resume(mtu);
if (ltssm & ENTER_U3_INTR)
mtu3_gadget_suspend(mtu);
return IRQ_HANDLED;
}
static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
u32 u2comm;
u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
trace_mtu3_u2_common_isr(u2comm);
if (u2comm & SUSPEND_INTR)
mtu3_gadget_suspend(mtu);
if (u2comm & RESUME_INTR)
mtu3_gadget_resume(mtu);
if (u2comm & RESET_INTR)
mtu3_gadget_reset(mtu);
return IRQ_HANDLED;
}
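/*
 * Top-level (level 1) interrupt handler: demultiplexes the link, U2
 * common, U3 LTSSM, ep0 and QMU interrupts under the controller lock.
 */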
static irqreturn_t mtu3_irq(int irq, void *data)
{
struct mtu3 *mtu = (struct mtu3 *)data;
unsigned long flags;
u32 level1;
spin_lock_irqsave(&mtu->lock, flags);
/* U3D_LV1ISR is RU */
level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
if (level1 & EP_CTRL_INTR)
mtu3_link_isr(mtu);
if (level1 & MAC2_INTR)
mtu3_u2_common_isr(mtu);
if (level1 & MAC3_INTR)
mtu3_u3_ltssm_isr(mtu);
if (level1 & BMU_INTR)
mtu3_ep0_isr(mtu);
if (level1 & QMU_INTR)
mtu3_qmu_isr(mtu);
spin_unlock_irqrestore(&mtu->lock, flags);
return IRQ_HANDLED;
}
static void mtu3_check_params(struct mtu3 *mtu)
{
/* device's u3 port (port0) is disabled */
if (mtu->u3_capable && (mtu->ssusb->u3p_dis_msk & BIT(0)))
mtu->u3_capable = 0;
/* check the max_speed parameter */
switch (mtu->max_speed) {
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
break;
default:
dev_err(mtu->dev, "invalid max_speed: %s\n",
usb_speed_string(mtu->max_speed));
fallthrough;
case USB_SPEED_UNKNOWN:
/* default as SSP */
mtu->max_speed = USB_SPEED_SUPER_PLUS;
break;
}
if (!mtu->u3_capable && (mtu->max_speed > USB_SPEED_HIGH))
mtu->max_speed = USB_SPEED_HIGH;
mtu->speed = mtu->max_speed;
dev_info(mtu->dev, "max_speed: %s\n",
usb_speed_string(mtu->max_speed));
}
static int mtu3_hw_init(struct mtu3 *mtu)
{
u32 value;
int ret;
value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_TRUNK_VERS);
mtu->hw_version = IP_TRUNK_VERS(value);
mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003);
value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
mtu->u3_capable = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
/* usb3 ip uses separate fifo */
mtu->separate_fifo = mtu->u3_capable;
dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
mtu->u3_capable ? "U3" : "U2");
mtu3_check_params(mtu);
mtu3_device_reset(mtu);
ret = mtu3_device_enable(mtu);
if (ret) {
dev_err(mtu->dev, "device enable failed %d\n", ret);
return ret;
}
ret = mtu3_mem_alloc(mtu);
if (ret)
return -ENOMEM;
mtu3_regs_init(mtu);
return 0;
}
static void mtu3_hw_exit(struct mtu3 *mtu)
{
mtu3_device_disable(mtu);
mtu3_mem_free(mtu);
}
/*
 * A 32-bit DMA mask is set by default; check whether the controller
 * supports 36-bit DMA, and if it does, set a 36-bit DMA mask instead.
 */
static int mtu3_set_dma_mask(struct mtu3 *mtu)
{
struct device *dev = mtu->dev;
bool is_36bit = false;
int ret = 0;
u32 value;
value = mtu3_readl(mtu->mac_base, U3D_MISC_CTRL);
if (value & DMA_ADDR_36BIT) {
is_36bit = true;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
		/* if setting the 36-bit DMA mask fails, fall back to the 32-bit one */
if (ret) {
is_36bit = false;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
}
dev_info(dev, "dma mask: %s bits\n", is_36bit ? "36" : "32");
return ret;
}
int ssusb_gadget_init(struct ssusb_mtk *ssusb)
{
struct device *dev = ssusb->dev;
struct platform_device *pdev = to_platform_device(dev);
struct mtu3 *mtu = NULL;
int ret = -ENOMEM;
mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL);
if (mtu == NULL)
return -ENOMEM;
mtu->irq = platform_get_irq_byname_optional(pdev, "device");
if (mtu->irq < 0) {
if (mtu->irq == -EPROBE_DEFER)
return mtu->irq;
/* for backward compatibility */
mtu->irq = platform_get_irq(pdev, 0);
if (mtu->irq < 0)
return mtu->irq;
}
dev_info(dev, "irq %d\n", mtu->irq);
mtu->mac_base = devm_platform_ioremap_resource_byname(pdev, "mac");
if (IS_ERR(mtu->mac_base)) {
dev_err(dev, "error mapping memory for dev mac\n");
return PTR_ERR(mtu->mac_base);
}
spin_lock_init(&mtu->lock);
mtu->dev = dev;
mtu->ippc_base = ssusb->ippc_base;
ssusb->mac_base = mtu->mac_base;
ssusb->u3d = mtu;
mtu->ssusb = ssusb;
mtu->max_speed = usb_get_maximum_speed(dev);
dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n",
mtu->mac_base, mtu->ippc_base);
ret = mtu3_hw_init(mtu);
if (ret) {
dev_err(dev, "mtu3 hw init failed:%d\n", ret);
return ret;
}
ret = mtu3_set_dma_mask(mtu);
if (ret) {
dev_err(dev, "mtu3 set dma_mask failed:%d\n", ret);
goto dma_mask_err;
}
ret = devm_request_threaded_irq(dev, mtu->irq, NULL, mtu3_irq,
IRQF_ONESHOT, dev_name(dev), mtu);
if (ret) {
dev_err(dev, "request irq %d failed!\n", mtu->irq);
goto irq_err;
}
/* power down device IP for power saving by default */
mtu3_stop(mtu);
ret = mtu3_gadget_setup(mtu);
if (ret) {
dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
goto gadget_err;
}
ssusb_dev_debugfs_init(ssusb);
dev_dbg(dev, " %s() done...\n", __func__);
return 0;
gadget_err:
device_init_wakeup(dev, false);
dma_mask_err:
irq_err:
mtu3_hw_exit(mtu);
ssusb->u3d = NULL;
dev_err(dev, " %s() fail...\n", __func__);
return ret;
}
void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
{
struct mtu3 *mtu = ssusb->u3d;
mtu3_gadget_cleanup(mtu);
device_init_wakeup(ssusb->dev, false);
mtu3_hw_exit(mtu);
}
bool ssusb_gadget_ip_sleep_check(struct ssusb_mtk *ssusb)
{
struct mtu3 *mtu = ssusb->u3d;
/* host only, should wait for ip sleep */
if (!mtu)
return true;
	/* the device is started and D+ is pulled up: the IP can sleep */
	if (mtu->is_active && mtu->softconnect)
		return true;
	/* the IP can't sleep while device mode is supported but D+ isn't pulled up */
return false;
}
int ssusb_gadget_suspend(struct ssusb_mtk *ssusb, pm_message_t msg)
{
struct mtu3 *mtu = ssusb->u3d;
if (!mtu->gadget_driver)
return 0;
if (mtu->connected)
return -EBUSY;
mtu3_dev_suspend(mtu);
synchronize_irq(mtu->irq);
return 0;
}
int ssusb_gadget_resume(struct ssusb_mtk *ssusb, pm_message_t msg)
{
struct mtu3 *mtu = ssusb->u3d;
if (!mtu->gadget_driver)
return 0;
mtu3_dev_resume(mtu);
return 0;
}
| linux-master | drivers/usb/mtu3/mtu3_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_host.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include "mtu3.h"
#include "mtu3_dr.h"
/* mt8173 etc */
#define PERI_WK_CTRL1 0x4
#define WC1_IS_C(x) (((x) & 0xf) << 26) /* cycle debounce */
#define WC1_IS_EN BIT(25)
#define WC1_IS_P BIT(6) /* polarity for ip sleep */
/* mt8183 */
#define PERI_WK_CTRL0 0x0
#define WC0_IS_C(x) ((u32)(((x) & 0xf) << 28)) /* cycle debounce */
#define WC0_IS_P BIT(12) /* polarity */
#define WC0_IS_EN BIT(6)
/* mt8192 */
#define WC0_SSUSB0_CDEN BIT(6)
#define WC0_IS_SPM_EN BIT(1)
/* mt2712 etc */
#define PERI_SSUSB_SPM_CTRL 0x0
#define SSC_IP_SLEEP_EN BIT(4)
#define SSC_SPM_INT_EN BIT(1)
enum ssusb_uwk_vers {
SSUSB_UWK_V1 = 1,
SSUSB_UWK_V2,
SSUSB_UWK_V1_1 = 101, /* specific revision 1.01 */
SSUSB_UWK_V1_2, /* specific revision 1.02 */
};
/*
 * ip-sleep wakeup mode:
 * all clocks can be turned off, but the power domain should be kept on
 */
static void ssusb_wakeup_ip_sleep_set(struct ssusb_mtk *ssusb, bool enable)
{
u32 reg, msk, val;
switch (ssusb->uwk_vers) {
case SSUSB_UWK_V1:
reg = ssusb->uwk_reg_base + PERI_WK_CTRL1;
msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;
break;
case SSUSB_UWK_V1_1:
reg = ssusb->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_IS_EN | WC0_IS_C(0xf) | WC0_IS_P;
val = enable ? (WC0_IS_EN | WC0_IS_C(0x1)) : 0;
break;
case SSUSB_UWK_V1_2:
reg = ssusb->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_SSUSB0_CDEN | WC0_IS_SPM_EN;
val = enable ? msk : 0;
break;
case SSUSB_UWK_V2:
reg = ssusb->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
val = enable ? msk : 0;
break;
default:
return;
}
regmap_update_bits(ssusb->uwk, reg, msk, val);
}
int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
struct device_node *dn)
{
struct of_phandle_args args;
int ret;
/* wakeup function is optional */
ssusb->uwk_en = of_property_read_bool(dn, "wakeup-source");
if (!ssusb->uwk_en)
return 0;
ret = of_parse_phandle_with_fixed_args(dn,
"mediatek,syscon-wakeup", 2, 0, &args);
if (ret)
return ret;
ssusb->uwk_reg_base = args.args[0];
ssusb->uwk_vers = args.args[1];
ssusb->uwk = syscon_node_to_regmap(args.np);
of_node_put(args.np);
dev_info(ssusb->dev, "uwk - reg:0x%x, version:%d\n",
ssusb->uwk_reg_base, ssusb->uwk_vers);
return PTR_ERR_OR_ZERO(ssusb->uwk);
}
void ssusb_wakeup_set(struct ssusb_mtk *ssusb, bool enable)
{
if (ssusb->uwk_en)
ssusb_wakeup_ip_sleep_set(ssusb, enable);
}
static void host_ports_num_get(struct ssusb_mtk *ssusb)
{
u32 xhci_cap;
xhci_cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
ssusb->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap);
ssusb->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap);
dev_dbg(ssusb->dev, "host - u2_ports:%d, u3_ports:%d\n",
ssusb->u2_ports, ssusb->u3_ports);
}
/* only configure the ports that will be used later */
static int ssusb_host_enable(struct ssusb_mtk *ssusb)
{
void __iomem *ibase = ssusb->ippc_base;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
int u3_ports_disabled;
u32 check_clk;
u32 value;
int i;
/* power on host ip */
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
/* power on and enable u3 ports except skipped ones */
u3_ports_disabled = 0;
for (i = 0; i < num_u3p; i++) {
if ((0x1 << i) & ssusb->u3p_dis_msk) {
u3_ports_disabled++;
continue;
}
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
value |= SSUSB_U3_PORT_HOST_SEL;
mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
}
	/* power on and enable u2 ports except skipped ones */
for (i = 0; i < num_u2p; i++) {
if ((0x1 << i) & ssusb->u2p_dis_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
value |= SSUSB_U2_PORT_HOST_SEL;
mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
}
check_clk = SSUSB_XHCI_RST_B_STS;
if (num_u3p > u3_ports_disabled)
check_clk = SSUSB_U3_MAC_RST_B_STS;
return ssusb_check_clocks(ssusb, check_clk);
}
static int ssusb_host_disable(struct ssusb_mtk *ssusb)
{
void __iomem *ibase = ssusb->ippc_base;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
u32 value;
int i;
/* power down and disable u3 ports except skipped ones */
for (i = 0; i < num_u3p; i++) {
if ((0x1 << i) & ssusb->u3p_dis_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
}
/* power down and disable u2 ports except skipped ones */
for (i = 0; i < num_u2p; i++) {
if ((0x1 << i) & ssusb->u2p_dis_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
}
/* power down host ip */
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
return 0;
}
int ssusb_host_resume(struct ssusb_mtk *ssusb, bool p0_skipped)
{
void __iomem *ibase = ssusb->ippc_base;
int u3p_skip_msk = ssusb->u3p_dis_msk;
int u2p_skip_msk = ssusb->u2p_dis_msk;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
u32 value;
int i;
if (p0_skipped) {
u2p_skip_msk |= 0x1;
if (ssusb->otg_switch.is_u3_drd)
u3p_skip_msk |= 0x1;
}
/* power on host ip */
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
/* power on u3 ports except skipped ones */
for (i = 0; i < num_u3p; i++) {
if ((0x1 << i) & u3p_skip_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value &= ~SSUSB_U3_PORT_PDN;
mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
}
/* power on all u2 ports except skipped ones */
for (i = 0; i < num_u2p; i++) {
if ((0x1 << i) & u2p_skip_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
value &= ~SSUSB_U2_PORT_PDN;
mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
}
return 0;
}
/* don't skip port0 here, since PDN can be set repeatedly */
int ssusb_host_suspend(struct ssusb_mtk *ssusb)
{
void __iomem *ibase = ssusb->ippc_base;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
u32 value;
int i;
/* power down u3 ports except skipped ones */
for (i = 0; i < num_u3p; i++) {
if ((0x1 << i) & ssusb->u3p_dis_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value |= SSUSB_U3_PORT_PDN;
mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
}
/* power down u2 ports except skipped ones */
for (i = 0; i < num_u2p; i++) {
if ((0x1 << i) & ssusb->u2p_dis_msk)
continue;
value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
value |= SSUSB_U2_PORT_PDN;
mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
}
/* power down host ip */
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
return 0;
}
static void ssusb_host_setup(struct ssusb_mtk *ssusb)
{
host_ports_num_get(ssusb);
	/*
	 * power on the host and power on/enable all ports;
	 * if OTG is supported, the gadget driver will switch port0
	 * to device mode
	 */
ssusb_host_enable(ssusb);
ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
	/* if port0 supports dual-role, it works in host mode by default */
ssusb_set_vbus(&ssusb->otg_switch, 1);
}
static void ssusb_host_cleanup(struct ssusb_mtk *ssusb)
{
if (ssusb->is_host)
ssusb_set_vbus(&ssusb->otg_switch, 0);
ssusb_host_disable(ssusb);
}
/*
 * If the host supports multiple ports, the VBUSes (5V) of all ports except
 * the OTG-capable port0 are best enabled by default in the DTS.
 * The host driver keeps the link to attached devices when the system enters
 * suspend mode, so there is no need to control the VBUSes after
 * initialization.
 */
int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn)
{
struct device *parent_dev = ssusb->dev;
int ret;
ssusb_host_setup(ssusb);
ret = of_platform_populate(parent_dn, NULL, NULL, parent_dev);
if (ret) {
dev_dbg(parent_dev, "failed to create child devices at %pOF\n",
parent_dn);
return ret;
}
dev_info(parent_dev, "xHCI platform device register success...\n");
return 0;
}
void ssusb_host_exit(struct ssusb_mtk *ssusb)
{
of_platform_depopulate(ssusb->dev);
ssusb_host_cleanup(ssusb);
}
| linux-master | drivers/usb/mtu3/mtu3_host.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
#define USB2_PORT 2
#define USB3_PORT 3
static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx)
{
return container_of(otg_sx, struct ssusb_mtk, otg_switch);
}
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
}
/* only port0 supports dual-role mode */
static int ssusb_port0_switch(struct ssusb_mtk *ssusb,
int version, bool tohost)
{
void __iomem *ibase = ssusb->ippc_base;
u32 value;
dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__,
version, tohost ? "host" : "device");
if (version == USB2_PORT) {
/* 1. power off and disable u2 port0 */
value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
/* 2. power on, enable u2 port0 and select its mode */
value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) :
(value & (~SSUSB_U2_PORT_HOST_SEL));
mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
} else {
/* 1. power off and disable u3 port0 */
value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
/* 2. power on, enable u3 port0 and select its mode */
value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) :
(value & (~SSUSB_U3_PORT_HOST_SEL));
mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
}
return 0;
}
static void switch_port_to_host(struct ssusb_mtk *ssusb)
{
u32 check_clk = 0;
dev_dbg(ssusb->dev, "%s\n", __func__);
ssusb_port0_switch(ssusb, USB2_PORT, true);
if (ssusb->otg_switch.is_u3_drd) {
ssusb_port0_switch(ssusb, USB3_PORT, true);
check_clk = SSUSB_U3_MAC_RST_B_STS;
}
ssusb_check_clocks(ssusb, check_clk);
/* after all clocks are stable */
toggle_opstate(ssusb);
}
static void switch_port_to_device(struct ssusb_mtk *ssusb)
{
u32 check_clk = 0;
dev_dbg(ssusb->dev, "%s\n", __func__);
ssusb_port0_switch(ssusb, USB2_PORT, false);
if (ssusb->otg_switch.is_u3_drd) {
ssusb_port0_switch(ssusb, USB3_PORT, false);
check_clk = SSUSB_U3_MAC_RST_B_STS;
}
ssusb_check_clocks(ssusb, check_clk);
}
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
{
struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct regulator *vbus = otg_sx->vbus;
int ret;
/* vbus is optional */
if (!vbus)
return 0;
dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
if (is_on) {
ret = regulator_enable(vbus);
if (ret) {
dev_err(ssusb->dev, "vbus regulator enable failed\n");
return ret;
}
} else {
regulator_disable(vbus);
}
return 0;
}
static void ssusb_mode_sw_work(struct work_struct *work)
{
struct otg_switch_mtk *otg_sx =
container_of(work, struct otg_switch_mtk, dr_work);
struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct mtu3 *mtu = ssusb->u3d;
enum usb_role desired_role = otg_sx->desired_role;
enum usb_role current_role;
current_role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
if (desired_role == USB_ROLE_NONE) {
/* the default mode is host, matching what probe sets up */
desired_role = USB_ROLE_HOST;
if (otg_sx->default_role == USB_ROLE_DEVICE)
desired_role = USB_ROLE_DEVICE;
}
if (current_role == desired_role)
return;
dev_dbg(ssusb->dev, "set role : %s\n", usb_role_string(desired_role));
mtu3_dbg_trace(ssusb->dev, "set role : %s", usb_role_string(desired_role));
pm_runtime_get_sync(ssusb->dev);
switch (desired_role) {
case USB_ROLE_HOST:
ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
mtu3_stop(mtu);
switch_port_to_host(ssusb);
ssusb_set_vbus(otg_sx, 1);
ssusb->is_host = true;
break;
case USB_ROLE_DEVICE:
ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
ssusb->is_host = false;
ssusb_set_vbus(otg_sx, 0);
switch_port_to_device(ssusb);
mtu3_start(mtu);
break;
case USB_ROLE_NONE:
default:
dev_err(ssusb->dev, "invalid role\n");
}
pm_runtime_put(ssusb->dev);
}
static void ssusb_set_mode(struct otg_switch_mtk *otg_sx, enum usb_role role)
{
struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
if (ssusb->dr_mode != USB_DR_MODE_OTG)
return;
otg_sx->desired_role = role;
queue_work(system_freezable_wq, &otg_sx->dr_work);
}
static int ssusb_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct otg_switch_mtk *otg_sx =
container_of(nb, struct otg_switch_mtk, id_nb);
ssusb_set_mode(otg_sx, event ? USB_ROLE_HOST : USB_ROLE_DEVICE);
return NOTIFY_DONE;
}
static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
{
struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct extcon_dev *edev = otg_sx->edev;
int ret;
/* extcon is optional */
if (!edev)
return 0;
otg_sx->id_nb.notifier_call = ssusb_id_notifier;
ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB_HOST,
&otg_sx->id_nb);
if (ret < 0) {
dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
return ret;
}
ret = extcon_get_state(edev, EXTCON_USB_HOST);
dev_dbg(ssusb->dev, "EXTCON_USB_HOST: %d\n", ret);
/* default as host, switch to device mode if needed */
if (!ret)
ssusb_set_mode(otg_sx, USB_ROLE_DEVICE);
return 0;
}
/*
* We provide a debugfs interface to switch between host and device modes
* depending on user input.
* This is useful in special cases, such as when a platform uses a Type-A
* receptacle but also wants to support dual-role mode.
*/
void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
ssusb_set_mode(otg_sx, to_host ? USB_ROLE_HOST : USB_ROLE_DEVICE);
}
void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
enum mtu3_dr_force_mode mode)
{
u32 value;
value = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
switch (mode) {
case MTU3_DR_FORCE_DEVICE:
value |= SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG;
break;
case MTU3_DR_FORCE_HOST:
value |= SSUSB_U2_PORT_FORCE_IDDIG;
value &= ~SSUSB_U2_PORT_RG_IDDIG;
break;
case MTU3_DR_FORCE_NONE:
value &= ~(SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG);
break;
default:
return;
}
mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
}
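/*
 * Illustrative summary, derived from the switch above (not authoritative
 * hardware documentation): FORCE_IDDIG makes the controller ignore the ID
 * pin and use RG_IDDIG instead; RG_IDDIG = 1 emulates ID high (device role),
 * RG_IDDIG = 0 emulates ID low (host role), and clearing both bits hands ID
 * detection back to the pin.
 */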
static int ssusb_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
{
struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
ssusb_set_mode(otg_sx, role);
return 0;
}
static enum usb_role ssusb_role_sw_get(struct usb_role_switch *sw)
{
struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
return ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
}
static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
{
struct usb_role_switch_desc role_sx_desc = { 0 };
struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct device *dev = ssusb->dev;
enum usb_dr_mode mode;
if (!otg_sx->role_sw_used)
return 0;
mode = usb_get_role_switch_default_mode(dev);
if (mode == USB_DR_MODE_PERIPHERAL)
otg_sx->default_role = USB_ROLE_DEVICE;
else
otg_sx->default_role = USB_ROLE_HOST;
role_sx_desc.set = ssusb_role_sw_set;
role_sx_desc.get = ssusb_role_sw_get;
role_sx_desc.fwnode = dev_fwnode(dev);
role_sx_desc.driver_data = ssusb;
role_sx_desc.allow_userspace_control = true;
otg_sx->role_sw = usb_role_switch_register(dev, &role_sx_desc);
if (IS_ERR(otg_sx->role_sw))
return PTR_ERR(otg_sx->role_sw);
ssusb_set_mode(otg_sx, otg_sx->default_role);
return 0;
}
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
int ret = 0;
INIT_WORK(&otg_sx->dr_work, ssusb_mode_sw_work);
if (otg_sx->manual_drd_enabled)
ssusb_dr_debugfs_init(ssusb);
else if (otg_sx->role_sw_used)
ret = ssusb_role_sw_register(otg_sx);
else
ret = ssusb_extcon_register(otg_sx);
return ret;
}
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
cancel_work_sync(&otg_sx->dr_work);
usb_role_switch_unregister(otg_sx->role_sw);
}
| linux-master | drivers/usb/mtu3/mtu3_dr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_qmu.c - Queue Management Unit driver for device controller
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
/*
* The Queue Management Unit (QMU) is designed to offload the SW effort
* of servicing DMA interrupts.
* By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
* SW links data buffers and triggers the QMU to send data to / receive data
* from the host in one go.
* Currently only GPDs are supported.
*
* For more detailed information, please refer to the QMU Programming Guide.
*/
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include "mtu3.h"
#include "mtu3_trace.h"
#define QMU_CHECKSUM_LEN 16
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_BDP BIT(1)
#define GPD_FLAGS_BPS BIT(2)
#define GPD_FLAGS_ZLP BIT(6)
#define GPD_FLAGS_IOC BIT(7)
#define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
#define GPD_RX_BUF_LEN_OG(x) (((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x) (((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x) \
({ \
typeof(x) x_ = (x); \
((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})
#define GPD_DATA_LEN_OG(x) ((x) & 0xffff)
#define GPD_DATA_LEN_EL(x) ((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x) \
({ \
typeof(x) x_ = (x); \
((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})
#define GPD_EXT_FLAG_ZLP BIT(29)
#define GPD_EXT_NGP_OG(x) (((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x) (((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x) (((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x) (((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x) \
({ \
typeof(x) x_ = (x); \
((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})
#define GPD_EXT_BUF(mtu, x) \
({ \
typeof(x) x_ = (x); \
((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})
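/*
 * Illustrative note (not part of the driver): each *_OG/*_EL macro pair
 * above encodes the same logical field at a different width or position,
 * depending on the controller generation. For a made-up length x = 0x12345:
 *
 * GPD_DATA_LEN_OG(0x12345) = 0x12345 & 0xffff = 0x2345 (16-bit field)
 * GPD_DATA_LEN_EL(0x12345) = 0x12345 & 0xfffff = 0x12345 (20-bit field)
 *
 * The GPD_*(mtu, x) wrappers select the right variant via mtu->gen2cp, so
 * the rest of the code never has to care which layout the hardware uses.
 */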
#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo) \
((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
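/*
 * Worked example (made-up values, not part of the driver): HILO_DMA()
 * assembles a 64-bit DMA address from the QMU's split hi/lo registers.
 * With hi = 0x1 and lo = 0x2000f000:
 *
 * HILO_GEN64(0x1, 0x2000f000) = (0x1ULL << 32) + 0x2000f000 = 0x12000f000
 *
 * read_txq_cur_addr() and read_rxq_cur_addr() below use this to recover
 * the current GPD address.
 */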
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
u32 txcpr;
u32 txhiar;
txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}
static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
u32 rxcpr;
u32 rxhiar;
rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}
static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
u32 tqhiar;
mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
cpu_to_le32(lower_32_bits(dma)));
tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
tqhiar &= ~QMU_START_ADDR_HI_MSK;
tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}
static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
u32 rqhiar;
mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
cpu_to_le32(lower_32_bits(dma)));
rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
rqhiar &= ~QMU_START_ADDR_HI_MSK;
rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
dma_addr_t dma_addr)
{
dma_addr_t dma_base = ring->dma;
struct qmu_gpd *gpd_head = ring->start;
u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);
if (offset >= MAX_GPD_NUM)
return NULL;
return gpd_head + offset;
}
static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
struct qmu_gpd *gpd)
{
dma_addr_t dma_base = ring->dma;
struct qmu_gpd *gpd_head = ring->start;
u32 offset;
offset = gpd - gpd_head;
if (offset >= MAX_GPD_NUM)
return 0;
return dma_base + (offset * sizeof(*gpd));
}
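/*
 * Worked example (made-up addresses, not part of the driver): the two
 * helpers above translate between the DMA and CPU views of the ring with
 * plain offset arithmetic. With ring->dma = 0x80000000, ring->start = S and
 * a 16-byte GPD (as asserted in mtu3_qmu_init()):
 *
 * gpd_dma_to_virt(ring, 0x80000020) -> S + 2 (offset 0x20 / 16 = 2)
 * gpd_virt_to_dma(ring, S + 2) -> 0x80000020
 *
 * Both return a sentinel (NULL / 0) when the computed offset is not below
 * MAX_GPD_NUM, i.e. when the address does not belong to this ring.
 */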
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
ring->start = gpd;
ring->enqueue = gpd;
ring->dequeue = gpd;
ring->end = gpd + MAX_GPD_NUM - 1;
}
static void reset_gpd_list(struct mtu3_ep *mep)
{
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->start;
if (gpd) {
gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd_ring_init(ring, gpd);
}
}
int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
struct qmu_gpd *gpd;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
/* software owns all GPDs by default */
gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
if (gpd == NULL)
return -ENOMEM;
gpd_ring_init(ring, gpd);
return 0;
}
void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
dma_pool_free(mep->mtu->qmu_gpd_pool,
ring->start, ring->dma);
memset(ring, 0, sizeof(*ring));
}
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
void __iomem *mbase = mtu->mac_base;
int epnum = mep->epnum;
u32 offset;
offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
mtu3_writel(mbase, offset, QMU_Q_RESUME);
if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
mtu3_writel(mbase, offset, QMU_Q_RESUME);
}
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
if (ring->enqueue < ring->end)
ring->enqueue++;
else
ring->enqueue = ring->start;
return ring->enqueue;
}
/* @dequeue may be NULL if the ring is unallocated or freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
if (ring->dequeue < ring->end)
ring->dequeue++;
else
ring->dequeue = ring->start;
return ring->dequeue;
}
/* check if a ring is empty */
static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
struct qmu_gpd *enq = ring->enqueue;
struct qmu_gpd *next;
if (ring->enqueue < ring->end)
next = enq + 1;
else
next = ring->start;
/* one gpd is reserved to simplify gpd preparation */
return next == ring->dequeue;
}
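/*
 * Worked example (not part of the driver): because one GPD is reserved,
 * a ring of MAX_GPD_NUM descriptors holds at most MAX_GPD_NUM - 1 pending
 * transfers. With a made-up MAX_GPD_NUM = 4 and dequeue == start, three
 * GPDs can be queued; on the fourth attempt enqueue + 1 wraps onto dequeue,
 * the check above fires, and mtu3_prepare_transfer() below makes
 * mtu3_gadget_queue() fail with -EAGAIN.
 */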
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
return gpd_ring_empty(&mep->gpd_ring);
}
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
struct qmu_gpd *enq;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
struct mtu3 *mtu = mep->mtu;
dma_addr_t enq_dma;
u32 ext_addr;
gpd->dw0_info = 0; /* SW owns it */
gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
/* get the next GPD */
enq = advance_enq_gpd(ring);
enq_dma = gpd_virt_to_dma(ring, enq);
dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
mep->epnum, gpd, enq, &enq_dma);
enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
gpd->dw0_info = cpu_to_le32(ext_addr);
if (req->zero) {
if (mtu->gen2cp)
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
else
gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
}
/* prevent reorder, make sure GPD's HWO is set last */
mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
trace_mtu3_prepare_gpd(mep, gpd);
return 0;
}
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
struct qmu_gpd *enq;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
struct mtu3 *mtu = mep->mtu;
dma_addr_t enq_dma;
u32 ext_addr;
gpd->dw0_info = 0; /* SW owns it */
gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
/* get the next GPD */
enq = advance_enq_gpd(ring);
enq_dma = gpd_virt_to_dma(ring, enq);
dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
mep->epnum, gpd, enq, &enq_dma);
enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
gpd->dw3_info = cpu_to_le32(ext_addr);
/* prevent reorder, make sure GPD's HWO is set last */
mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
trace_mtu3_prepare_gpd(mep, gpd);
return 0;
}
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
if (mep->is_in)
mtu3_prepare_tx_gpd(mep, mreq);
else
mtu3_prepare_rx_gpd(mep, mreq);
}
int mtu3_qmu_start(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
void __iomem *mbase = mtu->mac_base;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
u8 epnum = mep->epnum;
if (mep->is_in) {
/* set QMU start address */
write_txq_start_addr(mbase, epnum, ring->dma);
mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
/* send zero length packet according to ZLP flag in GPD */
mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
mtu3_writel(mbase, U3D_TQERRIESR0,
QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));
if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
return 0;
}
mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
} else {
write_rxq_start_addr(mbase, epnum, ring->dma);
mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
/* don't expect ZLP */
mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
/* move to the next GPD when a ZLP is received */
mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
mtu3_writel(mbase, U3D_RQERRIESR0,
QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));
if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
return 0;
}
mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
}
return 0;
}
/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
void __iomem *mbase = mtu->mac_base;
int epnum = mep->epnum;
u32 value = 0;
u32 qcsr;
int ret;
qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
return;
}
mtu3_writel(mbase, qcsr, QMU_Q_STOP);
if (mep->is_in)
mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
ret = readl_poll_timeout_atomic(mbase + qcsr, value,
!(value & QMU_Q_ACTIVE), 1, 1000);
if (ret) {
dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
return;
}
/* flush fifo again to make sure the fifo is empty */
if (mep->is_in)
mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}
void mtu3_qmu_flush(struct mtu3_ep *mep)
{
dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
((mep->is_in) ? "TX" : "RX"));
/* Stop QMU */
mtu3_qmu_stop(mep);
reset_gpd_list(mep);
}
/*
* The QMU can't transfer a zero-length packet directly (a hardware limit
* on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
* a length error interrupt, and the ISR then sends the ZLP via the BMU.
*/
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
struct mtu3_ep *mep = mtu->in_eps + epnum;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd_current = NULL;
struct mtu3_request *mreq;
dma_addr_t cur_gpd_dma;
u32 txcsr = 0;
int ret;
mreq = next_request(mep);
if (mreq && mreq->request.length != 0)
return;
cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
return;
}
dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
trace_mtu3_zlp_exp_gpd(mep, gpd_current);
mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
if (ret) {
dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
return;
}
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
/* prevent reorder, make sure GPD's HWO is set last */
mb();
/* bypass the current GPD */
gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
/* enable DMAREQEN, switch back to QMU mode */
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
mtu3_qmu_resume(mep);
}
/*
* When an RX error happens (except zlperr), the QMU stops and RQCPR saves
* the GPD that encountered the error; a Done irq will arrive after the QMU
* is resumed again.
*/
static void qmu_error_rx(struct mtu3 *mtu, u8 epnum)
{
struct mtu3_ep *mep = mtu->out_eps + epnum;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd_current = NULL;
struct mtu3_request *mreq;
dma_addr_t cur_gpd_dma;
cur_gpd_dma = read_rxq_cur_addr(mtu->mac_base, epnum);
gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
mreq = next_request(mep);
if (!mreq || mreq->gpd != gpd_current) {
dev_err(mtu->dev, "no correct RX req is found\n");
return;
}
mreq->request.status = -EAGAIN;
/* bypass the current GPD */
gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
mtu3_qmu_resume(mep);
dev_dbg(mtu->dev, "%s EP%d, current=%p, req=%p\n",
__func__, epnum, gpd_current, mreq);
}
/*
* NOTE: the request list may already be empty in the following case:
* queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
* queue_tx --> process_tasklet (meanwhile, the second one is transferred,
* and the tasklet processes both of them) --> qmu_interrupt for the second one.
* To avoid the above case, call qmu_done_tx directly in the ISR.
*/
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
struct mtu3_ep *mep = mtu->in_eps + epnum;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
struct usb_request *request = NULL;
struct mtu3_request *mreq;
dma_addr_t cur_gpd_dma;
/* translate the physical address read from the QMU register to a virtual address */
cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
if (mreq == NULL || mreq->gpd != gpd) {
dev_err(mtu->dev, "no correct TX req is found\n");
break;
}
request = &mreq->request;
request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
trace_mtu3_complete_gpd(mep, gpd);
mtu3_req_complete(mep, request, 0);
gpd = advance_deq_gpd(ring);
}
dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
struct mtu3_ep *mep = mtu->out_eps + epnum;
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
struct usb_request *req = NULL;
struct mtu3_request *mreq;
dma_addr_t cur_gpd_dma;
cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
if (mreq == NULL || mreq->gpd != gpd) {
dev_err(mtu->dev, "no correct RX req is found\n");
break;
}
req = &mreq->request;
req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
trace_mtu3_complete_gpd(mep, gpd);
mtu3_req_complete(mep, req, 0);
gpd = advance_deq_gpd(ring);
}
dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
int i;
for (i = 1; i < mtu->num_eps; i++) {
if (done_status & QMU_RX_DONE_INT(i))
qmu_done_rx(mtu, i);
if (done_status & QMU_TX_DONE_INT(i))
qmu_done_tx(mtu, i);
}
}
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
void __iomem *mbase = mtu->mac_base;
u32 errval;
int i;
if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
errval = mtu3_readl(mbase, U3D_RQERRIR0);
mtu3_writel(mbase, U3D_RQERRIR0, errval);
for (i = 1; i < mtu->num_eps; i++) {
if (errval & QMU_RX_CS_ERR(i))
dev_err(mtu->dev, "Rx %d CS error!\n", i);
if (errval & QMU_RX_LEN_ERR(i))
dev_err(mtu->dev, "RX %d Length error\n", i);
if (errval & (QMU_RX_CS_ERR(i) | QMU_RX_LEN_ERR(i)))
qmu_error_rx(mtu, i);
}
}
if (qmu_status & RXQ_ZLPERR_INT) {
errval = mtu3_readl(mbase, U3D_RQERRIR1);
for (i = 1; i < mtu->num_eps; i++) {
if (errval & QMU_RX_ZLP_ERR(i))
dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
}
mtu3_writel(mbase, U3D_RQERRIR1, errval);
}
if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
errval = mtu3_readl(mbase, U3D_TQERRIR0);
for (i = 1; i < mtu->num_eps; i++) {
if (errval & QMU_TX_CS_ERR(i))
dev_err(mtu->dev, "Tx %d checksum error!\n", i);
if (errval & QMU_TX_LEN_ERR(i))
qmu_tx_zlp_error_handler(mtu, i);
}
mtu3_writel(mbase, U3D_TQERRIR0, errval);
}
}
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
u32 qmu_status;
u32 qmu_done_status;
/* U3D_QISAR1 is updated on read */
qmu_status = mtu3_readl(mbase, U3D_QISAR1);
qmu_status &= mtu3_readl(mbase, U3D_QIER1);
qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
qmu_status);
trace_mtu3_qmu_isr(qmu_done_status, qmu_status);
if (qmu_done_status)
qmu_done_isr(mtu, qmu_done_status);
if (qmu_status)
qmu_exception_isr(mtu, qmu_status);
return IRQ_HANDLED;
}
int mtu3_qmu_init(struct mtu3 *mtu)
{
compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");
mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);
if (!mtu->qmu_gpd_pool)
return -ENOMEM;
return 0;
}
void mtu3_qmu_exit(struct mtu3 *mtu)
{
dma_pool_destroy(mtu->qmu_gpd_pool);
}
| linux-master | drivers/usb/mtu3/mtu3_qmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_trace.c - trace support
*
* Copyright (C) 2019 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "mtu3_debug.h"
#include "mtu3_trace.h"
void mtu3_dbg_trace(struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
trace_mtu3_log(dev, &vaf);
va_end(args);
}
| linux-master | drivers/usb/mtu3/mtu3_trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget.c - MediaTek usb3 DRD peripheral support
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*/
#include "mtu3.h"
#include "mtu3_trace.h"
void mtu3_req_complete(struct mtu3_ep *mep,
struct usb_request *req, int status)
__releases(mep->mtu->lock)
__acquires(mep->mtu->lock)
{
struct mtu3_request *mreq = to_mtu3_request(req);
struct mtu3 *mtu = mreq->mtu;
list_del(&mreq->list);
if (req->status == -EINPROGRESS)
req->status = status;
trace_mtu3_req_complete(mreq);
/* ep0 uses PIO, so there is no need to unmap it */
if (mep->epnum)
usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n",
mep->name, req, req->status, req->actual, req->length);
spin_unlock(&mtu->lock);
usb_gadget_giveback_request(&mep->ep, req);
spin_lock(&mtu->lock);
}
static void nuke(struct mtu3_ep *mep, const int status)
{
struct mtu3_request *mreq = NULL;
if (list_empty(&mep->req_list))
return;
dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);
/* exclude EP0 */
if (mep->epnum)
mtu3_qmu_flush(mep);
while (!list_empty(&mep->req_list)) {
mreq = list_first_entry(&mep->req_list,
struct mtu3_request, list);
mtu3_req_complete(mep, &mreq->request, status);
}
}
static int mtu3_ep_enable(struct mtu3_ep *mep)
{
const struct usb_endpoint_descriptor *desc;
const struct usb_ss_ep_comp_descriptor *comp_desc;
struct mtu3 *mtu = mep->mtu;
u32 interval = 0;
u32 mult = 0;
u32 burst = 0;
int ret;
desc = mep->desc;
comp_desc = mep->comp_desc;
mep->type = usb_endpoint_type(desc);
mep->maxp = usb_endpoint_maxp(desc);
switch (mtu->g.speed) {
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = desc->bInterval;
interval = clamp_val(interval, 1, 16);
if (usb_endpoint_xfer_isoc(desc) && comp_desc)
mult = comp_desc->bmAttributes;
}
if (comp_desc)
burst = comp_desc->bMaxBurst;
break;
case USB_SPEED_HIGH:
if (usb_endpoint_xfer_isoc(desc) ||
usb_endpoint_xfer_int(desc)) {
interval = desc->bInterval;
interval = clamp_val(interval, 1, 16);
mult = usb_endpoint_maxp_mult(desc) - 1;
}
break;
case USB_SPEED_FULL:
if (usb_endpoint_xfer_isoc(desc))
interval = clamp_val(desc->bInterval, 1, 16);
else if (usb_endpoint_xfer_int(desc))
interval = clamp_val(desc->bInterval, 1, 255);
break;
default:
break; /* others are ignored */
}
dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
__func__, mep->maxp, interval, burst, mult);
mep->ep.maxpacket = mep->maxp;
mep->ep.desc = desc;
mep->ep.comp_desc = comp_desc;
/* slot mainly affects bulk/isoc transfer, so ignore int */
mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;
ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
if (ret < 0)
return ret;
ret = mtu3_gpd_ring_alloc(mep);
if (ret < 0) {
mtu3_deconfig_ep(mtu, mep);
return ret;
}
mtu3_qmu_start(mep);
return 0;
}
static int mtu3_ep_disable(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
/* abort all pending requests */
nuke(mep, -ESHUTDOWN);
mtu3_qmu_stop(mep);
mtu3_deconfig_ep(mtu, mep);
mtu3_gpd_ring_free(mep);
mep->desc = NULL;
mep->ep.desc = NULL;
mep->comp_desc = NULL;
mep->type = 0;
mep->flags = 0;
return 0;
}
static int mtu3_gadget_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct mtu3_ep *mep;
struct mtu3 *mtu;
unsigned long flags;
int ret = -EINVAL;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
pr_debug("%s invalid parameters\n", __func__);
return -EINVAL;
}
if (!desc->wMaxPacketSize) {
pr_debug("%s missing wMaxPacketSize\n", __func__);
return -EINVAL;
}
mep = to_mtu3_ep(ep);
mtu = mep->mtu;
/* check ep number and direction against endpoint */
if (usb_endpoint_num(desc) != mep->epnum)
return -EINVAL;
if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
return -EINVAL;
dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);
if (mep->flags & MTU3_EP_ENABLED) {
dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
mep->name);
return 0;
}
spin_lock_irqsave(&mtu->lock, flags);
mep->desc = desc;
mep->comp_desc = ep->comp_desc;
ret = mtu3_ep_enable(mep);
if (ret)
goto error;
mep->flags = MTU3_EP_ENABLED;
mtu->active_ep++;
error:
spin_unlock_irqrestore(&mtu->lock, flags);
dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
trace_mtu3_gadget_ep_enable(mep);
return ret;
}
static int mtu3_gadget_ep_disable(struct usb_ep *ep)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
struct mtu3 *mtu = mep->mtu;
unsigned long flags;
dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
trace_mtu3_gadget_ep_disable(mep);
if (!(mep->flags & MTU3_EP_ENABLED)) {
dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
return 0;
}
spin_lock_irqsave(&mtu->lock, flags);
mtu3_ep_disable(mep);
mep->flags = 0;
mtu->active_ep--;
spin_unlock_irqrestore(&(mtu->lock), flags);
dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
__func__, mtu->active_ep, mtu->is_active);
return 0;
}
struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
struct mtu3_request *mreq;
mreq = kzalloc(sizeof(*mreq), gfp_flags);
if (!mreq)
return NULL;
mreq->request.dma = DMA_ADDR_INVALID;
mreq->epnum = mep->epnum;
mreq->mep = mep;
INIT_LIST_HEAD(&mreq->list);
trace_mtu3_alloc_request(mreq);
return &mreq->request;
}
void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
{
struct mtu3_request *mreq = to_mtu3_request(req);
trace_mtu3_free_request(mreq);
kfree(mreq);
}
static int mtu3_gadget_queue(struct usb_ep *ep,
struct usb_request *req, gfp_t gfp_flags)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
struct mtu3_request *mreq = to_mtu3_request(req);
struct mtu3 *mtu = mep->mtu;
unsigned long flags;
int ret = 0;
if (!req->buf)
return -ENODATA;
if (mreq->mep != mep)
return -EINVAL;
dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
mreq, ep->maxpacket, mreq->request.length);
if (req->length > GPD_BUF_SIZE ||
(mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
dev_warn(mtu->dev,
"req length > supported MAX:%d requested:%d\n",
mtu->gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE,
req->length);
return -EOPNOTSUPP;
}
/* don't queue if the ep is down */
if (!mep->desc) {
dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
req, ep->name);
return -ESHUTDOWN;
}
mreq->mtu = mtu;
mreq->request.actual = 0;
mreq->request.status = -EINPROGRESS;
ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
if (ret) {
dev_err(mtu->dev, "dma mapping failed\n");
return ret;
}
spin_lock_irqsave(&mtu->lock, flags);
if (mtu3_prepare_transfer(mep)) {
ret = -EAGAIN;
goto error;
}
list_add_tail(&mreq->list, &mep->req_list);
mtu3_insert_gpd(mep, mreq);
mtu3_qmu_resume(mep);
error:
spin_unlock_irqrestore(&mtu->lock, flags);
trace_mtu3_gadget_queue(mreq);
return ret;
}
static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
struct mtu3_request *mreq = to_mtu3_request(req);
struct mtu3_request *r;
struct mtu3 *mtu = mep->mtu;
unsigned long flags;
int ret = 0;
if (mreq->mep != mep)
return -EINVAL;
dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
trace_mtu3_gadget_dequeue(mreq);
spin_lock_irqsave(&mtu->lock, flags);
list_for_each_entry(r, &mep->req_list, list) {
if (r == mreq)
break;
}
if (r != mreq) {
dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
ret = -EINVAL;
goto done;
}
mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */
mtu3_req_complete(mep, req, -ECONNRESET);
mtu3_qmu_start(mep);
done:
spin_unlock_irqrestore(&mtu->lock, flags);
return ret;
}
/*
* Set or clear the halt bit of an EP.
* A halted EP won't TX/RX any data but will queue requests.
*/
static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
struct mtu3 *mtu = mep->mtu;
struct mtu3_request *mreq;
unsigned long flags;
int ret = 0;
dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);
spin_lock_irqsave(&mtu->lock, flags);
if (mep->type == USB_ENDPOINT_XFER_ISOC) {
ret = -EINVAL;
goto done;
}
mreq = next_request(mep);
if (value) {
/*
* If there is no request for the TX-EP, the QMU will not transfer
* data to the TX-FIFO, so there is no need to check whether the
* TX-FIFO holds bytes here
*/
if (mreq) {
dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
ep->name);
ret = -EAGAIN;
goto done;
}
} else {
mep->flags &= ~MTU3_EP_WEDGE;
}
dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");
mtu3_ep_stall_set(mep, value);
done:
spin_unlock_irqrestore(&mtu->lock, flags);
trace_mtu3_gadget_ep_set_halt(mep);
return ret;
}
/* Set the halt feature; subsequent clear-halt requests are ignored */
static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct mtu3_ep *mep = to_mtu3_ep(ep);
mep->flags |= MTU3_EP_WEDGE;
return usb_ep_set_halt(ep);
}
static const struct usb_ep_ops mtu3_ep_ops = {
.enable = mtu3_gadget_ep_enable,
.disable = mtu3_gadget_ep_disable,
.alloc_request = mtu3_alloc_request,
.free_request = mtu3_free_request,
.queue = mtu3_gadget_queue,
.dequeue = mtu3_gadget_dequeue,
.set_halt = mtu3_gadget_ep_set_halt,
.set_wedge = mtu3_gadget_ep_set_wedge,
};
static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
}
static void function_wake_notif(struct mtu3 *mtu, u8 intf)
{
mtu3_writel(mtu->mac_base, U3D_DEV_NOTIF_0,
TYPE_FUNCTION_WAKE | DEV_NOTIF_VAL_FW(intf));
mtu3_setbits(mtu->mac_base, U3D_DEV_NOTIF_0, SEND_DEV_NOTIF);
}
static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
unsigned long flags;
dev_dbg(mtu->dev, "%s\n", __func__);
/* remote wakeup feature is not enabled by host */
if (!mtu->may_wakeup)
return -EOPNOTSUPP;
spin_lock_irqsave(&mtu->lock, flags);
if (mtu->g.speed >= USB_SPEED_SUPER) {
/*
* a class driver may do a function wakeup even if the UFP is in U0,
* and UX_EXIT only takes effect in U1/U2/U3;
*/
mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
/*
* Assume there's only one function on the composite device
* and enable remote wake for the first interface.
* FIXME if the IAD (interface association descriptor) shows
* there is more than one function.
*/
function_wake_notif(mtu, 0);
} else {
mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
spin_unlock_irqrestore(&mtu->lock, flags);
usleep_range(10000, 11000);
spin_lock_irqsave(&mtu->lock, flags);
mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
}
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
}
static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
int is_selfpowered)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
mtu->is_self_powered = !!is_selfpowered;
return 0;
}
static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
unsigned long flags;
dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
is_on ? "on" : "off", mtu->is_active ? "" : "in");
pm_runtime_get_sync(mtu->dev);
/* we'd rather not pullup unless the device is active. */
spin_lock_irqsave(&mtu->lock, flags);
is_on = !!is_on;
if (!mtu->is_active) {
/* save it for mtu3_start() to process the request */
mtu->softconnect = is_on;
} else if (is_on != mtu->softconnect) {
mtu->softconnect = is_on;
mtu3_dev_on_off(mtu, is_on);
}
spin_unlock_irqrestore(&mtu->lock, flags);
pm_runtime_put(mtu->dev);
return 0;
}
static int mtu3_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
unsigned long flags;
if (mtu->gadget_driver) {
dev_err(mtu->dev, "%s is already bound to %s\n",
mtu->g.name, mtu->gadget_driver->driver.name);
return -EBUSY;
}
dev_dbg(mtu->dev, "bind driver %s\n", driver->function);
pm_runtime_get_sync(mtu->dev);
spin_lock_irqsave(&mtu->lock, flags);
mtu->softconnect = 0;
mtu->gadget_driver = driver;
if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
mtu3_start(mtu);
spin_unlock_irqrestore(&mtu->lock, flags);
pm_runtime_put(mtu->dev);
return 0;
}
static void stop_activity(struct mtu3 *mtu)
{
struct usb_gadget_driver *driver = mtu->gadget_driver;
int i;
/* don't disconnect if it's not connected */
if (mtu->g.speed == USB_SPEED_UNKNOWN)
driver = NULL;
else
mtu->g.speed = USB_SPEED_UNKNOWN;
/* deactivate the hardware */
if (mtu->softconnect) {
mtu->softconnect = 0;
mtu3_dev_on_off(mtu, 0);
}
/*
* killing any outstanding requests will quiesce the driver;
* then report disconnect
*/
nuke(mtu->ep0, -ESHUTDOWN);
for (i = 1; i < mtu->num_eps; i++) {
nuke(mtu->in_eps + i, -ESHUTDOWN);
nuke(mtu->out_eps + i, -ESHUTDOWN);
}
if (driver) {
spin_unlock(&mtu->lock);
driver->disconnect(&mtu->g);
spin_lock(&mtu->lock);
}
}
static int mtu3_gadget_stop(struct usb_gadget *g)
{
struct mtu3 *mtu = gadget_to_mtu3(g);
unsigned long flags;
dev_dbg(mtu->dev, "%s\n", __func__);
spin_lock_irqsave(&mtu->lock, flags);
stop_activity(mtu);
mtu->gadget_driver = NULL;
if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
mtu3_stop(mtu);
spin_unlock_irqrestore(&mtu->lock, flags);
synchronize_irq(mtu->irq);
return 0;
}
static void
mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
{
struct mtu3 *mtu = gadget_to_mtu3(g);
unsigned long flags;
dev_dbg(mtu->dev, "%s %s\n", __func__, usb_speed_string(speed));
spin_lock_irqsave(&mtu->lock, flags);
mtu->speed = speed;
spin_unlock_irqrestore(&mtu->lock, flags);
}
static void mtu3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
{
struct mtu3 *mtu = gadget_to_mtu3(g);
unsigned long flags;
dev_dbg(mtu->dev, "%s %s\n", __func__, enable ? "en" : "dis");
spin_lock_irqsave(&mtu->lock, flags);
mtu->async_callbacks = enable;
spin_unlock_irqrestore(&mtu->lock, flags);
}
static const struct usb_gadget_ops mtu3_gadget_ops = {
.get_frame = mtu3_gadget_get_frame,
.wakeup = mtu3_gadget_wakeup,
.set_selfpowered = mtu3_gadget_set_self_powered,
.pullup = mtu3_gadget_pullup,
.udc_start = mtu3_gadget_start,
.udc_stop = mtu3_gadget_stop,
.udc_set_speed = mtu3_gadget_set_speed,
.udc_async_callbacks = mtu3_gadget_async_callbacks,
};
static void mtu3_state_reset(struct mtu3 *mtu)
{
mtu->address = 0;
mtu->ep0_state = MU3D_EP0_STATE_SETUP;
mtu->may_wakeup = 0;
mtu->u1_enable = 0;
mtu->u2_enable = 0;
mtu->delayed_status = false;
mtu->test_mode = false;
}
static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
u32 epnum, u32 is_in)
{
mep->epnum = epnum;
mep->mtu = mtu;
mep->is_in = is_in;
INIT_LIST_HEAD(&mep->req_list);
sprintf(mep->name, "ep%d%s", epnum,
!epnum ? "" : (is_in ? "in" : "out"));
mep->ep.name = mep->name;
INIT_LIST_HEAD(&mep->ep.ep_list);
/* initialize maxpacket as SS */
if (!epnum) {
usb_ep_set_maxpacket_limit(&mep->ep, 512);
mep->ep.caps.type_control = true;
mep->ep.ops = &mtu3_ep0_ops;
mtu->g.ep0 = &mep->ep;
} else {
usb_ep_set_maxpacket_limit(&mep->ep, 1024);
mep->ep.caps.type_iso = true;
mep->ep.caps.type_bulk = true;
mep->ep.caps.type_int = true;
mep->ep.ops = &mtu3_ep_ops;
list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
}
dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
mep->ep.maxpacket);
if (!epnum) {
mep->ep.caps.dir_in = true;
mep->ep.caps.dir_out = true;
} else if (is_in) {
mep->ep.caps.dir_in = true;
} else {
mep->ep.caps.dir_out = true;
}
}
static void mtu3_gadget_init_eps(struct mtu3 *mtu)
{
u8 epnum;
/* initialize endpoint list just once */
INIT_LIST_HEAD(&(mtu->g.ep_list));
dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
__func__, mtu->num_eps);
init_hw_ep(mtu, mtu->ep0, 0, 0);
for (epnum = 1; epnum < mtu->num_eps; epnum++) {
init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
}
}
int mtu3_gadget_setup(struct mtu3 *mtu)
{
mtu->g.ops = &mtu3_gadget_ops;
mtu->g.max_speed = mtu->max_speed;
mtu->g.speed = USB_SPEED_UNKNOWN;
mtu->g.sg_supported = 0;
mtu->g.name = MTU3_DRIVER_NAME;
mtu->g.irq = mtu->irq;
mtu->is_active = 0;
mtu->delayed_status = false;
mtu3_gadget_init_eps(mtu);
return usb_add_gadget_udc(mtu->dev, &mtu->g);
}
void mtu3_gadget_cleanup(struct mtu3 *mtu)
{
usb_del_gadget_udc(&mtu->g);
}
void mtu3_gadget_resume(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget RESUME\n");
if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->resume) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->resume(&mtu->g);
spin_lock(&mtu->lock);
}
}
/* called when SOF packets stop for 3+ msec or the link enters U3 */
void mtu3_gadget_suspend(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget SUSPEND\n");
if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->suspend) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->suspend(&mtu->g);
spin_lock(&mtu->lock);
}
}
/* called when VBUS drops below session threshold, and in other cases */
void mtu3_gadget_disconnect(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget DISCONNECT\n");
if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->disconnect) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->disconnect(&mtu->g);
spin_lock(&mtu->lock);
}
mtu3_state_reset(mtu);
usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
}
void mtu3_gadget_reset(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget RESET\n");
/* report disconnect, if we didn't flush EP state */
if (mtu->g.speed != USB_SPEED_UNKNOWN)
mtu3_gadget_disconnect(mtu);
else
mtu3_state_reset(mtu);
}
| linux-master | drivers/usb/mtu3/mtu3_gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence CDNSP DRD Driver.
*
* Copyright (C) 2020 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
*/
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include "core.h"
#include "gadget-export.h"
#include "drd.h"
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"
unsigned int cdnsp_port_speed(unsigned int port_status)
{
/* Detect the gadget speed based on the PORTSC register. */
if (DEV_SUPERSPEEDPLUS(port_status))
return USB_SPEED_SUPER_PLUS;
else if (DEV_SUPERSPEED(port_status))
return USB_SPEED_SUPER;
else if (DEV_HIGHSPEED(port_status))
return USB_SPEED_HIGH;
else if (DEV_FULLSPEED(port_status))
return USB_SPEED_FULL;
/* If the device is detached then the speed will be USB_SPEED_UNKNOWN. */
return USB_SPEED_UNKNOWN;
}
/*
* Given a port state, this function returns a value that would result in the
* port being in the same state, if the value was written to the port status
* control register.
* Save Read Only (RO) bits and save read/write bits where
* writing a 0 clears the bit and writing a 1 sets the bit (RWS).
* For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
*/
u32 cdnsp_port_state_to_neutral(u32 state)
{
/* Save read-only status and port state. */
return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
}
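/*
 * Illustrative usage sketch (mirrors cdnsp_disable_port() below): neutralize
 * PORTSC before a read-modify-write so change bits aren't cleared by
 * accident:
 *
 * u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
 * writel(temp | PORT_PED, port_regs);
 */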
/**
* cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
* with capability ID id.
* @base: PCI MMIO registers base address.
* @start: Address at which to start looking, (0 or HCC_PARAMS to start at
* beginning of list)
* @id: Extended capability ID to search for.
*
* Returns the offset of the next matching extended capability structure.
* Some capabilities can occur several times,
* e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
*/
int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
{
u32 offset = start;
u32 next;
u32 val;
if (!start || start == HCC_PARAMS_OFFSET) {
val = readl(base + HCC_PARAMS_OFFSET);
if (val == ~0)
return 0;
offset = HCC_EXT_CAPS(val) << 2;
if (!offset)
return 0;
}
do {
val = readl(base + offset);
if (val == ~0)
return 0;
if (EXT_CAPS_ID(val) == id && offset != start)
return offset;
next = EXT_CAPS_NEXT(val);
offset += next << 2;
} while (next);
return 0;
}
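/*
 * Illustrative usage sketch, not part of the driver: walking every
 * occurrence of a repeated capability such as EXT_CAPS_PROTOCOL, as the
 * kerneldoc above describes. handle_protocol_cap() is a hypothetical
 * helper invented for this example.
 */
#if 0 /* example only */
static void example_walk_protocol_caps(void __iomem *base)
{
u32 off = 0;
/* start == 0 begins at HCC_PARAMS; each call resumes after 'off' */
while ((off = cdnsp_find_next_ext_cap(base, off, EXT_CAPS_PROTOCOL)))
handle_protocol_cap(base + off);
}
#endif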
void cdnsp_set_link_state(struct cdnsp_device *pdev,
__le32 __iomem *port_regs,
u32 link_state)
{
int port_num = 0xFF;
u32 temp;
temp = readl(port_regs);
temp = cdnsp_port_state_to_neutral(temp);
temp |= PORT_WKCONN_E | PORT_WKDISC_E;
writel(temp, port_regs);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
if (pdev->active_port)
port_num = pdev->active_port->port_num;
trace_cdnsp_handle_port_status(port_num, readl(port_regs));
writel(temp, port_regs);
trace_cdnsp_link_state_changed(port_num, readl(port_regs));
}
static void cdnsp_disable_port(struct cdnsp_device *pdev,
__le32 __iomem *port_regs)
{
u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
writel(temp | PORT_PED, port_regs);
}
static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
__le32 __iomem *port_regs)
{
u32 portsc = readl(port_regs);
writel(cdnsp_port_state_to_neutral(portsc) |
(portsc & PORT_CHANGE_BITS), port_regs);
}
static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
__le32 __iomem *reg;
void __iomem *base;
u32 offset = 0;
base = &pdev->cap_regs->hc_capbase;
offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
bit = readl(reg) | bit;
writel(bit, reg);
}
static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
__le32 __iomem *reg;
void __iomem *base;
u32 offset = 0;
base = &pdev->cap_regs->hc_capbase;
offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
bit = readl(reg) & ~bit;
writel(bit, reg);
}
/*
* Disable interrupts and begin the controller halting process.
*/
static void cdnsp_quiesce(struct cdnsp_device *pdev)
{
u32 halted;
u32 mask;
u32 cmd;
mask = ~(u32)(CDNSP_IRQS);
halted = readl(&pdev->op_regs->status) & STS_HALT;
if (!halted)
mask &= ~(CMD_R_S | CMD_DEVEN);
cmd = readl(&pdev->op_regs->command);
cmd &= mask;
writel(cmd, &pdev->op_regs->command);
}
/*
* Force controller into halt state.
*
* Disable any IRQs and clear the run/stop bit.
* Controller will complete any current and actively pipelined transactions, and
* should halt within 16 ms of the run/stop bit being cleared.
* Read controller Halted bit in the status register to see when the
* controller is finished.
*/
int cdnsp_halt(struct cdnsp_device *pdev)
{
int ret;
u32 val;
cdnsp_quiesce(pdev);
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
val & STS_HALT, 1,
CDNSP_MAX_HALT_USEC);
if (ret) {
dev_err(pdev->dev, "ERROR: Device halt failed\n");
return ret;
}
pdev->cdnsp_state |= CDNSP_STATE_HALTED;
return 0;
}
/*
* The device controller died: a register read returns 0xffffffff, or a
* command never completes.
*/
void cdnsp_died(struct cdnsp_device *pdev)
{
dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
pdev->cdnsp_state |= CDNSP_STATE_DYING;
cdnsp_halt(pdev);
}
/*
* Set the run bit and wait for the device to be running.
*/
static int cdnsp_start(struct cdnsp_device *pdev)
{
u32 temp;
int ret;
temp = readl(&pdev->op_regs->command);
temp |= (CMD_R_S | CMD_DEVEN);
writel(temp, &pdev->op_regs->command);
pdev->cdnsp_state = 0;
/*
* Wait for the STS_HALT Status bit to be 0 to indicate the device is
* running.
*/
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
!(temp & STS_HALT), 1,
CDNSP_MAX_HALT_USEC);
if (ret) {
pdev->cdnsp_state = CDNSP_STATE_DYING;
dev_err(pdev->dev, "ERROR: Controller run failed\n");
}
return ret;
}
/*
* Reset a halted controller.
*
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
* will be set to their defaults.
*/
int cdnsp_reset(struct cdnsp_device *pdev)
{
u32 command;
u32 temp;
int ret;
temp = readl(&pdev->op_regs->status);
if (temp == ~(u32)0) {
dev_err(pdev->dev, "Device not accessible, reset failed.\n");
return -ENODEV;
}
if ((temp & STS_HALT) == 0) {
dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
return -EINVAL;
}
command = readl(&pdev->op_regs->command);
command |= CMD_RESET;
writel(command, &pdev->op_regs->command);
ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
!(temp & CMD_RESET), 1,
10 * 1000);
if (ret) {
dev_err(pdev->dev, "ERROR: Controller reset failed\n");
return ret;
}
/*
* CDNSP cannot write any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
!(temp & STS_CNR), 1,
10 * 1000);
if (ret) {
dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
return ret;
}
dev_dbg(pdev->dev, "Controller ready to work");
return ret;
}
/*
* cdnsp_get_endpoint_index - Find the index for an endpoint given its
* descriptor. Use the return value to right shift 1 for the bitmask.
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
* For control endpoints, the IN index is used (OUT index is unused), so
* index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
*/
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
unsigned int index = (unsigned int)usb_endpoint_num(desc);
if (usb_endpoint_xfer_control(desc))
return index * 2;
return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}
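/*
 * Worked examples for the formula above (not part of the driver):
 * ep1 OUT -> (1 * 2) + 0 - 1 = 1; ep1 IN -> (1 * 2) + 1 - 1 = 2;
 * ep0 (control) -> 0 * 2 = 0.
 * cdnsp_get_endpoint_flag() below then maps index i to bit (i + 1),
 * since bit 0 is the slot context.
 */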
/*
* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}
int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
struct cdnsp_device *pdev = pep->pdev;
struct usb_request *request;
int ret;
if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
trace_cdnsp_request_enqueue_busy(preq);
return -EBUSY;
}
request = &preq->request;
request->actual = 0;
request->status = -EINPROGRESS;
preq->direction = pep->direction;
preq->epnum = pep->number;
preq->td.drbl = 0;
ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
if (ret) {
trace_cdnsp_request_enqueue_error(preq);
return ret;
}
list_add_tail(&preq->list, &pep->pending_list);
trace_cdnsp_request_enqueue(preq);
switch (usb_endpoint_type(pep->endpoint.desc)) {
case USB_ENDPOINT_XFER_CONTROL:
ret = cdnsp_queue_ctrl_tx(pdev, preq);
break;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
ret = cdnsp_queue_bulk_tx(pdev, preq);
break;
case USB_ENDPOINT_XFER_ISOC:
ret = cdnsp_queue_isoc_tx(pdev, preq);
}
if (ret)
goto unmap;
return 0;
unmap:
usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
pep->direction);
list_del(&preq->list);
trace_cdnsp_request_enqueue_error(preq);
return ret;
}
/*
* Remove the request's TD from the endpoint ring. This may cause the
* controller to stop USB transfers, potentially stopping in the middle of a
* TRB buffer. The controller should pick up where it left off in the TD,
* unless a Set Transfer Ring Dequeue Pointer is issued.
*
* The TRBs that make up the buffers for the canceled request will be "removed"
* from the ring. Since the ring is a contiguous structure, they can't be
* physically removed. Instead, there are two options:
*
* 1) If the controller is in the middle of processing the request to be
* canceled, we simply move the ring's dequeue pointer past those TRBs
* using the Set Transfer Ring Dequeue Pointer command. This will be
* the common case, when drivers timeout on the last submitted request
* and attempt to cancel.
*
* 2) If the controller is in the middle of a different TD, we turn the TRBs
* into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
* The controller will need to invalidate any TRBs it has cached after
* the stop endpoint command.
*
* 3) The TD may have completed by the time the Stop Endpoint Command
* completes, so software needs to handle that case too.
*
*/
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
struct cdnsp_device *pdev = pep->pdev;
int ret_stop = 0;
int ret_rem;
trace_cdnsp_request_dequeue(preq);
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
ret_rem = cdnsp_remove_request(pdev, preq, pep);
return ret_rem ? ret_rem : ret_stop;
}
static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
{
struct cdnsp_input_control_ctx *ctrl_ctx;
struct cdnsp_slot_ctx *slot_ctx;
struct cdnsp_ep_ctx *ep_ctx;
int i;
ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
/*
* When a device's add flag and drop flag are zero, any subsequent
* configure endpoint command will leave that endpoint's state
* untouched. Make sure we don't leave any old state in the input
* endpoint contexts.
*/
ctrl_ctx->drop_flags = 0;
ctrl_ctx->add_flags = 0;
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
/* Endpoint 0 is always valid */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->deq = 0;
ep_ctx->tx_info = 0;
}
}
/* Issue a configure endpoint command and wait for it to finish. */
static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{
int ret;
cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
if (ret) {
dev_err(pdev->dev,
"ERR: unexpected command completion code 0x%x.\n", ret);
return -EINVAL;
}
return ret;
}
static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
struct cdnsp_segment *segment;
union cdnsp_trb *event;
u32 cycle_state;
u32 data;
event = pdev->event_ring->dequeue;
segment = pdev->event_ring->deq_seg;
cycle_state = pdev->event_ring->cycle_state;
while (1) {
data = le32_to_cpu(event->trans_event.flags);
/* Check the owner of the TRB. */
if ((data & TRB_CYCLE) != cycle_state)
break;
if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
TRB_TO_EP_ID(data) == (pep->idx + 1)) {
data |= TRB_EVENT_INVALIDATE;
event->trans_event.flags = cpu_to_le32(data);
}
if (cdnsp_last_trb_on_seg(segment, event)) {
cycle_state ^= 1;
segment = pdev->event_ring->deq_seg->next;
event = segment->trbs;
} else {
event++;
}
}
}
int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
struct cdnsp_segment *event_deq_seg;
union cdnsp_trb *cmd_trb;
dma_addr_t cmd_deq_dma;
union cdnsp_trb *event;
u32 cycle_state;
int ret, val;
u64 cmd_dma;
u32 flags;
cmd_trb = pdev->cmd.command_trb;
pdev->cmd.status = 0;
trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);
ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
!CMD_RING_BUSY(val), 1,
CDNSP_CMD_TIMEOUT);
if (ret) {
dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
pdev->cdnsp_state = CDNSP_STATE_DYING;
return -ETIMEDOUT;
}
event = pdev->event_ring->dequeue;
event_deq_seg = pdev->event_ring->deq_seg;
cycle_state = pdev->event_ring->cycle_state;
cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
if (!cmd_deq_dma)
return -EINVAL;
while (1) {
flags = le32_to_cpu(event->event_cmd.flags);
/* Check the owner of the TRB. */
if ((flags & TRB_CYCLE) != cycle_state)
return -EINVAL;
cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
/*
* Check whether the completion event is for last queued
* command.
*/
if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
cmd_dma != (u64)cmd_deq_dma) {
if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
event++;
continue;
}
if (cdnsp_last_trb_on_ring(pdev->event_ring,
event_deq_seg, event))
cycle_state ^= 1;
event_deq_seg = event_deq_seg->next;
event = event_deq_seg->trbs;
continue;
}
trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);
pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
if (pdev->cmd.status == COMP_SUCCESS)
return 0;
return -pdev->cmd.status;
}
}
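/*
 * Cycle-bit ownership, by example: on a freshly initialized event ring the
 * consumer's cycle_state is 1, so only TRBs the controller has written with
 * TRB_CYCLE set are treated as valid. Once the dequeue walk wraps past the
 * last TRB of the last segment, cycle_state toggles to 0 and ownership
 * inverts for the next pass. Both the loop above and
 * cdnsp_invalidate_ep_events() rely on this xHCI-style convention.
 */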
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
int value)
{
int ret;
trace_cdnsp_ep_halt(value ? "Set" : "Clear");
ret = cdnsp_cmd_stop_ep(pdev, pep);
if (ret)
return ret;
if (value) {
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
cdnsp_queue_halt_endpoint(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
}
pep->ep_state |= EP_HALTED;
} else {
cdnsp_queue_reset_ep(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);
if (ret)
return ret;
pep->ep_state &= ~EP_HALTED;
if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
cdnsp_ring_doorbell_for_active_rings(pdev, pep);
pep->ep_state &= ~EP_WEDGE;
}
return 0;
}
static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
struct cdnsp_input_control_ctx *ctrl_ctx;
struct cdnsp_slot_ctx *slot_ctx;
int ret = 0;
u32 ep_sts;
int i;
ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	/* Don't issue the command if there are no endpoints to update. */
if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
return 0;
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
__le32 le32 = cpu_to_le32(BIT(i));
if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
(ctrl_ctx->add_flags & le32) || i == 1) {
slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
break;
}
}
ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
ep_sts == EP_STATE_DISABLED) ||
(ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
ret = cdnsp_configure_endpoint(pdev);
trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);
cdnsp_zero_in_ctx(pdev);
return ret;
}
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
* cdnsp_setup_device(), and then re-set up the configuration.
*/
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
struct cdnsp_slot_ctx *slot_ctx;
int slot_state;
int ret, i;
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
slot_ctx->dev_info = 0;
pdev->device_address = 0;
/* If device is not setup, there is no point in resetting it. */
slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
trace_cdnsp_reset_device(slot_ctx);
if (slot_state <= SLOT_STATE_DEFAULT &&
pdev->eps[0].ep_state & EP_HALTED) {
cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
}
/*
	 * During the Reset Device command the controller transitions
	 * endpoint 0 to the Running state.
*/
pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
pdev->eps[0].ep_state |= EP_ENABLED;
if (slot_state <= SLOT_STATE_DEFAULT)
return 0;
cdnsp_queue_reset_device(pdev);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
/*
	 * After the Reset Device command all non-default endpoints
	 * are in the Disabled state.
*/
for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d\n",
			ret);
return ret;
}
/*
* Sets the MaxPStreams field and the Linear Stream Array field.
* Sets the dequeue pointer to the stream context array.
*/
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
struct cdnsp_ep_ctx *ep_ctx,
struct cdnsp_stream_info *stream_info)
{
u32 max_primary_streams;
/* MaxPStreams is the number of stream context array entries, not the
* number we're actually using. Must be in 2^(MaxPstreams + 1) format.
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
| EP_HAS_LSA);
ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
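/*
 * Worked example of the MaxPStreams encoding above: with
 * stream_info->num_stream_ctxs == 16, fls(16) == 5, so
 * max_primary_streams == 3 and the controller decodes the array size as
 * 2^(3 + 1) == 16 entries.
 */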
/*
 * Drivers use this function to prepare a bulk endpoint to use streams.
 *
 * Don't allow the call to succeed if the endpoint only supports one stream
 * (which means it doesn't support streams at all).
*/
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
unsigned int num_stream_ctxs;
int ret;
if (num_streams == 0)
return 0;
if (num_streams > STREAM_NUM_STREAMS)
return -EINVAL;
/*
* Add two to the number of streams requested to account for
* stream 0 that is reserved for controller usage and one additional
* for TASK SET FULL response.
*/
num_streams += 2;
/* The stream context array size must be a power of two */
num_stream_ctxs = roundup_pow_of_two(num_streams);
trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);
ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
if (ret)
return ret;
cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
pep->ep_state |= EP_HAS_STREAMS;
pep->stream_info.td_count = 0;
pep->stream_info.first_prime_det = 0;
/* Subtract 1 for stream 0, which drivers can't use. */
return num_streams - 1;
}
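/*
 * Sizing example for the function above: a SuperSpeed companion descriptor
 * advertising 2^4 == 16 streams gives num_streams = 16 + 2 == 18,
 * num_stream_ctxs rounds up to 32, and the function returns 18 - 1 == 17.
 */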
int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
int ret;
cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
pdev->slot_id = 0;
pdev->active_port = NULL;
trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
return ret;
}
int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
struct cdnsp_slot_ctx *slot_ctx;
int slot_state;
int ret;
	/* Enable the slot only if it is currently disabled. */
slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
if (slot_state != SLOT_STATE_DISABLED)
return 0;
cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
if (ret)
goto show_trace;
pdev->slot_id = 1;
show_trace:
trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
return ret;
}
/*
 * Issue an Address Device command with BSR=1 if setup is SETUP_CONTEXT_ONLY
 * or with BSR=0 if setup is SETUP_CONTEXT_ADDRESS.
*/
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
struct cdnsp_input_control_ctx *ctrl_ctx;
struct cdnsp_slot_ctx *slot_ctx;
int dev_state = 0;
int ret;
if (!pdev->slot_id) {
trace_cdnsp_slot_id("incorrect");
return -EINVAL;
}
if (!pdev->active_port->port_num)
return -EINVAL;
slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
trace_cdnsp_slot_already_in_default(slot_ctx);
return 0;
}
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
ret = cdnsp_setup_addressable_priv_dev(pdev);
if (ret)
return ret;
}
cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
ctrl_ctx->drop_flags = 0;
trace_cdnsp_setup_device_slot(slot_ctx);
cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));
/* Zero the input context control for later use. */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
return ret;
}
void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
struct usb_request *req,
int enable)
{
if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
return;
trace_cdnsp_lpm(enable);
if (enable)
writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
&pdev->active_port->regs->portpmsc);
else
writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
}
static int cdnsp_get_frame(struct cdnsp_device *pdev)
{
return readl(&pdev->run_regs->microframe_index) >> 3;
}
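/*
 * The shift above converts microframes to frames: the bus runs eight 125 us
 * microframes per 1 ms frame, so frame == microframe_index >> 3.
 */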
static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct cdnsp_input_control_ctx *ctrl_ctx;
struct cdnsp_device *pdev;
struct cdnsp_ep *pep;
unsigned long flags;
u32 added_ctxs;
int ret;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
!desc->wMaxPacketSize)
return -EINVAL;
pep = to_cdnsp_ep(ep);
pdev = pep->pdev;
pep->ep_state &= ~EP_UNCONFIGURED;
if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
"%s is already enabled\n", pep->name))
return 0;
spin_lock_irqsave(&pdev->lock, flags);
added_ctxs = cdnsp_get_endpoint_flag(desc);
if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
ret = -EINVAL;
goto unlock;
}
pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
if (pdev->gadget.speed == USB_SPEED_FULL) {
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
pep->interval = desc->bInterval << 3;
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
pep->interval = BIT(desc->bInterval - 1) << 3;
}
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
if (pep->interval > BIT(12)) {
dev_err(pdev->dev, "bInterval %d not supported\n",
desc->bInterval);
ret = -EINVAL;
goto unlock;
}
cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
}
ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
if (ret)
goto unlock;
ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
ctrl_ctx->drop_flags = 0;
ret = cdnsp_update_eps_configuration(pdev, pep);
if (ret) {
cdnsp_free_endpoint_rings(pdev, pep);
goto unlock;
}
pep->ep_state |= EP_ENABLED;
pep->ep_state &= ~EP_STOPPED;
unlock:
trace_cdnsp_ep_enable_end(pep, 0);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
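/*
 * Interval encoding examples for cdnsp_gadget_ep_enable() above, in 125 us
 * microframe units: a high-speed interrupt endpoint with bInterval == 4
 * yields pep->interval = 2^(4 - 1) == 8 (1 ms); a full-speed interrupt
 * endpoint with bInterval == 10 yields 10 << 3 == 80 (10 ms).
 */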
static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
struct cdnsp_input_control_ctx *ctrl_ctx;
struct cdnsp_request *preq;
struct cdnsp_device *pdev;
struct cdnsp_ep *pep;
unsigned long flags;
u32 drop_flag;
int ret = 0;
if (!ep)
return -EINVAL;
pep = to_cdnsp_ep(ep);
pdev = pep->pdev;
spin_lock_irqsave(&pdev->lock, flags);
if (!(pep->ep_state & EP_ENABLED)) {
dev_err(pdev->dev, "%s is already disabled\n", pep->name);
ret = -EINVAL;
goto finish;
}
pep->ep_state |= EP_DIS_IN_RROGRESS;
	/* Stop and flush unless the endpoint was already unconfigured by a Reset Device command. */
if (!(pep->ep_state & EP_UNCONFIGURED)) {
cdnsp_cmd_stop_ep(pdev, pep);
cdnsp_cmd_flush_ep(pdev, pep);
}
/* Remove all queued USB requests. */
while (!list_empty(&pep->pending_list)) {
preq = next_request(&pep->pending_list);
cdnsp_ep_dequeue(pep, preq);
}
cdnsp_invalidate_ep_events(pdev, pep);
pep->ep_state &= ~EP_DIS_IN_RROGRESS;
drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
ctrl_ctx->add_flags = 0;
cdnsp_endpoint_zero(pdev, pep);
if (!(pep->ep_state & EP_UNCONFIGURED))
ret = cdnsp_update_eps_configuration(pdev, pep);
cdnsp_free_endpoint_rings(pdev, pep);
pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
pep->ep_state |= EP_STOPPED;
finish:
trace_cdnsp_ep_disable_end(pep, 0);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
gfp_t gfp_flags)
{
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_request *preq;
preq = kzalloc(sizeof(*preq), gfp_flags);
if (!preq)
return NULL;
preq->epnum = pep->number;
preq->pep = pep;
trace_cdnsp_alloc_request(preq);
return &preq->request;
}
static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
struct usb_request *request)
{
struct cdnsp_request *preq = to_cdnsp_request(request);
trace_cdnsp_free_request(preq);
kfree(preq);
}
static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags)
{
struct cdnsp_request *preq;
struct cdnsp_device *pdev;
struct cdnsp_ep *pep;
unsigned long flags;
int ret;
if (!request || !ep)
return -EINVAL;
pep = to_cdnsp_ep(ep);
pdev = pep->pdev;
if (!(pep->ep_state & EP_ENABLED)) {
dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
pep->name);
return -EINVAL;
}
preq = to_cdnsp_request(request);
spin_lock_irqsave(&pdev->lock, flags);
ret = cdnsp_ep_enqueue(pep, preq);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
struct usb_request *request)
{
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_device *pdev = pep->pdev;
unsigned long flags;
int ret;
if (!pep->endpoint.desc) {
dev_err(pdev->dev,
"%s: can't dequeue to disabled endpoint\n",
pep->name);
return -ESHUTDOWN;
}
	/* Requests have already been dequeued while disabling the endpoint. */
if (!(pep->ep_state & EP_ENABLED))
return 0;
spin_lock_irqsave(&pdev->lock, flags);
ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_device *pdev = pep->pdev;
struct cdnsp_request *preq;
unsigned long flags;
int ret;
spin_lock_irqsave(&pdev->lock, flags);
preq = next_request(&pep->pending_list);
if (value) {
if (preq) {
trace_cdnsp_ep_busy_try_halt_again(pep, 0);
ret = -EAGAIN;
goto done;
}
}
ret = cdnsp_halt_endpoint(pdev, pep, value);
done:
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_device *pdev = pep->pdev;
unsigned long flags;
int ret;
spin_lock_irqsave(&pdev->lock, flags);
pep->ep_state |= EP_WEDGE;
ret = cdnsp_halt_endpoint(pdev, pep, 1);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
.enable = cdnsp_gadget_ep_enable,
.disable = cdnsp_gadget_ep_disable,
.alloc_request = cdnsp_gadget_ep_alloc_request,
.free_request = cdnsp_gadget_ep_free_request,
.queue = cdnsp_gadget_ep_queue,
.dequeue = cdnsp_gadget_ep_dequeue,
.set_halt = cdnsp_gadget_ep_set_halt,
.set_wedge = cdnsp_gadget_ep_set_wedge,
};
static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
.enable = cdnsp_gadget_ep_enable,
.disable = cdnsp_gadget_ep_disable,
.alloc_request = cdnsp_gadget_ep_alloc_request,
.free_request = cdnsp_gadget_ep_free_request,
.queue = cdnsp_gadget_ep_queue,
.dequeue = cdnsp_gadget_ep_dequeue,
.set_halt = cdnsp_gadget_ep_set_halt,
.set_wedge = cdnsp_gadget_ep_set_wedge,
};
void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
struct cdnsp_request *preq,
int status)
{
struct cdnsp_device *pdev = pep->pdev;
list_del(&preq->list);
if (preq->request.status == -EINPROGRESS)
preq->request.status = status;
usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
preq->direction);
trace_cdnsp_request_giveback(preq);
if (preq != &pdev->ep0_preq) {
spin_unlock(&pdev->lock);
usb_gadget_giveback_request(&pep->endpoint, &preq->request);
spin_lock(&pdev->lock);
}
}
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};
static int cdnsp_run(struct cdnsp_device *pdev,
enum usb_device_speed speed)
{
u32 fs_speed = 0;
u32 temp;
int ret;
temp = readl(&pdev->ir_set->irq_control);
temp &= ~IMOD_INTERVAL_MASK;
temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
writel(temp, &pdev->ir_set->irq_control);
temp = readl(&pdev->port3x_regs->mode_addr);
switch (speed) {
case USB_SPEED_SUPER_PLUS:
temp |= CFG_3XPORT_SSP_SUPPORT;
break;
case USB_SPEED_SUPER:
temp &= ~CFG_3XPORT_SSP_SUPPORT;
break;
case USB_SPEED_HIGH:
break;
case USB_SPEED_FULL:
fs_speed = PORT_REG6_FORCE_FS;
break;
default:
dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
speed);
fallthrough;
case USB_SPEED_UNKNOWN:
/* Default to superspeed. */
speed = USB_SPEED_SUPER;
break;
}
if (speed >= USB_SPEED_SUPER) {
writel(temp, &pdev->port3x_regs->mode_addr);
cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
XDEV_RXDETECT);
} else {
cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
}
cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
XDEV_RXDETECT);
cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);
ret = cdnsp_start(pdev);
if (ret) {
ret = -ENODEV;
goto err;
}
temp = readl(&pdev->op_regs->command);
temp |= (CMD_INTE);
writel(temp, &pdev->op_regs->command);
temp = readl(&pdev->ir_set->irq_pending);
writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);
trace_cdnsp_init("Controller ready to work");
return 0;
err:
cdnsp_halt(pdev);
return ret;
}
static int cdnsp_gadget_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
enum usb_device_speed max_speed = driver->max_speed;
struct cdnsp_device *pdev = gadget_to_cdnsp(g);
unsigned long flags;
int ret;
spin_lock_irqsave(&pdev->lock, flags);
pdev->gadget_driver = driver;
/* limit speed if necessary */
max_speed = min(driver->max_speed, g->max_speed);
ret = cdnsp_run(pdev, max_speed);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
/*
* Update Event Ring Dequeue Pointer:
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
union cdnsp_trb *event_ring_deq,
u8 clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != pdev->event_ring->dequeue) {
deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
pdev->event_ring->dequeue);
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C). */
if (clear_ehb)
temp_64 |= ERST_EHB;
else
temp_64 &= ~ERST_EHB;
cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}
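/*
 * Layout note for the masking above, based on the xHCI-style register format
 * this controller follows: the low bits of ERST_DEQUEUE hold the dequeue
 * ERST segment index and the RW1C Event Handler Busy flag, which is why they
 * are preserved and merged separately from the 16-byte-aligned pointer bits.
 */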
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
struct cdnsp_segment *seg;
u64 val_64;
int i;
cdnsp_initialize_ring_info(pdev->cmd_ring);
seg = pdev->cmd_ring->first_seg;
for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
memset(seg->trbs, 0,
sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
seg = seg->next;
}
/* Set the address in the Command Ring Control register. */
val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
(pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
pdev->cmd_ring->cycle_state;
cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}
static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
struct cdnsp_segment *event_deq_seg;
union cdnsp_trb *event_ring_deq;
union cdnsp_trb *event;
u32 cycle_bit;
event_ring_deq = pdev->event_ring->dequeue;
event_deq_seg = pdev->event_ring->deq_seg;
event = pdev->event_ring->dequeue;
/* Update ring dequeue pointer. */
while (1) {
cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);
/* Does the controller or driver own the TRB? */
if (cycle_bit != pdev->event_ring->cycle_state)
break;
cdnsp_inc_deq(pdev, pdev->event_ring);
if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
event++;
continue;
}
if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
event))
cycle_bit ^= 1;
event_deq_seg = event_deq_seg->next;
event = event_deq_seg->trbs;
}
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}
static void cdnsp_stop(struct cdnsp_device *pdev)
{
u32 temp;
cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
/* Remove internally queued request for ep0. */
if (!list_empty(&pdev->eps[0].pending_list)) {
struct cdnsp_request *req;
req = next_request(&pdev->eps[0].pending_list);
if (req == &pdev->ep0_preq)
cdnsp_ep_dequeue(&pdev->eps[0], req);
}
cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
cdnsp_disable_slot(pdev);
cdnsp_halt(pdev);
temp = readl(&pdev->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
temp = readl(&pdev->ir_set->irq_pending);
writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);
/* Clear interrupt line */
temp = readl(&pdev->ir_set->irq_pending);
temp |= IMAN_IP;
writel(temp, &pdev->ir_set->irq_pending);
cdnsp_consume_all_events(pdev);
cdnsp_clear_cmd_ring(pdev);
trace_cdnsp_exit("Controller stopped.");
}
/*
* Stop controller.
* This function is called by the gadget core when the driver is removed.
* Disable slot, disable IRQs, and quiesce the controller.
*/
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(g);
unsigned long flags;
spin_lock_irqsave(&pdev->lock, flags);
cdnsp_stop(pdev);
pdev->gadget_driver = NULL;
spin_unlock_irqrestore(&pdev->lock, flags);
return 0;
}
static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(g);
return cdnsp_get_frame(pdev);
}
static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
struct cdnsp_port_regs __iomem *port_regs;
u32 portpm, portsc;
port_regs = pdev->active_port->regs;
portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;
	/* Bail out if the host has not enabled the remote wakeup feature. */
if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
portpm = readl(&port_regs->portpmsc);
if (!(portpm & PORT_RWE))
return;
}
if (portsc == XDEV_U3 && !pdev->may_wakeup)
return;
cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}
static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(g);
unsigned long flags;
spin_lock_irqsave(&pdev->lock, flags);
__cdnsp_gadget_wakeup(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
return 0;
}
static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
int is_selfpowered)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(g);
unsigned long flags;
spin_lock_irqsave(&pdev->lock, flags);
g->is_selfpowered = !!is_selfpowered;
spin_unlock_irqrestore(&pdev->lock, flags);
return 0;
}
static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
struct cdns *cdns = dev_get_drvdata(pdev->dev);
unsigned long flags;
trace_cdnsp_pullup(is_on);
/*
	 * Disable event handling while the controller is being
	 * enabled/disabled.
*/
disable_irq(cdns->dev_irq);
spin_lock_irqsave(&pdev->lock, flags);
if (!is_on) {
cdnsp_reset_device(pdev);
cdns_clear_vbus(cdns);
} else {
cdns_set_vbus(cdns);
}
spin_unlock_irqrestore(&pdev->lock, flags);
enable_irq(cdns->dev_irq);
return 0;
}
static const struct usb_gadget_ops cdnsp_gadget_ops = {
.get_frame = cdnsp_gadget_get_frame,
.wakeup = cdnsp_gadget_wakeup,
.set_selfpowered = cdnsp_gadget_set_selfpowered,
.pullup = cdnsp_gadget_pullup,
.udc_start = cdnsp_gadget_udc_start,
.udc_stop = cdnsp_gadget_udc_stop,
};
static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
void __iomem *reg = &pdev->cap_regs->hc_capbase;
int endpoints;
reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);
if (!pep->direction) {
pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
pep->buffering = (pep->buffering + 1) / 2;
pep->buffering_period = (pep->buffering_period + 1) / 2;
return;
}
endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;
	/* Advance to the XBUF_TX_TAG_MASK_0 register. */
reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
	/* Point reg at the XBUF_TX_TAG_MASK_N register for this endpoint. */
reg += pep->number * sizeof(u32) * 2;
pep->buffering = (readl(reg) + 1) / 2;
pep->buffering_period = pep->buffering;
}
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
int max_streams = HCC_MAX_PSA(pdev->hcc_params);
struct cdnsp_ep *pep;
int i;
INIT_LIST_HEAD(&pdev->gadget.ep_list);
if (max_streams < STREAM_LOG_STREAMS) {
dev_err(pdev->dev, "Stream size %d not supported\n",
max_streams);
return -EINVAL;
}
max_streams = STREAM_LOG_STREAMS;
for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
bool direction = !(i & 1); /* Start from OUT endpoint. */
u8 epnum = ((i + 1) >> 1);
if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
continue;
pep = &pdev->eps[i];
pep->pdev = pdev;
pep->number = epnum;
pep->direction = direction; /* 0 for OUT, 1 for IN. */
/*
* Ep0 is bidirectional, so ep0in and ep0out are represented by
* pdev->eps[0]
*/
if (epnum == 0) {
snprintf(pep->name, sizeof(pep->name), "ep%d%s",
epnum, "BiDir");
pep->idx = 0;
usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
pep->endpoint.maxburst = 1;
pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
pep->endpoint.comp_desc = NULL;
pep->endpoint.caps.type_control = true;
pep->endpoint.caps.dir_in = true;
pep->endpoint.caps.dir_out = true;
pdev->ep0_preq.epnum = pep->number;
pdev->ep0_preq.pep = pep;
pdev->gadget.ep0 = &pep->endpoint;
} else {
snprintf(pep->name, sizeof(pep->name), "ep%d%s",
epnum, (pep->direction) ? "in" : "out");
pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
pep->endpoint.max_streams = max_streams;
pep->endpoint.ops = &cdnsp_gadget_ep_ops;
list_add_tail(&pep->endpoint.ep_list,
&pdev->gadget.ep_list);
pep->endpoint.caps.type_iso = true;
pep->endpoint.caps.type_bulk = true;
pep->endpoint.caps.type_int = true;
pep->endpoint.caps.dir_in = direction;
pep->endpoint.caps.dir_out = !direction;
}
pep->endpoint.name = pep->name;
pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
cdnsp_get_ep_buffering(pdev, pep);
dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
"SupDir IN: %s, OUT: %s\n",
pep->name, 1024,
(pep->endpoint.caps.type_control) ? "yes" : "no",
(pep->endpoint.caps.type_int) ? "yes" : "no",
(pep->endpoint.caps.type_bulk) ? "yes" : "no",
(pep->endpoint.caps.type_iso) ? "yes" : "no",
(pep->endpoint.caps.dir_in) ? "yes" : "no",
(pep->endpoint.caps.dir_out) ? "yes" : "no");
INIT_LIST_HEAD(&pep->pending_list);
}
return 0;
}
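/*
 * Index mapping produced by the loop above, by example: i == 0 maps to the
 * bidirectional ep0 (idx 0); i == 1 maps to ep1out (epnum 1, direction 0,
 * idx 1); i == 2 maps to ep1in (epnum 1, direction 1, idx 2); and so on,
 * OUT before IN for each endpoint number.
 */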
static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
{
struct cdnsp_ep *pep;
int i;
for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
pep = &pdev->eps[i];
if (pep->number != 0 && pep->out_ctx)
list_del(&pep->endpoint.ep_list);
}
}
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;
if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
spin_unlock(&pdev->lock);
pdev->gadget_driver->disconnect(&pdev->gadget);
spin_lock(&pdev->lock);
}
pdev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}
void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
{
if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
spin_unlock(&pdev->lock);
pdev->gadget_driver->suspend(&pdev->gadget);
spin_lock(&pdev->lock);
}
}
void cdnsp_resume_gadget(struct cdnsp_device *pdev)
{
if (pdev->gadget_driver && pdev->gadget_driver->resume) {
spin_unlock(&pdev->lock);
pdev->gadget_driver->resume(&pdev->gadget);
spin_lock(&pdev->lock);
}
}
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
struct cdnsp_port_regs __iomem *port_regs;
cdnsp_reset_device(pdev);
port_regs = pdev->active_port->regs;
pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));
spin_unlock(&pdev->lock);
usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
spin_lock(&pdev->lock);
switch (pdev->gadget.speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
pdev->gadget.ep0->maxpacket = 512;
break;
case USB_SPEED_HIGH:
case USB_SPEED_FULL:
cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
pdev->gadget.ep0->maxpacket = 64;
break;
default:
/* Low speed is not supported. */
dev_err(pdev->dev, "Unknown device speed\n");
break;
}
cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
void __iomem *reg = &pdev->cap_regs->hc_capbase;
reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
pdev->rev_cap = reg;
dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
readl(&pdev->rev_cap->ctrl_revision),
readl(&pdev->rev_cap->rtl_revision),
readl(&pdev->rev_cap->ep_supported),
readl(&pdev->rev_cap->rx_buff_size),
readl(&pdev->rev_cap->tx_buff_size));
}
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
int ret;
u32 reg;
pdev->cap_regs = pdev->regs;
pdev->op_regs = pdev->regs +
HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
pdev->run_regs = pdev->regs +
(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);
/* Cache read-only capability registers */
pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
pdev->hci_version = HC_VERSION(pdev->hcc_params);
pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
cdnsp_get_rev_cap(pdev);
/* Make sure the Device Controller is halted. */
ret = cdnsp_halt(pdev);
if (ret)
return ret;
/* Reset the internal controller memory state and registers. */
ret = cdnsp_reset(pdev);
if (ret)
return ret;
/*
* Set dma_mask and coherent_dma_mask to 64-bits,
* if controller supports 64-bit addressing.
*/
if (HCC_64BIT_ADDR(pdev->hcc_params) &&
!dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
} else {
/*
* This is to avoid error in cases where a 32-bit USB
* controller is used on a 64-bit capable system.
*/
ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
}
spin_lock_init(&pdev->lock);
ret = cdnsp_mem_init(pdev);
if (ret)
return ret;
/*
	 * Software workaround for U1: after the transition
	 * to U1 the controller starts gating its clock, and in some
	 * cases this causes the controller to get stuck.
*/
reg = readl(&pdev->port3x_regs->mode_2);
reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
writel(reg, &pdev->port3x_regs->mode_2);
return 0;
}
static int __cdnsp_gadget_init(struct cdns *cdns)
{
struct cdnsp_device *pdev;
u32 max_speed;
int ret = -ENOMEM;
cdns_drd_gadget_on(cdns);
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
return -ENOMEM;
pm_runtime_get_sync(cdns->dev);
cdns->gadget_dev = pdev;
pdev->dev = cdns->dev;
pdev->regs = cdns->dev_regs;
max_speed = usb_get_maximum_speed(cdns->dev);
switch (max_speed) {
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
break;
default:
dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
fallthrough;
case USB_SPEED_UNKNOWN:
/* Default to SSP */
max_speed = USB_SPEED_SUPER_PLUS;
break;
}
pdev->gadget.ops = &cdnsp_gadget_ops;
pdev->gadget.name = "cdnsp-gadget";
pdev->gadget.speed = USB_SPEED_UNKNOWN;
pdev->gadget.sg_supported = 1;
pdev->gadget.max_speed = max_speed;
pdev->gadget.lpm_capable = 1;
pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
if (!pdev->setup_buf)
goto free_pdev;
/*
	 * The controller supports unaligned buffers, but aligned
	 * buffers should improve performance.
*/
pdev->gadget.quirk_ep_out_aligned_size = true;
ret = cdnsp_gen_setup(pdev);
if (ret) {
dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
goto free_setup;
}
ret = cdnsp_gadget_init_endpoints(pdev);
if (ret) {
dev_err(pdev->dev, "failed to initialize endpoints\n");
goto halt_pdev;
}
ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
if (ret) {
dev_err(pdev->dev, "failed to register udc\n");
goto free_endpoints;
}
ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
cdnsp_irq_handler,
cdnsp_thread_irq_handler, IRQF_SHARED,
dev_name(pdev->dev), pdev);
if (ret)
goto del_gadget;
return 0;
del_gadget:
usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
cdnsp_halt(pdev);
cdnsp_reset(pdev);
cdnsp_mem_cleanup(pdev);
free_setup:
kfree(pdev->setup_buf);
free_pdev:
kfree(pdev);
return ret;
}
static void cdnsp_gadget_exit(struct cdns *cdns)
{
struct cdnsp_device *pdev = cdns->gadget_dev;
devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
usb_del_gadget_udc(&pdev->gadget);
cdnsp_gadget_free_endpoints(pdev);
cdnsp_mem_cleanup(pdev);
kfree(pdev);
cdns->gadget_dev = NULL;
cdns_drd_gadget_off(cdns);
}
static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
{
struct cdnsp_device *pdev = cdns->gadget_dev;
unsigned long flags;
if (pdev->link_state == XDEV_U3)
return 0;
spin_lock_irqsave(&pdev->lock, flags);
cdnsp_disconnect_gadget(pdev);
cdnsp_stop(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
return 0;
}
static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
{
struct cdnsp_device *pdev = cdns->gadget_dev;
enum usb_device_speed max_speed;
unsigned long flags;
int ret;
if (!pdev->gadget_driver)
return 0;
spin_lock_irqsave(&pdev->lock, flags);
max_speed = pdev->gadget_driver->max_speed;
/* Limit speed if necessary. */
max_speed = min(max_speed, pdev->gadget.max_speed);
ret = cdnsp_run(pdev, max_speed);
if (pdev->link_state == XDEV_U3)
__cdnsp_gadget_wakeup(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
/**
* cdnsp_gadget_init - initialize device structure
* @cdns: cdnsp instance
*
* This function initializes the gadget.
*/
int cdnsp_gadget_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
rdrv->start = __cdnsp_gadget_init;
rdrv->stop = cdnsp_gadget_exit;
rdrv->suspend = cdnsp_gadget_suspend;
rdrv->resume = cdnsp_gadget_resume;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "gadget";
cdns->roles[USB_ROLE_DEVICE] = rdrv;
return 0;
}
| linux-master | drivers/usb/cdns3/cdnsp-gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2019 NXP
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "core.h"
#define USB3_CORE_CTRL1 0x00
#define USB3_CORE_CTRL2 0x04
#define USB3_INT_REG 0x08
#define USB3_CORE_STATUS 0x0c
#define XHCI_DEBUG_LINK_ST 0x10
#define XHCI_DEBUG_BUS 0x14
#define USB3_SSPHY_CTRL1 0x40
#define USB3_SSPHY_CTRL2 0x44
#define USB3_SSPHY_STATUS 0x4c
#define USB2_PHY_CTRL1 0x50
#define USB2_PHY_CTRL2 0x54
#define USB2_PHY_STATUS 0x5c
/* Register bits definition */
/* USB3_CORE_CTRL1 */
#define SW_RESET_MASK GENMASK(31, 26)
#define PWR_SW_RESET BIT(31)
#define APB_SW_RESET BIT(30)
#define AXI_SW_RESET BIT(29)
#define RW_SW_RESET BIT(28)
#define PHY_SW_RESET BIT(27)
#define PHYAHB_SW_RESET BIT(26)
#define ALL_SW_RESET (PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \
RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET)
#define OC_DISABLE BIT(9)
#define MDCTRL_CLK_SEL BIT(7)
#define MODE_STRAP_MASK (0x7)
#define DEV_MODE (1 << 2)
#define HOST_MODE (1 << 1)
#define OTG_MODE (1 << 0)
/* USB3_INT_REG */
#define CLK_125_REQ BIT(29)
#define LPM_CLK_REQ BIT(28)
#define DEVU3_WAEKUP_EN BIT(14)
#define OTG_WAKEUP_EN BIT(12)
#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
/* USB3_CORE_STATUS */
#define MDCTRL_CLK_STATUS BIT(15)
#define DEV_POWER_ON_READY BIT(13)
#define HOST_POWER_ON_READY BIT(12)
/* USB3_SSPHY_STATUS */
#define CLK_VALID_MASK (0x3f << 26)
#define CLK_VALID_COMPARE_BITS (0xf << 28)
#define PHY_REFCLK_REQ (1 << 0)
/* OTG registers definition */
#define OTGSTS 0x4
/* OTGSTS */
#define OTG_NRDY BIT(11)
/* xHCI registers definition */
#define XECP_PM_PMCSR 0x8018
#define XECP_AUX_CTRL_REG1 0x8120
/* Register bits definition */
/* XECP_AUX_CTRL_REG1 */
#define CFG_RXDET_P3_EN BIT(15)
/* XECP_PM_PMCSR */
#define PS_MASK GENMASK(1, 0)
#define PS_D0 0
#define PS_D1 1
struct cdns_imx {
struct device *dev;
void __iomem *noncore;
struct clk_bulk_data *clks;
int num_clks;
struct platform_device *cdns3_pdev;
};
static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
{
return readl(data->noncore + offset);
}
static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
{
writel(value, data->noncore + offset);
}
static const struct clk_bulk_data imx_cdns3_core_clks[] = {
{ .id = "lpm" },
{ .id = "bus" },
{ .id = "aclk" },
{ .id = "ipg" },
{ .id = "core" },
};
static int cdns_imx_noncore_init(struct cdns_imx *data)
{
u32 value;
int ret;
struct device *dev = data->dev;
cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK);
udelay(1);
ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
(value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS,
10, 100000);
if (ret) {
dev_err(dev, "wait clkvld timeout\n");
return ret;
}
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
value |= ALL_SW_RESET;
cdns_imx_writel(data, USB3_CORE_CTRL1, value);
udelay(1);
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE;
cdns_imx_writel(data, USB3_CORE_CTRL1, value);
value = cdns_imx_readl(data, USB3_INT_REG);
value |= HOST_INT1_EN | DEV_INT_EN;
cdns_imx_writel(data, USB3_INT_REG, value);
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
value &= ~ALL_SW_RESET;
cdns_imx_writel(data, USB3_CORE_CTRL1, value);
return ret;
}
static int cdns_imx_platform_suspend(struct device *dev,
bool suspend, bool wakeup);
static struct cdns3_platform_data cdns_imx_pdata = {
.platform_suspend = cdns_imx_platform_suspend,
.quirks = CDNS3_DEFAULT_PM_RUNTIME_ALLOW,
};
static const struct of_dev_auxdata cdns_imx_auxdata[] = {
{
.compatible = "cdns,usb3",
.platform_data = &cdns_imx_pdata,
},
{},
};
static int cdns_imx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct cdns_imx *data;
int ret;
if (!node)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
data->dev = dev;
data->noncore = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->noncore)) {
dev_err(dev, "can't map IOMEM resource\n");
return PTR_ERR(data->noncore);
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
sizeof(imx_cdns3_core_clks), GFP_KERNEL);
if (!data->clks)
return -ENOMEM;
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
if (ret)
return ret;
ret = cdns_imx_noncore_init(data);
if (ret)
goto err;
ret = of_platform_populate(node, NULL, cdns_imx_auxdata, dev);
if (ret) {
dev_err(dev, "failed to create children: %d\n", ret);
goto err;
}
device_set_wakeup_capable(dev, true);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return ret;
err:
clk_bulk_disable_unprepare(data->num_clks, data->clks);
return ret;
}
static void cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_imx *data = dev_get_drvdata(dev);
pm_runtime_get_sync(dev);
of_platform_depopulate(dev);
clk_bulk_disable_unprepare(data->num_clks, data->clks);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
{
u32 value;
value = cdns_imx_readl(data, USB3_INT_REG);
if (enable)
value |= OTG_WAKEUP_EN | DEVU3_WAEKUP_EN;
else
value &= ~(OTG_WAKEUP_EN | DEVU3_WAEKUP_EN);
cdns_imx_writel(data, USB3_INT_REG, value);
}
static int cdns_imx_platform_suspend(struct device *dev,
bool suspend, bool wakeup)
{
struct cdns *cdns = dev_get_drvdata(dev);
struct device *parent = dev->parent;
struct cdns_imx *data = dev_get_drvdata(parent);
void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
void __iomem *xhci_regs = cdns->xhci_regs;
u32 value;
int ret = 0;
if (cdns->role != USB_ROLE_HOST)
return 0;
if (suspend) {
		/* SW requests low power once all USB ports allow it */
value = readl(xhci_regs + XECP_PM_PMCSR);
value &= ~PS_MASK;
value |= PS_D1;
writel(value, xhci_regs + XECP_PM_PMCSR);
/* mdctrl_clk_sel */
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
value |= MDCTRL_CLK_SEL;
cdns_imx_writel(data, USB3_CORE_CTRL1, value);
/* wait for mdctrl_clk_status */
value = cdns_imx_readl(data, USB3_CORE_STATUS);
ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
(value & MDCTRL_CLK_STATUS) == MDCTRL_CLK_STATUS,
10, 100000);
if (ret)
dev_warn(parent, "wait mdctrl_clk_status timeout\n");
/* wait lpm_clk_req to be 0 */
value = cdns_imx_readl(data, USB3_INT_REG);
ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
(value & LPM_CLK_REQ) != LPM_CLK_REQ,
10, 100000);
if (ret)
dev_warn(parent, "wait lpm_clk_req timeout\n");
/* wait phy_refclk_req to be 0 */
value = cdns_imx_readl(data, USB3_SSPHY_STATUS);
ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
(value & PHY_REFCLK_REQ) != PHY_REFCLK_REQ,
10, 100000);
if (ret)
dev_warn(parent, "wait phy_refclk_req timeout\n");
cdns3_set_wakeup(data, wakeup);
} else {
cdns3_set_wakeup(data, false);
/* SW request D0 */
value = readl(xhci_regs + XECP_PM_PMCSR);
value &= ~PS_MASK;
value |= PS_D0;
writel(value, xhci_regs + XECP_PM_PMCSR);
/* clr CFG_RXDET_P3_EN */
value = readl(xhci_regs + XECP_AUX_CTRL_REG1);
value &= ~CFG_RXDET_P3_EN;
writel(value, xhci_regs + XECP_AUX_CTRL_REG1);
/* clear mdctrl_clk_sel */
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
value &= ~MDCTRL_CLK_SEL;
cdns_imx_writel(data, USB3_CORE_CTRL1, value);
/* wait CLK_125_REQ to be 1 */
value = cdns_imx_readl(data, USB3_INT_REG);
ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
(value & CLK_125_REQ) == CLK_125_REQ,
10, 100000);
if (ret)
dev_warn(parent, "wait CLK_125_REQ timeout\n");
/* wait for mdctrl_clk_status is cleared */
value = cdns_imx_readl(data, USB3_CORE_STATUS);
ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
(value & MDCTRL_CLK_STATUS) != MDCTRL_CLK_STATUS,
10, 100000);
if (ret)
dev_warn(parent, "wait mdctrl_clk_status cleared timeout\n");
/* Wait until OTG_NRDY is 0 */
value = readl(otg_regs + OTGSTS);
ret = readl_poll_timeout(otg_regs + OTGSTS, value,
(value & OTG_NRDY) != OTG_NRDY,
10, 100000);
if (ret)
dev_warn(parent, "wait OTG ready timeout\n");
}
return ret;
}
static int cdns_imx_resume(struct device *dev)
{
struct cdns_imx *data = dev_get_drvdata(dev);
return clk_bulk_prepare_enable(data->num_clks, data->clks);
}
static int cdns_imx_suspend(struct device *dev)
{
struct cdns_imx *data = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(data->num_clks, data->clks);
return 0;
}
/* Indicate whether the controller lost power earlier */
static inline bool cdns_imx_is_power_lost(struct cdns_imx *data)
{
u32 value;
value = cdns_imx_readl(data, USB3_CORE_CTRL1);
if ((value & SW_RESET_MASK) == ALL_SW_RESET)
return true;
else
return false;
}
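/*
 * Rationale (inferred from the init sequence above rather than documented
 * here): the SW_RESET field powers up with all reset bits asserted, so
 * reading ALL_SW_RESET back after cdns_imx_noncore_init() has cleared them
 * indicates the block went through a power cycle.
 */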
static int __maybe_unused cdns_imx_system_suspend(struct device *dev)
{
pm_runtime_put_sync(dev);
return 0;
}
static int __maybe_unused cdns_imx_system_resume(struct device *dev)
{
struct cdns_imx *data = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Could not get runtime PM.\n");
return ret;
}
if (cdns_imx_is_power_lost(data)) {
dev_dbg(dev, "resume from power lost\n");
ret = cdns_imx_noncore_init(data);
if (ret)
cdns_imx_suspend(dev);
}
return ret;
}
#else
static int cdns_imx_platform_suspend(struct device *dev,
bool suspend, bool wakeup)
{
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops cdns_imx_pm_ops = {
SET_RUNTIME_PM_OPS(cdns_imx_suspend, cdns_imx_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(cdns_imx_system_suspend, cdns_imx_system_resume)
};
static const struct of_device_id cdns_imx_of_match[] = {
{ .compatible = "fsl,imx8qm-usb3", },
{},
};
MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
static struct platform_driver cdns_imx_driver = {
.probe = cdns_imx_probe,
.remove_new = cdns_imx_remove,
.driver = {
.name = "cdns3-imx",
.of_match_table = cdns_imx_of_match,
.pm = &cdns_imx_pm_ops,
},
};
module_platform_driver(cdns_imx_driver);
MODULE_ALIAS("platform:cdns3-imx");
MODULE_AUTHOR("Peter Chen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer");
| linux-master | drivers/usb/cdns3/cdns3-imx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cdns3-starfive.c - StarFive specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
*
* Author: Minda Chen <[email protected]>
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include <linux/regmap.h>
#include <linux/usb/otg.h>
#include "core.h"
#define USB_STRAP_HOST BIT(17)
#define USB_STRAP_DEVICE BIT(18)
#define USB_STRAP_MASK GENMASK(18, 16)
#define USB_SUSPENDM_HOST BIT(19)
#define USB_SUSPENDM_MASK BIT(19)
#define USB_MISC_CFG_MASK GENMASK(23, 20)
#define USB_SUSPENDM_BYPS BIT(20)
#define USB_PLL_EN BIT(22)
#define USB_REFCLK_MODE BIT(23)
struct cdns_starfive {
struct device *dev;
struct regmap *stg_syscon;
struct reset_control *resets;
struct clk_bulk_data *clks;
int num_clks;
u32 stg_usb_mode;
};
static void cdns_mode_init(struct platform_device *pdev,
struct cdns_starfive *data)
{
enum usb_dr_mode mode;
regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
USB_MISC_CFG_MASK,
USB_SUSPENDM_BYPS | USB_PLL_EN | USB_REFCLK_MODE);
/* dr mode setting */
mode = usb_get_dr_mode(&pdev->dev);
switch (mode) {
case USB_DR_MODE_HOST:
regmap_update_bits(data->stg_syscon,
data->stg_usb_mode,
USB_STRAP_MASK,
USB_STRAP_HOST);
regmap_update_bits(data->stg_syscon,
data->stg_usb_mode,
USB_SUSPENDM_MASK,
USB_SUSPENDM_HOST);
break;
case USB_DR_MODE_PERIPHERAL:
regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
USB_STRAP_MASK, USB_STRAP_DEVICE);
regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
USB_SUSPENDM_MASK, 0);
break;
default:
break;
}
}
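/*
 * For reference, the dr_mode consumed above comes from the standard
 * devicetree property. Illustrative snippet (node name and unit address are
 * placeholders):
 *
 *	usb@10100000 {
 *		compatible = "starfive,jh7110-usb";
 *		dr_mode = "host";	(or "peripheral" / "otg")
 *	};
 */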
static int cdns_clk_rst_init(struct cdns_starfive *data)
{
int ret;
ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
if (ret)
return dev_err_probe(data->dev, ret,
"failed to enable clocks\n");
ret = reset_control_deassert(data->resets);
if (ret) {
		dev_err(data->dev, "failed to deassert resets\n");
goto err_clk_init;
}
return ret;
err_clk_init:
clk_bulk_disable_unprepare(data->num_clks, data->clks);
return ret;
}
static void cdns_clk_rst_deinit(struct cdns_starfive *data)
{
reset_control_assert(data->resets);
clk_bulk_disable_unprepare(data->num_clks, data->clks);
}
static int cdns_starfive_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_starfive *data;
unsigned int args;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->dev = dev;
data->stg_syscon =
syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
"starfive,stg-syscon", 1, &args);
if (IS_ERR(data->stg_syscon))
return dev_err_probe(dev, PTR_ERR(data->stg_syscon),
"Failed to parse starfive,stg-syscon\n");
data->stg_usb_mode = args;
data->num_clks = devm_clk_bulk_get_all(data->dev, &data->clks);
if (data->num_clks < 0)
return dev_err_probe(data->dev, -ENODEV,
"Failed to get clocks\n");
data->resets = devm_reset_control_array_get_exclusive(data->dev);
if (IS_ERR(data->resets))
return dev_err_probe(data->dev, PTR_ERR(data->resets),
"Failed to get resets");
cdns_mode_init(pdev, data);
ret = cdns_clk_rst_init(data);
if (ret)
return ret;
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "Failed to create children\n");
cdns_clk_rst_deinit(data);
return ret;
}
device_set_wakeup_capable(dev, true);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
platform_set_drvdata(pdev, data);
return 0;
}
static int cdns_starfive_remove_core(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static void cdns_starfive_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_starfive *data = dev_get_drvdata(dev);
pm_runtime_get_sync(dev);
device_for_each_child(dev, NULL, cdns_starfive_remove_core);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
cdns_clk_rst_deinit(data);
platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int cdns_starfive_runtime_resume(struct device *dev)
{
struct cdns_starfive *data = dev_get_drvdata(dev);
return clk_bulk_prepare_enable(data->num_clks, data->clks);
}
static int cdns_starfive_runtime_suspend(struct device *dev)
{
struct cdns_starfive *data = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(data->num_clks, data->clks);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int cdns_starfive_resume(struct device *dev)
{
struct cdns_starfive *data = dev_get_drvdata(dev);
return cdns_clk_rst_init(data);
}
static int cdns_starfive_suspend(struct device *dev)
{
struct cdns_starfive *data = dev_get_drvdata(dev);
cdns_clk_rst_deinit(data);
return 0;
}
#endif
#endif
static const struct dev_pm_ops cdns_starfive_pm_ops = {
SET_RUNTIME_PM_OPS(cdns_starfive_runtime_suspend,
cdns_starfive_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(cdns_starfive_suspend, cdns_starfive_resume)
};
static const struct of_device_id cdns_starfive_of_match[] = {
{ .compatible = "starfive,jh7110-usb", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, cdns_starfive_of_match);
static struct platform_driver cdns_starfive_driver = {
.probe = cdns_starfive_probe,
.remove_new = cdns_starfive_remove,
.driver = {
.name = "cdns3-starfive",
.of_match_table = cdns_starfive_of_match,
.pm = &cdns_starfive_pm_ops,
},
};
module_platform_driver(cdns_starfive_driver);
MODULE_ALIAS("platform:cdns3-starfive");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USB3 StarFive Glue Layer");
| linux-master | drivers/usb/cdns3/cdns3-starfive.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS DRD Driver.
*
* Copyright (C) 2018-2020 Cadence.
* Copyright (C) 2017-2018 NXP
* Copyright (C) 2019 Texas Instruments
*
*
* Author: Peter Chen <[email protected]>
* Pawel Laszczak <[email protected]>
* Roger Quadros <[email protected]>
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "core.h"
#include "gadget-export.h"
#include "drd.h"
static int set_phy_power_on(struct cdns *cdns)
{
int ret;
ret = phy_power_on(cdns->usb2_phy);
if (ret)
return ret;
ret = phy_power_on(cdns->usb3_phy);
if (ret)
phy_power_off(cdns->usb2_phy);
return ret;
}
static void set_phy_power_off(struct cdns *cdns)
{
phy_power_off(cdns->usb3_phy);
phy_power_off(cdns->usb2_phy);
}
/**
* cdns3_plat_probe - probe for cdns3 core device
* @pdev: Pointer to cdns3 core platform device
*
* Returns 0 on success otherwise negative errno
*/
static int cdns3_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct cdns *cdns;
void __iomem *regs;
int ret;
cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
if (!cdns)
return -ENOMEM;
cdns->dev = dev;
cdns->pdata = dev_get_platdata(dev);
platform_set_drvdata(pdev, cdns);
ret = platform_get_irq_byname(pdev, "host");
if (ret < 0)
return ret;
cdns->xhci_res[0].start = ret;
cdns->xhci_res[0].end = ret;
cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
cdns->xhci_res[0].name = "host";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
if (!res) {
dev_err(dev, "couldn't get xhci resource\n");
return -ENXIO;
}
cdns->xhci_res[1] = *res;
cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
if (cdns->dev_irq < 0)
return cdns->dev_irq;
regs = devm_platform_ioremap_resource_byname(pdev, "dev");
if (IS_ERR(regs))
return PTR_ERR(regs);
cdns->dev_regs = regs;
cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
if (cdns->otg_irq < 0)
return cdns->otg_irq;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
if (!res) {
dev_err(dev, "couldn't get otg resource\n");
return -ENXIO;
}
cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
cdns->otg_res = *res;
cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
if (cdns->wakeup_irq == -EPROBE_DEFER)
return cdns->wakeup_irq;
if (cdns->wakeup_irq < 0) {
dev_dbg(dev, "couldn't get wakeup irq\n");
cdns->wakeup_irq = 0x0;
}
cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
if (IS_ERR(cdns->usb2_phy))
return PTR_ERR(cdns->usb2_phy);
ret = phy_init(cdns->usb2_phy);
if (ret)
return ret;
cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
if (IS_ERR(cdns->usb3_phy))
return PTR_ERR(cdns->usb3_phy);
ret = phy_init(cdns->usb3_phy);
if (ret)
goto err_phy3_init;
ret = set_phy_power_on(cdns);
if (ret)
goto err_phy_power_on;
cdns->gadget_init = cdns3_gadget_init;
ret = cdns_init(cdns);
if (ret)
goto err_cdns_init;
device_set_wakeup_capable(dev, true);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
pm_runtime_forbid(dev);
/*
* The controller needs less time between bus and controller suspend,
	 * and we also need a small delay to avoid frequently entering low
* power mode.
*/
pm_runtime_set_autosuspend_delay(dev, 20);
pm_runtime_mark_last_busy(dev);
pm_runtime_use_autosuspend(dev);
return 0;
err_cdns_init:
set_phy_power_off(cdns);
err_phy_power_on:
phy_exit(cdns->usb3_phy);
err_phy3_init:
phy_exit(cdns->usb2_phy);
return ret;
}
/**
* cdns3_plat_remove() - unbind drd driver and clean up
* @pdev: Pointer to Linux platform device
*/
static void cdns3_plat_remove(struct platform_device *pdev)
{
struct cdns *cdns = platform_get_drvdata(pdev);
struct device *dev = cdns->dev;
pm_runtime_get_sync(dev);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
cdns_remove(cdns);
set_phy_power_off(cdns);
phy_exit(cdns->usb2_phy);
phy_exit(cdns->usb3_phy);
}
#ifdef CONFIG_PM
static int cdns3_set_platform_suspend(struct device *dev,
bool suspend, bool wakeup)
{
struct cdns *cdns = dev_get_drvdata(dev);
int ret = 0;
if (cdns->pdata && cdns->pdata->platform_suspend)
ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
return ret;
}
static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
{
struct cdns *cdns = dev_get_drvdata(dev);
bool wakeup;
unsigned long flags;
if (cdns->in_lpm)
return 0;
if (PMSG_IS_AUTO(msg))
wakeup = true;
else
wakeup = device_may_wakeup(dev);
cdns3_set_platform_suspend(cdns->dev, true, wakeup);
set_phy_power_off(cdns);
spin_lock_irqsave(&cdns->lock, flags);
cdns->in_lpm = true;
spin_unlock_irqrestore(&cdns->lock, flags);
dev_dbg(cdns->dev, "%s ends\n", __func__);
return 0;
}
static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
{
struct cdns *cdns = dev_get_drvdata(dev);
int ret;
unsigned long flags;
if (!cdns->in_lpm)
return 0;
if (cdns_power_is_lost(cdns)) {
phy_exit(cdns->usb2_phy);
ret = phy_init(cdns->usb2_phy);
if (ret)
return ret;
phy_exit(cdns->usb3_phy);
ret = phy_init(cdns->usb3_phy);
if (ret)
return ret;
}
ret = set_phy_power_on(cdns);
if (ret)
return ret;
cdns3_set_platform_suspend(cdns->dev, false, false);
spin_lock_irqsave(&cdns->lock, flags);
cdns_resume(cdns);
cdns->in_lpm = false;
spin_unlock_irqrestore(&cdns->lock, flags);
cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
if (cdns->wakeup_pending) {
cdns->wakeup_pending = false;
enable_irq(cdns->wakeup_irq);
}
dev_dbg(cdns->dev, "%s ends\n", __func__);
return ret;
}
static int cdns3_plat_runtime_suspend(struct device *dev)
{
return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
}
static int cdns3_plat_runtime_resume(struct device *dev)
{
return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
}
#ifdef CONFIG_PM_SLEEP
static int cdns3_plat_suspend(struct device *dev)
{
struct cdns *cdns = dev_get_drvdata(dev);
int ret;
cdns_suspend(cdns);
ret = cdns3_controller_suspend(dev, PMSG_SUSPEND);
if (ret)
return ret;
if (device_may_wakeup(dev) && cdns->wakeup_irq)
enable_irq_wake(cdns->wakeup_irq);
return ret;
}
static int cdns3_plat_resume(struct device *dev)
{
return cdns3_controller_resume(dev, PMSG_RESUME);
}
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
static const struct dev_pm_ops cdns3_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
cdns3_plat_runtime_resume, NULL)
};
#ifdef CONFIG_OF
static const struct of_device_id of_cdns3_match[] = {
{ .compatible = "cdns,usb3" },
{ },
};
MODULE_DEVICE_TABLE(of, of_cdns3_match);
#endif
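/*
 * A minimal, illustrative device-tree node for this glue. The resource
 * names are taken from the *_byname lookups in cdns3_plat_probe(); the
 * unit address, reg values and interrupt specifiers below are
 * placeholders for illustration only, not taken from any real board:
 *
 *	usb@f3000000 {
 *		compatible = "cdns,usb3";
 *		reg = <0xf3000000 0x10000>,
 *		      <0xf3010000 0x10000>,
 *		      <0xf3020000 0x10000>;
 *		reg-names = "otg", "xhci", "dev";
 *		interrupts = <0 68 4>, <0 69 4>, <0 70 4>;
 *		interrupt-names = "host", "peripheral", "otg";
 *	};
 */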
static struct platform_driver cdns3_driver = {
.probe = cdns3_plat_probe,
.remove_new = cdns3_plat_remove,
.driver = {
.name = "cdns-usb3",
.of_match_table = of_match_ptr(of_cdns3_match),
.pm = &cdns3_pm_ops,
},
};
module_platform_driver(cdns3_driver);
MODULE_ALIAS("platform:cdns3");
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
| linux-master | drivers/usb/cdns3/cdns3-plat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USBSS device controller driver Trace Support
*
* Copyright (C) 2018-2019 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "cdns3-trace.h"
| linux-master | drivers/usb/cdns3/cdns3-trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS and USBSSP DRD Driver.
*
* Copyright (C) 2018-2019 Cadence.
* Copyright (C) 2017-2018 NXP
* Copyright (C) 2019 Texas Instruments
*
* Author: Peter Chen <[email protected]>
* Pawel Laszczak <[email protected]>
* Roger Quadros <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include "core.h"
#include "host-export.h"
#include "drd.h"
static int cdns_idle_init(struct cdns *cdns);
static int cdns_role_start(struct cdns *cdns, enum usb_role role)
{
int ret;
if (WARN_ON(role > USB_ROLE_DEVICE))
return 0;
mutex_lock(&cdns->mutex);
cdns->role = role;
mutex_unlock(&cdns->mutex);
if (!cdns->roles[role])
return -ENXIO;
if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE)
return 0;
mutex_lock(&cdns->mutex);
ret = cdns->roles[role]->start(cdns);
if (!ret)
cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE;
mutex_unlock(&cdns->mutex);
return ret;
}
static void cdns_role_stop(struct cdns *cdns)
{
enum usb_role role = cdns->role;
if (WARN_ON(role > USB_ROLE_DEVICE))
return;
if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE)
return;
mutex_lock(&cdns->mutex);
cdns->roles[role]->stop(cdns);
cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE;
mutex_unlock(&cdns->mutex);
}
static void cdns_exit_roles(struct cdns *cdns)
{
cdns_role_stop(cdns);
cdns_drd_exit(cdns);
}
/**
* cdns_core_init_role - initialize role of operation
* @cdns: Pointer to cdns structure
*
* Returns 0 on success otherwise negative errno
*/
static int cdns_core_init_role(struct cdns *cdns)
{
struct device *dev = cdns->dev;
enum usb_dr_mode best_dr_mode;
enum usb_dr_mode dr_mode;
int ret;
dr_mode = usb_get_dr_mode(dev);
cdns->role = USB_ROLE_NONE;
/*
 * If the driver can't read the mode via usb_get_dr_mode(), it chooses
 * the mode according to the kernel configuration. This setting can be
 * restricted later depending on the strap pin configuration.
 */
if (dr_mode == USB_DR_MODE_UNKNOWN) {
if (cdns->version == CDNSP_CONTROLLER_V2) {
if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
dr_mode = USB_DR_MODE_OTG;
else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
dr_mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
dr_mode = USB_DR_MODE_PERIPHERAL;
} else {
if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
dr_mode = USB_DR_MODE_OTG;
else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
dr_mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
dr_mode = USB_DR_MODE_PERIPHERAL;
}
}
/*
 * At this point cdns->dr_mode contains the strap configuration.
 * The driver tries to update this setting based on the kernel
 * configuration.
 */
best_dr_mode = cdns->dr_mode;
ret = cdns_idle_init(cdns);
if (ret)
return ret;
if (dr_mode == USB_DR_MODE_OTG) {
best_dr_mode = cdns->dr_mode;
} else if (cdns->dr_mode == USB_DR_MODE_OTG) {
best_dr_mode = dr_mode;
} else if (cdns->dr_mode != dr_mode) {
dev_err(dev, "Incorrect DRD configuration\n");
return -EINVAL;
}
dr_mode = best_dr_mode;
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
if ((cdns->version == CDNSP_CONTROLLER_V2 &&
IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
(cdns->version < CDNSP_CONTROLLER_V2 &&
IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
ret = cdns_host_init(cdns);
else
ret = -ENXIO;
if (ret) {
dev_err(dev, "Host initialization failed with %d\n",
ret);
goto err;
}
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
if (cdns->gadget_init)
ret = cdns->gadget_init(cdns);
else
ret = -ENXIO;
if (ret) {
dev_err(dev, "Device initialization failed with %d\n",
ret);
goto err;
}
}
cdns->dr_mode = dr_mode;
ret = cdns_drd_update_mode(cdns);
if (ret)
goto err;
/* Initialize idle role to start with */
ret = cdns_role_start(cdns, USB_ROLE_NONE);
if (ret)
goto err;
switch (cdns->dr_mode) {
case USB_DR_MODE_OTG:
ret = cdns_hw_role_switch(cdns);
if (ret)
goto err;
break;
case USB_DR_MODE_PERIPHERAL:
ret = cdns_role_start(cdns, USB_ROLE_DEVICE);
if (ret)
goto err;
break;
case USB_DR_MODE_HOST:
ret = cdns_role_start(cdns, USB_ROLE_HOST);
if (ret)
goto err;
break;
default:
ret = -EINVAL;
goto err;
}
return 0;
err:
cdns_exit_roles(cdns);
return ret;
}
/**
* cdns_hw_role_state_machine - role switch state machine based on hw events.
* @cdns: Pointer to controller structure.
*
* Returns next role to be entered based on hw events.
*/
static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns)
{
enum usb_role role = USB_ROLE_NONE;
int id, vbus;
if (cdns->dr_mode != USB_DR_MODE_OTG) {
if (cdns_is_host(cdns))
role = USB_ROLE_HOST;
if (cdns_is_device(cdns))
role = USB_ROLE_DEVICE;
return role;
}
id = cdns_get_id(cdns);
vbus = cdns_get_vbus(cdns);
/*
* Role change state machine
* Inputs: ID, VBUS
* Previous state: cdns->role
* Next state: role
*/
role = cdns->role;
switch (role) {
case USB_ROLE_NONE:
/*
 * The driver treats USB_ROLE_NONE as synonymous with the IDLE
 * state from the controller specification.
 */
if (!id)
role = USB_ROLE_HOST;
else if (vbus)
role = USB_ROLE_DEVICE;
break;
case USB_ROLE_HOST: /* from HOST, we can only change to NONE */
if (id)
role = USB_ROLE_NONE;
break;
case USB_ROLE_DEVICE: /* from GADGET, we can only change to NONE */
if (!vbus)
role = USB_ROLE_NONE;
break;
}
dev_dbg(cdns->dev, "role %d -> %d\n", cdns->role, role);
return role;
}
static int cdns_idle_role_start(struct cdns *cdns)
{
return 0;
}
static void cdns_idle_role_stop(struct cdns *cdns)
{
/* Program Lane swap and bring PHY out of RESET */
phy_reset(cdns->usb3_phy);
}
static int cdns_idle_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
rdrv->start = cdns_idle_role_start;
rdrv->stop = cdns_idle_role_stop;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->suspend = NULL;
rdrv->resume = NULL;
rdrv->name = "idle";
cdns->roles[USB_ROLE_NONE] = rdrv;
return 0;
}
/**
* cdns_hw_role_switch - switch roles based on HW state
* @cdns: controller
*/
int cdns_hw_role_switch(struct cdns *cdns)
{
enum usb_role real_role, current_role;
int ret = 0;
/* If a role switch is registered, role changes go through cdns_role_set() */
if (cdns->role_sw)
return 0;
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
real_role = cdns_hw_role_state_machine(cdns);
/* Do nothing if nothing changed */
if (current_role == real_role)
goto exit;
cdns_role_stop(cdns);
dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role);
ret = cdns_role_start(cdns, real_role);
if (ret) {
/* Back to current role */
dev_err(cdns->dev, "set %d has failed, back to %d\n",
real_role, current_role);
ret = cdns_role_start(cdns, current_role);
if (ret)
dev_err(cdns->dev, "back to %d failed too\n",
current_role);
}
exit:
pm_runtime_put_sync(cdns->dev);
return ret;
}
/**
* cdns_role_get - get current role of controller.
*
* @sw: pointer to USB role switch structure
*
* Returns the current role
*/
static enum usb_role cdns_role_get(struct usb_role_switch *sw)
{
struct cdns *cdns = usb_role_switch_get_drvdata(sw);
return cdns->role;
}
/**
 * cdns_role_set - set current role of controller.
 *
 * @sw: pointer to USB role switch structure
 * @role: the role to switch to
 *
 * Handles the following events:
 * - Role switch for dual-role devices
 * - USB_ROLE_DEVICE <--> USB_ROLE_NONE for peripheral-only devices
 */
static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
{
struct cdns *cdns = usb_role_switch_get_drvdata(sw);
int ret = 0;
pm_runtime_get_sync(cdns->dev);
if (cdns->role == role)
goto pm_put;
if (cdns->dr_mode == USB_DR_MODE_HOST) {
switch (role) {
case USB_ROLE_NONE:
case USB_ROLE_HOST:
break;
default:
goto pm_put;
}
}
if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) {
switch (role) {
case USB_ROLE_NONE:
case USB_ROLE_DEVICE:
break;
default:
goto pm_put;
}
}
cdns_role_stop(cdns);
ret = cdns_role_start(cdns, role);
if (ret)
dev_err(cdns->dev, "set role %d has failed\n", role);
pm_put:
pm_runtime_put_sync(cdns->dev);
return ret;
}
/**
* cdns_wakeup_irq - interrupt handler for wakeup events
* @irq: irq number for cdns3/cdnsp core device
* @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
static irqreturn_t cdns_wakeup_irq(int irq, void *data)
{
struct cdns *cdns = data;
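/*
 * Keep the wakeup IRQ masked until the controller has actually
 * resumed; cdns3_controller_resume() re-enables it once
 * wakeup_pending has been consumed.
 */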
if (cdns->in_lpm) {
disable_irq_nosync(irq);
cdns->wakeup_pending = true;
if ((cdns->role == USB_ROLE_HOST) && cdns->host_dev)
pm_request_resume(&cdns->host_dev->dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/**
* cdns_init - probe for cdns3/cdnsp core device
* @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
int cdns_init(struct cdns *cdns)
{
struct device *dev = cdns->dev;
int ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "error setting dma mask: %d\n", ret);
return ret;
}
mutex_init(&cdns->mutex);
if (device_property_read_bool(dev, "usb-role-switch")) {
struct usb_role_switch_desc sw_desc = { };
sw_desc.set = cdns_role_set;
sw_desc.get = cdns_role_get;
sw_desc.allow_userspace_control = true;
sw_desc.driver_data = cdns;
sw_desc.fwnode = dev->fwnode;
cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
dev_warn(dev, "Unable to register Role Switch\n");
return PTR_ERR(cdns->role_sw);
}
}
if (cdns->wakeup_irq) {
ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
cdns_wakeup_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
goto role_switch_unregister;
}
}
ret = cdns_drd_init(cdns);
if (ret)
goto init_failed;
ret = cdns_core_init_role(cdns);
if (ret)
goto init_failed;
spin_lock_init(&cdns->lock);
dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
return 0;
init_failed:
cdns_drd_exit(cdns);
role_switch_unregister:
if (cdns->role_sw)
usb_role_switch_unregister(cdns->role_sw);
return ret;
}
EXPORT_SYMBOL_GPL(cdns_init);
/**
* cdns_remove - unbind drd driver and clean up
* @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
int cdns_remove(struct cdns *cdns)
{
cdns_exit_roles(cdns);
usb_role_switch_unregister(cdns->role_sw);
return 0;
}
EXPORT_SYMBOL_GPL(cdns_remove);
#ifdef CONFIG_PM_SLEEP
int cdns_suspend(struct cdns *cdns)
{
struct device *dev = cdns->dev;
unsigned long flags;
if (pm_runtime_status_suspended(dev))
pm_runtime_resume(dev);
if (cdns->roles[cdns->role]->suspend) {
spin_lock_irqsave(&cdns->lock, flags);
cdns->roles[cdns->role]->suspend(cdns, false);
spin_unlock_irqrestore(&cdns->lock, flags);
}
return 0;
}
EXPORT_SYMBOL_GPL(cdns_suspend);
int cdns_resume(struct cdns *cdns)
{
enum usb_role real_role;
bool role_changed = false;
int ret = 0;
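/*
 * If the controller lost power during suspend, its role state is
 * gone: re-read the role (from the role switch if one is registered,
 * otherwise from the ID/VBUS hardware state machine) and turn the
 * host or gadget side back on before resuming the role driver.
 */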
if (cdns_power_is_lost(cdns)) {
if (cdns->role_sw) {
cdns->role = cdns_role_get(cdns->role_sw);
} else {
real_role = cdns_hw_role_state_machine(cdns);
if (real_role != cdns->role) {
ret = cdns_hw_role_switch(cdns);
if (ret)
return ret;
role_changed = true;
}
}
if (!role_changed) {
if (cdns->role == USB_ROLE_HOST)
ret = cdns_drd_host_on(cdns);
else if (cdns->role == USB_ROLE_DEVICE)
ret = cdns_drd_gadget_on(cdns);
if (ret)
return ret;
}
}
if (cdns->roles[cdns->role]->resume)
cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
return 0;
}
EXPORT_SYMBOL_GPL(cdns_resume);
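/**
 * cdns_set_active - mark the device runtime-PM active after system resume
 * @cdns: Pointer to controller context structure
 * @set_active: non-zero for a resume that was not runtime initiated
 *
 * The disable/set_active/enable sequence updates the runtime-PM status
 * without racing against concurrent runtime-PM activity.
 */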
void cdns_set_active(struct cdns *cdns, u8 set_active)
{
struct device *dev = cdns->dev;
if (set_active) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
}
EXPORT_SYMBOL_GPL(cdns_set_active);
#endif /* CONFIG_PM_SLEEP */
MODULE_AUTHOR("Peter Chen <[email protected]>");
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_AUTHOR("Roger Quadros <[email protected]>");
MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/cdns3/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS DRD Driver - gadget side.
*
* Copyright (C) 2018 Cadence Design Systems.
* Copyright (C) 2017-2018 NXP
*
* Authors: Pawel Jez <[email protected]>,
* Pawel Laszczak <[email protected]>
* Peter Chen <[email protected]>
*/
#include <linux/usb/composite.h>
#include <linux/iopoll.h>
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};
/**
* cdns3_ep0_run_transfer - Do transfer on default endpoint hardware
* @priv_dev: extended gadget object
* @dma_addr: physical address where data is/will be stored
* @length: data length
* @erdy: set it to 1 when ERDY packet should be sent -
* exit from flow control state
* @zlp: add zero length packet
*/
static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev,
dma_addr_t dma_addr,
unsigned int length, int erdy, int zlp)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length));
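/*
 * Two TRBs are used when a zero-length packet is requested; only the
 * last TRB of the transfer carries TRB_IOC, so a single completion
 * interrupt covers both the data TRB and the ZLP TRB.
 */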
if (zlp) {
priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL));
priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0));
priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
TRB_TYPE(TRB_NORMAL));
} else {
priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
TRB_TYPE(TRB_NORMAL));
priv_ep->trb_pool[1].control = 0;
}
trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool);
cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir);
writel(EP_STS_TRBERR, ®s->ep_sts);
writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), ®s->ep_traddr);
trace_cdns3_doorbell_ep0(priv_dev->ep0_data_dir ? "ep0in" : "ep0out",
readl(®s->ep_traddr));
/* TRB should be prepared before starting transfer. */
writel(EP_CMD_DRDY, ®s->ep_cmd);
/* Resume controller before arming transfer. */
__cdns3_gadget_wakeup(priv_dev);
if (erdy)
writel(EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
}
/**
 * cdns3_ep0_delegate_req - Delegate handling of a setup packet to the
 * gadget driver
 * @priv_dev: extended gadget object
 * @ctrl_req: pointer to received setup packet
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep0_delegate_req(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
int ret;
spin_unlock(&priv_dev->lock);
priv_dev->setup_pending = 1;
ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req);
priv_dev->setup_pending = 0;
spin_lock(&priv_dev->lock);
return ret;
}
static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev)
{
priv_dev->ep0_data_dir = 0;
priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
sizeof(struct usb_ctrlrequest), 0, 0);
}
static void cdns3_ep0_complete_setup(struct cdns3_device *priv_dev,
u8 send_stall, u8 send_erdy)
{
struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
struct usb_request *request;
request = cdns3_next_request(&priv_ep->pending_req_list);
if (request)
list_del_init(&request->list);
if (send_stall) {
trace_cdns3_halt(priv_ep, send_stall, 0);
/* set_stall on ep0 */
cdns3_select_ep(priv_dev, 0x00);
writel(EP_CMD_SSTALL, &priv_dev->regs->ep_cmd);
} else {
cdns3_prepare_setup_packet(priv_dev);
}
priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
writel((send_erdy ? EP_CMD_ERDY : 0) | EP_CMD_REQ_CMPL,
&priv_dev->regs->ep_cmd);
}
/**
* cdns3_req_ep0_set_configuration - Handling of SET_CONFIG standard USB request
* @priv_dev: extended gadget object
* @ctrl_req: pointer to received setup packet
*
* Returns 0 if success, USB_GADGET_DELAYED_STATUS on deferred status stage,
* error code on error
*/
static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
enum usb_device_state device_state = priv_dev->gadget.state;
u32 config = le16_to_cpu(ctrl_req->wValue);
int result = 0;
switch (device_state) {
case USB_STATE_ADDRESS:
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
if (result || !config)
goto reset_config;
break;
case USB_STATE_CONFIGURED:
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
if (!config && !result)
goto reset_config;
break;
default:
return -EINVAL;
}
return 0;
reset_config:
if (result != USB_GADGET_DELAYED_STATUS)
cdns3_hw_reset_eps_config(priv_dev);
usb_gadget_set_state(&priv_dev->gadget,
USB_STATE_ADDRESS);
return result;
}
/**
* cdns3_req_ep0_set_address - Handling of SET_ADDRESS standard USB request
* @priv_dev: extended gadget object
* @ctrl_req: pointer to received setup packet
*
* Returns 0 if success, error code on error
*/
static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
enum usb_device_state device_state = priv_dev->gadget.state;
u32 reg;
u32 addr;
addr = le16_to_cpu(ctrl_req->wValue);
if (addr > USB_DEVICE_MAX_ADDRESS) {
dev_err(priv_dev->dev,
"Device address (%d) cannot be greater than %d\n",
addr, USB_DEVICE_MAX_ADDRESS);
return -EINVAL;
}
if (device_state == USB_STATE_CONFIGURED) {
dev_err(priv_dev->dev,
"can't set_address from configured state\n");
return -EINVAL;
}
reg = readl(&priv_dev->regs->usb_cmd);
writel(reg | USB_CMD_FADDR(addr) | USB_CMD_SET_ADDR,
&priv_dev->regs->usb_cmd);
usb_gadget_set_state(&priv_dev->gadget,
(addr ? USB_STATE_ADDRESS : USB_STATE_DEFAULT));
return 0;
}
/**
* cdns3_req_ep0_get_status - Handling of GET_STATUS standard USB request
* @priv_dev: extended gadget object
* @ctrl: pointer to received setup packet
*
* Returns 0 if success, error code on error
*/
static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl)
{
struct cdns3_endpoint *priv_ep;
__le16 *response_pkt;
u16 usb_status = 0;
u32 recip;
u8 index;
recip = ctrl->bRequestType & USB_RECIP_MASK;
switch (recip) {
case USB_RECIP_DEVICE:
/* self powered */
if (priv_dev->is_selfpowered)
usb_status = BIT(USB_DEVICE_SELF_POWERED);
if (priv_dev->wake_up_flag)
usb_status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
if (priv_dev->gadget.speed != USB_SPEED_SUPER)
break;
if (priv_dev->u1_allowed)
usb_status |= BIT(USB_DEV_STAT_U1_ENABLED);
if (priv_dev->u2_allowed)
usb_status |= BIT(USB_DEV_STAT_U2_ENABLED);
break;
case USB_RECIP_INTERFACE:
return cdns3_ep0_delegate_req(priv_dev, ctrl);
case USB_RECIP_ENDPOINT:
index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
priv_ep = priv_dev->eps[index];
/* check if endpoint is stalled or stall is pending */
cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
(priv_ep->flags & EP_STALL_PENDING))
usb_status = BIT(USB_ENDPOINT_HALT);
break;
default:
return -EINVAL;
}
response_pkt = (__le16 *)priv_dev->setup_buf;
*response_pkt = cpu_to_le16(usb_status);
cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
sizeof(*response_pkt), 1, 0);
return 0;
}
static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl,
int set)
{
enum usb_device_state state;
enum usb_device_speed speed;
int ret = 0;
u32 wValue;
u16 tmode;
wValue = le16_to_cpu(ctrl->wValue);
state = priv_dev->gadget.state;
speed = priv_dev->gadget.speed;
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
priv_dev->wake_up_flag = !!set;
break;
case USB_DEVICE_U1_ENABLE:
if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
return -EINVAL;
priv_dev->u1_allowed = !!set;
break;
case USB_DEVICE_U2_ENABLE:
if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
return -EINVAL;
priv_dev->u2_allowed = !!set;
break;
case USB_DEVICE_LTM_ENABLE:
ret = -EINVAL;
break;
case USB_DEVICE_TEST_MODE:
if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
return -EINVAL;
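/*
 * The USB 2.0 test selector is carried in the high byte of wIndex;
 * the low byte must be zero for a request addressed to the device.
 */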
tmode = le16_to_cpu(ctrl->wIndex);
if (!set || (tmode & 0xff) != 0)
return -EINVAL;
tmode >>= 8;
switch (tmode) {
case USB_TEST_J:
case USB_TEST_K:
case USB_TEST_SE0_NAK:
case USB_TEST_PACKET:
cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
USB_CMD_STMODE |
USB_STS_TMODE_SEL(tmode - 1));
break;
default:
ret = -EINVAL;
}
break;
default:
ret = -EINVAL;
}
return ret;
}
static int cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl,
int set)
{
u32 wValue;
int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
switch (wValue) {
case USB_INTRF_FUNC_SUSPEND:
break;
default:
ret = -EINVAL;
}
return ret;
}
static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl,
int set)
{
struct cdns3_endpoint *priv_ep;
int ret = 0;
u8 index;
if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
return -EINVAL;
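/* Halt feature on ep0 itself (wIndex 0x00/0x80): nothing to do. */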
if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
return 0;
index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
priv_ep = priv_dev->eps[index];
cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
if (set)
__cdns3_gadget_ep_set_halt(priv_ep);
else if (!(priv_ep->flags & EP_WEDGE))
ret = __cdns3_gadget_ep_clear_halt(priv_ep);
cdns3_select_ep(priv_dev, 0x00);
return ret;
}
/**
* cdns3_req_ep0_handle_feature -
* Handling of GET/SET_FEATURE standard USB request
*
* @priv_dev: extended gadget object
* @ctrl: pointer to received setup packet
* @set: must be set to 1 for SET_FEATURE request
*
* Returns 0 if success, error code on error
*/
static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl,
int set)
{
int ret = 0;
u32 recip;
recip = ctrl->bRequestType & USB_RECIP_MASK;
switch (recip) {
case USB_RECIP_DEVICE:
ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set);
break;
case USB_RECIP_INTERFACE:
ret = cdns3_ep0_feature_handle_intf(priv_dev, ctrl, set);
break;
case USB_RECIP_ENDPOINT:
ret = cdns3_ep0_feature_handle_endpoint(priv_dev, ctrl, set);
break;
default:
return -EINVAL;
}
return ret;
}
/**
* cdns3_req_ep0_set_sel - Handling of SET_SEL standard USB request
* @priv_dev: extended gadget object
* @ctrl_req: pointer to received setup packet
*
* Returns 0 if success, error code on error
*/
static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
if (priv_dev->gadget.state < USB_STATE_ADDRESS)
return -EINVAL;
if (le16_to_cpu(ctrl_req->wLength) != 6) {
dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n",
le16_to_cpu(ctrl_req->wLength));
return -EINVAL;
}
cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0);
return 0;
}
/**
* cdns3_req_ep0_set_isoch_delay -
* Handling of SET_ISOCH_DELAY standard USB request
* @priv_dev: extended gadget object
* @ctrl_req: pointer to received setup packet
*
* Returns 0 if success, error code on error
*/
static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
if (ctrl_req->wIndex || ctrl_req->wLength)
return -EINVAL;
priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue);
return 0;
}
/**
* cdns3_ep0_standard_request - Handling standard USB requests
* @priv_dev: extended gadget object
* @ctrl_req: pointer to received setup packet
*
* Returns 0 if success, error code on error
*/
static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
int ret;
switch (ctrl_req->bRequest) {
case USB_REQ_SET_ADDRESS:
ret = cdns3_req_ep0_set_address(priv_dev, ctrl_req);
break;
case USB_REQ_SET_CONFIGURATION:
ret = cdns3_req_ep0_set_configuration(priv_dev, ctrl_req);
break;
case USB_REQ_GET_STATUS:
ret = cdns3_req_ep0_get_status(priv_dev, ctrl_req);
break;
case USB_REQ_CLEAR_FEATURE:
ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 0);
break;
case USB_REQ_SET_FEATURE:
ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 1);
break;
case USB_REQ_SET_SEL:
ret = cdns3_req_ep0_set_sel(priv_dev, ctrl_req);
break;
case USB_REQ_SET_ISOCH_DELAY:
ret = cdns3_req_ep0_set_isoch_delay(priv_dev, ctrl_req);
break;
default:
ret = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
break;
}
return ret;
}
static void __pending_setup_status_handler(struct cdns3_device *priv_dev)
{
struct usb_request *request = priv_dev->pending_status_request;
if (priv_dev->status_completion_no_call && request &&
request->complete) {
request->complete(&priv_dev->eps[0]->endpoint, request);
priv_dev->status_completion_no_call = 0;
}
}
void cdns3_pending_setup_status_handler(struct work_struct *work)
{
struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
pending_status_wq);
unsigned long flags;
spin_lock_irqsave(&priv_dev->lock, flags);
__pending_setup_status_handler(priv_dev);
spin_unlock_irqrestore(&priv_dev->lock, flags);
}
/**
* cdns3_ep0_setup_phase - Handling setup USB requests
* @priv_dev: extended gadget object
*/
static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev)
{
struct usb_ctrlrequest *ctrl = priv_dev->setup_buf;
struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
int result;
priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN;
trace_cdns3_ctrl_req(ctrl);
if (!list_empty(&priv_ep->pending_req_list)) {
struct usb_request *request;
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_ep->dir = priv_dev->ep0_data_dir;
cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
-ECONNRESET);
}
if (le16_to_cpu(ctrl->wLength))
priv_dev->ep0_stage = CDNS3_DATA_STAGE;
else
priv_dev->ep0_stage = CDNS3_STATUS_STAGE;
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
result = cdns3_ep0_standard_request(priv_dev, ctrl);
else
result = cdns3_ep0_delegate_req(priv_dev, ctrl);
if (result == USB_GADGET_DELAYED_STATUS)
return;
if (result < 0)
cdns3_ep0_complete_setup(priv_dev, 1, 1);
else if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE)
cdns3_ep0_complete_setup(priv_dev, 0, 1);
}
static void cdns3_transfer_completed(struct cdns3_device *priv_dev)
{
struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
if (!list_empty(&priv_ep->pending_req_list)) {
struct usb_request *request;
trace_cdns3_complete_trb(priv_ep, priv_ep->trb_pool);
request = cdns3_next_request(&priv_ep->pending_req_list);
request->actual =
TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length));
priv_ep->dir = priv_dev->ep0_data_dir;
cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0);
}
cdns3_ep0_complete_setup(priv_dev, 0, 0);
}
/**
* cdns3_check_new_setup - Check whether the controller received a new SETUP packet.
* @priv_dev: extended gadget object
*
* The SETUP packet can be kept in on-chip memory or in system memory.
*/
static bool cdns3_check_new_setup(struct cdns3_device *priv_dev)
{
u32 ep_sts_reg;
cdns3_select_ep(priv_dev, USB_DIR_OUT);
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
return !!(ep_sts_reg & (EP_STS_SETUP | EP_STS_STPWAIT));
}
/**
* cdns3_check_ep0_interrupt_proceed - Processes interrupt related to endpoint 0
* @priv_dev: extended gadget object
* @dir: USB_DIR_IN for IN direction, USB_DIR_OUT for OUT direction
*/
void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir)
{
u32 ep_sts_reg;
cdns3_select_ep(priv_dev, dir);
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
writel(ep_sts_reg, &priv_dev->regs->ep_sts);
trace_cdns3_ep0_irq(priv_dev, ep_sts_reg);
__pending_setup_status_handler(priv_dev);
if (ep_sts_reg & EP_STS_SETUP)
priv_dev->wait_for_setup = 1;
if (priv_dev->wait_for_setup && ep_sts_reg & EP_STS_IOC) {
priv_dev->wait_for_setup = 0;
cdns3_ep0_setup_phase(priv_dev);
} else if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
priv_dev->ep0_data_dir = dir;
cdns3_transfer_completed(priv_dev);
}
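/*
 * A "descriptor missing" event on the OUT direction means that no TRB
 * is armed for the next SETUP packet; re-arm one, unless a delegated
 * setup request is still being handled by the gadget driver.
 */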
if (ep_sts_reg & EP_STS_DESCMIS) {
if (dir == 0 && !priv_dev->setup_pending)
cdns3_prepare_setup_packet(priv_dev);
}
}
/**
* cdns3_gadget_ep0_enable
* @ep: pointer to endpoint zero object
* @desc: pointer to usb endpoint descriptor
*
* Function shouldn't be called by the gadget driver,
* endpoint 0 is always active
*/
static int cdns3_gadget_ep0_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
return -EINVAL;
}
/**
* cdns3_gadget_ep0_disable
* @ep: pointer to endpoint zero object
*
* Function shouldn't be called by the gadget driver,
* endpoint 0 is always active
*/
static int cdns3_gadget_ep0_disable(struct usb_ep *ep)
{
return -EINVAL;
}
/**
* cdns3_gadget_ep0_set_halt
* @ep: pointer to endpoint zero object
* @value: 1 for set stall, 0 for clear stall
*
* Returns 0
*/
static int cdns3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
/* TODO */
return 0;
}
/**
* cdns3_gadget_ep0_queue - Transfer data on endpoint zero
* @ep: pointer to endpoint zero object
* @request: pointer to request object
* @gfp_flags: gfp flags
*
* Returns 0 on success, error code otherwise
*/
static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
unsigned long flags;
int ret = 0;
u8 zlp = 0;
int i;
spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);
/* cancel the request if the controller received a new SETUP packet. */
if (cdns3_check_new_setup(priv_dev)) {
spin_unlock_irqrestore(&priv_dev->lock, flags);
return -ECONNRESET;
}
/* send STATUS stage. Should be called only for SET_CONFIGURATION */
if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
u32 val;
cdns3_select_ep(priv_dev, 0x00);
/*
* Configure all non-control EPs which are not enabled by class driver
*/
for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
priv_ep = priv_dev->eps[i];
if (priv_ep && priv_ep->flags & EP_CLAIMED &&
!(priv_ep->flags & EP_ENABLED))
cdns3_ep_config(priv_ep, 0);
}
cdns3_set_hw_configuration(priv_dev);
cdns3_ep0_complete_setup(priv_dev, 0, 1);
/* wait until configuration set */
ret = readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
val & USB_STS_CFGSTS_MASK, 1, 100);
if (ret == -ETIMEDOUT)
dev_warn(priv_dev->dev, "timeout for waiting configuration set\n");
request->actual = 0;
priv_dev->status_completion_no_call = true;
priv_dev->pending_status_request = request;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
spin_unlock_irqrestore(&priv_dev->lock, flags);
/*
* Since there is no completion interrupt for status stage,
* it needs to call ->completion in software after
* ep0_queue is back.
*/
queue_work(system_freezable_wq, &priv_dev->pending_status_wq);
return ret;
}
if (!list_empty(&priv_ep->pending_req_list)) {
dev_err(priv_dev->dev,
"can't handle multiple requests for ep0\n");
spin_unlock_irqrestore(&priv_dev->lock, flags);
return -EBUSY;
}
ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
priv_dev->ep0_data_dir);
if (ret) {
spin_unlock_irqrestore(&priv_dev->lock, flags);
dev_err(priv_dev->dev, "failed to map request\n");
return -EINVAL;
}
request->status = -EINPROGRESS;
list_add_tail(&request->list, &priv_ep->pending_req_list);
if (request->zero && request->length &&
(request->length % ep->maxpacket == 0))
zlp = 1;
cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp);
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
/**
* cdns3_gadget_ep_set_wedge - Set wedge on selected endpoint
* @ep: endpoint object
*
* Returns 0
*/
int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
dev_dbg(priv_dev->dev, "Wedge for %s\n", ep->name);
cdns3_gadget_ep_set_halt(ep, 1);
priv_ep->flags |= EP_WEDGE;
return 0;
}
static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
.enable = cdns3_gadget_ep0_enable,
.disable = cdns3_gadget_ep0_disable,
.alloc_request = cdns3_gadget_ep_alloc_request,
.free_request = cdns3_gadget_ep_free_request,
.queue = cdns3_gadget_ep0_queue,
.dequeue = cdns3_gadget_ep_dequeue,
.set_halt = cdns3_gadget_ep0_set_halt,
.set_wedge = cdns3_gadget_ep_set_wedge,
};
/**
* cdns3_ep0_config - Configures default endpoint
* @priv_dev: extended gadget object
*
* Function sets the maximum packet size and enables interrupts
*/
void cdns3_ep0_config(struct cdns3_device *priv_dev)
{
struct cdns3_usb_regs __iomem *regs;
struct cdns3_endpoint *priv_ep;
u32 max_packet_size = 64;
u32 ep_cfg;
regs = priv_dev->regs;
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
max_packet_size = 512;
priv_ep = priv_dev->eps[0];
if (!list_empty(&priv_ep->pending_req_list)) {
struct usb_request *request;
request = cdns3_next_request(&priv_ep->pending_req_list);
list_del_init(&request->list);
}
priv_dev->u1_allowed = 0;
priv_dev->u2_allowed = 0;
priv_dev->gadget.ep0->maxpacket = max_packet_size;
cdns3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
/* init ep out */
cdns3_select_ep(priv_dev, USB_DIR_OUT);
if (priv_dev->dev_ver >= DEV_VER_V3) {
cdns3_set_register_bit(&priv_dev->regs->dtrans,
BIT(0) | BIT(16));
cdns3_set_register_bit(&priv_dev->regs->tdl_from_trb,
BIT(0) | BIT(16));
}
ep_cfg = EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size);
if (!(priv_ep->flags & EP_CONFIGURED))
writel(ep_cfg, ®s->ep_cfg);
writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
®s->ep_sts_en);
/* init ep in */
cdns3_select_ep(priv_dev, USB_DIR_IN);
if (!(priv_ep->flags & EP_CONFIGURED))
writel(ep_cfg, ®s->ep_cfg);
priv_ep->flags |= EP_CONFIGURED;
writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, ®s->ep_sts_en);
cdns3_set_register_bit(®s->usb_conf, USB_CONF_U1DS | USB_CONF_U2DS);
}
/**
* cdns3_init_ep0 - Initializes software endpoint 0 of gadget
* @priv_dev: extended gadget object
* @priv_ep: extended endpoint object
*
* Returns 0 on success else error code.
*/
int cdns3_init_ep0(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
sprintf(priv_ep->name, "ep0");
/* fill linux fields */
priv_ep->endpoint.ops = &cdns3_gadget_ep0_ops;
priv_ep->endpoint.maxburst = 1;
usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
CDNS3_EP0_MAX_PACKET_LIMIT);
priv_ep->endpoint.address = 0;
priv_ep->endpoint.caps.type_control = 1;
priv_ep->endpoint.caps.dir_in = 1;
priv_ep->endpoint.caps.dir_out = 1;
priv_ep->endpoint.name = priv_ep->name;
priv_ep->endpoint.desc = &cdns3_gadget_ep0_desc;
priv_dev->gadget.ep0 = &priv_ep->endpoint;
priv_ep->type = USB_ENDPOINT_XFER_CONTROL;
return cdns3_allocate_trb_pool(priv_ep);
}
| linux-master | drivers/usb/cdns3/cdns3-ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS and USBSSP DRD Driver - host side
*
* Copyright (C) 2018-2019 Cadence Design Systems.
* Copyright (C) 2017-2018 NXP
*
* Authors: Peter Chen <[email protected]>
* Pawel Laszczak <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "core.h"
#include "drd.h"
#include "host-export.h"
#include <linux/usb/hcd.h>
#include "../host/xhci.h"
#include "../host/xhci-plat.h"
#define XECP_PORT_CAP_REG 0x8000
#define XECP_AUX_CTRL_REG1 0x8120
#define CFG_RXDET_P3_EN BIT(15)
#define LPM_2_STB_SWITCH_EN BIT(25)
static void xhci_cdns3_plat_start(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 value;
/* set usbcmd.EU3S */
value = readl(&xhci->op_regs->command);
value |= CMD_PM_INDEX;
writel(value, &xhci->op_regs->command);
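/*
 * Judging by the bit names, these vendor-specific xHCI extended
 * capability registers keep receiver detection enabled in P3 and
 * enable the LPM-to-standby switch.
 */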
if (hcd->regs) {
value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
value |= CFG_RXDET_P3_EN;
writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
value = readl(hcd->regs + XECP_PORT_CAP_REG);
value |= LPM_2_STB_SWITCH_EN;
writel(value, hcd->regs + XECP_PORT_CAP_REG);
}
}
static int xhci_cdns3_resume_quirk(struct usb_hcd *hcd)
{
xhci_cdns3_plat_start(hcd);
return 0;
}
static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
.plat_start = xhci_cdns3_plat_start,
.resume_quirk = xhci_cdns3_resume_quirk,
};
static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
int ret;
struct usb_hcd *hcd;
cdns_drd_host_on(cdns);
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(cdns->dev, "couldn't allocate xHCI device\n");
return -ENOMEM;
}
xhci->dev.parent = cdns->dev;
cdns->host_dev = xhci;
ret = platform_device_add_resources(xhci, cdns->xhci_res,
CDNS_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
goto err1;
}
cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
sizeof(struct xhci_plat_priv), GFP_KERNEL);
if (!cdns->xhci_plat_data) {
ret = -ENOMEM;
goto err1;
}
if (cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW))
cdns->xhci_plat_data->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
ret = platform_device_add_data(xhci, cdns->xhci_plat_data,
sizeof(struct xhci_plat_priv));
if (ret)
goto free_memory;
ret = platform_device_add(xhci);
if (ret) {
dev_err(cdns->dev, "failed to register xHCI device\n");
goto free_memory;
}
/* The glue needs access to the xHCI register region for power management */
hcd = platform_get_drvdata(xhci);
if (hcd)
cdns->xhci_regs = hcd->regs;
return 0;
free_memory:
kfree(cdns->xhci_plat_data);
err1:
platform_device_put(xhci);
return ret;
}
static void cdns_host_exit(struct cdns *cdns)
{
kfree(cdns->xhci_plat_data);
platform_device_unregister(cdns->host_dev);
cdns->host_dev = NULL;
cdns_drd_host_off(cdns);
}
int cdns_host_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
rdrv->start = __cdns_host_init;
rdrv->stop = cdns_host_exit;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "host";
cdns->roles[USB_ROLE_HOST] = rdrv;
return 0;
}
| linux-master | drivers/usb/cdns3/host.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS PCI Glue driver
*
* Copyright (C) 2018-2019 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
struct cdns3_wrap {
struct platform_device *plat_dev;
struct resource dev_res[6];
int devfn;
};
#define RES_IRQ_HOST_ID 0
#define RES_IRQ_PERIPHERAL_ID 1
#define RES_IRQ_OTG_ID 2
#define RES_HOST_ID 3
#define RES_DEV_ID 4
#define RES_DRD_ID 5
#define PCI_BAR_HOST 0
#define PCI_BAR_DEV 2
#define PCI_BAR_OTG 0
#define PCI_DEV_FN_HOST_DEVICE 0
#define PCI_DEV_FN_OTG 1
#define PCI_DRIVER_NAME "cdns3-pci-usbss"
#define PLAT_DRIVER_NAME "cdns-usb3"
#define CDNS_VENDOR_ID 0x17cd
#define CDNS_DEVICE_ID 0x0100
static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
{
struct pci_dev *func;
/*
 * Gets the second function.
 * It's a little tricky, but this platform has two functions.
 * The first holds the resources for Host/Device while the second
 * holds the resources for DRD/OTG.
 */
func = pci_get_device(pdev->vendor, pdev->device, NULL);
if (unlikely(!func))
return NULL;
if (func->devfn == pdev->devfn) {
func = pci_get_device(pdev->vendor, pdev->device, func);
if (unlikely(!func))
return NULL;
}
if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
func->devfn != PCI_DEV_FN_OTG) {
return NULL;
}
return func;
}
static int cdns3_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct platform_device_info plat_info;
struct cdns3_wrap *wrap;
struct resource *res;
struct pci_dev *func;
int err;
/*
 * For the GADGET/HOST PCI function the (devfn) function number is 0,
 * for the OTG PCI function it is 1.
 */
if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
pdev->devfn != PCI_DEV_FN_OTG))
return -EINVAL;
func = cdns3_get_second_fun(pdev);
if (unlikely(!func))
return -EINVAL;
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", err);
return err;
}
pci_set_master(pdev);
if (pci_is_enabled(func)) {
wrap = pci_get_drvdata(func);
} else {
wrap = kzalloc(sizeof(*wrap), GFP_KERNEL);
if (!wrap) {
pci_disable_device(pdev);
return -ENOMEM;
}
}
res = wrap->dev_res;
if (pdev->devfn == PCI_DEV_FN_HOST_DEVICE) {
/* function 0: host (BAR 0) + device (BAR 2). */
dev_dbg(&pdev->dev, "Initialize Device resources\n");
res[RES_DEV_ID].start = pci_resource_start(pdev, PCI_BAR_DEV);
res[RES_DEV_ID].end = pci_resource_end(pdev, PCI_BAR_DEV);
res[RES_DEV_ID].name = "dev";
res[RES_DEV_ID].flags = IORESOURCE_MEM;
dev_dbg(&pdev->dev, "USBSS-DEV physical base addr: %pa\n",
&res[RES_DEV_ID].start);
res[RES_HOST_ID].start = pci_resource_start(pdev, PCI_BAR_HOST);
res[RES_HOST_ID].end = pci_resource_end(pdev, PCI_BAR_HOST);
res[RES_HOST_ID].name = "xhci";
res[RES_HOST_ID].flags = IORESOURCE_MEM;
dev_dbg(&pdev->dev, "USBSS-XHCI physical base addr: %pa\n",
&res[RES_HOST_ID].start);
/* Interrupt for XHCI */
wrap->dev_res[RES_IRQ_HOST_ID].start = pdev->irq;
wrap->dev_res[RES_IRQ_HOST_ID].name = "host";
wrap->dev_res[RES_IRQ_HOST_ID].flags = IORESOURCE_IRQ;
/* Device interrupt. It's the same IRQ as for HOST. */
wrap->dev_res[RES_IRQ_PERIPHERAL_ID].start = pdev->irq;
wrap->dev_res[RES_IRQ_PERIPHERAL_ID].name = "peripheral";
wrap->dev_res[RES_IRQ_PERIPHERAL_ID].flags = IORESOURCE_IRQ;
} else {
res[RES_DRD_ID].start = pci_resource_start(pdev, PCI_BAR_OTG);
res[RES_DRD_ID].end = pci_resource_end(pdev, PCI_BAR_OTG);
res[RES_DRD_ID].name = "otg";
res[RES_DRD_ID].flags = IORESOURCE_MEM;
dev_dbg(&pdev->dev, "USBSS-DRD physical base addr: %pa\n",
&res[RES_DRD_ID].start);
/* Interrupt for OTG/DRD. */
wrap->dev_res[RES_IRQ_OTG_ID].start = pdev->irq;
wrap->dev_res[RES_IRQ_OTG_ID].name = "otg";
wrap->dev_res[RES_IRQ_OTG_ID].flags = IORESOURCE_IRQ;
}
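/*
 * Only the second function to probe sees its sibling already enabled;
 * at that point wrap->dev_res holds the resources of both functions,
 * so the combined platform device can be registered exactly once.
 */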
if (pci_is_enabled(func)) {
/* set up platform device info */
memset(&plat_info, 0, sizeof(plat_info));
plat_info.parent = &pdev->dev;
plat_info.fwnode = pdev->dev.fwnode;
plat_info.name = PLAT_DRIVER_NAME;
plat_info.id = pdev->devfn;
wrap->devfn = pdev->devfn;
plat_info.res = wrap->dev_res;
plat_info.num_res = ARRAY_SIZE(wrap->dev_res);
plat_info.dma_mask = pdev->dma_mask;
/* register platform device */
wrap->plat_dev = platform_device_register_full(&plat_info);
if (IS_ERR(wrap->plat_dev)) {
pci_disable_device(pdev);
err = PTR_ERR(wrap->plat_dev);
kfree(wrap);
return err;
}
}
pci_set_drvdata(pdev, wrap);
return err;
}
static void cdns3_pci_remove(struct pci_dev *pdev)
{
struct cdns3_wrap *wrap;
struct pci_dev *func;
func = cdns3_get_second_fun(pdev);
wrap = (struct cdns3_wrap *)pci_get_drvdata(pdev);
if (wrap->devfn == pdev->devfn)
platform_device_unregister(wrap->plat_dev);
if (!pci_is_enabled(func))
kfree(wrap);
}
static const struct pci_device_id cdns3_pci_ids[] = {
{ PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
{ 0, }
};
static struct pci_driver cdns3_pci_driver = {
.name = PCI_DRIVER_NAME,
.id_table = cdns3_pci_ids,
.probe = cdns3_pci_probe,
.remove = cdns3_pci_remove,
};
module_pci_driver(cdns3_pci_driver);
MODULE_DEVICE_TABLE(pci, cdns3_pci_ids);
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USBSS PCI wrapper");
| linux-master | drivers/usb/cdns3/cdns3-pci-wrap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS and USBSSP DRD Driver.
*
* Copyright (C) 2018-2020 Cadence.
* Copyright (C) 2019 Texas Instruments
*
* Author: Pawel Laszczak <[email protected]>
* Roger Quadros <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/usb/otg.h>
#include "drd.h"
#include "core.h"
/**
* cdns_set_mode - change mode of OTG Core
* @cdns: pointer to context structure
* @mode: selected mode of operation (enum usb_dr_mode)
*
* Returns 0 on success otherwise negative errno
*/
static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode)
{
void __iomem *override_reg;
u32 reg;
switch (mode) {
case USB_DR_MODE_PERIPHERAL:
break;
case USB_DR_MODE_HOST:
break;
case USB_DR_MODE_OTG:
dev_dbg(cdns->dev, "Set controller to OTG mode\n");
if (cdns->version == CDNSP_CONTROLLER_V2)
override_reg = &cdns->otg_cdnsp_regs->override;
else if (cdns->version == CDNS3_CONTROLLER_V1)
override_reg = &cdns->otg_v1_regs->override;
else
override_reg = &cdns->otg_v0_regs->ctrl1;
reg = readl(override_reg);
if (cdns->version != CDNS3_CONTROLLER_V0)
reg |= OVERRIDE_IDPULLUP;
else
reg |= OVERRIDE_IDPULLUP_V0;
writel(reg, override_reg);
if (cdns->version == CDNS3_CONTROLLER_V1) {
/*
 * Enable the workaround feature built into the
 * controller to address an issue with the RX Sensitivity
 * test (EL_17) for the USB2 PHY. The issue only occurs
 * for the 0x0002450D controller version.
 */
if (cdns->phyrst_a_enable) {
reg = readl(&cdns->otg_v1_regs->phyrst_cfg);
reg |= PHYRST_CFG_PHYRST_A_ENABLE;
writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
}
}
/*
 * The hardware specification says: "ID_VALUE must be valid within
 * 50ms after idpullup is set to '1'", so the driver must wait
 * 50ms before reading this pin.
 */
usleep_range(50000, 60000);
break;
default:
dev_err(cdns->dev, "Unsupported mode of operation %d\n", mode);
return -EINVAL;
}
return 0;
}
int cdns_get_id(struct cdns *cdns)
{
int id;
id = readl(&cdns->otg_regs->sts) & OTGSTS_ID_VALUE;
dev_dbg(cdns->dev, "OTG ID: %d", id);
return id;
}
int cdns_get_vbus(struct cdns *cdns)
{
int vbus;
vbus = !!(readl(&cdns->otg_regs->sts) & OTGSTS_VBUS_VALID);
dev_dbg(cdns->dev, "OTG VBUS: %d", vbus);
return vbus;
}
void cdns_clear_vbus(struct cdns *cdns)
{
u32 reg;
if (cdns->version != CDNSP_CONTROLLER_V2)
return;
reg = readl(&cdns->otg_cdnsp_regs->override);
reg |= OVERRIDE_SESS_VLD_SEL;
writel(reg, &cdns->otg_cdnsp_regs->override);
}
EXPORT_SYMBOL_GPL(cdns_clear_vbus);
void cdns_set_vbus(struct cdns *cdns)
{
u32 reg;
if (cdns->version != CDNSP_CONTROLLER_V2)
return;
reg = readl(&cdns->otg_cdnsp_regs->override);
reg &= ~OVERRIDE_SESS_VLD_SEL;
writel(reg, &cdns->otg_cdnsp_regs->override);
}
EXPORT_SYMBOL_GPL(cdns_set_vbus);
bool cdns_is_host(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_HOST)
return true;
else if (cdns_get_id(cdns) == CDNS3_ID_HOST)
return true;
return false;
}
bool cdns_is_device(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL)
return true;
else if (cdns->dr_mode == USB_DR_MODE_OTG)
if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL)
return true;
return false;
}
/**
* cdns_otg_disable_irq - Disable all OTG interrupts
* @cdns: Pointer to controller context structure
*/
static void cdns_otg_disable_irq(struct cdns *cdns)
{
writel(0, &cdns->otg_irq_regs->ien);
}
/**
* cdns_otg_enable_irq - enable id and sess_valid interrupts
* @cdns: Pointer to controller context structure
*/
static void cdns_otg_enable_irq(struct cdns *cdns)
{
writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
}
/**
* cdns_drd_host_on - start host.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno.
*/
int cdns_drd_host_on(struct cdns *cdns)
{
u32 val, ready_bit;
int ret;
/* Enable host mode. */
writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
&cdns->otg_regs->cmd);
if (cdns->version == CDNSP_CONTROLLER_V2)
ready_bit = OTGSTS_CDNSP_XHCI_READY;
else
ready_bit = OTGSTS_CDNS3_XHCI_READY;
dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
val & ready_bit, 1, 100000);
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_HOST);
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST);
return ret;
}
/**
* cdns_drd_host_off - stop host.
* @cdns: Pointer to controller context structure.
*/
void cdns_drd_host_off(struct cdns *cdns)
{
u32 val;
writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP |
OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF,
&cdns->otg_regs->cmd);
/* Waiting till H_IDLE state.*/
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_HOST_STATE_MASK),
1, 2000000);
phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
/**
* cdns_drd_gadget_on - start gadget.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno
*/
int cdns_drd_gadget_on(struct cdns *cdns)
{
u32 reg = OTGCMD_OTG_DIS;
u32 ready_bit;
int ret, val;
/* switch OTG core */
writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
if (cdns->version == CDNSP_CONTROLLER_V2)
ready_bit = OTGSTS_CDNSP_DEV_READY;
else
ready_bit = OTGSTS_CDNS3_DEV_READY;
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
val & ready_bit, 1, 100000);
if (ret) {
dev_err(cdns->dev, "timeout waiting for dev_ready\n");
return ret;
}
phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_DEVICE);
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
return 0;
}
EXPORT_SYMBOL_GPL(cdns_drd_gadget_on);
/**
* cdns_drd_gadget_off - stop gadget.
* @cdns: Pointer to controller context structure.
*/
void cdns_drd_gadget_off(struct cdns *cdns)
{
u32 val;
/*
* Driver should wait at least 10us after disabling Device
* before turning-off Device (DEV_BUS_DROP).
*/
usleep_range(20, 30);
writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP |
OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF,
&cdns->otg_regs->cmd);
/* Waiting till DEV_IDLE state.*/
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_DEV_STATE_MASK),
1, 2000000);
phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
EXPORT_SYMBOL_GPL(cdns_drd_gadget_off);
/**
* cdns_init_otg_mode - initialize drd controller
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
static int cdns_init_otg_mode(struct cdns *cdns)
{
int ret;
cdns_otg_disable_irq(cdns);
/* clear all interrupts */
writel(~0, &cdns->otg_irq_regs->ivect);
ret = cdns_set_mode(cdns, USB_DR_MODE_OTG);
if (ret)
return ret;
cdns_otg_enable_irq(cdns);
return 0;
}
/**
* cdns_drd_update_mode - initialize mode of operation
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
int cdns_drd_update_mode(struct cdns *cdns)
{
int ret;
switch (cdns->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
break;
case USB_DR_MODE_HOST:
ret = cdns_set_mode(cdns, USB_DR_MODE_HOST);
break;
case USB_DR_MODE_OTG:
ret = cdns_init_otg_mode(cdns);
break;
default:
dev_err(cdns->dev, "Unsupported mode of operation %d\n",
cdns->dr_mode);
return -EINVAL;
}
return ret;
}
static irqreturn_t cdns_drd_thread_irq(int irq, void *data)
{
struct cdns *cdns = data;
cdns_hw_role_switch(cdns);
return IRQ_HANDLED;
}
/**
* cdns_drd_irq - interrupt handler for OTG events
*
* @irq: irq number for cdns core device
* @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
static irqreturn_t cdns_drd_irq(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
struct cdns *cdns = data;
u32 reg;
if (cdns->dr_mode != USB_DR_MODE_OTG)
return IRQ_NONE;
if (cdns->in_lpm)
return ret;
reg = readl(&cdns->otg_irq_regs->ivect);
if (!reg)
return IRQ_NONE;
if (reg & OTGIEN_ID_CHANGE_INT) {
dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n",
cdns_get_id(cdns));
ret = IRQ_WAKE_THREAD;
}
if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) {
dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n",
cdns_get_vbus(cdns));
ret = IRQ_WAKE_THREAD;
}
writel(~0, &cdns->otg_irq_regs->ivect);
return ret;
}
int cdns_drd_init(struct cdns *cdns)
{
void __iomem *regs;
u32 state;
int ret;
regs = devm_ioremap_resource(cdns->dev, &cdns->otg_res);
if (IS_ERR(regs))
return PTR_ERR(regs);
/*
 * Detection of DRD version. The controller has been released
 * in three versions. All are very similar and software compatible,
 * but they have some differences in the register maps.
 * The first register in the oldest version is the command register
 * and it is read-only, so the driver reads 0 from it. In v1 and v2,
 * on the other hand, the first register contains the device ID
 * number, which is never 0. The driver uses this fact to detect the
 * proper version of the controller.
 */
cdns->otg_v0_regs = regs;
if (!readl(&cdns->otg_v0_regs->cmd)) {
cdns->version = CDNS3_CONTROLLER_V0;
cdns->otg_v1_regs = NULL;
cdns->otg_cdnsp_regs = NULL;
cdns->otg_regs = regs;
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v0_regs->ien;
writel(1, &cdns->otg_v0_regs->simulate);
dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
cdns->otg_v1_regs = regs;
cdns->otg_cdnsp_regs = regs;
cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_cdnsp_regs->ien;
cdns->version = CDNSP_CONTROLLER_V2;
} else {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v1_regs->ien;
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
}
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
}
state = OTGSTS_STRAP(readl(&cdns->otg_regs->sts));
/* Update dr_mode according to STRAP configuration. */
cdns->dr_mode = USB_DR_MODE_OTG;
if ((cdns->version == CDNSP_CONTROLLER_V2 &&
state == OTGSTS_CDNSP_STRAP_HOST) ||
(cdns->version != CDNSP_CONTROLLER_V2 &&
state == OTGSTS_STRAP_HOST)) {
dev_dbg(cdns->dev, "Controller strapped to HOST\n");
cdns->dr_mode = USB_DR_MODE_HOST;
} else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
state == OTGSTS_CDNSP_STRAP_GADGET) ||
(cdns->version != CDNSP_CONTROLLER_V2 &&
state == OTGSTS_STRAP_GADGET)) {
dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
}
ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
cdns_drd_irq,
cdns_drd_thread_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
dev_err(cdns->dev, "couldn't get otg_irq\n");
return ret;
}
state = readl(&cdns->otg_regs->sts);
if (OTGSTS_OTG_NRDY(state)) {
dev_err(cdns->dev, "Cadence USB3 OTG device not ready\n");
return -ENODEV;
}
return 0;
}
int cdns_drd_exit(struct cdns *cdns)
{
cdns_otg_disable_irq(cdns);
return 0;
}
/* Indicate whether the cdns3 core lost power while it was suspended */
bool cdns_power_is_lost(struct cdns *cdns)
{
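/*
 * cdns_drd_init() sets bit 0 of the otherwise unused "simulate"
 * register; if that bit reads back as 0, the block must have gone
 * through a power cycle since initialization.
 */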
if (cdns->version == CDNS3_CONTROLLER_V0) {
if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
return true;
} else {
if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(cdns_power_is_lost);
| linux-master | drivers/usb/cdns3/drd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cdns3-ti.c - TI specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
/* USB Wrapper register offsets */
#define USBSS_PID 0x0
#define USBSS_W1 0x4
#define USBSS_STATIC_CONFIG 0x8
#define USBSS_PHY_TEST 0xc
#define USBSS_DEBUG_CTRL 0x10
#define USBSS_DEBUG_INFO 0x14
#define USBSS_DEBUG_LINK_STATE 0x18
#define USBSS_DEVICE_CTRL 0x1c
/* Wrapper 1 register bits */
#define USBSS_W1_PWRUP_RST BIT(0)
#define USBSS_W1_OVERCURRENT_SEL BIT(8)
#define USBSS_W1_MODESTRAP_SEL BIT(9)
#define USBSS_W1_OVERCURRENT BIT(16)
#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17)
#define USBSS_W1_MODESTRAP_SHIFT 17
#define USBSS_W1_USB2_ONLY BIT(19)
/* Static config register bits */
#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5)
#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5
#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3)
#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3
#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1)
#define USBSS1_STATIC_VBUS_SEL_SHIFT 1
#define USBSS1_STATIC_LANE_REVERSE BIT(0)
/* Modestrap modes */
enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE,
USBSS_MODESTRAP_MODE_HOST,
USBSS_MODESTRAP_MODE_PERIPHERAL};
struct cdns_ti {
struct device *dev;
void __iomem *usbss;
unsigned usb2_only:1;
unsigned vbus_divider:1;
struct clk *usb2_refclk;
struct clk *lpm_clk;
};
static const int cdns_ti_rate_table[] = { /* in kHz */
9600,
10000,
12000,
19200,
20000,
24000,
25000,
26000,
38400,
40000,
58000,
50000,
52000,
};
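/*
 * The index of the matching entry above becomes the PLL reference
 * selector written into USBSS1_STATIC_PLL_REF_SEL in probe. Illustrative
 * example: a 19.2 MHz reference clock, reported as 19200 kHz, resolves to
 * rate code 3. The table follows the hardware encoding order rather than
 * numeric order, which is why 58000 precedes 50000.
 */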
static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset)
{
return readl(data->usbss + offset);
}
static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value)
{
writel(value, data->usbss + offset);
}
static int cdns_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct cdns_ti *data;
int error;
u32 reg;
int rate_code, i;
unsigned long rate;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
data->dev = dev;
data->usbss = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->usbss)) {
dev_err(dev, "can't map IOMEM resource\n");
return PTR_ERR(data->usbss);
}
data->usb2_refclk = devm_clk_get(dev, "ref");
if (IS_ERR(data->usb2_refclk)) {
dev_err(dev, "can't get usb2_refclk\n");
return PTR_ERR(data->usb2_refclk);
}
data->lpm_clk = devm_clk_get(dev, "lpm");
if (IS_ERR(data->lpm_clk)) {
dev_err(dev, "can't get lpm_clk\n");
return PTR_ERR(data->lpm_clk);
}
rate = clk_get_rate(data->usb2_refclk);
rate /= 1000; /* to kHz */
for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) {
if (cdns_ti_rate_table[i] == rate)
break;
}
if (i == ARRAY_SIZE(cdns_ti_rate_table)) {
dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
return -EINVAL;
}
rate_code = i;
pm_runtime_enable(dev);
error = pm_runtime_get_sync(dev);
if (error < 0) {
dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
goto err;
}
/* assert RESET */
reg = cdns_ti_readl(data, USBSS_W1);
reg &= ~USBSS_W1_PWRUP_RST;
cdns_ti_writel(data, USBSS_W1, reg);
/* set static config */
reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
if (data->vbus_divider)
reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
/* set USB2_ONLY mode if requested */
reg = cdns_ti_readl(data, USBSS_W1);
data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
if (data->usb2_only)
reg |= USBSS_W1_USB2_ONLY;
/* set default modestrap */
reg |= USBSS_W1_MODESTRAP_SEL;
reg &= ~USBSS_W1_MODESTRAP_MASK;
reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
cdns_ti_writel(data, USBSS_W1, reg);
/* de-assert RESET */
reg |= USBSS_W1_PWRUP_RST;
cdns_ti_writel(data, USBSS_W1, reg);
error = of_platform_populate(node, NULL, NULL, dev);
if (error) {
dev_err(dev, "failed to create children: %d\n", error);
goto err;
}
return 0;
err:
pm_runtime_put_sync(data->dev);
pm_runtime_disable(data->dev);
return error;
}
static int cdns_ti_remove_core(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static void cdns_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
device_for_each_child(dev, NULL, cdns_ti_remove_core);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
{ .compatible = "ti,am64-usb", },
{},
};
MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
static struct platform_driver cdns_ti_driver = {
.probe = cdns_ti_probe,
.remove_new = cdns_ti_remove,
.driver = {
.name = "cdns3-ti",
.of_match_table = cdns_ti_of_match,
},
};
module_platform_driver(cdns_ti_driver);
MODULE_ALIAS("platform:cdns3-ti");
MODULE_AUTHOR("Roger Quadros <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer");
| linux-master | drivers/usb/cdns3/cdns3-ti.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence CDNSP DRD Driver.
*
* Copyright (C) 2020 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
*/
#define CREATE_TRACE_POINTS
#include "cdnsp-trace.h"
| linux-master | drivers/usb/cdns3/cdnsp-trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence USBSS DRD Driver - gadget side.
*
* Copyright (C) 2018-2019 Cadence Design Systems.
* Copyright (C) 2017-2018 NXP
*
* Authors: Pawel Jez <[email protected]>,
* Pawel Laszczak <[email protected]>
* Peter Chen <[email protected]>
*/
/*
* Work around 1:
* In some situations the controller may latch a stale data address from a
* TRB in the following sequence:
* 1. Controller reads a TRB that includes the data address
* 2. Software updates TRBs, including the data address and Cycle bit
* 3. Controller re-reads the TRB, which now includes the Cycle bit
* 4. DMA runs with the stale data address
*
* To fix this problem, the driver needs to make the first TRB in a TD
* invalid. After preparing all TRBs, the driver needs to check the DMA
* position, and if DMA points to the first just-added TRB and the doorbell
* is 1, then the driver must defer making this TRB valid. This TRB will
* be made valid while adding the next TRB, but only if DMA is stopped or
* at the TRBERR interrupt.
*
* The issue has been fixed in the DEV_VER_V3 version of the controller.
*
* Work around 2:
* The controller has shared on-chip buffers for all incoming packets on
* OUT endpoints, including ep0out. It is a FIFO buffer, so packets must
* be handled by DMA in the correct order. If the first packet in the
* buffer is not handled, then the following packets directed to other
* endpoints and functions will be blocked.
* Additionally, the packets directed to one endpoint can block the entire
* on-chip buffer. In this case transfers to other endpoints are also
* blocked.
*
* To resolve this issue, after raising the descriptor missing interrupt,
* the driver prepares an internal usb_request object and uses it to arm
* the DMA transfer.
*
* The problematic situation was observed when an endpoint had been
* enabled but no usb_request was queued. The driver tries to detect such
* endpoints and uses this workaround only for them.
*
* The driver uses a limited number of buffers. This number can be set by
* the macro CDNS3_WA2_NUM_BUFFERS.
*
* Such a blocking situation was observed with the ACM gadget. For this
* function the host sends an OUT data packet but the ACM function is not
* prepared for it. This causes the buffer placed in on-chip memory to
* block transfers to other endpoints.
*
* The issue has been fixed in the DEV_VER_V2 version of the controller.
*
*/
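/*
 * Illustrative WA1 timeline (a sketch, not driver code): DMA prefetches
 * TRB[n] while software is still filling it, software then flips the
 * cycle bit, and DMA re-reads only the control word, starting the
 * transfer with the stale buffer address it cached earlier. Keeping the
 * first TRB of a TD invalid until it is safe to arm it closes this
 * window.
 */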
#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include <linux/property.h>
#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags);
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
struct usb_request *request);
static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
struct usb_request *request);
/**
* cdns3_clear_register_bit - clear bit in given register.
* @ptr: address of device controller register to be read and changed
* @mask: bits requested to clear
*/
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
mask = readl(ptr) & ~mask;
writel(mask, ptr);
}
/**
* cdns3_set_register_bit - set bit in given register.
* @ptr: address of device controller register to be read and changed
* @mask: bits requested to set
*/
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
mask = readl(ptr) | mask;
writel(mask, ptr);
}
/**
* cdns3_ep_addr_to_index - converts an endpoint address to the
* index of the endpoint object in the cdns3_device.eps[] container
* @ep_addr: endpoint address for which endpoint object is required
*
*/
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}
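/*
 * Worked example (illustrative): ep_addr 0x81 (EP1 IN) maps to
 * (0x81 & 0x7F) + 16 = 17, while ep_addr 0x01 (EP1 OUT) maps to 1.
 * OUT endpoints therefore occupy indexes 0..15 and IN endpoints 16..31
 * in cdns3_device.eps[].
 */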
static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
int dma_index;
dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
return dma_index / TRB_SIZE;
}
/**
* cdns3_next_request - returns next request from list
* @list: list containing requests
*
* Returns request or NULL if no requests in list
*/
struct usb_request *cdns3_next_request(struct list_head *list)
{
return list_first_entry_or_null(list, struct usb_request, list);
}
/**
* cdns3_next_align_buf - returns next buffer from list
* @list: list containing buffers
*
* Returns buffer or NULL if no buffers in list
*/
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}
/**
* cdns3_next_priv_request - returns next request from list
* @list: list containing requests
*
* Returns request or NULL if no requests in list
*/
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_request, list);
}
/**
* cdns3_select_ep - selects endpoint
* @priv_dev: extended gadget object
* @ep: endpoint address
*/
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
if (priv_dev->selected_ep == ep)
return;
priv_dev->selected_ep = ep;
writel(ep, &priv_dev->regs->ep_sel);
}
/**
* cdns3_get_tdl - gets current tdl for selected endpoint.
* @priv_dev: extended gadget object
*
* Before calling this function the appropriate endpoint must
* be selected by means of cdns3_select_ep function.
*/
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
if (priv_dev->dev_ver < DEV_VER_V3)
return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
else
return readl(&priv_dev->regs->ep_tdl);
}
dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
struct cdns3_trb *trb)
{
u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
return priv_ep->trb_pool_dma + offset;
}
static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
if (priv_ep->trb_pool) {
dma_pool_free(priv_dev->eps_dma_pool,
priv_ep->trb_pool, priv_ep->trb_pool_dma);
priv_ep->trb_pool = NULL;
}
}
/**
* cdns3_allocate_trb_pool - allocates the TRB pool for the selected endpoint
* @priv_ep: endpoint object
*
* Function will return 0 on success or -ENOMEM on allocation error
*/
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
int ring_size = TRB_RING_SIZE;
int num_trbs = ring_size / TRB_SIZE;
struct cdns3_trb *link_trb;
if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
cdns3_free_trb_pool(priv_ep);
if (!priv_ep->trb_pool) {
priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
GFP_ATOMIC,
&priv_ep->trb_pool_dma);
if (!priv_ep->trb_pool)
return -ENOMEM;
priv_ep->alloc_ring_size = ring_size;
}
memset(priv_ep->trb_pool, 0, ring_size);
priv_ep->num_trbs = num_trbs;
if (!priv_ep->num)
return 0;
/* Initialize the last TRB as Link TRB */
link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
if (priv_ep->use_streams) {
/*
* For stream capable endpoints the driver uses a single valid TRB.
* The last TRB has a zeroed cycle bit.
*/
link_trb->control = 0;
} else {
link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
}
return 0;
}
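/*
 * Ring layout example (illustrative): with a pool of N TRBs, entries
 * 0..N-2 carry data and entry N-1 is the link TRB pointing back to
 * trb_pool_dma with TRB_TOGGLE set, so each lap around the ring flips
 * the producer cycle state. ep0 (priv_ep->num == 0) returns early and
 * keeps no link TRB.
 */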
/**
* cdns3_ep_stall_flush - Stalls and flushes selected endpoint
* @priv_ep: endpoint object
*
* Endpoint must be selected before call to this function
*/
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
int val;
trace_cdns3_halt(priv_ep, 1, 1);
writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
&priv_dev->regs->ep_cmd);
/* wait for DFLUSH cleared */
readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_DFLUSH), 1, 1000);
priv_ep->flags |= EP_STALLED;
priv_ep->flags &= ~EP_STALL_PENDING;
}
/**
* cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
* @priv_dev: extended gadget object
*/
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
int i;
writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
cdns3_allow_enable_l1(priv_dev, 0);
priv_dev->hw_configured_flag = 0;
priv_dev->onchip_used_size = 0;
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
priv_dev->using_streams = 0;
for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
if (priv_dev->eps[i])
priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}
/**
* cdns3_ep_inc_trb - increment a trb index.
* @index: Pointer to the TRB index to increment.
* @cs: Cycle state
* @trb_in_seg: number of TRBs in segment
*
* The index should never point to the link TRB. After incrementing,
* if it points to the link TRB, wrap around to the beginning and toggle
* the cycle state bit. The link TRB is always the last TRB entry.
*/
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
(*index)++;
if (*index == (trb_in_seg - 1)) {
*index = 0;
*cs ^= 1;
}
}
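/*
 * Example (illustrative): with trb_in_seg = 8 the usable indexes are 0..6
 * (index 7 holds the link TRB). Incrementing from 6 lands on 7, which
 * triggers the wrap: the index resets to 0 and the cycle state toggles so
 * that TRBs from the previous lap are no longer seen as valid.
 */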
/**
* cdns3_ep_inc_enq - increment endpoint's enqueue pointer
* @priv_ep: The endpoint whose enqueue pointer we're incrementing
*/
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
priv_ep->free_trbs--;
cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}
/**
* cdns3_ep_inc_deq - increment endpoint's dequeue pointer
* @priv_ep: The endpoint whose dequeue pointer we're incrementing
*/
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
priv_ep->free_trbs++;
cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
/**
* cdns3_allow_enable_l1 - enable/disable permission to transition to L1.
* @priv_dev: Extended gadget object
* @enable: Enable/disable permission to transition to L1.
*
* If the USB_CONF_L1EN bit is set and the device receives an Extended
* Token packet, then the controller answers with an ACK handshake.
* If the USB_CONF_L1DS bit is set and the device receives an Extended
* Token packet, then the controller answers with a NYET handshake.
*/
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
if (enable)
writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
else
writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}
enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
u32 reg;
reg = readl(&priv_dev->regs->usb_sts);
if (DEV_SUPERSPEED(reg))
return USB_SPEED_SUPER;
else if (DEV_HIGHSPEED(reg))
return USB_SPEED_HIGH;
else if (DEV_FULLSPEED(reg))
return USB_SPEED_FULL;
else if (DEV_LOWSPEED(reg))
return USB_SPEED_LOW;
return USB_SPEED_UNKNOWN;
}
/**
* cdns3_start_all_request - add all not-yet-started requests to the ring
* @priv_dev: Extended gadget object
* @priv_ep: The endpoint for whom request will be started.
*
* Returns -ENOBUFS if the transfer ring does not have enough free TRBs
* to start all requests.
*/
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
struct usb_request *request;
int ret = 0;
u8 pending_empty = list_empty(&priv_ep->pending_req_list);
/*
* If the last pending transfer is INTERNAL
* OR streams are enabled for this endpoint
* do NOT start a new transfer till the last one completes
*/
if (!pending_empty) {
struct cdns3_request *priv_req;
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_req = to_cdns3_request(request);
if ((priv_req->flags & REQUEST_INTERNAL) ||
(priv_ep->flags & EP_TDLCHK_EN) ||
priv_ep->use_streams) {
dev_dbg(priv_dev->dev, "Blocking external request\n");
return ret;
}
}
while (!list_empty(&priv_ep->deferred_req_list)) {
request = cdns3_next_request(&priv_ep->deferred_req_list);
if (!priv_ep->use_streams) {
ret = cdns3_ep_run_transfer(priv_ep, request);
} else {
priv_ep->stream_sg_idx = 0;
ret = cdns3_ep_run_stream_transfer(priv_ep, request);
}
if (ret)
return ret;
list_move_tail(&request->list, &priv_ep->pending_req_list);
if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
break;
}
priv_ep->flags &= ~EP_RING_FULL;
return ret;
}
/*
* WA2: Set the flag for all non-ISOC OUT endpoints. If this flag is set,
* the driver tries to detect whether the endpoint needs an additional
* internal buffer for unblocking the on-chip FIFO buffer. This flag will
* be cleared if the DMA is armed before the first DESCMISS interrupt.
*/
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
(reg) |= EP_STS_EN_DESCMISEN; \
} } while (0)
static void __cdns3_descmiss_copy_data(struct usb_request *request,
struct usb_request *descmiss_req)
{
int length = request->actual + descmiss_req->actual;
struct scatterlist *s = request->sg;
if (!s) {
if (length <= request->length) {
memcpy(&((u8 *)request->buf)[request->actual],
descmiss_req->buf,
descmiss_req->actual);
request->actual = length;
} else {
/* It should never occur */
request->status = -ENOMEM;
}
} else {
if (length <= sg_dma_len(s)) {
void *p = phys_to_virt(sg_dma_address(s));
memcpy(&((u8 *)p)[request->actual],
descmiss_req->buf,
descmiss_req->actual);
request->actual = length;
} else {
request->status = -ENOMEM;
}
}
}
/**
* cdns3_wa2_descmiss_copy_data - copy data from internal requests to
* the request queued by the class driver.
* @priv_ep: extended endpoint object
* @request: request object
*/
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
struct usb_request *request)
{
struct usb_request *descmiss_req;
struct cdns3_request *descmiss_priv_req;
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
int chunk_end;
descmiss_priv_req =
cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
descmiss_req = &descmiss_priv_req->request;
/* driver can't touch pending request */
if (descmiss_priv_req->flags & REQUEST_PENDING)
break;
chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
request->status = descmiss_req->status;
__cdns3_descmiss_copy_data(request, descmiss_req);
list_del_init(&descmiss_priv_req->list);
kfree(descmiss_req->buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
--priv_ep->wa2_counter;
if (!chunk_end)
break;
}
}
static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
priv_req->flags & REQUEST_INTERNAL) {
struct usb_request *req;
req = cdns3_next_request(&priv_ep->deferred_req_list);
priv_ep->descmis_req = NULL;
if (!req)
return NULL;
/* unmap the gadget request before copying data */
usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
priv_ep->dir);
cdns3_wa2_descmiss_copy_data(priv_ep, req);
if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
req->length != req->actual) {
/* wait for next part of transfer */
/* re-map the gadget request buffer*/
usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
usb_endpoint_dir_in(priv_ep->endpoint.desc));
return NULL;
}
if (req->status == -EINPROGRESS)
req->status = 0;
list_del_init(&req->list);
cdns3_start_all_request(priv_dev, priv_ep);
return req;
}
return &priv_req->request;
}
static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
int deferred = 0;
/*
* If a transfer was queued before DESCMISS appeared, then we
* can disable handling of the DESCMISS interrupt. The driver assumes
* that it can disable the special treatment for this endpoint.
*/
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
u32 reg;
cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
reg = readl(&priv_dev->regs->ep_sts_en);
reg &= ~EP_STS_EN_DESCMISEN;
trace_cdns3_wa2(priv_ep, "workaround disabled\n");
writel(reg, &priv_dev->regs->ep_sts_en);
}
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
u8 pending_empty = list_empty(&priv_ep->pending_req_list);
u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);
/*
* DESCMISS transfer has been finished, so data will be
* directly copied from internal allocated usb_request
* objects.
*/
if (pending_empty && !descmiss_empty &&
!(priv_req->flags & REQUEST_INTERNAL)) {
cdns3_wa2_descmiss_copy_data(priv_ep,
&priv_req->request);
trace_cdns3_wa2(priv_ep, "get internal stored data");
list_add_tail(&priv_req->request.list,
&priv_ep->pending_req_list);
cdns3_gadget_giveback(priv_ep, priv_req,
priv_req->request.status);
/*
* The driver intentionally returns a positive value as a
* success indication. It informs the caller that the
* transfer has already finished.
*/
return EINPROGRESS;
}
/*
* The driver will wait for completion of the DESCMISS
* transfer before starting a new, non-DESCMISS transfer.
*/
if (!pending_empty && !descmiss_empty) {
trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
deferred = 1;
}
if (priv_req->flags & REQUEST_INTERNAL)
list_add_tail(&priv_req->list,
&priv_ep->wa2_descmiss_req_list);
}
return deferred;
}
static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
u8 chain;
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
trace_cdns3_wa2(priv_ep, "removes eldest request");
kfree(priv_req->request.buf);
list_del_init(&priv_req->list);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
--priv_ep->wa2_counter;
if (!chain)
break;
}
}
/**
* cdns3_wa2_descmissing_packet - handles descriptor missing event.
* @priv_ep: extended gadget object
*
* This function is used only for WA2. For more information see Work around 2
* description.
*/
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
struct usb_request *request;
u8 pending_empty = list_empty(&priv_ep->pending_req_list);
/* check for pending transfer */
if (!pending_empty) {
trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
return;
}
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
}
trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
cdns3_wa2_remove_old_request(priv_ep);
}
request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
GFP_ATOMIC);
if (!request)
goto err;
priv_req = to_cdns3_request(request);
priv_req->flags |= REQUEST_INTERNAL;
/* If this field is still assigned, it indicates that the transfer
* related to this request has not finished yet. In this case the
* driver simply allocates the next request and sets the
* REQUEST_INTERNAL_CH flag on the previous one. It indicates that
* the current request is part of the previous one.
*/
if (priv_ep->descmis_req)
priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
GFP_ATOMIC);
priv_ep->wa2_counter++;
if (!priv_req->request.buf) {
cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
goto err;
}
priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
priv_ep->descmis_req = priv_req;
__cdns3_gadget_ep_queue(&priv_ep->endpoint,
&priv_ep->descmis_req->request,
GFP_ATOMIC);
return;
err:
dev_err(priv_ep->cdns3_dev->dev,
"Failed: No sufficient memory for DESCMIS\n");
}
static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
if (tdl) {
u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
&priv_dev->regs->ep_cmd);
}
}
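/*
 * Worked example (illustrative): the TDL field behaves as a modular
 * counter, so clearing it means adding its complement. If the current
 * tdl is 3 and, say, EP_CMD_TDL_MAX were 127, the reset value would be
 * 127 + 1 - 3 = 125; adding 125 wraps the counter back to zero.
 */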
static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
u32 ep_sts_reg;
/* select EP0-out */
cdns3_select_ep(priv_dev, 0);
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
u8 pending_empty = list_empty(&outq_ep->pending_req_list);
if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
(outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
!pending_empty) {
} else {
u32 ep_sts_en_reg;
u32 ep_cmd_reg;
cdns3_select_ep(priv_dev, outq_ep->num |
outq_ep->dir);
ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
outq_ep->flags |= EP_TDLCHK_EN;
cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
EP_CFG_TDL_CHK);
cdns3_wa2_enable_detection(priv_dev, outq_ep,
ep_sts_en_reg);
writel(ep_sts_en_reg,
&priv_dev->regs->ep_sts_en);
/* reset tdl value to zero */
cdns3_wa2_reset_tdl(priv_dev);
/*
* Memory barrier - Reset tdl before ringing the
* doorbell.
*/
wmb();
if (EP_CMD_DRDY & ep_cmd_reg) {
trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
} else {
trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
/*
* ring doorbell to generate DESCMIS irq
*/
writel(EP_CMD_DRDY,
&priv_dev->regs->ep_cmd);
}
}
}
}
}
/**
* cdns3_gadget_giveback - call struct usb_request's ->complete callback
* @priv_ep: The endpoint to which the request belongs
* @priv_req: The request we're giving back
* @status: completion code for the request
*
* Must be called with controller's lock held and interrupts disabled. This
* function will unmap @priv_req and call its ->complete() callback to notify upper
* layers that it has completed.
*/
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req,
int status)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct usb_request *request = &priv_req->request;
list_del_init(&request->list);
if (request->status == -EINPROGRESS)
request->status = status;
if (likely(!(priv_req->flags & REQUEST_UNALIGNED)))
usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
priv_ep->dir);
if ((priv_req->flags & REQUEST_UNALIGNED) &&
priv_ep->dir == USB_DIR_OUT && !request->status) {
/* Make DMA buffer CPU accessible */
dma_sync_single_for_cpu(priv_dev->sysdev,
priv_req->aligned_buf->dma,
request->actual,
priv_req->aligned_buf->dir);
memcpy(request->buf, priv_req->aligned_buf->buf,
request->actual);
}
priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
/* All TRBs have finished, clear the counter */
priv_req->finished_trb = 0;
trace_cdns3_gadget_giveback(priv_req);
if (priv_dev->dev_ver < DEV_VER_V2) {
request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
priv_req);
if (!request)
return;
}
if (request->complete) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
spin_lock(&priv_dev->lock);
}
if (request->buf == priv_dev->zlp_buf)
cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}
static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
/* Work around for a stale data address in the TRB */
if (priv_ep->wa1_set) {
trace_cdns3_wa1(priv_ep, "restore cycle bit");
priv_ep->wa1_set = 0;
priv_ep->wa1_trb_index = 0xFFFF;
if (priv_ep->wa1_cycle_bit) {
priv_ep->wa1_trb->control =
priv_ep->wa1_trb->control | cpu_to_le32(0x1);
} else {
priv_ep->wa1_trb->control =
priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
}
}
}
static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
aligned_buf_wq);
struct cdns3_aligned_buf *buf, *tmp;
unsigned long flags;
spin_lock_irqsave(&priv_dev->lock, flags);
list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
if (!buf->in_use) {
list_del(&buf->list);
/*
* Re-enable interrupts to free DMA capable memory.
* The driver can't free this memory while
* interrupts are disabled.
*/
spin_unlock_irqrestore(&priv_dev->lock, flags);
dma_free_noncoherent(priv_dev->sysdev, buf->size,
buf->buf, buf->dma, buf->dir);
kfree(buf);
spin_lock_irqsave(&priv_dev->lock, flags);
}
}
spin_unlock_irqrestore(&priv_dev->lock, flags);
}
static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_aligned_buf *buf;
/* check if the buffer is aligned to 8 bytes */
if (!((uintptr_t)priv_req->request.buf & 0x7))
return 0;
buf = priv_req->aligned_buf;
if (!buf || priv_req->request.length > buf->size) {
buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
if (!buf)
return -ENOMEM;
buf->size = priv_req->request.length;
buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
buf->size,
&buf->dma,
buf->dir,
GFP_ATOMIC);
if (!buf->buf) {
kfree(buf);
return -ENOMEM;
}
if (priv_req->aligned_buf) {
trace_cdns3_free_aligned_request(priv_req);
priv_req->aligned_buf->in_use = 0;
queue_work(system_freezable_wq,
&priv_dev->aligned_buf_wq);
}
buf->in_use = 1;
priv_req->aligned_buf = buf;
list_add_tail(&buf->list,
&priv_dev->aligned_buf_list);
}
if (priv_ep->dir == USB_DIR_IN) {
/* Make DMA buffer CPU accessible */
dma_sync_single_for_cpu(priv_dev->sysdev,
buf->dma, buf->size, buf->dir);
memcpy(buf->buf, priv_req->request.buf,
priv_req->request.length);
}
/* Transfer DMA buffer ownership back to device */
dma_sync_single_for_device(priv_dev->sysdev,
buf->dma, buf->size, buf->dir);
priv_req->flags |= REQUEST_UNALIGNED;
trace_cdns3_prepare_aligned_request(priv_req);
return 0;
}
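/*
 * Alignment check example (illustrative): a request buffer whose address
 * ends in 0x4 has (addr & 0x7) == 4, so a bounce buffer is used; an
 * address ending in 0x8 passes the check and is mapped directly. For IN
 * endpoints the payload is copied into the bounce buffer before DMA
 * ownership is handed back to the device.
 */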
static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
struct cdns3_trb *trb)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
if (!priv_ep->wa1_set) {
u32 doorbell;
doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
if (doorbell) {
priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
priv_ep->wa1_set = 1;
priv_ep->wa1_trb = trb;
priv_ep->wa1_trb_index = priv_ep->enqueue;
trace_cdns3_wa1(priv_ep, "set guard");
return 0;
}
}
return 1;
}
static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
int dma_index;
u32 doorbell;
doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
if (!doorbell || dma_index != priv_ep->wa1_trb_index)
cdns3_wa1_restore_cycle_bit(priv_ep);
}
static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
struct usb_request *request)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
struct cdns3_trb *trb;
dma_addr_t trb_dma;
int address;
u32 control;
u32 length;
u32 tdl;
unsigned int sg_idx = priv_ep->stream_sg_idx;
priv_req = to_cdns3_request(request);
address = priv_ep->endpoint.desc->bEndpointAddress;
priv_ep->flags |= EP_PENDING_REQUEST;
/* use the 8-byte-aligned bounce buffer for unaligned requests */
if (priv_req->flags & REQUEST_UNALIGNED)
trb_dma = priv_req->aligned_buf->dma;
else
trb_dma = request->dma;
/* For stream capable endpoints the driver uses only a single TD. */
trb = priv_ep->trb_pool + priv_ep->enqueue;
priv_req->start_trb = priv_ep->enqueue;
priv_req->end_trb = priv_req->start_trb;
priv_req->trb = trb;
cdns3_select_ep(priv_ep->cdns3_dev, address);
control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
if (!request->num_sgs) {
trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
length = request->length;
} else {
trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
length = request->sg[sg_idx].length;
}
tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));
/*
* For the DEV_VER_V2 controller version we have enabled
* USB_CONF2_EN_TDL_TRB in the DMULT configuration.
* This enables TDL calculation based on the TRB, hence TDL is set in the TRB.
*/
if (priv_dev->dev_ver >= DEV_VER_V2) {
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
}
priv_req->flags |= REQUEST_PENDING;
trb->control = cpu_to_le32(control);
trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
/*
* Memory barrier - Cycle Bit must be set before trb->length and
* trb->buffer fields.
*/
wmb();
/* always first element */
writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
&priv_dev->regs->ep_traddr);
if (!(priv_ep->flags & EP_STALLED)) {
trace_cdns3_ring(priv_ep);
/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
priv_ep->prime_flag = false;
/*
* For controller version DEV_VER_V2 the TDL
* calculation is based on the TRB.
*/
if (priv_dev->dev_ver < DEV_VER_V2)
writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
&priv_dev->regs->ep_cmd);
else if (priv_dev->dev_ver > DEV_VER_V2)
writel(tdl, &priv_dev->regs->ep_tdl);
priv_ep->last_stream_id = priv_req->request.stream_id;
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
trace_cdns3_doorbell_epx(priv_ep->name,
readl(&priv_dev->regs->ep_traddr));
}
/* WORKAROUND for transition to L0 */
__cdns3_gadget_wakeup(priv_dev);
return 0;
}
static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
if (priv_dev->dev_ver < DEV_VER_V3)
return;
if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
}
}
/**
* cdns3_ep_run_transfer - start a transfer on non-default endpoint hardware
* @priv_ep: endpoint object
* @request: request object
*
* Returns zero on success or negative value on failure
*/
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
struct usb_request *request)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
struct cdns3_trb *trb;
struct cdns3_trb *link_trb = NULL;
dma_addr_t trb_dma;
u32 togle_pcs = 1;
int sg_iter = 0;
int num_trb;
int address;
u32 control;
int pcs;
u16 total_tdl = 0;
struct scatterlist *s = NULL;
bool sg_supported = !!(request->num_mapped_sgs);
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval;
else
num_trb = sg_supported ? request->num_mapped_sgs : 1;
if (num_trb > priv_ep->free_trbs) {
priv_ep->flags |= EP_RING_FULL;
return -ENOBUFS;
}
priv_req = to_cdns3_request(request);
address = priv_ep->endpoint.desc->bEndpointAddress;
priv_ep->flags |= EP_PENDING_REQUEST;
/* use the 8-byte-aligned bounce buffer for unaligned requests */
if (priv_req->flags & REQUEST_UNALIGNED)
trb_dma = priv_req->aligned_buf->dma;
else
trb_dma = request->dma;
trb = priv_ep->trb_pool + priv_ep->enqueue;
priv_req->start_trb = priv_ep->enqueue;
priv_req->trb = trb;
cdns3_select_ep(priv_ep->cdns3_dev, address);
/* prepare ring */
if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
int doorbell, dma_index;
u32 ch_bit = 0;
doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
/* The driver can't update the LINK TRB while it is being processed. */
if (doorbell && dma_index == priv_ep->num_trbs - 1) {
priv_ep->flags |= EP_DEFERRED_DRDY;
return -ENOBUFS;
}
/* update the Cycle bit in the Link TRB before starting DMA */
link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
/*
* For a TR of size 2, enabling TRB_CHAIN for epXin causes
* the DMA to get stuck at the LINK TRB.
* On the other hand, removing TRB_CHAIN for longer TRs for
* epXout causes the DMA to get stuck after handling the LINK TRB.
* To eliminate this strange behavior the driver sets the
* TRB_CHAIN bit only for TR size > 2.
*/
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
TRBS_PER_SEGMENT > 2)
ch_bit = TRB_CHAIN;
link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
}
if (priv_dev->dev_ver <= DEV_VER_V2)
togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
if (sg_supported)
s = request->sg;
/* set an incorrect Cycle Bit for the first TRB */
control = priv_ep->pcs ? 0 : TRB_CYCLE;
trb->length = 0;
if (priv_dev->dev_ver >= DEV_VER_V2) {
u16 td_size;
td_size = DIV_ROUND_UP(request->length,
priv_ep->endpoint.maxpacket);
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
else
control |= TRB_TDL_HS_SIZE(td_size);
}
do {
u32 length;
/* fill TRB */
control |= TRB_TYPE(TRB_NORMAL);
if (sg_supported) {
trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
length = sg_dma_len(s);
} else {
trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
length = request->length;
}
if (priv_ep->flags & EP_TDLCHK_EN)
total_tdl += DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
TRB_LEN(length));
pcs = priv_ep->pcs ? TRB_CYCLE : 0;
/*
* the first TRB should be prepared last to avoid processing
* the transfer too early
*/
if (sg_iter != 0)
control |= pcs;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
control |= TRB_IOC | TRB_ISP;
} else {
/* for last element in TD or in SG list */
if (sg_iter == (num_trb - 1) && sg_iter != 0)
control |= pcs | TRB_IOC | TRB_ISP;
}
if (sg_iter)
trb->control = cpu_to_le32(control);
else
priv_req->trb->control = cpu_to_le32(control);
if (sg_supported) {
trb->control |= cpu_to_le32(TRB_ISP);
/* Don't set chain bit for last TRB */
if (sg_iter < num_trb - 1)
trb->control |= cpu_to_le32(TRB_CHAIN);
s = sg_next(s);
}
control = 0;
++sg_iter;
priv_req->end_trb = priv_ep->enqueue;
cdns3_ep_inc_enq(priv_ep);
trb = priv_ep->trb_pool + priv_ep->enqueue;
trb->length = 0;
} while (sg_iter < num_trb);
trb = priv_req->trb;
priv_req->flags |= REQUEST_PENDING;
priv_req->num_of_trb = num_trb;
if (sg_iter == 1)
trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
if (priv_dev->dev_ver < DEV_VER_V2 &&
(priv_ep->flags & EP_TDLCHK_EN)) {
u16 tdl = total_tdl;
u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
if (tdl > EP_CMD_TDL_MAX) {
tdl = EP_CMD_TDL_MAX;
priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
}
if (old_tdl < tdl) {
tdl -= old_tdl;
writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
&priv_dev->regs->ep_cmd);
}
}
/*
* Memory barrier - the cycle bit must be set before the other fields in the TRB.
*/
wmb();
/* give the TD to the consumer */
if (togle_pcs)
trb->control = trb->control ^ cpu_to_le32(1);
if (priv_dev->dev_ver <= DEV_VER_V2)
cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
if (num_trb > 1) {
int i = 0;
while (i < num_trb) {
trace_cdns3_prepare_trb(priv_ep, trb + i);
if (trb + i == link_trb) {
trb = priv_ep->trb_pool;
num_trb = num_trb - i;
i = 0;
} else {
i++;
}
}
} else {
trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
}
/*
* Memory barrier - Cycle Bit must be set before trb->length and
* trb->buffer fields.
*/
wmb();
/*
* For DMULT mode we can set address to transfer ring only once after
* enabling endpoint.
*/
if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
/*
* Until software is ready to handle the OUT transfer, the ISO OUT
* endpoint should be disabled (EP_CFG.ENABLE = 0).
* EP_CFG_ENABLE must be set before updating ep_traddr.
*/
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
!(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
EP_CFG_ENABLE);
}
writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
priv_req->start_trb * TRB_SIZE),
&priv_dev->regs->ep_traddr);
priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
}
if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
trace_cdns3_ring(priv_ep);
/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
cdns3_rearm_drdy_if_needed(priv_ep);
trace_cdns3_doorbell_epx(priv_ep->name,
readl(&priv_dev->regs->ep_traddr));
}
/* WORKAROUND for transition to L0 */
__cdns3_gadget_wakeup(priv_dev);
return 0;
}
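/*
 * Sequence recap (illustrative): TRBs are filled with an inverted cycle
 * bit on the first TRB, the link TRB is refreshed when the new TD wraps,
 * the total TDL is programmed for pre-V2 TDL-check endpoints, and only
 * then is the first TRB's cycle bit flipped back and DRDY rung, so the
 * DMA never observes a half-built TD.
 */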
void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
struct cdns3_endpoint *priv_ep;
struct usb_ep *ep;
if (priv_dev->hw_configured_flag)
return;
writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
cdns3_set_register_bit(&priv_dev->regs->usb_conf,
USB_CONF_U1EN | USB_CONF_U2EN);
priv_dev->hw_configured_flag = 1;
list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
if (ep->enabled) {
priv_ep = ep_to_cdns3_ep(ep);
cdns3_start_all_request(priv_dev, priv_ep);
}
}
cdns3_allow_enable_l1(priv_dev, 1);
}
/**
* cdns3_trb_handled - check whether trb has been handled by DMA
*
* @priv_ep: extended endpoint object.
* @priv_req: request object for checking
*
* Endpoint must be selected before invoking this function.
*
* Returns false if request has not been handled by DMA, else returns true.
*
* SR - start ring
* ER - end ring
* DQ = priv_ep->dequeue - dequeue position
* EQ = priv_ep->enqueue - enqueue position
* ST = priv_req->start_trb - index of first TRB in transfer ring
* ET = priv_req->end_trb - index of last TRB in transfer ring
* CI = current_index - index of the TRB processed by DMA.
*
* As a first step, we check whether the TRB lies between ST and ET.
* Then, we check whether the cycle bit for index priv_ep->dequeue
* is correct.
*
* some rules:
* 1. priv_ep->dequeue never equals current_index.
* 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
* 3. exception: priv_ep->enqueue == priv_ep->dequeue
*    and priv_ep->free_trbs is zero.
*    This case indicates that the TR is full.
*
* In the two cases below, the request has been handled.
* Case 1 - priv_ep->dequeue < current_index
* SR ... EQ ... DQ ... CI ... ER
* SR ... DQ ... CI ... EQ ... ER
*
* Case 2 - priv_ep->dequeue > current_index
* This situation takes place when CI goes through the LINK TRB at the
* end of the transfer ring.
* SR ... CI ... EQ ... DQ ... ER
*/
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_trb *trb;
int current_index = 0;
int handled = 0;
int doorbell;
current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
/* current trb doesn't belong to this request */
if (priv_req->start_trb < priv_req->end_trb) {
if (priv_ep->dequeue > priv_req->end_trb)
goto finish;
if (priv_ep->dequeue < priv_req->start_trb)
goto finish;
}
if ((priv_req->start_trb > priv_req->end_trb) &&
(priv_ep->dequeue > priv_req->end_trb) &&
(priv_ep->dequeue < priv_req->start_trb))
goto finish;
if ((priv_req->start_trb == priv_req->end_trb) &&
(priv_ep->dequeue != priv_req->end_trb))
goto finish;
trb = &priv_ep->trb_pool[priv_ep->dequeue];
if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
goto finish;
if (doorbell == 1 && current_index == priv_ep->dequeue)
goto finish;
/* The corner case for TRBS_PER_SEGMENT equal to 2. */
if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
handled = 1;
goto finish;
}
if (priv_ep->enqueue == priv_ep->dequeue &&
priv_ep->free_trbs == 0) {
handled = 1;
} else if (priv_ep->dequeue < current_index) {
if ((current_index == (priv_ep->num_trbs - 1)) &&
!priv_ep->dequeue)
goto finish;
handled = 1;
} else if (priv_ep->dequeue > current_index) {
handled = 1;
}
finish:
trace_cdns3_request_handled(priv_req, current_index, handled);
return handled;
}
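/*
 * Worked example for Case 1 (illustrative): with num_trbs = 8,
 * start_trb = 2, end_trb = 4, dequeue = 3 and current_index = 5, the
 * dequeue position lies inside the request, its cycle bit matches ccs,
 * and dequeue < current_index, so the TRB is reported as handled.
 */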
static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
struct usb_request *request;
struct cdns3_trb *trb;
bool request_handled = false;
bool transfer_end = false;
while (!list_empty(&priv_ep->pending_req_list)) {
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_req = to_cdns3_request(request);
trb = priv_ep->trb_pool + priv_ep->dequeue;
/* The TRB was changed to a link TRB, and the request was handled at ep_dequeue */
while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
trace_cdns3_complete_trb(priv_ep, trb);
cdns3_ep_inc_deq(priv_ep);
trb = priv_ep->trb_pool + priv_ep->dequeue;
}
if (!request->stream_id) {
/* Re-select the endpoint. It could have been changed by another
* CPU while handling usb_gadget_giveback_request.
*/
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
while (cdns3_trb_handled(priv_ep, priv_req)) {
priv_req->finished_trb++;
if (priv_req->finished_trb >= priv_req->num_of_trb)
request_handled = true;
trb = priv_ep->trb_pool + priv_ep->dequeue;
trace_cdns3_complete_trb(priv_ep, trb);
if (!transfer_end)
request->actual +=
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
le32_to_cpu(trb->control) & TRB_SMM &&
le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
}
if (request_handled) {
cdns3_gadget_giveback(priv_ep, priv_req, 0);
request_handled = false;
transfer_end = false;
} else {
goto prepare_next_td;
}
if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
TRBS_PER_SEGMENT == 2)
break;
} else {
/* Re-select the endpoint. It could have been changed by another
* CPU while handling usb_gadget_giveback_request.
*/
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
trb = priv_ep->trb_pool;
trace_cdns3_complete_trb(priv_ep, trb);
if (trb != priv_req->trb)
dev_warn(priv_dev->dev,
"request_trb=0x%p, queue_trb=0x%p\n",
priv_req->trb, trb);
request->actual += TRB_LEN(le32_to_cpu(trb->length));
if (!request->num_sgs ||
(request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
priv_ep->stream_sg_idx = 0;
cdns3_gadget_giveback(priv_ep, priv_req, 0);
} else {
priv_ep->stream_sg_idx++;
cdns3_ep_run_stream_transfer(priv_ep, request);
}
break;
}
}
priv_ep->flags &= ~EP_PENDING_REQUEST;
prepare_next_td:
if (!(priv_ep->flags & EP_STALLED) &&
!(priv_ep->flags & EP_STALL_PENDING))
cdns3_start_all_request(priv_dev, priv_ep);
}
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
cdns3_wa1_restore_cycle_bit(priv_ep);
if (rearm) {
trace_cdns3_ring(priv_ep);
/* Cycle Bit must be updated before arming DMA. */
wmb();
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
__cdns3_gadget_wakeup(priv_dev);
trace_cdns3_doorbell_epx(priv_ep->name,
readl(&priv_dev->regs->ep_traddr));
}
}
static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
u16 tdl = priv_ep->pending_tdl;
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
if (tdl > EP_CMD_TDL_MAX) {
tdl = EP_CMD_TDL_MAX;
priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
} else {
priv_ep->pending_tdl = 0;
}
writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}
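/*
 * Worked example (illustrative): if, say, EP_CMD_TDL_MAX were 127 and
 * pending_tdl were 200, the first pass would program 127 and leave 73
 * pending; the next pass would write the remaining 73 and clear
 * pending_tdl.
 */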
/**
* cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
* @priv_ep: endpoint object
*
* Returns 0
*/
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 ep_sts_reg;
struct usb_request *deferred_request;
struct usb_request *pending_request;
u32 tdl = 0;
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
trace_cdns3_epx_irq(priv_dev, priv_ep);
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
writel(ep_sts_reg, &priv_dev->regs->ep_sts);
if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
tdl = cdns3_get_tdl(priv_dev);
/*
* Continue the previous transfer:
* There is some racing between ERDY and PRIME. The device sends
* ERDY and almost at the same time the host sends PRIME. This
* causes the host to ignore the ERDY packet, so the driver has
* to send it again.
*/
if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
EP_STS_HOSTPP(ep_sts_reg))) {
writel(EP_CMD_ERDY |
EP_CMD_ERDY_SID(priv_ep->last_stream_id),
&priv_dev->regs->ep_cmd);
ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
} else {
priv_ep->prime_flag = true;
pending_request = cdns3_next_request(&priv_ep->pending_req_list);
deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
if (deferred_request && !pending_request) {
cdns3_start_all_request(priv_dev, priv_ep);
}
}
}
if (ep_sts_reg & EP_STS_TRBERR) {
if (priv_ep->flags & EP_STALL_PENDING &&
!(ep_sts_reg & EP_STS_DESCMIS &&
priv_dev->dev_ver < DEV_VER_V2)) {
cdns3_ep_stall_flush(priv_ep);
}
/*
* For isochronous transfers the driver completes the request on
* IOC or on TRBERR. IOC appears only when the device receives an
* OUT data packet. If the host disables the stream or loses some
* packets, then the only way to finish all queued transfers is to
* do it on the TRBERR event.
*/
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
!priv_ep->wa1_set) {
if (!priv_ep->dir) {
u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
!(priv_ep->flags & EP_STALL_PENDING)) {
if (priv_ep->flags & EP_DEFERRED_DRDY) {
priv_ep->flags &= ~EP_DEFERRED_DRDY;
cdns3_start_all_request(priv_dev, priv_ep);
} else {
cdns3_rearm_transfer(priv_ep,
priv_ep->wa1_set);
}
}
}
if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
(ep_sts_reg & EP_STS_IOT)) {
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
if (ep_sts_reg & EP_STS_ISP)
priv_ep->flags |= EP_QUIRK_END_TRANSFER;
else
priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
}
if (!priv_ep->use_streams) {
if ((ep_sts_reg & EP_STS_IOC) ||
(ep_sts_reg & EP_STS_ISP)) {
cdns3_transfer_completed(priv_dev, priv_ep);
} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
priv_ep->pending_tdl) {
/* handle IOT with pending tdl */
cdns3_reprogram_tdl(priv_ep);
}
} else if (priv_ep->dir == USB_DIR_OUT) {
priv_ep->ep_sts_pending |= ep_sts_reg;
} else if (ep_sts_reg & EP_STS_IOT) {
cdns3_transfer_completed(priv_dev, priv_ep);
}
}
/*
* The MD_EXIT interrupt is set when a stream capable endpoint exits
* the MOVE DATA state of the Bulk IN/OUT stream protocol state machine.
*/
if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
(priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
priv_ep->ep_sts_pending = 0;
cdns3_transfer_completed(priv_dev, priv_ep);
}
/*
* WA2: this condition should only be met when
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN is set.
* In other cases this interrupt will be disabled.
*/
if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
!(priv_ep->flags & EP_STALLED))
cdns3_wa2_descmissing_packet(priv_ep);
return 0;
}
static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}
/**
* cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
* @priv_dev: extended gadget object
* @usb_ists: bitmap representation of device's reported interrupts
* (usb_ists register value)
*/
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
u32 usb_ists)
__must_hold(&priv_dev->lock)
{
int speed = 0;
trace_cdns3_usb_irq(priv_dev, usb_ists);
if (usb_ists & USB_ISTS_L1ENTI) {
/*
* WORKAROUND: The CDNS3 controller has an issue with hardware
* resuming from L1. To fix it, if any DMA transfer is pending,
* the driver must start driving the resume signal immediately.
*/
if (readl(&priv_dev->regs->drbl))
__cdns3_gadget_wakeup(priv_dev);
}
/* Connection detected */
if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
speed = cdns3_get_speed(priv_dev);
priv_dev->gadget.speed = speed;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
cdns3_ep0_config(priv_dev);
}
/* Disconnection detected */
if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
spin_unlock(&priv_dev->lock);
cdns3_disconnect_gadget(priv_dev);
spin_lock(&priv_dev->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
cdns3_hw_reset_eps_config(priv_dev);
}
if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
if (priv_dev->gadget_driver &&
priv_dev->gadget_driver->suspend) {
spin_unlock(&priv_dev->lock);
priv_dev->gadget_driver->suspend(&priv_dev->gadget);
spin_lock(&priv_dev->lock);
}
}
if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
if (priv_dev->gadget_driver &&
priv_dev->gadget_driver->resume) {
spin_unlock(&priv_dev->lock);
priv_dev->gadget_driver->resume(&priv_dev->gadget);
spin_lock(&priv_dev->lock);
}
}
/* reset */
if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
if (priv_dev->gadget_driver) {
spin_unlock(&priv_dev->lock);
usb_gadget_udc_reset(&priv_dev->gadget,
priv_dev->gadget_driver);
spin_lock(&priv_dev->lock);
/* read again to check the actual speed */
speed = cdns3_get_speed(priv_dev);
priv_dev->gadget.speed = speed;
cdns3_hw_reset_eps_config(priv_dev);
cdns3_ep0_config(priv_dev);
}
}
}
/**
* cdns3_device_irq_handler - interrupt handler for device part of controller
*
* @irq: irq number for cdns3 core device
* @data: structure of cdns3
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
struct cdns3_device *priv_dev = data;
struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
irqreturn_t ret = IRQ_NONE;
u32 reg;
if (cdns->in_lpm)
return ret;
/* check USB device interrupt */
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
/* After masking interrupts, new interrupts won't be
* reported in usb_ists/ep_ists. In order not to lose some
* of them, the driver disables only the detected interrupts.
* They will be re-enabled ASAP after clearing the interrupt
* source. This unusual behavior only applies to the
* usb_ists register.
*/
reg = ~reg & readl(&priv_dev->regs->usb_ien);
/* mask deferred interrupt. */
writel(reg, &priv_dev->regs->usb_ien);
ret = IRQ_WAKE_THREAD;
}
/* check endpoint interrupt */
reg = readl(&priv_dev->regs->ep_ists);
if (reg) {
writel(0, &priv_dev->regs->ep_ien);
ret = IRQ_WAKE_THREAD;
}
return ret;
}
/**
* cdns3_device_thread_irq_handler - interrupt handler for device part
* of controller
*
* @irq: irq number for cdns3 core device
* @data: structure of cdns3
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
unsigned int bit;
unsigned long reg;
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
writel(reg, &priv_dev->regs->usb_ists);
writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
cdns3_check_usb_interrupt_proceed(priv_dev, reg);
ret = IRQ_HANDLED;
}
reg = readl(&priv_dev->regs->ep_ists);
/* handle default endpoint OUT */
if (reg & EP_ISTS_EP_OUT0) {
cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
ret = IRQ_HANDLED;
}
/* handle default endpoint IN */
if (reg & EP_ISTS_EP_IN0) {
cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
ret = IRQ_HANDLED;
}
/* check for interrupts from non-default endpoints; if none, exit */
reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
if (!reg)
goto irqend;
for_each_set_bit(bit, ®,
sizeof(u32) * BITS_PER_BYTE) {
cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
ret = IRQ_HANDLED;
}
if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
cdns3_wa2_check_outq_status(priv_dev);
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
/**
* cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
*
* The real reservation will occur during write to EP_CFG register,
* this function is used to check if the 'size' reservation is allowed.
*
* @priv_dev: extended gadget object
* @size: the size (KB) for EP would like to allocate
* @is_in: endpoint direction
*
* Return 0 if the required size can be met, or a negative value on failure
*/
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
int size, int is_in)
{
int remained;
/* 2 KB are reserved for EP0 */
remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
if (is_in) {
if (remained < size)
return -EPERM;
priv_dev->onchip_used_size += size;
} else {
int required;
/*
* All OUT EPs share the same chunk of on-chip memory, so
* the driver checks if it has already assigned enough buffers.
*/
if (priv_dev->out_mem_is_allocated >= size)
return 0;
required = size - priv_dev->out_mem_is_allocated;
if (required > remained)
return -EPERM;
priv_dev->out_mem_is_allocated += required;
priv_dev->onchip_used_size += required;
}
return 0;
}
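/*
 * Accounting example (a sketch; buffer totals are hardware specific):
 * with onchip_buffers = 16 KB and 2 KB reserved for EP0, an IN endpoint
 * asking for 4 KB leaves 10 KB. OUT endpoints draw from one shared pool,
 * so a second OUT endpoint asking for 3 KB after 4 KB were already
 * assigned costs nothing extra.
 */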
static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
if (priv_dev->dev_ver <= DEV_VER_V2)
writel(USB_CONF_DMULT, &regs->usb_conf);
if (priv_dev->dev_ver == DEV_VER_V2)
writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
u32 mask;
if (priv_ep->dir)
mask = BIT(priv_ep->num + 16);
else
mask = BIT(priv_ep->num);
if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->tdl_beh, mask);
cdns3_set_register_bit(&regs->tdl_beh2, mask);
cdns3_set_register_bit(&regs->dma_adv_td, mask);
}
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->dtrans, mask);
}
}
/**
* cdns3_ep_config - Configure hardware endpoint
* @priv_ep: extended endpoint object
* @enable: set EP_CFG_ENABLE bit in ep_cfg register.
*/
int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
{
bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
u32 max_packet_size = 0;
u8 maxburst = 0;
u32 ep_cfg = 0;
u8 buffering;
u8 mult = 0;
int ret;
buffering = priv_dev->ep_buf_size - 1;
cdns3_configure_dmult(priv_dev, priv_ep);
switch (priv_ep->type) {
case USB_ENDPOINT_XFER_INT:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
case USB_ENDPOINT_XFER_BULK:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
default:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
mult = priv_dev->ep_iso_burst - 1;
buffering = mult + 1;
}
switch (priv_dev->gadget.speed) {
case USB_SPEED_FULL:
max_packet_size = is_iso_ep ? 1023 : 64;
break;
case USB_SPEED_HIGH:
max_packet_size = is_iso_ep ? 1024 : 512;
break;
case USB_SPEED_SUPER:
/* This is a limitation assumed by the driver. */
mult = 0;
max_packet_size = 1024;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
maxburst = priv_dev->ep_iso_burst - 1;
buffering = (mult + 1) *
(maxburst + 1);
if (priv_ep->interval > 1)
buffering++;
} else {
maxburst = priv_dev->ep_buf_size - 1;
}
break;
default:
/* all other speeds are not supported */
return -EINVAL;
}
if (max_packet_size == 1024)
priv_ep->trb_burst_size = 128;
else if (max_packet_size >= 512)
priv_ep->trb_burst_size = 64;
else
priv_ep->trb_burst_size = 16;
/*
* In versions preceding DEV_VER_V2, for example iMX8QM, there are bugs
* in the DMA. These bugs occur when trb_burst_size exceeds 16 and the
* address is not aligned to 128 bytes (which is a product of the 64-bit
* AXI width and the AXI maximum burst length of 16, or 0xF + 1, set in
* dma_axi_ctrl0[3:0]). This results in data corruption when the transfer
* crosses a 4K boundary. The corruption specifically occurs from the
* position (4K - (address & 0x7F)) to 4K.
*
* So force trb_burst_size to 16 on such platforms.
*/
if (priv_dev->dev_ver < DEV_VER_V2)
priv_ep->trb_burst_size = 16;
mult = min_t(u8, mult, EP_CFG_MULT_MAX);
buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
/* onchip buffer is only allocated before configuration */
if (!priv_dev->hw_configured_flag) {
ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
!!priv_ep->dir);
if (ret) {
dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
return ret;
}
}
if (enable)
ep_cfg |= EP_CFG_ENABLE;
if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
if (priv_dev->dev_ver >= DEV_VER_V3) {
u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
/*
* Stream capable endpoints are handled by using ep_tdl
* register. Other endpoints use TDL from TRB feature.
*/
cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
mask);
}
/* Enable Stream Bit TDL chk and SID chk */
ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
}
ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
EP_CFG_MULT(mult) |
EP_CFG_BUFFERING(buffering) |
EP_CFG_MAXBURST(maxburst);
cdns3_select_ep(priv_dev, bEndpointAddress);
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags |= EP_CONFIGURED;
dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
priv_ep->name, ep_cfg);
return 0;
}
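/*
 * Worked example (values assumed): a SuperSpeed ISO endpoint with
 * ep_iso_burst = 3 and interval > 1 gets mult = 0, maxburst = 2 and
 * buffering = (0 + 1) * (2 + 1) + 1 = 4, so five 1 KB buffers
 * (buffering + 1) are reserved on-chip before configuration, subject to
 * the EP_CFG_*_MAX clamps above. With max_packet_size = 1024 the TRB
 * burst size is 128, unless the controller predates DEV_VER_V2, where
 * it is forced back to 16.
 */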
/* Find the correct direction for the HW endpoint according to the descriptor */
static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
struct cdns3_endpoint *priv_ep)
{
return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
(priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
}
static struct
cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
struct usb_endpoint_descriptor *desc)
{
struct usb_ep *ep;
struct cdns3_endpoint *priv_ep;
list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
unsigned long num;
int ret;
/* ep name pattern looks like epXin or epXout */
char c[2] = {ep->name[2], '\0'};
ret = kstrtoul(c, 10, &num);
if (ret)
return ERR_PTR(ret);
priv_ep = ep_to_cdns3_ep(ep);
if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
if (!(priv_ep->flags & EP_CLAIMED)) {
priv_ep->num = num;
return priv_ep;
}
}
}
return ERR_PTR(-ENOENT);
}
/*
* The Cadence IP has one limitation: all endpoints must be configured
* (Type & MaxPacketSize) before setting the configuration through the
* hardware register, which means we can't change an endpoint's
* configuration after set_configuration.
*
* This function sets the EP_CLAIMED flag, which is added when the gadget
* driver uses usb_ep_autoconfig to configure a specific endpoint.
* When the UDC driver receives a set_configuration request,
* it goes through all claimed endpoints and configures them
* accordingly.
*
* At usb_ep_ops.enable/disable, we only enable and disable the endpoint
* through the ep_cfg register, which can be changed after
* set_configuration, and do some software bookkeeping accordingly.
*/
static struct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *desc,
struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
struct cdns3_endpoint *priv_ep;
unsigned long flags;
priv_ep = cdns3_find_available_ep(priv_dev, desc);
if (IS_ERR(priv_ep)) {
dev_err(priv_dev->dev, "no available ep\n");
return NULL;
}
dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
spin_lock_irqsave(&priv_dev->lock, flags);
priv_ep->endpoint.desc = desc;
priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
priv_ep->type = usb_endpoint_type(desc);
priv_ep->flags |= EP_CLAIMED;
priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
spin_unlock_irqrestore(&priv_dev->lock, flags);
return &priv_ep->endpoint;
}
/**
* cdns3_gadget_ep_alloc_request - Allocates request
* @ep: endpoint object associated with request
* @gfp_flags: gfp flags
*
* Returns allocated request address, NULL on allocation error
*/
struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
gfp_t gfp_flags)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_request *priv_req;
priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
if (!priv_req)
return NULL;
priv_req->priv_ep = priv_ep;
trace_cdns3_alloc_request(priv_req);
return &priv_req->request;
}
/**
* cdns3_gadget_ep_free_request - Free memory occupied by request
* @ep: endpoint object associated with request
* @request: request to free memory
*/
void cdns3_gadget_ep_free_request(struct usb_ep *ep,
struct usb_request *request)
{
struct cdns3_request *priv_req = to_cdns3_request(request);
if (priv_req->aligned_buf)
priv_req->aligned_buf->in_use = 0;
trace_cdns3_free_request(priv_req);
kfree(priv_req);
}
/**
* cdns3_gadget_ep_enable - Enable endpoint
* @ep: endpoint object
* @desc: endpoint descriptor
*
* Returns 0 on success, error code elsewhere
*/
static int cdns3_gadget_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct cdns3_endpoint *priv_ep;
struct cdns3_device *priv_dev;
const struct usb_ss_ep_comp_descriptor *comp_desc;
u32 reg = EP_STS_EN_TRBERREN;
u32 bEndpointAddress;
unsigned long flags;
int enable = 1;
int ret = 0;
int val;
if (!ep) {
pr_debug("usbss: ep not configured?\n");
return -EINVAL;
}
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
comp_desc = priv_ep->endpoint.comp_desc;
if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
return -EINVAL;
}
if (!desc->wMaxPacketSize) {
dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
return -EINVAL;
}
if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
"%s is already enabled\n", priv_ep->name))
return 0;
spin_lock_irqsave(&priv_dev->lock, flags);
priv_ep->endpoint.desc = desc;
priv_ep->type = usb_endpoint_type(desc);
priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
if (priv_ep->interval > ISO_MAX_INTERVAL &&
priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
dev_err(priv_dev->dev, "Driver is limited to %d period\n",
ISO_MAX_INTERVAL);
ret = -EINVAL;
goto exit;
}
bEndpointAddress = priv_ep->num | priv_ep->dir;
cdns3_select_ep(priv_dev, bEndpointAddress);
/*
* For some versions of the controller, at some point during ISO OUT
* traffic, the DMA reads the Transfer Ring of an EP which has never got
* a doorbell. This issue was detected only in simulation, but the driver
* adds protection against it anyway: the ISO OUT endpoint is enabled
* only just before setting DRBL. This special treatment of ISO OUT
* endpoints is recommended by the controller specification.
*/
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
enable = 0;
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
/*
* Enable stream support (SS mode) related interrupts
* in EP_STS_EN Register
*/
if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
EP_STS_EN_STREAMREN;
priv_ep->use_streams = true;
ret = cdns3_ep_config(priv_ep, enable);
priv_dev->using_streams |= true;
}
} else {
ret = cdns3_ep_config(priv_ep, enable);
}
if (ret)
goto exit;
ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
goto exit;
bEndpointAddress = priv_ep->num | priv_ep->dir;
cdns3_select_ep(priv_dev, bEndpointAddress);
trace_cdns3_gadget_ep_enable(priv_ep);
writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1, 1000);
if (unlikely(ret)) {
cdns3_free_trb_pool(priv_ep);
ret = -EINVAL;
goto exit;
}
/* enable interrupt for selected endpoint */
cdns3_set_register_bit(&priv_dev->regs->ep_ien,
BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
if (priv_dev->dev_ver < DEV_VER_V2)
cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
writel(reg, &priv_dev->regs->ep_sts_en);
ep->desc = desc;
priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
priv_ep->wa1_set = 0;
priv_ep->enqueue = 0;
priv_ep->dequeue = 0;
reg = readl(&priv_dev->regs->ep_sts);
priv_ep->pcs = !!EP_STS_CCS(reg);
priv_ep->ccs = !!EP_STS_CCS(reg);
/* one TRB is reserved for link TRB used in DMULT mode */
priv_ep->free_trbs = priv_ep->num_trbs - 1;
exit:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
/**
* cdns3_gadget_ep_disable - Disable endpoint
* @ep: endpoint object
*
* Returns 0 on success, error code elsewhere
*/
static int cdns3_gadget_ep_disable(struct usb_ep *ep)
{
struct cdns3_endpoint *priv_ep;
struct cdns3_request *priv_req;
struct cdns3_device *priv_dev;
struct usb_request *request;
unsigned long flags;
int ret = 0;
u32 ep_cfg;
int val;
if (!ep) {
pr_err("usbss: invalid parameters\n");
return -EINVAL;
}
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
"%s is already disabled\n", priv_ep->name))
return 0;
spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_gadget_ep_disable(priv_ep);
cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
ep_cfg = readl(&priv_dev->regs->ep_cfg);
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
/*
* The driver needs some time before resetting the endpoint.
* It has to wait for the DBUSY bit to clear or for the timeout to
* expire. 10us is enough time for the controller to stop the transfer.
*/
readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
!(val & EP_STS_DBUSY), 1, 10);
writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1, 1000);
if (unlikely(ret))
dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
priv_ep->name);
while (!list_empty(&priv_ep->pending_req_list)) {
request = cdns3_next_request(&priv_ep->pending_req_list);
cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
-ESHUTDOWN);
}
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
while (!list_empty(&priv_ep->deferred_req_list)) {
request = cdns3_next_request(&priv_ep->deferred_req_list);
cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
-ESHUTDOWN);
}
priv_ep->descmis_req = NULL;
ep->desc = NULL;
priv_ep->flags &= ~EP_ENABLED;
priv_ep->use_streams = false;
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
/**
* __cdns3_gadget_ep_queue - Transfer data on endpoint
* @ep: endpoint object
* @request: request object
* @gfp_flags: gfp flags
*
* Returns 0 on success, error code elsewhere
*/
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
int ret = 0;
request->actual = 0;
request->status = -EINPROGRESS;
priv_req = to_cdns3_request(request);
trace_cdns3_ep_queue(priv_req);
if (priv_dev->dev_ver < DEV_VER_V2) {
ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
priv_req);
if (ret == EINPROGRESS)
return 0;
}
ret = cdns3_prepare_aligned_request_buf(priv_req);
if (ret < 0)
return ret;
if (likely(!(priv_req->flags & REQUEST_UNALIGNED))) {
ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
usb_endpoint_dir_in(ep->desc));
if (ret)
return ret;
}
list_add_tail(&request->list, &priv_ep->deferred_req_list);
/*
* For a stream capable endpoint, start the request only if the prime
* irq flag is set.
* If the hardware endpoint configuration has not been set yet, just
* queue the request in the deferred list. The transfer will be started
* in cdns3_set_hw_configuration.
*/
if (!request->stream_id) {
if (priv_dev->hw_configured_flag &&
!(priv_ep->flags & EP_STALLED) &&
!(priv_ep->flags & EP_STALL_PENDING))
cdns3_start_all_request(priv_dev, priv_ep);
} else {
if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
cdns3_start_all_request(priv_dev, priv_ep);
}
return 0;
}
static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags)
{
struct usb_request *zlp_request;
struct cdns3_endpoint *priv_ep;
struct cdns3_device *priv_dev;
unsigned long flags;
int ret;
if (!request || !ep)
return -EINVAL;
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
spin_lock_irqsave(&priv_dev->lock, flags);
ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
if (ret == 0 && request->zero && request->length &&
(request->length % ep->maxpacket == 0)) {
struct cdns3_request *priv_req;
zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
if (!zlp_request) {
spin_unlock_irqrestore(&priv_dev->lock, flags);
return -ENOMEM;
}
zlp_request->buf = priv_dev->zlp_buf;
zlp_request->length = 0;
priv_req = to_cdns3_request(zlp_request);
priv_req->flags |= REQUEST_ZLP;
dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
priv_ep->name);
ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
}
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
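/*
 * Worked example: a 1024-byte request with request->zero set on a bulk
 * endpoint with maxpacket = 512 satisfies length % maxpacket == 0, so a
 * second zero-length request backed by priv_dev->zlp_buf is queued to
 * terminate the transfer. A 1025-byte request needs no ZLP, since
 * 1025 % 512 = 1 means the transfer already ends with a short packet.
 */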
/**
* cdns3_gadget_ep_dequeue - Remove request from transfer queue
* @ep: endpoint object associated with request
* @request: request object
*
* Returns 0 on success, error code elsewhere
*/
int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
struct usb_request *request)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_device *priv_dev;
struct usb_request *req, *req_temp;
struct cdns3_request *priv_req;
struct cdns3_trb *link_trb;
u8 req_on_hw_ring = 0;
unsigned long flags;
int ret = 0;
int val;
if (!ep || !request || !ep->desc)
return -EINVAL;
priv_dev = priv_ep->cdns3_dev;
spin_lock_irqsave(&priv_dev->lock, flags);
priv_req = to_cdns3_request(request);
trace_cdns3_ep_dequeue(priv_req);
cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
list) {
if (request == req) {
req_on_hw_ring = 1;
goto found;
}
}
list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
list) {
if (request == req)
goto found;
}
goto not_found;
found:
link_trb = priv_req->trb;
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring && link_trb) {
/* Stop DMA */
writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
/* wait for DFLUSH cleared */
readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_DFLUSH), 1, 1000);
link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE)));
link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
TRB_TYPE(TRB_LINK) | TRB_CHAIN);
if (priv_ep->wa1_trb == priv_req->trb)
cdns3_wa1_restore_cycle_bit(priv_ep);
}
cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
req = cdns3_next_request(&priv_ep->pending_req_list);
if (req)
cdns3_rearm_transfer(priv_ep, 1);
not_found:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
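/*
 * Worked example: if the cancelled request occupied TRBs 5..7 of the
 * ring, the code above rewrites TRB 5 in place as a link TRB whose
 * buffer pointer is the DMA address of TRB 8 (end_trb + 1), preserving
 * only the cycle bit, so the DMA engine skips the cancelled TD and
 * continues with whatever was queued behind it.
 */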
/**
* __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to set stall on.
*/
void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
trace_cdns3_halt(priv_ep, 1, 0);
if (!(priv_ep->flags & EP_STALLED)) {
u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
if (!(ep_sts_reg & EP_STS_DBUSY))
cdns3_ep_stall_flush(priv_ep);
else
priv_ep->flags |= EP_STALL_PENDING;
}
}
/**
* __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to clear stall on
*/
int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct usb_request *request;
struct cdns3_request *priv_req;
struct cdns3_trb *trb = NULL;
struct cdns3_trb trb_tmp;
int ret;
int val;
trace_cdns3_halt(priv_ep, 0, 0);
request = cdns3_next_request(&priv_ep->pending_req_list);
if (request) {
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
if (trb) {
trb_tmp = *trb;
trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
}
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
/* wait for EPRST cleared */
ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_EPRST), 1, 100);
if (ret)
return -EINVAL;
priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
if (request) {
if (trb)
*trb = trb_tmp;
cdns3_rearm_transfer(priv_ep, 1);
}
cdns3_start_all_request(priv_dev, priv_ep);
return ret;
}
/**
* cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
* @ep: endpoint object to set/clear stall on
* @value: 1 for set stall, 0 for clear stall
*
* Returns 0 on success, error code elsewhere
*/
int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
unsigned long flags;
int ret = 0;
if (!(priv_ep->flags & EP_ENABLED))
return -EPERM;
spin_lock_irqsave(&priv_dev->lock, flags);
cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
if (!value) {
priv_ep->flags &= ~EP_WEDGE;
ret = __cdns3_gadget_ep_clear_halt(priv_ep);
} else {
__cdns3_gadget_ep_set_halt(priv_ep);
}
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
static const struct usb_ep_ops cdns3_gadget_ep_ops = {
.enable = cdns3_gadget_ep_enable,
.disable = cdns3_gadget_ep_disable,
.alloc_request = cdns3_gadget_ep_alloc_request,
.free_request = cdns3_gadget_ep_free_request,
.queue = cdns3_gadget_ep_queue,
.dequeue = cdns3_gadget_ep_dequeue,
.set_halt = cdns3_gadget_ep_set_halt,
.set_wedge = cdns3_gadget_ep_set_wedge,
};
/**
* cdns3_gadget_get_frame - Returns number of actual ITP frame
* @gadget: gadget object
*
* Returns number of actual ITP frame
*/
static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
return readl(&priv_dev->regs->usb_itpn);
}
int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
{
enum usb_device_speed speed;
speed = cdns3_get_speed(priv_dev);
if (speed >= USB_SPEED_SUPER)
return 0;
/* Start driving resume signaling to indicate remote wakeup. */
writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
return 0;
}
static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&priv_dev->lock, flags);
ret = __cdns3_gadget_wakeup(priv_dev);
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
}
static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
int is_selfpowered)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
unsigned long flags;
spin_lock_irqsave(&priv_dev->lock, flags);
priv_dev->is_selfpowered = !!is_selfpowered;
spin_unlock_irqrestore(&priv_dev->lock, flags);
return 0;
}
static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
if (is_on) {
writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
} else {
writel(~0, &priv_dev->regs->ep_ists);
writel(~0, &priv_dev->regs->usb_ists);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
}
return 0;
}
static void cdns3_gadget_config(struct cdns3_device *priv_dev)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
u32 reg;
cdns3_ep0_config(priv_dev);
/* enable interrupts for endpoint 0 (in and out) */
writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
/*
* Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
* revision of controller.
*/
if (priv_dev->dev_ver == DEV_VER_TI_V1) {
reg = readl(&regs->dbg_link1);
reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
writel(reg, &regs->dbg_link1);
}
/*
* By default some platforms have protected access to memory set up.
* This causes problems with the cache, so the driver restores
* non-secure access to memory.
*/
reg = readl(&regs->dma_axi_ctrl);
reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
writel(reg, &regs->dma_axi_ctrl);
/* enable generic interrupt */
writel(USB_IEN_INIT, &regs->usb_ien);
writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
/* keep Fast Access bit */
writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
cdns3_configure_dmult(priv_dev, NULL);
}
/**
* cdns3_gadget_udc_start - Gadget start
* @gadget: gadget object
* @driver: driver which operates on this gadget
*
* Returns 0 on success, error code elsewhere
*/
static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
unsigned long flags;
enum usb_device_speed max_speed = driver->max_speed;
spin_lock_irqsave(&priv_dev->lock, flags);
priv_dev->gadget_driver = driver;
/* limit speed if necessary */
max_speed = min(driver->max_speed, gadget->max_speed);
switch (max_speed) {
case USB_SPEED_FULL:
writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
break;
case USB_SPEED_HIGH:
writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
break;
case USB_SPEED_SUPER:
break;
default:
dev_err(priv_dev->dev,
"invalid maximum_speed parameter %d\n",
max_speed);
fallthrough;
case USB_SPEED_UNKNOWN:
/* default to superspeed */
max_speed = USB_SPEED_SUPER;
break;
}
cdns3_gadget_config(priv_dev);
spin_unlock_irqrestore(&priv_dev->lock, flags);
return 0;
}
/**
* cdns3_gadget_udc_stop - Stops gadget
* @gadget: gadget object
*
* Returns 0
*/
static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
struct cdns3_endpoint *priv_ep;
u32 bEndpointAddress;
struct usb_ep *ep;
int val;
priv_dev->gadget_driver = NULL;
priv_dev->onchip_used_size = 0;
priv_dev->out_mem_is_allocated = 0;
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
priv_ep = ep_to_cdns3_ep(ep);
bEndpointAddress = priv_ep->num | priv_ep->dir;
cdns3_select_ep(priv_dev, bEndpointAddress);
writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_EPRST), 1, 100);
priv_ep->flags &= ~EP_CLAIMED;
}
/* disable interrupt for device */
writel(0, &priv_dev->regs->usb_ien);
writel(0, &priv_dev->regs->usb_pwr);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
return 0;
}
/**
* cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
* @gadget: pointer to the USB gadget
*
* Used to record the maximum number of endpoints being used in a USB
* composite device (across all configurations). This is to be used in the
* calculation of the TXFIFO sizes when resizing internal memory for
* individual endpoints. It will help ensure that the resizing logic
* reserves enough space for at least one max packet.
*/
static int cdns3_gadget_check_config(struct usb_gadget *gadget)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
struct cdns3_endpoint *priv_ep;
struct usb_ep *ep;
int n_in = 0;
int total;
list_for_each_entry(ep, &gadget->ep_list, ep_list) {
priv_ep = ep_to_cdns3_ep(ep);
if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
n_in++;
}
/* 2KB are reserved for EP0, 1KB for out */
total = 2 + n_in + 1;
if (total > priv_dev->onchip_buffers)
return -ENOMEM;
priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
(priv_dev->onchip_buffers - 2) / (n_in + 1);
return 0;
}
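/*
 * Worked example (assumed values): with onchip_buffers = 18 (KB) and
 * three claimed IN endpoints, total = 2 + 3 + 1 = 6 <= 18, and
 * ep_buf_size = ep_iso_burst = (18 - 2) / (3 + 1) = 4 KB per endpoint.
 * With onchip_buffers = 4 and three IN endpoints the check fails with
 * -ENOMEM, since even one packet per endpoint cannot be guaranteed.
 */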
static const struct usb_gadget_ops cdns3_gadget_ops = {
.get_frame = cdns3_gadget_get_frame,
.wakeup = cdns3_gadget_wakeup,
.set_selfpowered = cdns3_gadget_set_selfpowered,
.pullup = cdns3_gadget_pullup,
.udc_start = cdns3_gadget_udc_start,
.udc_stop = cdns3_gadget_udc_stop,
.match_ep = cdns3_gadget_match_ep,
.check_config = cdns3_gadget_check_config,
};
static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
{
int i;
/* ep0 OUT points to ep0 IN. */
priv_dev->eps[16] = NULL;
for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
if (priv_dev->eps[i]) {
cdns3_free_trb_pool(priv_dev->eps[i]);
devm_kfree(priv_dev->dev, priv_dev->eps[i]);
}
}
/**
* cdns3_init_eps - Initializes software endpoints of gadget
* @priv_dev: extended gadget object
*
* Returns 0 on success, error code elsewhere
*/
static int cdns3_init_eps(struct cdns3_device *priv_dev)
{
u32 ep_enabled_reg, iso_ep_reg;
struct cdns3_endpoint *priv_ep;
int ep_dir, ep_number;
u32 ep_mask;
int ret = 0;
int i;
/* Read it from USB_CAP3 to USB_CAP5 */
ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
ep_dir = i >> 4; /* i div 16 */
ep_number = i & 0xF; /* i % 16 */
ep_mask = BIT(i);
if (!(ep_enabled_reg & ep_mask))
continue;
if (ep_dir && !ep_number) {
priv_dev->eps[i] = priv_dev->eps[0];
continue;
}
priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
GFP_KERNEL);
if (!priv_ep)
goto err;
/* set parent of endpoint object */
priv_ep->cdns3_dev = priv_dev;
priv_dev->eps[i] = priv_ep;
priv_ep->num = ep_number;
priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
if (!ep_number) {
ret = cdns3_init_ep0(priv_dev, priv_ep);
if (ret) {
dev_err(priv_dev->dev, "Failed to init ep0\n");
goto err;
}
} else {
snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
ep_number, !!ep_dir ? "in" : "out");
priv_ep->endpoint.name = priv_ep->name;
usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
CDNS3_EP_MAX_PACKET_LIMIT);
priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
if (ep_dir)
priv_ep->endpoint.caps.dir_in = 1;
else
priv_ep->endpoint.caps.dir_out = 1;
if (iso_ep_reg & ep_mask)
priv_ep->endpoint.caps.type_iso = 1;
priv_ep->endpoint.caps.type_bulk = 1;
priv_ep->endpoint.caps.type_int = 1;
list_add_tail(&priv_ep->endpoint.ep_list,
&priv_dev->gadget.ep_list);
}
priv_ep->flags = 0;
dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
priv_ep->name,
priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
priv_ep->endpoint.caps.type_iso ? "ISO" : "");
INIT_LIST_HEAD(&priv_ep->pending_req_list);
INIT_LIST_HEAD(&priv_ep->deferred_req_list);
INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
}
return 0;
err:
cdns3_free_all_eps(priv_dev);
return -ENOMEM;
}
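/*
 * Worked example: the eps[] index packs the direction into bit 4, so
 * i = 0..15 are OUT endpoints 0..15 and i = 16..31 are IN endpoints
 * 0..15. Index 18 therefore decodes as ep_dir = 18 >> 4 = 1 (IN) and
 * ep_number = 18 & 0xF = 2, i.e. "ep2in"; index 16 (IN 0) is aliased
 * to eps[0] because EP0 is bidirectional.
 */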
static void cdns3_gadget_release(struct device *dev)
{
struct cdns3_device *priv_dev = container_of(dev,
struct cdns3_device, gadget.dev);
kfree(priv_dev);
}
static void cdns3_gadget_exit(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
priv_dev = cdns->gadget_dev;
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
usb_del_gadget(&priv_dev->gadget);
devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
cdns3_free_all_eps(priv_dev);
while (!list_empty(&priv_dev->aligned_buf_list)) {
struct cdns3_aligned_buf *buf;
buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
dma_free_noncoherent(priv_dev->sysdev, buf->size,
buf->buf,
buf->dma,
buf->dir);
list_del(&buf->list);
kfree(buf);
}
dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
priv_dev->setup_dma);
dma_pool_destroy(priv_dev->eps_dma_pool);
kfree(priv_dev->zlp_buf);
usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
cdns_drd_gadget_off(cdns);
}
static int cdns3_gadget_start(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
u32 max_speed;
int ret;
priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
if (!priv_dev)
return -ENOMEM;
usb_initialize_gadget(cdns->dev, &priv_dev->gadget,
cdns3_gadget_release);
cdns->gadget_dev = priv_dev;
priv_dev->sysdev = cdns->dev;
priv_dev->dev = cdns->dev;
priv_dev->regs = cdns->dev_regs;
device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
&priv_dev->onchip_buffers);
if (priv_dev->onchip_buffers <= 0) {
u32 reg = readl(&priv_dev->regs->usb_cap2);
priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
}
if (!priv_dev->onchip_buffers)
priv_dev->onchip_buffers = 256;
max_speed = usb_get_maximum_speed(cdns->dev);
/* Check the maximum_speed parameter */
switch (max_speed) {
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
break;
default:
dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
max_speed);
fallthrough;
case USB_SPEED_UNKNOWN:
/* default to superspeed */
max_speed = USB_SPEED_SUPER;
break;
}
/* fill gadget fields */
priv_dev->gadget.max_speed = max_speed;
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
priv_dev->gadget.ops = &cdns3_gadget_ops;
priv_dev->gadget.name = "usb-ss-gadget";
priv_dev->gadget.quirk_avoids_skb_reserve = 1;
priv_dev->gadget.irq = cdns->dev_irq;
spin_lock_init(&priv_dev->lock);
INIT_WORK(&priv_dev->pending_status_wq,
cdns3_pending_setup_status_handler);
INIT_WORK(&priv_dev->aligned_buf_wq,
cdns3_free_aligned_request_buf);
/* initialize endpoint container */
INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
priv_dev->sysdev,
TRB_RING_SIZE, 8, 0);
if (!priv_dev->eps_dma_pool) {
dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
ret = -ENOMEM;
goto err1;
}
ret = cdns3_init_eps(priv_dev);
if (ret) {
dev_err(priv_dev->dev, "Failed to create endpoints\n");
goto err1;
}
/* allocate memory for setup packet buffer */
priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
&priv_dev->setup_dma, GFP_DMA);
if (!priv_dev->setup_buf) {
ret = -ENOMEM;
goto err2;
}
priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
readl(&priv_dev->regs->usb_cap6));
dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n",
readl(&priv_dev->regs->usb_cap1));
dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
readl(&priv_dev->regs->usb_cap2));
priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
if (priv_dev->dev_ver >= DEV_VER_V2)
priv_dev->gadget.sg_supported = 1;
priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
if (!priv_dev->zlp_buf) {
ret = -ENOMEM;
goto err3;
}
/* add USB gadget device */
ret = usb_add_gadget(&priv_dev->gadget);
if (ret < 0) {
dev_err(priv_dev->dev, "Failed to add gadget\n");
goto err4;
}
return 0;
err4:
kfree(priv_dev->zlp_buf);
err3:
dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
priv_dev->setup_dma);
err2:
cdns3_free_all_eps(priv_dev);
err1:
dma_pool_destroy(priv_dev->eps_dma_pool);
usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
return ret;
}
static int __cdns3_gadget_init(struct cdns *cdns)
{
int ret = 0;
/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
return ret;
}
cdns_drd_gadget_on(cdns);
pm_runtime_get_sync(cdns->dev);
ret = cdns3_gadget_start(cdns);
if (ret) {
pm_runtime_put_sync(cdns->dev);
return ret;
}
/*
* Because interrupt line can be shared with other components in
* driver it can't use IRQF_ONESHOT flag here.
*/
ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
cdns3_device_irq_handler,
cdns3_device_thread_irq_handler,
IRQF_SHARED, dev_name(cdns->dev),
cdns->gadget_dev);
if (ret)
goto err0;
return 0;
err0:
cdns3_gadget_exit(cdns);
return ret;
}
static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
spin_unlock(&cdns->lock);
cdns3_disconnect_gadget(priv_dev);
spin_lock(&cdns->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
cdns3_hw_reset_eps_config(priv_dev);
/* disable interrupt for device */
writel(0, &priv_dev->regs->usb_ien);
return 0;
}
static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
if (!priv_dev->gadget_driver)
return 0;
cdns3_gadget_config(priv_dev);
if (hibernated)
writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
return 0;
}
/**
* cdns3_gadget_init - initialize device structure
*
* @cdns: cdns instance
*
* This function initializes the gadget.
*/
int cdns3_gadget_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
rdrv->start = __cdns3_gadget_init;
rdrv->stop = cdns3_gadget_exit;
rdrv->suspend = cdns3_gadget_suspend;
rdrv->resume = cdns3_gadget_resume;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "gadget";
cdns->roles[USB_ROLE_DEVICE] = rdrv;
return 0;
}
| linux-master | drivers/usb/cdns3/cdns3-gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence CDNSP DRD Driver.
*
* Copyright (C) 2020 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
* Code based on Linux XHCI driver.
* Origin: Copyright (C) 2008 Intel Corp
*/
/*
* Ring initialization rules:
* 1. Each segment is initialized to zero, except for link TRBs.
* 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
* Consumer Cycle State (CCS), depending on ring function.
* 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
*
* Ring behavior rules:
* 1. A ring is empty if enqueue == dequeue. This means there will always be at
* least one free TRB in the ring. This is useful if you want to turn that
* into a link TRB and expand the ring.
* 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
* link TRB, then load the pointer with the address in the link TRB. If the
* link TRB had its toggle bit set, you may need to update the ring cycle
* state (see cycle bit rules). You may have to do this multiple times
* until you reach a non-link TRB.
* 3. A ring is full if enqueue++ (for the definition of increment above)
* equals the dequeue pointer.
*
* Cycle bit rules:
* 1. When a consumer increments a dequeue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
* 2. When a producer increments an enqueue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
*
* Producer rules:
* 1. Check if ring is full before you enqueue.
* 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
* Update enqueue pointer between each write (which may update the ring
* cycle state).
* 3. Notify consumer. If SW is producer, it rings the doorbell for command
* and endpoint rings. The controller is the producer for the event ring,
* and it generates an interrupt according to interrupt modulation rules.
*
* Consumer rules:
* 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
* the TRB is owned by the consumer.
* 2. Update dequeue pointer (which may update the ring cycle state) and
* continue processing TRBs until you reach a TRB which is not owned by you.
* 3. Notify the producer. SW is the consumer for the event ring, and it
* updates event ring dequeue pointer. Controller is the consumer for the
* command and endpoint rings; it generates events on the event ring
* for these.
*/
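/*
 * Illustrative sketch (not driver code; all names hypothetical): the
 * ownership test implied by the cycle bit rules above reduces to comparing
 * a TRB's cycle bit with the consumer's ring cycle state, flipping that
 * state whenever a toggling link TRB is crossed:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct trb { uint32_t field[4]; };
 *	#define TRB_CYCLE 0x1u
 *
 *	// A TRB is owned by the consumer while its cycle bit matches
 *	// the consumer cycle state (CCS).
 *	static bool trb_owned_by_consumer(const struct trb *trb,
 *					  uint32_t ccs)
 *	{
 *		return (trb->field[3] & TRB_CYCLE) == ccs;
 *	}
 *
 * A consumer walking the ring would then do "ccs ^= 1" each time it
 * passes a link TRB whose toggle bit is set, per cycle bit rules 1-2.
 */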
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include "cdnsp-trace.h"
#include "cdnsp-gadget.h"
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
*/
dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
union cdnsp_trb *trb)
{
unsigned long segment_offset = trb - seg->trbs;
if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
{
return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}
static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
{
return TRB_TYPE_LINK_LE32(trb->link.control);
}
bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
{
return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}
bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
struct cdnsp_segment *seg,
union cdnsp_trb *trb)
{
return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}
static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
{
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
{
if (cdnsp_trb_is_link(trb)) {
/* Unchain chained link TRBs. */
trb->link.control &= cpu_to_le32(~TRB_CHAIN);
} else {
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB. */
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
}
}
/*
* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
*/
static void cdnsp_next_trb(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
struct cdnsp_segment **seg,
union cdnsp_trb **trb)
{
if (cdnsp_trb_is_link(*trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
(*trb)++;
}
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
/* event ring doesn't have link trbs, check for last trb. */
if (ring->type == TYPE_EVENT) {
if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
goto out;
}
if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
goto out;
}
/* All other rings have link trbs. */
if (!cdnsp_trb_is_link(ring->dequeue)) {
ring->dequeue++;
ring->num_trbs_free++;
}
while (cdnsp_trb_is_link(ring->dequeue)) {
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
}
out:
trace_cdnsp_inc_deq(ring);
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
* chain bit is set), then set the chain bit in all the following link TRBs.
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
* @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell.
*/
static void cdnsp_inc_enq(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
bool more_trbs_coming)
{
union cdnsp_trb *next;
u32 chain;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB. */
if (!cdnsp_trb_is_link(ring->enqueue))
ring->num_trbs_free--;
next = ++(ring->enqueue);
/* Update the enqueue pointer further if that was a link TRB */
while (cdnsp_trb_is_link(next)) {
/*
* If the caller doesn't plan on enqueuing more TDs before
* ringing the doorbell, then we don't want to give the link TRB
* to the hardware just yet. We'll give the link TRB back in
* cdnsp_prepare_ring() just before we enqueue the TD at the
* top of the ring.
*/
if (!chain && !more_trbs_coming)
break;
next->link.control &= cpu_to_le32(~TRB_CHAIN);
next->link.control |= cpu_to_le32(chain);
/* Give this link TRB to the hardware */
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (cdnsp_link_trb_toggles_cycle(next))
ring->cycle_state ^= 1;
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
trace_cdnsp_inc_enq(ring);
}
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment.
*/
static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
unsigned int num_trbs)
{
int num_trbs_in_deq_seg;
if (ring->num_trbs_free < num_trbs)
return false;
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return false;
}
return true;
}
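/*
 * Worked example: with num_trbs_free = 10 and the dequeue pointer 4 TRBs
 * into its segment (num_trbs_in_deq_seg = 4), a transfer ring can accept
 * at most 10 - 4 = 6 new TRBs; asking for 8 fails even though 8 <= 10,
 * because the enqueue pointer must not advance into the dequeue segment.
 */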
/*
* Workaround for L1: controller has issue with resuming from L1 after
* setting doorbell for endpoint during L1 state. This function forces
* resume signal in such case.
*/
static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
{
if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
}
/* Ring the doorbell after placing a command on the ring. */
void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
{
writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
}
/*
* Ring the doorbell after placing a transfer on the ring.
* Returns true if doorbell was set, otherwise false.
*/
static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
unsigned int stream_id)
{
__le32 __iomem *reg_addr = &pdev->dba->ep_db;
unsigned int ep_state = pep->ep_state;
unsigned int db_value;
/*
* Don't ring the doorbell for this endpoint if endpoint is halted or
* disabled.
*/
if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
return false;
/* For stream capable endpoints the driver can ring the doorbell only twice. */
if (pep->ep_state & EP_HAS_STREAMS) {
if (pep->stream_info.drbls_count >= 2)
return false;
pep->stream_info.drbls_count++;
}
pep->ep_state &= ~EP_STOPPED;
if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
!pdev->ep0_expect_in)
db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
else
db_value = DB_VALUE(pep->idx, stream_id);
trace_cdnsp_tr_drbl(pep, stream_id);
writel(db_value, reg_addr);
cdnsp_force_l0_go(pdev);
/* Doorbell was set. */
return true;
}
/*
* Get the right ring for the given pep and stream_id.
* If the endpoint supports streams, boundary check the USB request's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
*/
static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
unsigned int stream_id)
{
if (!(pep->ep_state & EP_HAS_STREAMS))
return pep->ring;
if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
pep->name, stream_id);
return NULL;
}
return pep->stream_info.stream_rings[stream_id];
}
static struct cdnsp_ring *
cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
struct cdnsp_request *preq)
{
return cdnsp_get_transfer_ring(pdev, preq->pep,
preq->request.stream_id);
}
/* Ring the doorbell for any rings with pending requests. */
void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
struct cdnsp_stream_info *stream_info;
unsigned int stream_id;
int ret;
if (pep->ep_state & EP_DIS_IN_RROGRESS)
return;
/* A ring has pending Request if its TD list is not empty. */
if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
if (pep->ring && !list_empty(&pep->ring->td_list))
cdnsp_ring_ep_doorbell(pdev, pep, 0);
return;
}
stream_info = &pep->stream_info;
for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
struct cdnsp_td *td, *td_temp;
struct cdnsp_ring *ep_ring;
if (stream_info->drbls_count >= 2)
return;
ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
if (!ep_ring)
continue;
if (!ep_ring->stream_active || ep_ring->stream_rejected)
continue;
list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
td_list) {
if (td->drbl)
continue;
ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
if (ret)
td->drbl = 1;
}
}
}
/*
* Get the hw dequeue pointer controller stopped on, either directly from the
* endpoint context, or if streams are in use from the stream context.
* The returned hw_dequeue contains the lowest four bits with cycle state
* and possible stream context type.
*/
static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
unsigned int ep_index,
unsigned int stream_id)
{
struct cdnsp_stream_ctx *st_ctx;
struct cdnsp_ep *pep;
pep = &pdev->eps[ep_index];
if (pep->ep_state & EP_HAS_STREAMS) {
st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
return le64_to_cpu(st_ctx->stream_ring);
}
return le64_to_cpu(pep->out_ctx->deq);
}
/*
* Move the controller endpoint ring dequeue pointer past cur_td.
* Record the new state of the controller endpoint ring dequeue segment,
* dequeue pointer, and new consumer cycle state in state.
* Update internal representation of the ring's dequeue pointer.
*
* We do this in three jumps:
* - First we update our new ring state to be the same as when the
* controller stopped.
* - Then we traverse the ring to find the segment that contains
* the last TRB in the TD. We toggle the controller new cycle state
* when we pass any link TRBs with the toggle cycle bit set.
* - Finally we move the dequeue state one TRB further, toggling the cycle bit
* if we've moved it past a link TRB with the toggle cycle bit set.
*/
static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
unsigned int stream_id,
struct cdnsp_td *cur_td,
struct cdnsp_dequeue_state *state)
{
bool td_last_trb_found = false;
struct cdnsp_segment *new_seg;
struct cdnsp_ring *ep_ring;
union cdnsp_trb *new_deq;
bool cycle_found = false;
u64 hw_dequeue;
ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
if (!ep_ring)
return;
/*
* Dig out the cycle state saved by the controller during the
* stop endpoint command.
*/
hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
state->new_cycle_state = hw_dequeue & 0x1;
state->stream_id = stream_id;
/*
* We want to find the pointer, segment and cycle state of the new trb
* (the one after current TD's last_trb). We know the cycle state at
* hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
* found.
*/
do {
if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
cycle_found = true;
if (td_last_trb_found)
break;
}
if (new_deq == cur_td->last_trb)
td_last_trb_found = true;
if (cycle_found && cdnsp_trb_is_link(new_deq) &&
cdnsp_link_trb_toggles_cycle(new_deq))
state->new_cycle_state ^= 0x1;
cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
/* Search wrapped around, bail out. */
if (new_deq == pep->ring->dequeue) {
dev_err(pdev->dev,
"Error: Failed finding new dequeue state\n");
state->new_deq_seg = NULL;
state->new_deq_ptr = NULL;
return;
}
} while (!cycle_found || !td_last_trb_found);
state->new_deq_seg = new_seg;
state->new_deq_ptr = new_deq;
trace_cdnsp_new_deq_state(state);
}
/*
* flip_cycle means flip the cycle bit of all but the first and last TRB.
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
struct cdnsp_ring *ep_ring,
struct cdnsp_td *td,
bool flip_cycle)
{
struct cdnsp_segment *seg = td->start_seg;
union cdnsp_trb *trb = td->first_trb;
while (1) {
cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
/* flip cycle if asked to */
if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
if (trb == td->last_trb)
break;
cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
}
}
/*
* This TD is defined by the TRBs starting at start_trb in start_seg and ending
* at end_trb, which may be in another segment. If the suspect DMA address is a
* TRB in this TD, this function returns that TRB's segment. Otherwise it
* returns 0.
*/
static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
struct cdnsp_segment *start_seg,
union cdnsp_trb *start_trb,
union cdnsp_trb *end_trb,
dma_addr_t suspect_dma)
{
struct cdnsp_segment *cur_seg;
union cdnsp_trb *temp_trb;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma;
dma_addr_t start_dma;
start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
cur_seg = start_seg;
do {
if (start_dma == 0)
return NULL;
temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
/* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
end_trb_dma, cur_seg->dma,
end_seg_dma);
if (end_trb_dma > 0) {
/*
* The end TRB is in this segment, so suspect should
* be here
*/
if (start_dma <= end_trb_dma) {
if (suspect_dma >= start_dma &&
suspect_dma <= end_trb_dma) {
return cur_seg;
}
} else {
/*
* Case for one segment with a
* TD wrapped around to the top
*/
if ((suspect_dma >= start_dma &&
suspect_dma <= end_seg_dma) ||
(suspect_dma >= cur_seg->dma &&
suspect_dma <= end_trb_dma)) {
return cur_seg;
}
}
return NULL;
}
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
return cur_seg;
cur_seg = cur_seg->next;
start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
return NULL;
}
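/*
 * Worked example: in a single-segment ring where the TD wraps around the
 * top, start_dma may be greater than end_trb_dma. A suspect address then
 * matches if it lies in either [start_dma, end of segment] or
 * [segment start, end_trb_dma], which is exactly the two-interval check
 * in the wrapped branch above.
 */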
static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
struct cdnsp_td *td)
{
struct cdnsp_segment *seg = td->bounce_seg;
struct cdnsp_request *preq;
size_t len;
if (!seg)
return;
preq = td->preq;
trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
seg->bounce_dma, 0);
if (!preq->direction) {
dma_unmap_single(pdev->dev, seg->bounce_dma,
ring->bounce_buf_len, DMA_TO_DEVICE);
return;
}
dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* For in transfers we need to copy the data from bounce to sg */
len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
seg->bounce_buf, seg->bounce_len,
seg->bounce_offs);
if (len != seg->bounce_len)
dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
len, seg->bounce_len);
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
struct cdnsp_dequeue_state *deq_state)
{
struct cdnsp_ring *ep_ring;
int ret;
if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
cdnsp_ring_doorbell_for_active_rings(pdev, pep);
return 0;
}
cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
/*
* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
if (cdnsp_trb_is_link(ep_ring->dequeue)) {
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
while (ep_ring->dequeue != deq_state->new_deq_ptr) {
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
if (cdnsp_trb_is_link(ep_ring->dequeue)) {
if (ep_ring->dequeue == deq_state->new_deq_ptr)
break;
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
}
/*
* A timeout probably occurred while handling the Set Dequeue Pointer
* command. This is a critical error and the controller will be stopped.
*/
if (ret)
return -ESHUTDOWN;
/* Restart any rings with pending requests */
cdnsp_ring_doorbell_for_active_rings(pdev, pep);
return 0;
}
int cdnsp_remove_request(struct cdnsp_device *pdev,
struct cdnsp_request *preq,
struct cdnsp_ep *pep)
{
struct cdnsp_dequeue_state deq_state;
struct cdnsp_td *cur_td = NULL;
struct cdnsp_ring *ep_ring;
struct cdnsp_segment *seg;
int status = -ECONNRESET;
int ret = 0;
u64 hw_deq;
memset(&deq_state, 0, sizeof(deq_state));
trace_cdnsp_remove_request(pep->out_ctx);
trace_cdnsp_remove_request_td(preq);
cur_td = &preq->td;
ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
/*
* If we stopped on the TD we need to cancel, then we have to
* move the controller endpoint ring dequeue pointer past
* this TD.
*/
hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
hw_deq &= ~0xf;
seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
cur_td->last_trb, hw_deq);
if (seg && (pep->ep_state & EP_ENABLED))
cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
cur_td, &deq_state);
else
cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list.
*/
list_del_init(&cur_td->td_list);
ep_ring->num_tds--;
pep->stream_info.td_count--;
/*
* During disconnect all endpoints will be disabled, so we don't
* have to worry about updating the dequeue pointer.
*/
if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
status = -ESHUTDOWN;
ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
}
cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
cdnsp_gadget_giveback(pep, cur_td->preq, status);
return ret;
}
static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
{
struct cdnsp_port *port = pdev->active_port;
u8 old_port = 0;
if (port && port->port_num == port_id)
return 0;
if (port)
old_port = port->port_num;
if (port_id == pdev->usb2_port.port_num) {
port = &pdev->usb2_port;
} else if (port_id == pdev->usb3_port.port_num) {
port = &pdev->usb3_port;
} else {
dev_err(pdev->dev, "Port event with invalid port ID %d\n",
port_id);
return -EINVAL;
}
if (port_id != old_port) {
cdnsp_disable_slot(pdev);
pdev->active_port = port;
cdnsp_enable_slot(pdev);
}
if (port_id == pdev->usb2_port.port_num)
cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
else
writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
&pdev->usb3_port.regs->portpmsc);
return 0;
}
static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
union cdnsp_trb *event)
{
struct cdnsp_port_regs __iomem *port_regs;
u32 portsc, cmd_regs;
bool port2 = false;
u32 link_state;
u32 port_id;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
dev_err(pdev->dev, "ERR: incorrect PSC event\n");
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
if (cdnsp_update_port_id(pdev, port_id))
goto cleanup;
port_regs = pdev->active_port->regs;
if (port_id == pdev->usb2_port.port_num)
port2 = true;
new_event:
portsc = readl(&port_regs->portsc);
writel(cdnsp_port_state_to_neutral(portsc) |
(portsc & PORT_CHANGE_BITS), &port_regs->portsc);
trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
pdev->gadget.speed = cdnsp_port_speed(portsc);
link_state = portsc & PORT_PLS_MASK;
/* Port Link State change detected. */
if ((portsc & PORT_PLC)) {
if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
link_state == XDEV_RESUME) {
cmd_regs = readl(&pdev->op_regs->command);
if (!(cmd_regs & CMD_R_S))
goto cleanup;
if (DEV_SUPERSPEED_ANY(portsc)) {
cdnsp_set_link_state(pdev, &port_regs->portsc,
XDEV_U0);
cdnsp_resume_gadget(pdev);
}
}
if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
link_state == XDEV_U0) {
pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
cdnsp_force_header_wakeup(pdev, 1);
cdnsp_ring_cmd_db(pdev);
cdnsp_wait_for_cmd_compl(pdev);
}
if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
!DEV_SUPERSPEED_ANY(portsc))
cdnsp_resume_gadget(pdev);
if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
cdnsp_suspend_gadget(pdev);
pdev->link_state = link_state;
}
if (portsc & PORT_CSC) {
/* Detach device. */
if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
cdnsp_disconnect_gadget(pdev);
/* Attach device. */
if (portsc & PORT_CONNECT) {
if (!port2)
cdnsp_irq_reset(pdev);
usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
}
}
/* Port reset. */
if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
cdnsp_irq_reset(pdev);
pdev->u1_allowed = 0;
pdev->u2_allowed = 0;
pdev->may_wakeup = 0;
}
/* The over-current change is flagged by the OCC bit, not PORT_CEC. */
if (portsc & PORT_OCC)
dev_err(pdev->dev, "Port Over Current detected\n");
if (portsc & PORT_CEC)
dev_err(pdev->dev, "Port Configure Error detected\n");
if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
goto new_event;
cleanup:
cdnsp_inc_deq(pdev, pdev->event_ring);
}
static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
struct cdnsp_td *td,
struct cdnsp_ring *ep_ring,
int *status)
{
struct cdnsp_request *preq = td->preq;
/* If a bounce buffer was used to align this TD, unmap it. */
cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
/*
 * If the controller said we transferred more data than the buffer
 * length, play it safe and say we didn't transfer anything.
 */
if (preq->request.actual > preq->request.length) {
preq->request.actual = 0;
*status = 0;
}
list_del_init(&td->td_list);
ep_ring->num_tds--;
preq->pep->stream_info.td_count--;
cdnsp_gadget_giveback(preq->pep, preq, *status);
}
static void cdnsp_finish_td(struct cdnsp_device *pdev,
struct cdnsp_td *td,
struct cdnsp_transfer_event *event,
struct cdnsp_ep *ep,
int *status)
{
struct cdnsp_ring *ep_ring;
u32 trb_comp_code;
ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
/*
* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
return;
}
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_td_cleanup(pdev, td, ep_ring, status);
}
/* Sum TRB lengths from the ring dequeue pointer up to stop_trb, _excluding_ stop_trb. */
static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
union cdnsp_trb *stop_trb)
{
struct cdnsp_segment *seg = ring->deq_seg;
union cdnsp_trb *trb = ring->dequeue;
u32 sum;
for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
}
return sum;
}
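/*
 * Worked example (illustrative, not from the driver): for a TD laid
 * out as [Normal 1024][Normal 1024][Link][Normal 512], with stop_trb
 * pointing at the final Normal TRB, the walk above sums the first two
 * Normal TRBs (2048 bytes), skips the Link TRB, and excludes stop_trb
 * itself via the loop condition.
 */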
static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
unsigned int stream_id,
int start_cycle,
struct cdnsp_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
if (start_cycle)
start_trb->field[3] |= cpu_to_le32(start_cycle);
else
start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
if ((pep->ep_state & EP_HAS_STREAMS) &&
!pep->stream_info.first_prime_det) {
trace_cdnsp_wait_for_prime(pep, stream_id);
return 0;
}
return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
}
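/*
 * Note: every queuing function below builds its first TRB with the
 * cycle bit inverted and hands the finished TD over here as the very
 * last step; the wmb() above guarantees the controller cannot observe
 * a valid cycle bit before the rest of the TD is visible in memory.
 */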
/*
 * Process control TDs; update the USB request status and actual_length.
 */
static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
struct cdnsp_td *td,
union cdnsp_trb *event_trb,
struct cdnsp_transfer_event *event,
struct cdnsp_ep *pep,
int *status)
{
struct cdnsp_ring *ep_ring;
u32 remaining;
u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
/*
 * If on the data stage, update the actual_length of the USB request
 * and flag it as set, so it won't be overwritten by the event for
 * the last TRB.
 */
if (trb_type == TRB_DATA) {
td->request_length_set = true;
td->preq->request.actual = td->preq->request.length - remaining;
}
/* At status stage. */
if (!td->request_length_set)
td->preq->request.actual = td->preq->request.length;
if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
pdev->three_stage_setup) {
td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
td_list);
pdev->ep0_stage = CDNSP_STATUS_STAGE;
cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
&td->last_trb->generic);
return;
}
*status = 0;
cdnsp_finish_td(pdev, td, event, pep, status);
}
/*
 * Process isochronous TDs; update the USB request status and actual_length.
 */
static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
struct cdnsp_td *td,
union cdnsp_trb *ep_trb,
struct cdnsp_transfer_event *event,
struct cdnsp_ep *pep,
int status)
{
struct cdnsp_request *preq = td->preq;
u32 remaining, requested, ep_trb_len;
bool sum_trbs_for_length = false;
struct cdnsp_ring *ep_ring;
u32 trb_comp_code;
u32 td_length;
ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
requested = preq->request.length;
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
preq->request.status = 0;
break;
case COMP_SHORT_PACKET:
preq->request.status = 0;
sum_trbs_for_length = true;
break;
case COMP_ISOCH_BUFFER_OVERRUN:
case COMP_BABBLE_DETECTED_ERROR:
preq->request.status = -EOVERFLOW;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
break;
case COMP_STOPPED_SHORT_PACKET:
/* The field normally containing the residue now contains the transferred length. */
preq->request.status = 0;
requested = remaining;
break;
case COMP_STOPPED_LENGTH_INVALID:
requested = 0;
remaining = 0;
break;
default:
sum_trbs_for_length = true;
preq->request.status = -1;
break;
}
if (sum_trbs_for_length) {
td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
td_length += ep_trb_len - remaining;
} else {
td_length = requested;
}
td->preq->request.actual += td_length;
cdnsp_finish_td(pdev, td, event, pep, &status);
}
static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
struct cdnsp_td *td,
struct cdnsp_transfer_event *event,
struct cdnsp_ep *pep,
int status)
{
struct cdnsp_ring *ep_ring;
ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
td->preq->request.status = -EXDEV;
td->preq->request.actual = 0;
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_td_cleanup(pdev, td, ep_ring, &status);
}
/*
 * Process bulk and interrupt TDs; update the USB request status and
 * actual_length.
 */
static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
struct cdnsp_td *td,
union cdnsp_trb *ep_trb,
struct cdnsp_transfer_event *event,
struct cdnsp_ep *ep,
int *status)
{
u32 remaining, requested, ep_trb_len;
struct cdnsp_ring *ep_ring;
u32 trb_comp_code;
ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
requested = td->preq->request.length;
switch (trb_comp_code) {
case COMP_SUCCESS:
case COMP_SHORT_PACKET:
*status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
td->preq->request.actual = remaining;
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* Stopped on ep trb with invalid length, exclude it. */
ep_trb_len = 0;
remaining = 0;
break;
}
if (ep_trb == td->last_trb)
ep_trb_len = requested - remaining;
else
ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
ep_trb_len - remaining;
td->preq->request.actual = ep_trb_len;
finish_td:
ep->stream_info.drbls_count--;
cdnsp_finish_td(pdev, td, event, ep, status);
}
static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
struct cdnsp_transfer_event *event)
{
struct cdnsp_generic_trb *generic;
struct cdnsp_ring *ep_ring;
struct cdnsp_ep *pep;
int cur_stream;
int ep_index;
int host_sid;
int dev_sid;
generic = (struct cdnsp_generic_trb *)event;
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
pep = &pdev->eps[ep_index];
if (!(pep->ep_state & EP_HAS_STREAMS))
return;
if (host_sid == STREAM_PRIME_ACK) {
pep->stream_info.first_prime_det = 1;
for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
cur_stream++) {
ep_ring = pep->stream_info.stream_rings[cur_stream];
ep_ring->stream_active = 1;
ep_ring->stream_rejected = 0;
}
}
if (host_sid == STREAM_REJECTED) {
struct cdnsp_td *td, *td_temp;
pep->stream_info.drbls_count--;
ep_ring = pep->stream_info.stream_rings[dev_sid];
ep_ring->stream_active = 0;
ep_ring->stream_rejected = 1;
list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
td_list) {
td->drbl = 0;
}
}
cdnsp_ring_doorbell_for_active_rings(pdev, pep);
}
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted TRB DMA address, or the endpoint is disabled.
 */
static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
struct cdnsp_transfer_event *event)
{
const struct usb_endpoint_descriptor *desc;
bool handling_skipped_tds = false;
struct cdnsp_segment *ep_seg;
struct cdnsp_ring *ep_ring;
int status = -EINPROGRESS;
union cdnsp_trb *ep_trb;
dma_addr_t ep_trb_dma;
struct cdnsp_ep *pep;
struct cdnsp_td *td;
u32 trb_comp_code;
int invalidate;
int ep_index;
invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
pep = &pdev->eps[ep_index];
ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
/*
 * If the device is disconnected, all requests will be dequeued by
 * the upper layers as part of the disconnect sequence. Don't handle
 * such events here, to avoid racing with that teardown.
 */
if (invalidate || !pdev->gadget.connected)
goto cleanup;
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
trace_cdnsp_ep_disabled(pep->out_ctx);
goto err_out;
}
/* Some transfer events don't always point to a TRB. */
if (!ep_ring) {
switch (trb_comp_code) {
case COMP_INVALID_STREAM_TYPE_ERROR:
case COMP_INVALID_STREAM_ID_ERROR:
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
goto cleanup;
default:
dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
pep->name);
goto err_out;
}
}
/* Look for some error cases that need special treatment. */
switch (trb_comp_code) {
case COMP_BABBLE_DETECTED_ERROR:
status = -EOVERFLOW;
break;
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
/*
 * When the Isoch ring is empty, the controller will generate
 * a Ring Overrun Event for an IN isoch endpoint or a Ring
 * Underrun Event for an OUT isoch endpoint.
 */
goto cleanup;
case COMP_MISSED_SERVICE_ERROR:
/*
 * On a Missed Service Error the controller may have skipped one or
 * more isoc TDs. Set the endpoint's skip flag and complete the
 * missed TDs as short transfers the next time this ring is
 * processed.
 */
pep->skip = true;
break;
}
do {
/*
* This TRB should be in the TD at the head of this ring's TD
* list.
*/
if (list_empty(&ep_ring->td_list)) {
/*
 * Don't print warnings if it's due to a stopped
 * endpoint generating an extra completion event, or
 * an event for the last TRB of a short TD we already
 * got a short event for. The short TD has already been
 * removed from the TD list.
 */
if (!(trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
ep_ring->last_td_was_short))
trace_cdnsp_trb_without_td(ep_ring,
(struct cdnsp_generic_trb *)event);
if (pep->skip) {
pep->skip = false;
trace_cdnsp_ep_list_empty_with_skip(pep, 0);
}
goto cleanup;
}
td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
td_list);
/* Is this a TRB in the currently executing TD? */
ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
ep_ring->dequeue, td->last_trb,
ep_trb_dma);
desc = td->preq->pep->endpoint.desc;
if (ep_seg) {
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
/ sizeof(*ep_trb)];
trace_cdnsp_handle_transfer(ep_ring,
(struct cdnsp_generic_trb *)ep_trb);
if (pep->skip && usb_endpoint_xfer_isoc(desc) &&
td->last_trb != ep_trb)
return -EAGAIN;
}
/*
 * Skip the Force Stopped Event. The event TRB (ep_trb_dma) of an
 * FSE is not in the current TD pointed to by ep_ring->dequeue,
 * because the hardware dequeue pointer is still at the TRB
 * preceding the current TD. That TRB may be a Link TRB or the last
 * TRB of the previous TD. The command completion handler will take
 * care of the rest.
 */
if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
pep->skip = false;
goto cleanup;
}
if (!ep_seg) {
if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
/* Something is busted, give up! */
dev_err(pdev->dev,
"ERROR Transfer event TRB DMA ptr not "
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
return -EINVAL;
}
cdnsp_skip_isoc_td(pdev, td, event, pep, status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
ep_ring->last_td_was_short = true;
else
ep_ring->last_td_was_short = false;
if (pep->skip) {
pep->skip = false;
cdnsp_skip_isoc_td(pdev, td, event, pep, status);
goto cleanup;
}
if (cdnsp_trb_is_noop(ep_trb))
goto cleanup;
if (usb_endpoint_xfer_control(desc))
cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
&status);
else if (usb_endpoint_xfer_isoc(desc))
cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
status);
else
cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
&status);
cleanup:
handling_skipped_tds = pep->skip;
/*
 * Do not update the event ring dequeue pointer if we're in a loop
 * processing missed TDs.
 */
if (!handling_skipped_tds)
cdnsp_inc_deq(pdev, pdev->event_ring);
/*
 * If pep->skip is set, there are missed TDs on the endpoint ring
 * that still need to be taken care of. Process them as short
 * transfers until we reach the TD pointed to by the event.
 */
} while (handling_skipped_tds);
return 0;
err_out:
dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long)
cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
pdev->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
return -EINVAL;
}
/*
 * This function handles one event on the event ring per call.
 * Returns true for "possibly more events to process" (the caller
 * should call again), otherwise false when done.
 */
static bool cdnsp_handle_event(struct cdnsp_device *pdev)
{
unsigned int comp_code;
union cdnsp_trb *event;
bool update_ptrs = true;
u32 cycle_bit;
int ret = 0;
u32 flags;
event = pdev->event_ring->dequeue;
flags = le32_to_cpu(event->event_cmd.flags);
cycle_bit = (flags & TRB_CYCLE);
/* Does the controller or driver own the TRB? */
if (cycle_bit != pdev->event_ring->cycle_state)
return false;
trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
* reads of the event's flags/data below.
*/
rmb();
switch (flags & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_COMPLETION):
/*
 * Commands can't be handled in interrupt context, so just
 * increment the command ring dequeue pointer.
 */
cdnsp_inc_deq(pdev, pdev->cmd_ring);
break;
case TRB_TYPE(TRB_PORT_STATUS):
cdnsp_handle_port_status(pdev, event);
update_ptrs = false;
break;
case TRB_TYPE(TRB_TRANSFER):
ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
if (ret >= 0)
update_ptrs = false;
break;
case TRB_TYPE(TRB_SETUP):
pdev->ep0_stage = CDNSP_SETUP_STAGE;
pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
pdev->setup = *((struct usb_ctrlrequest *)
&event->trans_event.buffer);
cdnsp_setup_analyze(pdev);
break;
case TRB_TYPE(TRB_ENDPOINT_NRDY):
cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
break;
case TRB_TYPE(TRB_HC_EVENT): {
comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
switch (comp_code) {
case COMP_EVENT_RING_FULL_ERROR:
dev_err(pdev->dev, "Event Ring Full\n");
break;
default:
dev_err(pdev->dev, "Controller error code 0x%02x\n",
comp_code);
}
break;
}
case TRB_TYPE(TRB_MFINDEX_WRAP):
case TRB_TYPE(TRB_DRB_OVERFLOW):
break;
default:
dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
TRB_FIELD_TO_TYPE(flags));
}
if (update_ptrs)
/* Update SW event ring dequeue pointer. */
cdnsp_inc_deq(pdev, pdev->event_ring);
/*
* Caller will call us again to check if there are more items
* on the event ring.
*/
return true;
}
irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
struct cdnsp_device *pdev = (struct cdnsp_device *)data;
union cdnsp_trb *event_ring_deq;
unsigned long flags;
int counter = 0;
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
/*
 * While the driver is being removed or stopped there may still be
 * a deferred, not yet handled interrupt; it should not be treated
 * as an error and the driver should simply ignore it.
 */
if (pdev->gadget_driver)
cdnsp_died(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
event_ring_deq = pdev->event_ring->dequeue;
while (cdnsp_handle_event(pdev)) {
if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
event_ring_deq = pdev->event_ring->dequeue;
counter = 0;
}
}
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
irqreturn_t cdnsp_irq_handler(int irq, void *priv)
{
struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
u32 irq_pending;
u32 status;
status = readl(&pdev->op_regs->status);
if (status == ~(u32)0) {
cdnsp_died(pdev);
return IRQ_HANDLED;
}
if (!(status & STS_EINT))
return IRQ_NONE;
writel(status | STS_EINT, &pdev->op_regs->status);
irq_pending = readl(&pdev->ir_set->irq_pending);
irq_pending |= IMAN_IP;
writel(irq_pending, &pdev->ir_set->irq_pending);
if (status & STS_FATAL) {
cdnsp_died(pdev);
return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
}
/*
* Generic function for queuing a TRB on a ring.
* The caller must have checked to make sure there's room on the ring.
*
* @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
*/
static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
bool more_trbs_coming, u32 field1, u32 field2,
u32 field3, u32 field4)
{
struct cdnsp_generic_trb *trb;
trb = &ring->enqueue->generic;
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
trace_cdnsp_queue_trb(ring, trb);
cdnsp_inc_enq(pdev, ring, more_trbs_coming);
}
/*
* Does various checks on the endpoint ring, and makes it ready to
* queue num_trbs.
*/
static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
struct cdnsp_ring *ep_ring,
u32 ep_state,
unsigned int num_trbs,
gfp_t mem_flags)
{
unsigned int num_trbs_needed;
/* Make sure the endpoint has been added to controller schedule. */
switch (ep_state) {
case EP_STATE_STOPPED:
case EP_STATE_RUNNING:
case EP_STATE_HALTED:
break;
default:
dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
return -EINVAL;
}
while (1) {
if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
break;
trace_cdnsp_no_room_on_ring("try ring expansion");
num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
mem_flags)) {
dev_err(pdev->dev, "Ring expansion failed\n");
return -ENOMEM;
}
}
while (cdnsp_trb_is_link(ep_ring->enqueue)) {
ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
/* The cycle bit must be set as the last operation. */
wmb();
ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
ep_ring->cycle_state ^= 1;
ep_ring->enq_seg = ep_ring->enq_seg->next;
ep_ring->enqueue = ep_ring->enq_seg->trbs;
}
return 0;
}
static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
struct cdnsp_request *preq,
unsigned int num_trbs)
{
struct cdnsp_ring *ep_ring;
int ret;
ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
preq->request.stream_id);
if (!ep_ring)
return -EINVAL;
ret = cdnsp_prepare_ring(pdev, ep_ring,
GET_EP_CTX_STATE(preq->pep->out_ctx),
num_trbs, GFP_ATOMIC);
if (ret)
return ret;
INIT_LIST_HEAD(&preq->td.td_list);
preq->td.preq = preq;
/* Add this TD to the tail of the endpoint ring's TD list. */
list_add_tail(&preq->td.td_list, &ep_ring->td_list);
ep_ring->num_tds++;
preq->pep->stream_info.td_count++;
preq->td.start_seg = ep_ring->enq_seg;
preq->td.first_trb = ep_ring->enqueue;
return 0;
}
static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
{
unsigned int num_trbs;
num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
TRB_MAX_BUFF_SIZE);
if (num_trbs == 0)
num_trbs++;
return num_trbs;
}
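/*
 * Worked example (illustrative, assuming the usual 64 KiB
 * TRB_MAX_BUFF_SIZE): a buffer at DMA address 0xFF00 with length 0x300
 * crosses one boundary, so DIV_ROUND_UP(0x300 + 0xFF00, 0x10000) = 2
 * TRBs are needed; a zero-length request still consumes one TRB.
 */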
static unsigned int count_trbs_needed(struct cdnsp_request *preq)
{
return cdnsp_count_trbs(preq->request.dma, preq->request.length);
}
static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
{
unsigned int i, len, full_len, num_trbs = 0;
struct scatterlist *sg;
full_len = preq->request.length;
for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
len = sg_dma_len(sg);
num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
len = min(len, full_len);
full_len -= len;
if (full_len == 0)
break;
}
return num_trbs;
}
static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
if (running_total != preq->request.length)
dev_err(preq->pep->pdev->dev,
"%s - Miscalculated tx length, "
"queued %#x, asked for %#x (%d)\n",
preq->pep->name, running_total,
preq->request.length, preq->request.actual);
}
/*
 * TD size is the number of max packet sized packets remaining in the TD
 * (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro.
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
int transferred,
int trb_buff_len,
unsigned int td_total_len,
struct cdnsp_request *preq,
bool more_trbs_coming,
bool zlp)
{
u32 maxp, total_packet_count;
/* Before a ZLP the driver needs to set TD_SIZE = 1. */
if (zlp)
return 1;
/* One TRB with a zero-length data packet. */
if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len)
return 0;
maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
/* Queuing functions don't count the current TRB into transferred. */
return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
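/*
 * Worked example (illustrative): td_total_len = 3000 and
 * wMaxPacketSize = 512 give total_packet_count = DIV_ROUND_UP(3000,
 * 512) = 6. For the first TRB (transferred = 0) with trb_buff_len =
 * 1024, the remainder is 6 - (1024 / 512) = 4 packets still to go
 * after this TRB.
 */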
static int cdnsp_align_td(struct cdnsp_device *pdev,
struct cdnsp_request *preq, u32 enqd_len,
u32 *trb_buff_len, struct cdnsp_segment *seg)
{
struct device *dev = pdev->dev;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
unalign = (enqd_len + *trb_buff_len) % max_pkt;
/* We got lucky, last normal TRB data on segment is packet aligned. */
if (unalign == 0)
return 0;
/* Can the last normal TRB be aligned by splitting it? */
if (*trb_buff_len > unalign) {
*trb_buff_len -= unalign;
trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
enqd_len, 0, unalign);
return 0;
}
/*
 * We want enqd_len + trb_buff_len to sum up to a number divisible
 * by the endpoint's wMaxPacketSize. IOW:
 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
 */
new_buff_len = max_pkt - (enqd_len % max_pkt);
if (new_buff_len > (preq->request.length - enqd_len))
new_buff_len = (preq->request.length - enqd_len);
/* Stage up to max_pkt bytes through the segment's bounce buffer, pointed to by the last TRB. */
if (preq->direction) {
sg_pcopy_to_buffer(preq->request.sg,
preq->request.num_mapped_sgs,
seg->bounce_buf, new_buff_len, enqd_len);
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_FROM_DEVICE);
}
if (dma_mapping_error(dev, seg->bounce_dma)) {
/* Try without aligning. */
dev_warn(pdev->dev,
"Failed mapping bounce buffer, not aligning\n");
return 0;
}
*trb_buff_len = new_buff_len;
seg->bounce_len = new_buff_len;
seg->bounce_offs = enqd_len;
trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
unalign);
/*
 * The bounce buffer was successfully aligned; seg->bounce_dma will
 * be used in the transfer TRB as the new transfer buffer address.
 */
return 1;
}
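/*
 * Worked example (illustrative): with max_pkt = 512, enqd_len = 1000
 * and *trb_buff_len = 200, unalign = 1200 % 512 = 176; since 200 > 176
 * the TRB is simply shortened to 24 bytes and no bounce buffer is
 * needed. Had *trb_buff_len been 100 (<= unalign), the tail would be
 * staged through the segment's bounce buffer instead.
 */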
int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{
unsigned int enqd_len, block_len, trb_buff_len, full_len;
unsigned int start_cycle, num_sgs = 0;
struct cdnsp_generic_trb *start_trb;
u32 field, length_field, remainder;
struct scatterlist *sg = NULL;
bool more_trbs_coming = true;
bool need_zero_pkt = false;
bool zero_len_trb = false;
struct cdnsp_ring *ring;
bool first_trb = true;
unsigned int num_trbs;
struct cdnsp_ep *pep;
u64 addr, send_addr;
int sent_len, ret;
ring = cdnsp_request_to_transfer_ring(pdev, preq);
if (!ring)
return -EINVAL;
full_len = preq->request.length;
if (preq->request.num_sgs) {
num_sgs = preq->request.num_sgs;
sg = preq->request.sg;
addr = (u64)sg_dma_address(sg);
block_len = sg_dma_len(sg);
num_trbs = count_sg_trbs_needed(preq);
} else {
num_trbs = count_trbs_needed(preq);
addr = (u64)preq->request.dma;
block_len = full_len;
}
pep = preq->pep;
/* Deal with request.zero - need one more td/trb. */
if (preq->request.zero && preq->request.length &&
IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
need_zero_pkt = true;
num_trbs++;
}
ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
if (ret)
return ret;
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ring->enqueue->generic;
start_cycle = ring->cycle_state;
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
/* TRB buffer should not cross 64KB boundaries */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
trb_buff_len = min(trb_buff_len, block_len);
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb) {
first_trb = false;
if (start_cycle == 0)
field |= TRB_CYCLE;
} else {
field |= ring->cycle_state;
}
/*
* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
field |= TRB_CHAIN;
if (cdnsp_trb_is_link(ring->enqueue + 1)) {
if (cdnsp_align_td(pdev, preq, enqd_len,
&trb_buff_len,
ring->enq_seg)) {
send_addr = ring->enq_seg->bounce_dma;
/* Assuming TD won't span 2 segs */
preq->td.bounce_seg = ring->enq_seg;
}
}
}
if (enqd_len + trb_buff_len >= full_len) {
if (need_zero_pkt && !zero_len_trb) {
zero_len_trb = true;
} else {
zero_len_trb = false;
field &= ~TRB_CHAIN;
field |= TRB_IOC;
more_trbs_coming = false;
need_zero_pkt = false;
preq->td.last_trb = ring->enqueue;
}
}
/* Only set interrupt on short packet for OUT endpoints. */
if (!preq->direction)
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
full_len, preq,
more_trbs_coming,
zero_len_trb);
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
cdnsp_queue_trb(pdev, ring, more_trbs_coming,
lower_32_bits(send_addr),
upper_32_bits(send_addr),
length_field,
field);
addr += trb_buff_len;
sent_len = trb_buff_len;
while (sg && sent_len >= block_len) {
/* New sg entry */
--num_sgs;
sent_len -= block_len;
if (num_sgs != 0) {
sg = sg_next(sg);
block_len = sg_dma_len(sg);
addr = (u64)sg_dma_address(sg);
addr += sent_len;
}
}
block_len -= sent_len;
send_addr = addr;
}
cdnsp_check_trb_math(preq, enqd_len);
ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
start_cycle, start_trb);
if (ret)
preq->td.drbl = 1;
return 0;
}
int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{
u32 field, length_field, zlp = 0;
struct cdnsp_ep *pep = preq->pep;
struct cdnsp_ring *ep_ring;
int num_trbs;
u32 maxp;
int ret;
ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
if (!ep_ring)
return -EINVAL;
/* 1 TRB for data, 1 for status */
num_trbs = (pdev->three_stage_setup) ? 2 : 1;
maxp = usb_endpoint_maxp(pep->endpoint.desc);
if (preq->request.zero && preq->request.length &&
(preq->request.length % maxp == 0)) {
num_trbs++;
zlp = 1;
}
ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
if (ret)
return ret;
/* If there's data, queue data TRBs */
if (preq->request.length > 0) {
field = TRB_TYPE(TRB_DATA);
if (zlp)
field |= TRB_CHAIN;
else
field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP);
if (pdev->ep0_expect_in)
field |= TRB_DIR_IN;
length_field = TRB_LEN(preq->request.length) |
TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
cdnsp_queue_trb(pdev, ep_ring, true,
lower_32_bits(preq->request.dma),
upper_32_bits(preq->request.dma), length_field,
field | ep_ring->cycle_state |
TRB_SETUPID(pdev->setup_id) |
pdev->setup_speed);
if (zlp) {
field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
if (!pdev->ep0_expect_in)
field |= TRB_ISP;
cdnsp_queue_trb(pdev, ep_ring, true,
lower_32_bits(preq->request.dma),
upper_32_bits(preq->request.dma), 0,
field | ep_ring->cycle_state |
TRB_SETUPID(pdev->setup_id) |
pdev->setup_speed);
}
pdev->ep0_stage = CDNSP_DATA_STAGE;
}
/* Record the last TRB in the TD: the Status Stage TRB queued below. */
preq->td.last_trb = ep_ring->enqueue;
/* Queue status TRB. */
if (preq->request.length == 0)
field = ep_ring->cycle_state;
else
field = (ep_ring->cycle_state ^ 1);
if (preq->request.length > 0 && pdev->ep0_expect_in)
field |= TRB_DIR_IN;
if (pep->ep_state & EP0_HALTED_STATUS) {
pep->ep_state &= ~EP0_HALTED_STATUS;
field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
} else {
field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
}
cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
return 0;
}
int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
int ret = 0;
if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED ||
ep_state == EP_STATE_HALTED) {
trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
goto ep_stopped;
}
cdnsp_queue_stop_endpoint(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
ep_stopped:
pep->ep_state |= EP_STOPPED;
return ret;
}
int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
int ret;
cdnsp_queue_flush_endpoint(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
return ret;
}
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
* devices can burst up to bMaxBurst number of packets per service interval.
* This field is zero based, meaning a value of zero in the field means one
* burst. Basically, for everything but SuperSpeed devices, this field will be
* zero.
*/
static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
struct cdnsp_request *preq,
unsigned int total_packet_count)
{
unsigned int max_burst;
if (pdev->gadget.speed < USB_SPEED_SUPER)
return 0;
max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
/*
* Returns the number of packets in the last "burst" of packets. This field is
* valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
* the last burst packet count is equal to the total number of packets in the
* TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
* must contain (bMaxBurst + 1) number of packets, but the last burst can
* contain 1 to (bMaxBurst + 1) packets.
*/
static unsigned int
cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
struct cdnsp_request *preq,
unsigned int total_packet_count)
{
unsigned int max_burst;
unsigned int residue;
if (pdev->gadget.speed >= USB_SPEED_SUPER) {
/* bMaxBurst is zero based: 0 means 1 packet per burst. */
max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
residue = total_packet_count % (max_burst + 1);
/*
* If residue is zero, the last burst contains (max_burst + 1)
* number of packets, but the TLBPC field is zero-based.
*/
if (residue == 0)
return max_burst;
return residue - 1;
}
if (total_packet_count == 0)
return 0;
return total_packet_count - 1;
}
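/*
 * Worked example (illustrative): a SuperSpeed isoc endpoint with
 * wMaxPacketSize = 1024 and bMaxBurst = 3 moving a 10240-byte TD has
 * total_packet_count = 10. Burst count: DIV_ROUND_UP(10, 4) - 1 = 2
 * (i.e. three bursts, zero-based). The last burst holds 10 % 4 = 2
 * packets, so the zero-based TLBPC is 1.
 */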
/* Queue function isoc transfer */
int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
struct cdnsp_request *preq)
{
unsigned int trb_buff_len, td_len, td_remain_len, block_len;
unsigned int burst_count, last_burst_pkt;
unsigned int total_pkt_count, max_pkt;
struct cdnsp_generic_trb *start_trb;
struct scatterlist *sg = NULL;
bool more_trbs_coming = true;
struct cdnsp_ring *ep_ring;
unsigned int num_sgs = 0;
int running_total = 0;
u32 field, length_field;
u64 addr, send_addr;
int start_cycle;
int trbs_per_td;
int i, sent_len, ret;
ep_ring = preq->pep->ring;
td_len = preq->request.length;
if (preq->request.num_sgs) {
num_sgs = preq->request.num_sgs;
sg = preq->request.sg;
addr = (u64)sg_dma_address(sg);
block_len = sg_dma_len(sg);
trbs_per_td = count_sg_trbs_needed(preq);
} else {
addr = (u64)preq->request.dma;
block_len = td_len;
trbs_per_td = count_trbs_needed(preq);
}
ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
if (ret)
return ret;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
td_remain_len = td_len;
send_addr = addr;
max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
/* A zero-length transfer still involves at least one packet. */
if (total_pkt_count == 0)
total_pkt_count++;
burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
total_pkt_count);
/*
 * Set isoc specific data for the first TRB in a TD.
 * Prevent HW from getting the TRBs by keeping the cycle state
 * inverted in the first TD's isoc TRB.
 */
field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
TRB_SIA | TRB_TBC(burst_count);
if (!start_cycle)
field |= TRB_CYCLE;
/* Fill the rest of the TRB fields, and remaining normal TRBs. */
for (i = 0; i < trbs_per_td; i++) {
u32 remainder;
/* Calculate TRB length. */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
trb_buff_len = min(trb_buff_len, block_len);
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
remainder = cdnsp_td_remainder(pdev, running_total,
trb_buff_len, td_len, preq,
more_trbs_coming, 0);
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
/* Only the first TRB is an isoc TRB; overwrite with a Normal TRB otherwise. */
if (i) {
field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
length_field |= TRB_TD_SIZE(remainder);
} else {
length_field |= TRB_TD_SIZE_TBC(burst_count);
}
/* Only set interrupt on short packet for OUT EPs. */
if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
field |= TRB_ISP;
/* Set the chain bit for all except the last TRB. */
if (i < trbs_per_td - 1) {
more_trbs_coming = true;
field |= TRB_CHAIN;
} else {
more_trbs_coming = false;
preq->td.last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
lower_32_bits(send_addr), upper_32_bits(send_addr),
length_field, field);
running_total += trb_buff_len;
addr += trb_buff_len;
td_remain_len -= trb_buff_len;
sent_len = trb_buff_len;
while (sg && sent_len >= block_len) {
/* New sg entry */
--num_sgs;
sent_len -= block_len;
if (num_sgs != 0) {
sg = sg_next(sg);
block_len = sg_dma_len(sg);
addr = (u64)sg_dma_address(sg);
addr += sent_len;
}
}
block_len -= sent_len;
send_addr = addr;
}
/* Check TD length */
if (running_total != td_len) {
dev_err(pdev->dev, "ISOC TD length unmatch\n");
ret = -EINVAL;
goto cleanup;
}
cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
start_cycle, start_trb);
return 0;
cleanup:
/* Clean up a partially enqueued isoc transfer. */
list_del_init(&preq->td.td_list);
ep_ring->num_tds--;
/*
* Use the first TD as a temporary variable to turn the TDs we've
* queued into No-ops with a software-owned cycle bit.
* That way the hardware won't accidentally start executing bogus TDs
* when we partially overwrite them.
* td->first_trb and td->start_seg are already set.
*/
preq->td.last_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
ep_ring->enqueue = preq->td.first_trb;
ep_ring->enq_seg = preq->td.start_seg;
ep_ring->cycle_state = start_cycle;
return ret;
}
/**** Command Ring Operations ****/
/*
 * Generic function for queuing a command TRB on the command ring.
 * The driver queues only one command on the ring at a time.
 */
static void cdnsp_queue_command(struct cdnsp_device *pdev,
u32 field1,
u32 field2,
u32 field3,
u32 field4)
{
cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
GFP_ATOMIC);
pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
field3, field4 | pdev->cmd_ring->cycle_state);
}
/* Queue a slot enable or disable request on the command ring */
void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
{
cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
SLOT_ID_FOR_TRB(pdev->slot_id));
}
/* Queue an address device command TRB */
void cdnsp_queue_address_device(struct cdnsp_device *pdev,
dma_addr_t in_ctx_ptr,
enum cdnsp_setup_dev setup)
{
cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_ADDR_DEV) |
SLOT_ID_FOR_TRB(pdev->slot_id) |
(setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
}
/* Queue a reset device command TRB */
void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
{
cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
SLOT_ID_FOR_TRB(pdev->slot_id));
}
/* Queue a configure endpoint command TRB */
void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
dma_addr_t in_ctx_ptr)
{
cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) |
SLOT_ID_FOR_TRB(pdev->slot_id));
}
/*
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{
cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
}
/* Set Transfer Ring Dequeue Pointer command. */
void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
struct cdnsp_dequeue_state *deq_state)
{
u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
u32 type = TRB_TYPE(TRB_SET_DEQ);
u32 trb_sct = 0;
dma_addr_t addr;
addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
deq_state->new_deq_ptr);
if (deq_state->stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
deq_state->new_cycle_state, upper_32_bits(addr),
trb_stream_id, trb_slot_id |
EP_ID_FOR_TRB(pep->idx) | type);
}
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
{
return cdnsp_queue_command(pdev, 0, 0, 0,
SLOT_ID_FOR_TRB(pdev->slot_id) |
EP_ID_FOR_TRB(ep_index) |
TRB_TYPE(TRB_RESET_EP));
}
/*
* Queue a halt endpoint request on the command ring.
*/
void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{
cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
SLOT_ID_FOR_TRB(pdev->slot_id) |
EP_ID_FOR_TRB(ep_index));
}
/*
* Queue a flush endpoint request on the command ring.
*/
void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
unsigned int ep_index)
{
cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
SLOT_ID_FOR_TRB(pdev->slot_id) |
EP_ID_FOR_TRB(ep_index));
}
void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
{
u32 lo, mid;
lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
mid = TRB_FH_TR_PACKET_DEV_NOT |
TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
TRB_FH_TO_INTERFACE(intf_num);
cdnsp_queue_command(pdev, lo, mid, 0,
TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
}
| linux-master | drivers/usb/cdns3/cdnsp-ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence CDNSP DRD Driver.
*
* Copyright (C) 2020 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
*/
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/list.h>
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"
static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
{
struct cdnsp_request *preq;
struct cdnsp_ep *pep;
pep = &pdev->eps[0];
preq = next_request(&pep->pending_list);
if (pdev->three_stage_setup) {
cdnsp_halt_endpoint(pdev, pep, true);
if (preq)
cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
} else {
pep->ep_state |= EP0_HALTED_STATUS;
if (preq)
list_del(&preq->list);
cdnsp_status_stage(pdev);
}
}
static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
int ret;
spin_unlock(&pdev->lock);
ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
spin_lock(&pdev->lock);
return ret;
}
static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
enum usb_device_state state = pdev->gadget.state;
u32 cfg;
int ret;
cfg = le16_to_cpu(ctrl->wValue);
switch (state) {
case USB_STATE_ADDRESS:
trace_cdnsp_ep0_set_config("from Address state");
break;
case USB_STATE_CONFIGURED:
trace_cdnsp_ep0_set_config("from Configured state");
break;
default:
dev_err(pdev->dev, "Set Configuration - bad device state\n");
return -EINVAL;
}
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
if (ret)
return ret;
if (!cfg)
usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
return 0;
}
static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
enum usb_device_state state = pdev->gadget.state;
struct cdnsp_slot_ctx *slot_ctx;
unsigned int slot_state;
int ret;
u32 addr;
addr = le16_to_cpu(ctrl->wValue);
if (addr > 127) {
dev_err(pdev->dev, "Invalid device address %d\n", addr);
return -EINVAL;
}
if (state == USB_STATE_CONFIGURED) {
dev_err(pdev->dev, "Can't Set Address from Configured State\n");
return -EINVAL;
}
pdev->device_address = le16_to_cpu(ctrl->wValue);
slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
if (slot_state == SLOT_STATE_ADDRESSED)
cdnsp_reset_device(pdev);
/* Set device address. */
ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
if (ret)
return ret;
if (addr)
usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
else
usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
return 0;
}
int cdnsp_status_stage(struct cdnsp_device *pdev)
{
pdev->ep0_stage = CDNSP_STATUS_STAGE;
pdev->ep0_preq.request.length = 0;
return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
}
static int cdnsp_w_index_to_ep_index(u16 wIndex)
{
if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
return 0;
return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
(wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
}
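/*
 * Worked example (illustrative, assuming the driver's eps[] layout of
 * alternating OUT/IN entries): wIndex 0x81 (EP1 IN) maps to
 * (1 * 2) + 1 - 1 = 2, wIndex 0x02 (EP2 OUT) maps to
 * (2 * 2) + 0 - 1 = 3, and wIndex 0x00 or 0x80 (EP0) maps to 0.
 */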
static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
struct cdnsp_ep *pep;
__le16 *response;
int ep_sts = 0;
u16 status = 0;
u32 recipient;
recipient = ctrl->bRequestType & USB_RECIP_MASK;
switch (recipient) {
case USB_RECIP_DEVICE:
status = pdev->gadget.is_selfpowered;
status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
if (pdev->gadget.speed >= USB_SPEED_SUPER) {
status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
}
break;
case USB_RECIP_INTERFACE:
/*
* Function Remote Wake Capable D0
* Function Remote Wakeup D1
*/
return cdnsp_ep0_delegate_req(pdev, ctrl);
case USB_RECIP_ENDPOINT:
ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
pep = &pdev->eps[ep_sts];
ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
/* check if endpoint is stalled */
if (ep_sts == EP_STATE_HALTED)
status = BIT(USB_ENDPOINT_HALT);
break;
default:
return -EINVAL;
}
response = (__le16 *)pdev->setup_buf;
*response = cpu_to_le16(status);
pdev->ep0_preq.request.length = sizeof(*response);
pdev->ep0_preq.request.buf = pdev->setup_buf;
return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
}
static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
{
u32 temp;
temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
temp |= PORT_TEST_MODE(pdev->test_mode);
writel(temp, &pdev->active_port->regs->portpmsc);
}
static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
enum usb_device_state state;
enum usb_device_speed speed;
u16 tmode;
state = pdev->gadget.state;
speed = pdev->gadget.speed;
switch (le16_to_cpu(ctrl->wValue)) {
case USB_DEVICE_REMOTE_WAKEUP:
pdev->may_wakeup = !!set;
trace_cdnsp_may_wakeup(set);
break;
case USB_DEVICE_U1_ENABLE:
if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
return -EINVAL;
pdev->u1_allowed = !!set;
trace_cdnsp_u1(set);
break;
case USB_DEVICE_U2_ENABLE:
if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
return -EINVAL;
pdev->u2_allowed = !!set;
trace_cdnsp_u2(set);
break;
case USB_DEVICE_LTM_ENABLE:
return -EINVAL;
case USB_DEVICE_TEST_MODE:
if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
return -EINVAL;
tmode = le16_to_cpu(ctrl->wIndex);
if (!set || (tmode & 0xff) != 0)
return -EINVAL;
tmode = tmode >> 8;
if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
return -EINVAL;
pdev->test_mode = tmode;
/*
 * Test mode must be set before the Status Stage, but the controller
 * will start the test sequence only after the Status Stage
 * completes.
 */
cdnsp_enter_test_mode(pdev);
break;
default:
return -EINVAL;
}
return 0;
}
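/*
 * Worked example (illustrative): for SetFeature(TEST_MODE) with
 * wIndex = 0x0400 the low byte is zero and the selector sits in the
 * high byte, so tmode becomes 4 (USB_TEST_PACKET); the port starts
 * the test sequence only after the Status Stage completes.
 */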
static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
u16 wValue, wIndex;
int ret;
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
switch (wValue) {
case USB_INTRF_FUNC_SUSPEND:
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
if (ret)
return ret;
/*
* Remote wakeup is enabled when any function within a device
* is enabled for function remote wakeup.
*/
if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
pdev->may_wakeup++;
else
if (pdev->may_wakeup > 0)
pdev->may_wakeup--;
return 0;
default:
return -EINVAL;
}
return 0;
}
static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
struct cdnsp_ep *pep;
u16 wValue;
wValue = le16_to_cpu(ctrl->wValue);
pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
switch (wValue) {
case USB_ENDPOINT_HALT:
if (!set && (pep->ep_state & EP_WEDGE)) {
/* Resets Sequence Number */
cdnsp_halt_endpoint(pdev, pep, 0);
cdnsp_halt_endpoint(pdev, pep, 1);
break;
}
return cdnsp_halt_endpoint(pdev, pep, set);
default:
dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
return -EINVAL;
}
return 0;
}
static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl,
int set)
{
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
case USB_RECIP_INTERFACE:
return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
case USB_RECIP_ENDPOINT:
return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
default:
return -EINVAL;
}
}
static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
enum usb_device_state state = pdev->gadget.state;
u16 wLength;
if (state == USB_STATE_DEFAULT)
return -EINVAL;
wLength = le16_to_cpu(ctrl->wLength);
if (wLength != 6) {
dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
wLength);
return -EINVAL;
}
/*
 * To handle Set SEL we need to receive 6 bytes from the host, so
 * queue a usb_request for 6 bytes.
 */
pdev->ep0_preq.request.length = 6;
pdev->ep0_preq.request.buf = pdev->setup_buf;
return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
}
static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
return -EINVAL;
pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
return 0;
}
static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
struct usb_ctrlrequest *ctrl)
{
int ret;
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
ret = cdnsp_ep0_handle_status(pdev, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
break;
case USB_REQ_SET_FEATURE:
ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
break;
case USB_REQ_SET_ADDRESS:
ret = cdnsp_ep0_set_address(pdev, ctrl);
break;
case USB_REQ_SET_CONFIGURATION:
ret = cdnsp_ep0_set_config(pdev, ctrl);
break;
case USB_REQ_SET_SEL:
ret = cdnsp_ep0_set_sel(pdev, ctrl);
break;
case USB_REQ_SET_ISOCH_DELAY:
ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
break;
default:
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
break;
}
return ret;
}
void cdnsp_setup_analyze(struct cdnsp_device *pdev)
{
struct usb_ctrlrequest *ctrl = &pdev->setup;
int ret = -EINVAL;
u16 len;
trace_cdnsp_ctrl_req(ctrl);
if (!pdev->gadget_driver)
goto out;
if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
goto out;
}
/* Restore the ep0 to Stopped/Running state. */
if (pdev->eps[0].ep_state & EP_HALTED) {
trace_cdnsp_ep0_halted("Restore to normal state");
cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
}
/*
 * Finish the previous SETUP transfer by removing the request from
 * the list and informing the upper layer.
 */
if (!list_empty(&pdev->eps[0].pending_list)) {
struct cdnsp_request *req;
trace_cdnsp_ep0_request("Remove previous");
req = next_request(&pdev->eps[0].pending_list);
cdnsp_ep_dequeue(&pdev->eps[0], req);
}
len = le16_to_cpu(ctrl->wLength);
if (!len) {
pdev->three_stage_setup = false;
pdev->ep0_expect_in = false;
} else {
pdev->three_stage_setup = true;
pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
}
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
ret = cdnsp_ep0_std_request(pdev, ctrl);
else
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
if (ret == USB_GADGET_DELAYED_STATUS) {
trace_cdnsp_ep0_status_stage("delayed");
return;
}
out:
if (ret < 0)
cdnsp_ep0_stall(pdev);
else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
cdnsp_status_stage(pdev);
}
| linux-master | drivers/usb/cdns3/cdnsp-ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence PCI Glue driver.
*
* Copyright (C) 2019 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include "core.h"
#include "gadget-export.h"
#define PCI_BAR_HOST 0
#define PCI_BAR_OTG 0
#define PCI_BAR_DEV 2
#define PCI_DEV_FN_HOST_DEVICE 0
#define PCI_DEV_FN_OTG 1
#define PCI_DRIVER_NAME "cdns-pci-usbssp"
#define PLAT_DRIVER_NAME "cdns-usbssp"
#define CDNS_VENDOR_ID 0x17cd
#define CDNS_DEVICE_ID 0x0200
#define CDNS_DRD_ID 0x0100
#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
/*
 * Get the second function.
 * The platform has two functions. The first keeps resources for
 * Host/Device while the second keeps resources for DRD/OTG.
 */
if (pdev->device == CDNS_DEVICE_ID)
return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
else if (pdev->device == CDNS_DRD_ID)
return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
return NULL;
}
static int cdnsp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct pci_dev *func;
struct resource *res;
struct cdns *cdnsp;
int ret;
/*
 * For the GADGET/HOST PCI function, devfn is 0;
 * for the OTG PCI function, devfn is 1.
 */
if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
pdev->devfn != PCI_DEV_FN_OTG))
return -EINVAL;
func = cdnsp_get_second_fun(pdev);
if (!func)
return -EINVAL;
if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
ret = -EINVAL;
goto put_pci;
}
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
goto put_pci;
}
pci_set_master(pdev);
if (pci_is_enabled(func)) {
cdnsp = pci_get_drvdata(func);
} else {
cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
if (!cdnsp) {
ret = -ENOMEM;
goto disable_pci;
}
}
/* For the GADGET function, the PCI function number is 0. */
if (pdev->devfn == 0) {
resource_size_t rsrc_start, rsrc_len;
/* Function 0: host (PCI_BAR_HOST) + device (PCI_BAR_DEV). */
dev_dbg(dev, "Initialize resources\n");
rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
if (!res) {
dev_dbg(dev, "controller already in use\n");
ret = -EBUSY;
goto free_cdnsp;
}
cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
if (!cdnsp->dev_regs) {
dev_dbg(dev, "error mapping memory\n");
ret = -EFAULT;
goto free_cdnsp;
}
cdnsp->dev_irq = pdev->irq;
dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
&rsrc_start);
res = &cdnsp->xhci_res[0];
res->start = pci_resource_start(pdev, PCI_BAR_HOST);
res->end = pci_resource_end(pdev, PCI_BAR_HOST);
res->name = "xhci";
res->flags = IORESOURCE_MEM;
dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
&res->start);
/* Interrupt for XHCI. */
res = &cdnsp->xhci_res[1];
res->start = pdev->irq;
res->name = "host";
res->flags = IORESOURCE_IRQ;
} else {
res = &cdnsp->otg_res;
res->start = pci_resource_start(pdev, PCI_BAR_OTG);
res->end = pci_resource_end(pdev, PCI_BAR_OTG);
res->name = "otg";
res->flags = IORESOURCE_MEM;
dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
&res->start);
/* Interrupt for OTG/DRD. */
cdnsp->otg_irq = pdev->irq;
}
if (pci_is_enabled(func)) {
cdnsp->dev = dev;
cdnsp->gadget_init = cdnsp_gadget_init;
ret = cdns_init(cdnsp);
if (ret)
goto free_cdnsp;
}
pci_set_drvdata(pdev, cdnsp);
device_wakeup_enable(&pdev->dev);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
return 0;
free_cdnsp:
if (!pci_is_enabled(func))
kfree(cdnsp);
disable_pci:
pci_disable_device(pdev);
put_pci:
pci_dev_put(func);
return ret;
}
static void cdnsp_pci_remove(struct pci_dev *pdev)
{
struct cdns *cdnsp;
struct pci_dev *func;
func = cdnsp_get_second_fun(pdev);
cdnsp = (struct cdns *)pci_get_drvdata(pdev);
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
if (pci_is_enabled(func)) {
cdns_remove(cdnsp);
} else {
kfree(cdnsp);
}
pci_dev_put(func);
}
static int __maybe_unused cdnsp_pci_suspend(struct device *dev)
{
struct cdns *cdns = dev_get_drvdata(dev);
return cdns_suspend(cdns);
}
static int __maybe_unused cdnsp_pci_resume(struct device *dev)
{
struct cdns *cdns = dev_get_drvdata(dev);
unsigned long flags;
int ret;
spin_lock_irqsave(&cdns->lock, flags);
ret = cdns_resume(cdns);
spin_unlock_irqrestore(&cdns->lock, flags);
cdns_set_active(cdns, 1);
return ret;
}
static const struct dev_pm_ops cdnsp_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume)
};
static const struct pci_device_id cdnsp_pci_ids[] = {
{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
CDNS_DRD_IF, PCI_ANY_ID },
{ PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
CDNS_DRD_IF, PCI_ANY_ID },
{ 0, }
};
static struct pci_driver cdnsp_pci_driver = {
.name = "cdnsp-pci",
.id_table = cdnsp_pci_ids,
.probe = cdnsp_pci_probe,
.remove = cdnsp_pci_remove,
.driver = {
.pm = &cdnsp_pci_pm_ops,
}
};
module_pci_driver(cdnsp_pci_driver);
MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids);
MODULE_ALIAS("pci:cdnsp");
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence CDNSP PCI driver");
| linux-master | drivers/usb/cdns3/cdnsp-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence CDNSP DRD Driver.
*
* Copyright (C) 2020 Cadence.
*
* Author: Pawel Laszczak <[email protected]>
*
* Code based on Linux XHCI driver.
* Origin: Copyright (C) 2008 Intel Corp.
*/
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"
static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
struct cdnsp_ep *pep);
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
* initializes the segment to zero, and sets the private next pointer to NULL.
*
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
unsigned int cycle_state,
unsigned int max_packet,
gfp_t flags)
{
struct cdnsp_segment *seg;
dma_addr_t dma;
int i;
seg = kzalloc(sizeof(*seg), flags);
if (!seg)
return NULL;
seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
if (max_packet) {
seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
if (!seg->bounce_buf)
goto free_dma;
}
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
}
seg->dma = dma;
seg->next = NULL;
return seg;
free_dma:
dma_pool_free(pdev->segment_pool, seg->trbs, dma);
kfree(seg);
return NULL;
}
static void cdnsp_segment_free(struct cdnsp_device *pdev,
struct cdnsp_segment *seg)
{
if (seg->trbs)
dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
kfree(seg->bounce_buf);
kfree(seg);
}
static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
struct cdnsp_segment *first)
{
struct cdnsp_segment *seg;
seg = first->next;
while (seg != first) {
struct cdnsp_segment *next = seg->next;
cdnsp_segment_free(pdev, seg);
seg = next;
}
cdnsp_segment_free(pdev, first);
}
/*
* Make the prev segment point to the next segment.
*
* Change the last TRB in the prev segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void cdnsp_link_segments(struct cdnsp_device *pdev,
struct cdnsp_segment *prev,
struct cdnsp_segment *next,
enum cdnsp_ring_type type)
{
struct cdnsp_link_trb *link;
u32 val;
if (!prev || !next)
return;
prev->next = next;
if (type != TYPE_EVENT) {
link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
link->segment_ptr = cpu_to_le64(next->dma);
/*
* Set the last TRB in the segment to have a TRB type ID
* of Link TRB
*/
val = le32_to_cpu(link->control);
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
link->control = cpu_to_le32(val);
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void cdnsp_link_rings(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
struct cdnsp_segment *first,
struct cdnsp_segment *last,
unsigned int num_segs)
{
struct cdnsp_segment *next;
if (!ring || !first || !last)
return;
next = ring->enq_seg->next;
cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
cdnsp_link_segments(pdev, last, next, ring->type);
ring->num_segs += num_segs;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/*
* We need a radix tree for mapping physical addresses of TRBs to the stream
* ID they belong to. We need to do this because the device controller won't
* tell us which stream ring the TRB came from. We could store the stream ID
* in an event data TRB, but that doesn't help us for the cancellation case,
* since the endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example,
* say I have segments of size 1KB, that are always 1KB aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as a key. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
struct cdnsp_ring *ring,
struct cdnsp_segment *seg,
gfp_t mem_flags)
{
unsigned long key;
int ret;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
/* Skip any segments that were already added. */
if (radix_tree_lookup(trb_address_map, key))
return 0;
ret = radix_tree_maybe_preload(mem_flags);
if (ret)
return ret;
ret = radix_tree_insert(trb_address_map, key, ring);
radix_tree_preload_end();
return ret;
}
static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
struct cdnsp_segment *seg)
{
unsigned long key;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
if (radix_tree_lookup(trb_address_map, key))
radix_tree_delete(trb_address_map, key);
}
static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
struct cdnsp_ring *ring,
struct cdnsp_segment *first_seg,
struct cdnsp_segment *last_seg,
gfp_t mem_flags)
{
struct cdnsp_segment *failed_seg;
struct cdnsp_segment *seg;
int ret;
seg = first_seg;
do {
ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
mem_flags);
if (ret)
goto remove_streams;
if (seg == last_seg)
return 0;
seg = seg->next;
} while (seg != first_seg);
return 0;
remove_streams:
failed_seg = seg;
seg = first_seg;
do {
cdnsp_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
seg = seg->next;
} while (seg != first_seg);
return ret;
}
static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
{
struct cdnsp_segment *seg;
seg = ring->first_seg;
do {
cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
seg = seg->next;
} while (seg != ring->first_seg);
}
static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
{
return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
ring->first_seg, ring->last_seg, GFP_ATOMIC);
}
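/*
 * Illustrative key computation (hypothetical values, reusing the 1KB-segment
 * example from the comment above, i.e. a shift of 10; the driver itself
 * shifts by TRB_SEGMENT_SHIFT):
 *
 *	0x10c912c0 >> 10 == 0x43244	TRB inside the segment -> ring's key
 *	0x10c91300 >> 10 == 0x43244	same segment, same key, same ring
 *	0x10c91400 >> 10 == 0x43245	next segment, different key
 */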
static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
if (!ring)
return;
trace_cdnsp_ring_free(ring);
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
cdnsp_remove_stream_mapping(ring);
cdnsp_free_segments_for_ring(pdev, ring->first_seg);
}
kfree(ring);
}
void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
{
ring->enqueue = ring->first_seg->trbs;
ring->enq_seg = ring->first_seg;
ring->dequeue = ring->enqueue;
ring->deq_seg = ring->first_seg;
/*
* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to handover ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
*
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = 1;
/*
* Each segment has a link TRB, and we leave an extra TRB for SW
* accounting purposes.
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
/* Allocate segments and link them for a ring. */
static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
struct cdnsp_segment **first,
struct cdnsp_segment **last,
unsigned int num_segs,
unsigned int cycle_state,
enum cdnsp_ring_type type,
unsigned int max_packet,
gfp_t flags)
{
struct cdnsp_segment *prev;
/* Allocate first segment. */
prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
if (!prev)
return -ENOMEM;
num_segs--;
*first = prev;
/* Allocate all other segments. */
while (num_segs > 0) {
struct cdnsp_segment *next;
next = cdnsp_segment_alloc(pdev, cycle_state,
max_packet, flags);
if (!next) {
cdnsp_free_segments_for_ring(pdev, *first);
return -ENOMEM;
}
cdnsp_link_segments(pdev, prev, next, type);
prev = next;
num_segs--;
}
cdnsp_link_segments(pdev, prev, *first, type);
*last = prev;
return 0;
}
/*
* Create a new ring with zero or more segments.
*
* Link each segment together into a ring.
* Set the end flag and the cycle toggle bit on the last segment.
*/
static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
unsigned int num_segs,
enum cdnsp_ring_type type,
unsigned int max_packet,
gfp_t flags)
{
struct cdnsp_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), flags);
if (!ring)
return NULL;
ring->num_segs = num_segs;
ring->bounce_buf_len = max_packet;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
&ring->last_seg, num_segs,
1, type, max_packet, flags);
if (ret)
goto fail;
/* Only the event ring does not use a link TRB. */
if (type != TYPE_EVENT)
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
cdnsp_initialize_ring_info(ring);
trace_cdnsp_ring_alloc(ring);
return ring;
fail:
kfree(ring);
return NULL;
}
void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
cdnsp_ring_free(pdev, pep->ring);
pep->ring = NULL;
cdnsp_free_stream_info(pdev, pep);
}
/*
* Expand an existing ring.
* Allocate a new ring with the same number of segments and link the two rings.
*/
int cdnsp_ring_expansion(struct cdnsp_device *pdev,
struct cdnsp_ring *ring,
unsigned int num_trbs,
gfp_t flags)
{
unsigned int num_segs_needed;
struct cdnsp_segment *first;
struct cdnsp_segment *last;
unsigned int num_segs;
int ret;
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
/* Allocate the number of segments we need, or double the ring size. */
num_segs = max(ring->num_segs, num_segs_needed);
ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
ring->cycle_state, ring->type,
ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
if (ring->type == TYPE_STREAM)
ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
ring, first,
last, flags);
if (ret) {
cdnsp_free_segments_for_ring(pdev, first);
return ret;
}
cdnsp_link_rings(pdev, ring, first, last, num_segs);
trace_cdnsp_ring_expansion(ring);
return 0;
}
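/*
 * Worked example (illustrative only, values assumed): with
 * TRBS_PER_SEGMENT == 256 each segment holds 255 usable TRBs (one slot is
 * the link TRB). Expanding by num_trbs == 300 needs
 * (300 + 254) / 255 == 2 segments; if the ring already has four segments,
 * max(4, 2) == 4, so four new segments are linked in and the ring doubles.
 */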
static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
{
int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
pdev->out_ctx.size = size;
pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
&pdev->out_ctx.dma);
if (!pdev->out_ctx.bytes)
return -ENOMEM;
pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
&pdev->in_ctx.dma);
if (!pdev->in_ctx.bytes) {
dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
pdev->out_ctx.dma);
return -ENOMEM;
}
return 0;
}
struct cdnsp_input_control_ctx
*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
{
if (ctx->type != CDNSP_CTX_TYPE_INPUT)
return NULL;
return (struct cdnsp_input_control_ctx *)ctx->bytes;
}
struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
{
if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
return (struct cdnsp_slot_ctx *)ctx->bytes;
return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
}
struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
unsigned int ep_index)
{
/* Increment ep index by offset of start of ep ctx array. */
ep_index++;
if (ctx->type == CDNSP_CTX_TYPE_INPUT)
ep_index++;
return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
}
static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
pep->stream_info.ctx_array_dma);
}
/* The stream context array must be a power of 2. */
static struct cdnsp_stream_ctx
*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
size_t size = sizeof(struct cdnsp_stream_ctx) *
pep->stream_info.num_stream_ctxs;
if (size > CDNSP_CTX_SIZE)
return NULL;
/*
* The driver intentionally uses the device_pool to allocate the stream
* context array. The device pool entries are 2048 bytes each, which
* gives us room for 128 stream contexts.
*/
return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
&pep->stream_info.ctx_array_dma);
}
struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
{
if (pep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&pep->stream_info.trb_address_map,
address >> TRB_SEGMENT_SHIFT);
return pep->ring;
}
/*
* Change an endpoint's internal structure so it supports stream IDs.
* The number of requested streams includes stream 0, which cannot be used by
* the driver.
*
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*/
int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
unsigned int num_stream_ctxs,
unsigned int num_streams)
{
struct cdnsp_stream_info *stream_info;
struct cdnsp_ring *cur_ring;
u32 cur_stream;
u64 addr;
int ret;
int mps;
stream_info = &pep->stream_info;
stream_info->num_streams = num_streams;
stream_info->num_stream_ctxs = num_stream_ctxs;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kcalloc(num_streams,
sizeof(struct cdnsp_ring *),
GFP_ATOMIC);
if (!stream_info->stream_rings)
return -ENOMEM;
/* Initialize the array of DMA addresses for stream rings for the HW. */
stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
if (!stream_info->stream_ctx_array)
goto cleanup_stream_rings;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
mps = usb_endpoint_maxp(pep->endpoint.desc);
/*
* Allocate rings for all the streams that the driver will use,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
GFP_ATOMIC);
stream_info->stream_rings[cur_stream] = cur_ring;
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type. */
addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
stream_info->stream_ctx_array[cur_stream].stream_ring =
cpu_to_le64(addr);
trace_cdnsp_set_stream_ring(cur_ring);
ret = cdnsp_update_stream_mapping(cur_ring);
if (ret)
goto cleanup_rings;
}
return 0;
cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
cdnsp_ring_free(pdev, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
cleanup_stream_rings:
kfree(pep->stream_info.stream_rings);
return -ENOMEM;
}
/* Frees all stream contexts associated with the endpoint. */
static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
struct cdnsp_ep *pep)
{
struct cdnsp_stream_info *stream_info = &pep->stream_info;
struct cdnsp_ring *cur_ring;
int cur_stream;
if (!(pep->ep_state & EP_HAS_STREAMS))
return;
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
cdnsp_ring_free(pdev, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
if (stream_info->stream_ctx_array)
cdnsp_free_stream_ctx(pdev, pep);
kfree(stream_info->stream_rings);
pep->ep_state &= ~EP_HAS_STREAMS;
}
/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
{
pdev->dcbaa->dev_context_ptrs[1] = 0;
cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
if (pdev->in_ctx.bytes)
dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
pdev->in_ctx.dma);
if (pdev->out_ctx.bytes)
dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
pdev->out_ctx.dma);
pdev->in_ctx.bytes = NULL;
pdev->out_ctx.bytes = NULL;
}
static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
{
int ret;
ret = cdnsp_init_device_ctx(pdev);
if (ret)
return ret;
/* Allocate endpoint 0 ring. */
pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
if (!pdev->eps[0].ring)
goto fail;
/* Point to output device context in dcbaa. */
pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
pdev->cmd.in_ctx = &pdev->in_ctx;
trace_cdnsp_alloc_priv_device(pdev);
return 0;
fail:
dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
pdev->out_ctx.dma);
dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
pdev->in_ctx.dma);
return ret;
}
void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
{
struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
dma_addr_t dma;
dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
}
/* Set up a controller private device for a Set Address command. */
int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
{
struct cdnsp_slot_ctx *slot_ctx;
struct cdnsp_ep_ctx *ep0_ctx;
u32 max_packets, port;
ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
/* Only the control endpoint is valid - one endpoint context. */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
switch (pdev->gadget.speed) {
case USB_SPEED_SUPER_PLUS:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
max_packets = MAX_PACKET(512);
break;
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
break;
case USB_SPEED_HIGH:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
max_packets = MAX_PACKET(64);
break;
case USB_SPEED_FULL:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
max_packets = MAX_PACKET(64);
break;
default:
/* Speed was not set; this shouldn't happen. */
return -EINVAL;
}
port = DEV_PORT(pdev->active_port->port_num);
slot_ctx->dev_port |= cpu_to_le32(port);
slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
DEV_ADDR_MASK));
ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
max_packets);
ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
pdev->eps[0].ring->cycle_state);
trace_cdnsp_setup_addressable_priv_device(pdev);
return 0;
}
/*
* Convert interval expressed as 2^(bInterval - 1) == interval into
* straight exponent value 2^n == interval.
*/
static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
struct cdnsp_ep *pep)
{
unsigned int interval;
interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
if (interval != pep->endpoint.desc->bInterval - 1)
dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
pep->name, 1 << interval,
g->speed == USB_SPEED_FULL ? "" : "micro");
/*
* Full speed isoc endpoints specify interval in frames,
* not microframes. We are using microframes everywhere,
* so adjust accordingly.
*/
if (g->speed == USB_SPEED_FULL)
interval += 3; /* 1 frame = 2^3 uframes */
/* Controller handles only up to 512ms (2^12). */
if (interval > 12)
interval = 12;
return interval;
}
/*
* Convert bInterval expressed in microframes (in the 1-255 range) to an
* exponent of microframes, rounded down to the nearest power of 2.
*/
static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
struct cdnsp_ep *pep,
unsigned int desc_interval,
unsigned int min_exponent,
unsigned int max_exponent)
{
unsigned int interval;
interval = fls(desc_interval) - 1;
return clamp_val(interval, min_exponent, max_exponent);
}
/*
* Return the polling interval.
*
* The polling interval is expressed in "microframes". If the controller's Interval
* field is set to N, it will service the endpoint every 2^(Interval)*125us.
*/
static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
struct cdnsp_ep *pep)
{
unsigned int interval = 0;
switch (g->speed) {
case USB_SPEED_HIGH:
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
usb_endpoint_xfer_isoc(pep->endpoint.desc))
interval = cdnsp_parse_exponent_interval(g, pep);
break;
case USB_SPEED_FULL:
if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
interval = cdnsp_parse_exponent_interval(g, pep);
} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
interval = pep->endpoint.desc->bInterval << 3;
interval = cdnsp_microframes_to_exponent(g, pep,
interval,
3, 10);
}
break;
default:
WARN_ON(1);
}
return interval;
}
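/*
 * Worked examples (illustrative only): a high-speed interrupt endpoint with
 * bInterval == 5 yields interval == 4, i.e. service every 2^4 * 125us = 2ms.
 * A full-speed interrupt endpoint with bInterval == 10 is first scaled to
 * 80 microframes (10 << 3), then rounded down to a power of two:
 * fls(80) - 1 == 6, i.e. 2^6 * 125us = 8ms.
 */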
/*
* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
{
if (g->speed < USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(pep->endpoint.desc))
return 0;
return pep->endpoint.comp_desc->bmAttributes;
}
static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
struct cdnsp_ep *pep)
{
/* Super speed and Plus have max burst in ep companion desc */
if (g->speed >= USB_SPEED_SUPER)
return pep->endpoint.comp_desc->bMaxBurst;
if (g->speed == USB_SPEED_HIGH &&
(usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
usb_endpoint_xfer_int(pep->endpoint.desc)))
return usb_endpoint_maxp_mult(pep->endpoint.desc) - 1;
return 0;
}
static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
{
int in;
in = usb_endpoint_dir_in(desc);
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
return CTRL_EP;
case USB_ENDPOINT_XFER_BULK:
return in ? BULK_IN_EP : BULK_OUT_EP;
case USB_ENDPOINT_XFER_ISOC:
return in ? ISOC_IN_EP : ISOC_OUT_EP;
case USB_ENDPOINT_XFER_INT:
return in ? INT_IN_EP : INT_OUT_EP;
}
return 0;
}
/*
* Return the maximum endpoint service interval time (ESIT) payload.
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
struct cdnsp_ep *pep)
{
int max_packet;
int max_burst;
/* Only applies to interrupt or isochronous endpoints. */
if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
usb_endpoint_xfer_bulk(pep->endpoint.desc))
return 0;
/* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
if (g->speed >= USB_SPEED_SUPER_PLUS &&
USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
else if (g->speed >= USB_SPEED_SUPER)
return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
max_packet = usb_endpoint_maxp(pep->endpoint.desc);
max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * max_burst;
}
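/*
 * Worked example (illustrative only, values assumed): a high-speed
 * isochronous endpoint with wMaxPacketSize == 1024 and two additional
 * transaction opportunities per microframe (usb_endpoint_maxp_mult() == 3)
 * has a max ESIT payload of 1024 * 3 == 3072 bytes.
 */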
int cdnsp_endpoint_init(struct cdnsp_device *pdev,
struct cdnsp_ep *pep,
gfp_t mem_flags)
{
enum cdnsp_ring_type ring_type;
struct cdnsp_ep_ctx *ep_ctx;
unsigned int err_count = 0;
unsigned int avg_trb_len;
unsigned int max_packet;
unsigned int max_burst;
unsigned int interval;
u32 max_esit_payload;
unsigned int mult;
u32 endpoint_type;
int ret;
ep_ctx = pep->in_ctx;
endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
if (!endpoint_type)
return -EINVAL;
ring_type = usb_endpoint_type(pep->endpoint.desc);
/*
* Get values to fill the endpoint context, mostly from ep descriptor.
* The average TRB buffer length for bulk endpoints is unclear as we
* have no clue on scatter gather list entry size. For Isoc and Int,
* set it to max available.
*/
max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
max_packet = usb_endpoint_maxp(pep->endpoint.desc);
max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
avg_trb_len = max_esit_payload;
/* Allow 3 retries for everything but isoc, set CErr = 3. */
if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
err_count = 3;
if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
pdev->gadget.speed == USB_SPEED_HIGH)
max_packet = 512;
/* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
if (usb_endpoint_xfer_control(pep->endpoint.desc))
avg_trb_len = 8;
/* Set up the endpoint ring. */
pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
if (!pep->ring)
return -ENOMEM;
pep->skip = false;
/* Fill the endpoint context */
ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
EP_INTERVAL(interval) | EP_MULT(mult));
ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
ERROR_COUNT(err_count));
ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
pep->ring->cycle_state);
ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
EP_AVG_TRB_LENGTH(avg_trb_len));
if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
pdev->gadget.speed > USB_SPEED_HIGH) {
ret = cdnsp_alloc_streams(pdev, pep);
if (ret < 0)
return ret;
}
return 0;
}
void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
pep->in_ctx->ep_info = 0;
pep->in_ctx->ep_info2 = 0;
pep->in_ctx->deq = 0;
pep->in_ctx->tx_info = 0;
}
static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
struct cdnsp_ring *evt_ring,
struct cdnsp_erst *erst)
{
struct cdnsp_erst_entry *entry;
struct cdnsp_segment *seg;
unsigned int val;
size_t size;
size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
erst->entries = dma_alloc_coherent(pdev->dev, size,
&erst->erst_dma_addr, GFP_KERNEL);
if (!erst->entries)
return -ENOMEM;
erst->num_entries = evt_ring->num_segs;
seg = evt_ring->first_seg;
for (val = 0; val < evt_ring->num_segs; val++) {
entry = &erst->entries[val];
entry->seg_addr = cpu_to_le64(seg->dma);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
seg = seg->next;
}
return 0;
}
static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
{
size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
struct device *dev = pdev->dev;
if (erst->entries)
dma_free_coherent(dev, size, erst->entries,
erst->erst_dma_addr);
erst->entries = NULL;
}
void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
{
struct device *dev = pdev->dev;
cdnsp_free_priv_device(pdev);
cdnsp_free_erst(pdev, &pdev->erst);
if (pdev->event_ring)
cdnsp_ring_free(pdev, pdev->event_ring);
pdev->event_ring = NULL;
if (pdev->cmd_ring)
cdnsp_ring_free(pdev, pdev->cmd_ring);
pdev->cmd_ring = NULL;
dma_pool_destroy(pdev->segment_pool);
pdev->segment_pool = NULL;
dma_pool_destroy(pdev->device_pool);
pdev->device_pool = NULL;
dma_free_coherent(dev, sizeof(*pdev->dcbaa),
pdev->dcbaa, pdev->dcbaa->dma);
pdev->dcbaa = NULL;
pdev->usb2_port.exist = 0;
pdev->usb3_port.exist = 0;
pdev->usb2_port.port_num = 0;
pdev->usb3_port.port_num = 0;
pdev->active_port = NULL;
}
static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
{
dma_addr_t deq;
u64 temp;
deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
pdev->event_ring->dequeue);
/* Update controller event ring dequeue pointer */
temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/*
* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
temp &= ~ERST_EHB;
cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
&pdev->ir_set->erst_dequeue);
}
static void cdnsp_add_in_port(struct cdnsp_device *pdev,
struct cdnsp_port *port,
__le32 __iomem *addr)
{
u32 temp, port_offset, port_count;
temp = readl(addr);
port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
/* Port offset and count are in the third dword. */
temp = readl(addr + 2);
port_offset = CDNSP_EXT_PORT_OFF(temp);
port_count = CDNSP_EXT_PORT_COUNT(temp);
trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);
port->port_num = port_offset;
port->exist = 1;
}
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to support.
*/
static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
{
void __iomem *base;
u32 offset;
int i;
base = &pdev->cap_regs->hc_capbase;
offset = cdnsp_find_next_ext_cap(base, 0,
EXT_CAP_CFG_DEV_20PORT_CAP_ID);
pdev->port20_regs = base + offset;
offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
pdev->port3x_regs = base + offset;
offset = 0;
base = &pdev->cap_regs->hc_capbase;
/* The driver expects at most 2 extended protocol capabilities. */
for (i = 0; i < 2; i++) {
u32 temp;
offset = cdnsp_find_next_ext_cap(base, offset,
EXT_CAPS_PROTOCOL);
temp = readl(base + offset);
if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
!pdev->usb3_port.port_num)
cdnsp_add_in_port(pdev, &pdev->usb3_port,
base + offset);
if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
!pdev->usb2_port.port_num)
cdnsp_add_in_port(pdev, &pdev->usb2_port,
base + offset);
}
if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
dev_err(pdev->dev, "Error: Only one port detected\n");
return -ENODEV;
}
trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");
pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
(&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
(pdev->usb2_port.port_num - 1));
pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
(&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
(pdev->usb3_port.port_num - 1));
return 0;
}
/*
* Initialize memory for CDNSP (one-time init).
*
* Program the PAGESIZE register, initialize the device context array, create
* device contexts, set up a command ring segment, create event
* ring (one for now).
*/
int cdnsp_mem_init(struct cdnsp_device *pdev)
{
struct device *dev = pdev->dev;
int ret = -ENOMEM;
unsigned int val;
dma_addr_t dma;
u32 page_size;
u64 val_64;
/*
* Use 4K pages, since that's common and the minimum the
* controller supports
*/
page_size = 1 << 12;
val = readl(&pdev->op_regs->config_reg);
val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
writel(val, &pdev->op_regs->config_reg);
/*
* The device context base address array must be physically contiguous
* and 64-byte (cache line) aligned.
*/
pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
&dma, GFP_KERNEL);
if (!pdev->dcbaa)
return -ENOMEM;
pdev->dcbaa->dma = dma;
cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so the driver picks the greater of the
* two alignment requirements.
*/
pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
page_size);
if (!pdev->segment_pool)
goto release_dcbaa;
pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
CDNSP_CTX_SIZE, 64, page_size);
if (!pdev->device_pool)
goto destroy_segment_pool;
/* Set up the command ring to have one segment for now. */
pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
if (!pdev->cmd_ring)
goto destroy_device_pool;
/* Set the address in the Command Ring Control register */
val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
(pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
pdev->cmd_ring->cycle_state;
cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
val = readl(&pdev->cap_regs->db_off);
val &= DBOFF_MASK;
pdev->dba = (void __iomem *)pdev->cap_regs + val;
/* Set ir_set to interrupt register set 0 */
pdev->ir_set = &pdev->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST).
*/
pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
0, GFP_KERNEL);
if (!pdev->event_ring)
goto free_cmd_ring;
ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
if (ret)
goto free_event_ring;
/* Set ERST count with the number of entries in the segment table. */
val = readl(&pdev->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
writel(val, &pdev->ir_set->erst_size);
/* Set the segment table base address. */
val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
/* Set the event ring dequeue address. */
cdnsp_set_event_deq(pdev);
ret = cdnsp_setup_port_arrays(pdev);
if (ret)
goto free_erst;
ret = cdnsp_alloc_priv_device(pdev);
if (ret) {
dev_err(pdev->dev,
"Could not allocate cdnsp_device data structures\n");
goto free_erst;
}
return 0;
free_erst:
cdnsp_free_erst(pdev, &pdev->erst);
free_event_ring:
cdnsp_ring_free(pdev, pdev->event_ring);
free_cmd_ring:
cdnsp_ring_free(pdev, pdev->cmd_ring);
destroy_device_pool:
dma_pool_destroy(pdev->device_pool);
destroy_segment_pool:
dma_pool_destroy(pdev->segment_pool);
release_dcbaa:
dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
pdev->dcbaa->dma);
cdnsp_reset(pdev);
return ret;
}
| linux-master | drivers/usb/cdns3/cdnsp-mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Role Switch Support
*
* Copyright (C) 2018 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
* Hans de Goede <[email protected]>
*/
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
static const struct class role_class = {
.name = "usb_role",
};
struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock */
enum usb_role role;
/* From descriptor */
struct device *usb2_port;
struct device *usb3_port;
struct device *udc;
usb_role_switch_set_t set;
usb_role_switch_get_t get;
bool allow_userspace_control;
};
#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
/**
* usb_role_switch_set_role - Set USB role for a switch
* @sw: USB role switch
* @role: USB role to be switched to
*
* Set USB role @role for @sw.
*/
int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
{
int ret;
if (IS_ERR_OR_NULL(sw))
return 0;
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
if (!ret) {
sw->role = role;
kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
}
mutex_unlock(&sw->lock);
return ret;
}
EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
/**
* usb_role_switch_get_role - Get the USB role for a switch
* @sw: USB role switch
*
* Depending on the role-switch driver, this function returns either a cached
* value of the last set role, or reads back the actual value from the hardware.
*/
enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
if (IS_ERR_OR_NULL(sw))
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
if (sw->get)
role = sw->get(sw);
else
role = sw->role;
mutex_unlock(&sw->lock);
return role;
}
EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const char *id,
void *data)
{
struct device *dev;
if (id && !fwnode_property_present(fwnode, id))
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
static struct usb_role_switch *
usb_role_switch_is_parent(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent = fwnode_get_parent(fwnode);
struct device *dev;
if (!fwnode_property_present(parent, "usb-role-switch")) {
fwnode_handle_put(parent);
return NULL;
}
dev = class_find_device_by_fwnode(&role_class, parent);
fwnode_handle_put(parent);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
* usb_role_switch_get - Find USB role switch linked with the caller
* @dev: The caller device
*
* Finds and returns the role switch linked with @dev. The reference count for the
* found switch is incremented.
*/
struct usb_role_switch *usb_role_switch_get(struct device *dev)
{
struct usb_role_switch *sw;
sw = usb_role_switch_is_parent(dev_fwnode(dev));
if (!sw)
sw = device_connection_find_match(dev, "usb-role-switch", NULL,
usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_get);
/**
* fwnode_usb_role_switch_get - Find USB role switch linked with the caller
* @fwnode: The caller device node
*
* This is similar to the usb_role_switch_get() function above, but it searches
* for the switch using the fwnode instead of the device entry.
*/
struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
{
struct usb_role_switch *sw;
sw = usb_role_switch_is_parent(fwnode);
if (!sw)
sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
NULL, usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
return sw;
}
EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
/**
* usb_role_switch_put - Release handle to a switch
* @sw: USB Role Switch
*
* Decrement reference count for @sw.
*/
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
module_put(sw->dev.parent->driver->owner);
put_device(&sw->dev);
}
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
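/*
 * Illustrative consumer-side usage (hypothetical driver code, not part of
 * this file): a controller driver typically looks the switch up once at
 * probe time and drops the reference when it is done with it. Note that
 * usb_role_switch_get() may return NULL, a valid handle, or an ERR_PTR
 * such as -EPROBE_DEFER.
 *
 *	sw = usb_role_switch_get(dev);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = usb_role_switch_set_role(sw, USB_ROLE_DEVICE);
 *	...
 *	usb_role_switch_put(sw);
 */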
/**
* usb_role_switch_find_by_fwnode - Find USB role switch with its fwnode
* @fwnode: fwnode of the USB Role Switch
*
* Finds and returns the role switch with @fwnode. The reference count for the
* found switch is incremented.
*/
struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
struct device *dev;
if (!fwnode)
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
if (dev)
WARN_ON(!try_module_get(dev->parent->driver->owner));
return dev ? to_role_switch(dev) : NULL;
}
EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_role_switch *sw = to_role_switch(dev);
if (sw->allow_userspace_control)
return attr->mode;
return 0;
}
static const char * const usb_roles[] = {
[USB_ROLE_NONE] = "none",
[USB_ROLE_HOST] = "host",
[USB_ROLE_DEVICE] = "device",
};
const char *usb_role_string(enum usb_role role)
{
if (role < 0 || role >= ARRAY_SIZE(usb_roles))
return "unknown";
return usb_roles[role];
}
EXPORT_SYMBOL_GPL(usb_role_string);
static ssize_t
role_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_role_switch *sw = to_role_switch(dev);
enum usb_role role = usb_role_switch_get_role(sw);
return sprintf(buf, "%s\n", usb_roles[role]);
}
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct usb_role_switch *sw = to_role_switch(dev);
int ret;
ret = sysfs_match_string(usb_roles, buf);
if (ret < 0) {
bool res;
/* Extra check if the user wants to disable the switch */
ret = kstrtobool(buf, &res);
if (ret || res)
return -EINVAL;
}
ret = usb_role_switch_set_role(sw, ret);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR_RW(role);
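/*
 * Userspace usage example (illustrative): when allow_userspace_control is
 * set, the role can be read and forced through sysfs, e.g.
 *
 *	cat /sys/class/usb_role/<switch>/role
 *	echo device > /sys/class/usb_role/<switch>/role
 *
 * Writing "none" (or a false boolean such as "0") selects USB_ROLE_NONE.
 */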
static struct attribute *usb_role_switch_attrs[] = {
&dev_attr_role.attr,
NULL,
};
static const struct attribute_group usb_role_switch_group = {
.is_visible = usb_role_switch_is_visible,
.attrs = usb_role_switch_attrs,
};
static const struct attribute_group *usb_role_switch_groups[] = {
&usb_role_switch_group,
NULL,
};
static int usb_role_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int ret;
ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
if (ret)
dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
return ret;
}
static void usb_role_switch_release(struct device *dev)
{
struct usb_role_switch *sw = to_role_switch(dev);
kfree(sw);
}
static const struct device_type usb_role_dev_type = {
.name = "usb_role_switch",
.groups = usb_role_switch_groups,
.uevent = usb_role_switch_uevent,
.release = usb_role_switch_release,
};
/**
* usb_role_switch_register - Register USB Role Switch
* @parent: Parent device for the switch
* @desc: Description of the switch
*
* A USB Role Switch is a device capable of choosing the role for a USB connector.
* On platforms where the USB controller is dual-role capable, the controller
* driver will need to register the switch. On platforms where the USB host and
* USB device controllers behind the connector are separate, there will be a
* mux, and the driver for that mux will need to register the switch.
*
* Returns a handle to a new role switch or an ERR_PTR. The content of @desc is
* copied.
*/
struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc)
{
struct usb_role_switch *sw;
int ret;
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
return ERR_PTR(-ENOMEM);
mutex_init(&sw->lock);
sw->allow_userspace_control = desc->allow_userspace_control;
sw->usb2_port = desc->usb2_port;
sw->usb3_port = desc->usb3_port;
sw->udc = desc->udc;
sw->set = desc->set;
sw->get = desc->get;
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
sw->dev.class = &role_class;
sw->dev.type = &usb_role_dev_type;
dev_set_drvdata(&sw->dev, desc->driver_data);
dev_set_name(&sw->dev, "%s-role-switch",
desc->name ? desc->name : dev_name(parent));
ret = device_register(&sw->dev);
if (ret) {
put_device(&sw->dev);
return ERR_PTR(ret);
}
/* TODO: Symlinks for the host port and the device controller. */
return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_register);
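/*
 * Illustrative provider-side sketch (hypothetical names: my_set_role,
 * my_get_role and priv are placeholders, not part of this file):
 *
 *	struct usb_role_switch_desc desc = { };
 *
 *	desc.set = my_set_role;			(required)
 *	desc.get = my_get_role;			(optional)
 *	desc.fwnode = dev_fwnode(dev);
 *	desc.driver_data = priv;
 *	priv->role_sw = usb_role_switch_register(dev, &desc);
 *	if (IS_ERR(priv->role_sw))
 *		return PTR_ERR(priv->role_sw);
 *
 * See intel_xhci_usb_probe() in the Intel xHCI role switch driver for a
 * concrete registration.
 */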
/**
* usb_role_switch_unregister - Unregister USB Role Switch
* @sw: USB Role Switch
*
* Unregister switch that was registered with usb_role_switch_register().
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw))
device_unregister(&sw->dev);
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
/**
* usb_role_switch_set_drvdata - Assign private data pointer to a switch
* @sw: USB Role Switch
* @data: Private data pointer
*/
void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data)
{
dev_set_drvdata(&sw->dev, data);
}
EXPORT_SYMBOL_GPL(usb_role_switch_set_drvdata);
/**
* usb_role_switch_get_drvdata - Get the private data pointer of a switch
* @sw: USB Role Switch
*/
void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
{
return dev_get_drvdata(&sw->dev);
}
EXPORT_SYMBOL_GPL(usb_role_switch_get_drvdata);
static int __init usb_roles_init(void)
{
return class_register(&role_class);
}
subsys_initcall(usb_roles_init);
static void __exit usb_roles_exit(void)
{
class_unregister(&role_class);
}
module_exit(usb_roles_exit);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("USB Role Class");
| linux-master | drivers/usb/roles/class.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Intel XHCI (Cherry Trail, Broxton and others) USB OTG role switch driver
*
* Copyright (c) 2016-2017 Hans de Goede <[email protected]>
*
* Loosely based on android x86 kernel code which is:
*
* Copyright (C) 2014 Intel Corp.
*
* Author: Wu, Hao
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/usb/role.h>
/* register definition */
#define DUAL_ROLE_CFG0 0x68
#define SW_VBUS_VALID BIT(24)
#define SW_IDPIN_EN BIT(21)
#define SW_IDPIN BIT(20)
#define SW_SWITCH_EN BIT(16)
#define DRD_CONFIG_DYNAMIC 0
#define DRD_CONFIG_STATIC_HOST 1
#define DRD_CONFIG_STATIC_DEVICE 2
#define DRD_CONFIG_MASK 3
#define DUAL_ROLE_CFG1 0x6c
#define HOST_MODE BIT(29)
#define DUAL_ROLE_CFG1_POLL_TIMEOUT 1000
#define DRV_NAME "intel_xhci_usb_sw"
struct intel_xhci_usb_data {
struct device *dev;
struct usb_role_switch *role_sw;
void __iomem *base;
bool enable_sw_switch;
};
static const struct software_node intel_xhci_usb_node = {
"intel-xhci-usb-sw",
};
static int intel_xhci_usb_set_role(struct usb_role_switch *sw,
enum usb_role role)
{
struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw);
unsigned long timeout;
acpi_status status;
u32 glk, val;
u32 drd_config = DRD_CONFIG_DYNAMIC;
/*
* On many CHT devices ACPI event (_AEI) handlers read / modify /
* write the cfg0 register, just like we do. Take the ACPI lock
* to avoid us racing with the AML code.
*/
status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
dev_err(data->dev, "Error could not acquire lock\n");
return -EIO;
}
pm_runtime_get_sync(data->dev);
/*
* Set idpin value as requested.
* Since some devices rely on firmware setting DRD_CONFIG and
* SW_SWITCH_EN bits to be zero for role switch,
* do not set these bits for those devices.
*/
val = readl(data->base + DUAL_ROLE_CFG0);
switch (role) {
case USB_ROLE_NONE:
val |= SW_IDPIN;
val &= ~SW_VBUS_VALID;
drd_config = DRD_CONFIG_DYNAMIC;
break;
case USB_ROLE_HOST:
val &= ~SW_IDPIN;
val &= ~SW_VBUS_VALID;
drd_config = DRD_CONFIG_STATIC_HOST;
break;
case USB_ROLE_DEVICE:
val |= SW_IDPIN;
val |= SW_VBUS_VALID;
drd_config = DRD_CONFIG_STATIC_DEVICE;
break;
}
val |= SW_IDPIN_EN;
if (data->enable_sw_switch) {
val &= ~DRD_CONFIG_MASK;
val |= SW_SWITCH_EN | drd_config;
}
writel(val, data->base + DUAL_ROLE_CFG0);
acpi_release_global_lock(glk);
/* In most cases it takes about 600ms to finish mode switching. */
timeout = jiffies + msecs_to_jiffies(DUAL_ROLE_CFG1_POLL_TIMEOUT);
/* Poll the CFG1 register to confirm the mode switch. */
do {
val = readl(data->base + DUAL_ROLE_CFG1);
if (!!(val & HOST_MODE) == (role == USB_ROLE_HOST)) {
pm_runtime_put(data->dev);
return 0;
}
/* Interval for polling is set to about 5 - 10 ms */
usleep_range(5000, 10000);
} while (time_before(jiffies, timeout));
pm_runtime_put(data->dev);
dev_warn(data->dev, "Timeout waiting for role-switch\n");
return -ETIMEDOUT;
}
static enum usb_role intel_xhci_usb_get_role(struct usb_role_switch *sw)
{
struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw);
enum usb_role role;
u32 val;
pm_runtime_get_sync(data->dev);
val = readl(data->base + DUAL_ROLE_CFG0);
pm_runtime_put(data->dev);
if (!(val & SW_IDPIN))
role = USB_ROLE_HOST;
else if (val & SW_VBUS_VALID)
role = USB_ROLE_DEVICE;
else
role = USB_ROLE_NONE;
return role;
}
static int intel_xhci_usb_probe(struct platform_device *pdev)
{
struct usb_role_switch_desc sw_desc = { };
struct device *dev = &pdev->dev;
struct intel_xhci_usb_data *data;
struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
data->base = devm_ioremap(dev, res->start, resource_size(res));
if (!data->base)
return -ENOMEM;
platform_set_drvdata(pdev, data);
ret = software_node_register(&intel_xhci_usb_node);
if (ret)
return ret;
sw_desc.set = intel_xhci_usb_set_role;
sw_desc.get = intel_xhci_usb_get_role;
sw_desc.allow_userspace_control = true;
sw_desc.fwnode = software_node_fwnode(&intel_xhci_usb_node);
sw_desc.driver_data = data;
data->dev = dev;
data->enable_sw_switch = !device_property_read_bool(dev,
"sw_switch_disable");
data->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(data->role_sw)) {
fwnode_handle_put(sw_desc.fwnode);
return PTR_ERR(data->role_sw);
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static void intel_xhci_usb_remove(struct platform_device *pdev)
{
struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
usb_role_switch_unregister(data->role_sw);
fwnode_handle_put(software_node_fwnode(&intel_xhci_usb_node));
}
static const struct platform_device_id intel_xhci_usb_table[] = {
{ .name = DRV_NAME },
{}
};
MODULE_DEVICE_TABLE(platform, intel_xhci_usb_table);
static struct platform_driver intel_xhci_usb_driver = {
.driver = {
.name = DRV_NAME,
},
.id_table = intel_xhci_usb_table,
.probe = intel_xhci_usb_probe,
.remove_new = intel_xhci_usb_remove,
};
module_platform_driver(intel_xhci_usb_driver);
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("Intel XHCI USB role switch driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/roles/intel-xhci-usb-role-switch.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <[email protected]>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "common.h"
#include "rcar2.h"
#include "rcar3.h"
#include "rza.h"
/*
* image of renesas_usbhs
*
* ex) gadget case
* mod.c
* mod_gadget.c
* mod_host.c pipe.c fifo.c
*
* +-------+ +-----------+
* | pipe0 |------>| fifo pio |
* +------------+ +-------+ +-----------+
* | mod_gadget |=====> | pipe1 |--+
* +------------+ +-------+ | +-----------+
* | pipe2 | | +-| fifo dma0 |
* +------------+ +-------+ | | +-----------+
* | mod_host | | pipe3 |<-|--+
* +------------+ +-------+ | +-----------+
* | .... | +--->| fifo dma1 |
* | .... | +-----------+
*/
/*
* platform call back
*
* The Renesas USB driver supports platform callback functions;
* the macro below calls them.
* If the platform doesn't provide a callback, it returns 0 (no error).
*/
#define usbhs_platform_call(priv, func, args...)\
(!(priv) ? -ENODEV : \
!((priv)->pfunc->func) ? 0 : \
(priv)->pfunc->func(args))
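/*
 * Illustrative expansion (hypothetical call site): with a platform that
 * provides a set_vbus() callback,
 *
 *	usbhs_platform_call(priv, set_vbus, pdev, enable);
 *
 * evaluates to priv->pfunc->set_vbus(pdev, enable); with no callback it
 * evaluates to 0, and with a NULL priv it evaluates to -ENODEV.
 */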
/*
* common functions
*/
u16 usbhs_read(struct usbhs_priv *priv, u32 reg)
{
return ioread16(priv->base + reg);
}
void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data)
{
iowrite16(data, priv->base + reg);
}
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data)
{
u16 val = usbhs_read(priv, reg);
val &= ~mask;
val |= data & mask;
usbhs_write(priv, reg, val);
}
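/*
 * Illustrative usage (SYSCFG/SCKE are real registers/bits used below; the
 * call site here is hypothetical): usbhs_bset() is a read-modify-write
 * helper, so only the bits selected by the mask are touched.
 *
 *	usbhs_bset(priv, SYSCFG, SCKE, SCKE);	set SCKE, keep other bits
 *	usbhs_bset(priv, SYSCFG, SCKE, 0);	clear SCKE, keep other bits
 */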
struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev)
{
return dev_get_drvdata(&pdev->dev);
}
int usbhs_get_id_as_gadget(struct platform_device *pdev)
{
return USBHS_GADGET;
}
/*
* syscfg functions
*/
static void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, SCKE, enable ? SCKE : 0);
}
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
u16 val = DCFM | DRPD | HSE | USBE;
/*
* if enable
*
* - select Host mode
* - D+ Line/D- Line Pull-down
*/
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
u16 val = HSE | USBE;
/* CNEN bit is required for function operation */
if (usbhs_get_dparam(priv, has_cnen)) {
mask |= CNEN;
val |= CNEN;
}
/*
* if enable
*
* - select Function mode
* - D+ Line Pull-up is disabled.
*   To enable the D+ Line Pull-up,
*   call usbhs_sys_function_pullup(priv, 1).
*/
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
void usbhs_sys_function_pullup(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, DPRPU, enable ? DPRPU : 0);
}
void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode)
{
usbhs_write(priv, TESTMODE, mode);
}
/*
* frame functions
*/
int usbhs_frame_get_num(struct usbhs_priv *priv)
{
return usbhs_read(priv, FRMNUM) & FRNM_MASK;
}
/*
* usb request functions
*/
void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
{
u16 val;
val = usbhs_read(priv, USBREQ);
req->bRequest = (val >> 8) & 0xFF;
req->bRequestType = (val >> 0) & 0xFF;
req->wValue = cpu_to_le16(usbhs_read(priv, USBVAL));
req->wIndex = cpu_to_le16(usbhs_read(priv, USBINDX));
req->wLength = cpu_to_le16(usbhs_read(priv, USBLENG));
}
void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
{
usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType);
usbhs_write(priv, USBVAL, le16_to_cpu(req->wValue));
usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex));
usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength));
usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
}
/*
* bus/vbus functions
*/
void usbhs_bus_send_sof_enable(struct usbhs_priv *priv)
{
u16 status = usbhs_read(priv, DVSTCTR) & (USBRST | UACT);
if (status != USBRST) {
struct device *dev = usbhs_priv_to_dev(priv);
dev_err(dev, "usbhs should be reset\n");
}
usbhs_bset(priv, DVSTCTR, (USBRST | UACT), UACT);
}
void usbhs_bus_send_reset(struct usbhs_priv *priv)
{
usbhs_bset(priv, DVSTCTR, (USBRST | UACT), USBRST);
}
int usbhs_bus_get_speed(struct usbhs_priv *priv)
{
u16 dvstctr = usbhs_read(priv, DVSTCTR);
switch (RHST & dvstctr) {
case RHST_LOW_SPEED:
return USB_SPEED_LOW;
case RHST_FULL_SPEED:
return USB_SPEED_FULL;
case RHST_HIGH_SPEED:
return USB_SPEED_HIGH;
}
return USB_SPEED_UNKNOWN;
}
int usbhs_vbus_ctrl(struct usbhs_priv *priv, int enable)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
return usbhs_platform_call(priv, set_vbus, pdev, enable);
}
static void usbhsc_bus_init(struct usbhs_priv *priv)
{
usbhs_write(priv, DVSTCTR, 0);
usbhs_vbus_ctrl(priv, 0);
}
/*
* device configuration
*/
int usbhs_set_device_config(struct usbhs_priv *priv, int devnum,
u16 upphub, u16 hubport, u16 speed)
{
struct device *dev = usbhs_priv_to_dev(priv);
u16 usbspd = 0;
u32 reg = DEVADD0 + (2 * devnum);
if (devnum > 10) {
dev_err(dev, "cannot set speed to unknown device %d\n", devnum);
return -EIO;
}
if (upphub > 0xA) {
dev_err(dev, "unsupported hub number %d\n", upphub);
return -EIO;
}
switch (speed) {
case USB_SPEED_LOW:
usbspd = USBSPD_SPEED_LOW;
break;
case USB_SPEED_FULL:
usbspd = USBSPD_SPEED_FULL;
break;
case USB_SPEED_HIGH:
usbspd = USBSPD_SPEED_HIGH;
break;
default:
dev_err(dev, "unsupported speed %d\n", speed);
return -EIO;
}
usbhs_write(priv, reg, UPPHUB(upphub) |
HUBPORT(hubport)|
USBSPD(usbspd));
return 0;
}
/*
* interrupt functions
*/
void usbhs_xxxsts_clear(struct usbhs_priv *priv, u16 sts_reg, u16 bit)
{
u16 pipe_mask = (u16)GENMASK(usbhs_get_dparam(priv, pipe_size), 0);
usbhs_write(priv, sts_reg, ~(1 << bit) & pipe_mask);
}
/*
* local functions
*/
static void usbhsc_set_buswait(struct usbhs_priv *priv)
{
int wait = usbhs_get_dparam(priv, buswait_bwait);
/* set bus wait if the platform has one */
if (wait)
usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
static bool usbhsc_is_multi_clks(struct usbhs_priv *priv)
{
return priv->dparam.multi_clks;
}
static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
{
if (!usbhsc_is_multi_clks(priv))
return 0;
/* The first clock should exist */
priv->clks[0] = of_clk_get(dev_of_node(dev), 0);
if (IS_ERR(priv->clks[0]))
return PTR_ERR(priv->clks[0]);
/*
* For backward compatibility with old DT, this driver checks whether the
* return value is -ENOENT.
*/
priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
if (PTR_ERR(priv->clks[1]) == -ENOENT)
priv->clks[1] = NULL;
else if (IS_ERR(priv->clks[1]))
return PTR_ERR(priv->clks[1]);
return 0;
}
static void usbhsc_clk_put(struct usbhs_priv *priv)
{
int i;
if (!usbhsc_is_multi_clks(priv))
return;
for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
clk_put(priv->clks[i]);
}
static int usbhsc_clk_prepare_enable(struct usbhs_priv *priv)
{
int i, ret;
if (!usbhsc_is_multi_clks(priv))
return 0;
for (i = 0; i < ARRAY_SIZE(priv->clks); i++) {
ret = clk_prepare_enable(priv->clks[i]);
if (ret) {
while (--i >= 0)
clk_disable_unprepare(priv->clks[i]);
return ret;
}
}
return 0;
}
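/*
* On failure, clocks that were already enabled are unwound in reverse
* order, so this function either enables every clock or none of them.
*/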
static void usbhsc_clk_disable_unprepare(struct usbhs_priv *priv)
{
int i;
if (!usbhsc_is_multi_clks(priv))
return;
for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
clk_disable_unprepare(priv->clks[i]);
}
/*
* platform default param
*/
/* commonly used on old SH-Mobile SoCs */
static struct renesas_usbhs_driver_pipe_config usbhsc_default_pipe[] = {
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_CONTROL, 64, 0x00, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x08, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x18, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x28, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x38, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x48, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x04, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x05, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x06, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x07, false),
};
/* commonly used on newer SH-Mobile and R-Car SoCs */
static struct renesas_usbhs_driver_pipe_config usbhsc_new_pipe[] = {
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_CONTROL, 64, 0x00, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x08, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC, 1024, 0x28, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x48, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x58, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x68, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x04, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x05, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT, 64, 0x06, false),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x78, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x88, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0x98, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xa8, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xb8, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xc8, true),
RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK, 512, 0xd8, true),
};
/*
* power control
*/
static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct device *dev = usbhs_priv_to_dev(priv);
if (enable) {
/* enable PM */
pm_runtime_get_sync(dev);
/* enable clks */
if (usbhsc_clk_prepare_enable(priv))
return;
/* enable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
} else {
/* USB off */
usbhs_sys_clock_ctrl(priv, enable);
/* disable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
/* disable clks */
usbhsc_clk_disable_unprepare(priv);
/* disable PM */
pm_runtime_put_sync(dev);
}
}
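/*
* Note that the enable and disable paths above are mirror images:
* runtime PM -> clocks -> platform power -> USB module clock on
* enable, and the exact reverse on disable.
*/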
/*
* hotplug
*/
static void usbhsc_hotplug(struct usbhs_priv *priv)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
int id;
int enable;
int cable;
int ret;
/*
* get vbus status from platform
*/
enable = usbhs_mod_info_call(priv, get_vbus, pdev);
/*
* get id from platform
*/
id = usbhs_platform_call(priv, get_id, pdev);
if (enable && !mod) {
if (priv->edev) {
cable = extcon_get_state(priv->edev, EXTCON_USB_HOST);
if ((cable > 0 && id != USBHS_HOST) ||
(!cable && id != USBHS_GADGET)) {
dev_info(&pdev->dev,
"USB cable plugged in doesn't match the selected role!\n");
return;
}
}
ret = usbhs_mod_change(priv, id);
if (ret < 0)
return;
dev_dbg(&pdev->dev, "%s enable\n", __func__);
/* power on */
if (usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, enable);
/* bus init */
usbhsc_set_buswait(priv);
usbhsc_bus_init(priv);
/* module start */
usbhs_mod_call(priv, start, priv);
} else if (!enable && mod) {
dev_dbg(&pdev->dev, "%s disable\n", __func__);
/* module stop */
usbhs_mod_call(priv, stop, priv);
/* bus init */
usbhsc_bus_init(priv);
/* power off */
if (usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, enable);
usbhs_mod_change(priv, -1);
/* reset phy for next connection */
usbhs_platform_call(priv, phy_reset, pdev);
}
}
/*
* notify hotplug
*/
static void usbhsc_notify_hotplug(struct work_struct *work)
{
struct usbhs_priv *priv = container_of(work,
struct usbhs_priv,
notify_hotplug_work.work);
usbhsc_hotplug(priv);
}
int usbhsc_schedule_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int delay = usbhs_get_dparam(priv, detection_delay);
/*
* This function may be called from interrupt context.
* To ensure a safe context,
* defer usbhs_notify_hotplug to a workqueue.
*/
schedule_delayed_work(&priv->notify_hotplug_work,
msecs_to_jiffies(delay));
return 0;
}
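/*
* Deferring by "detection_delay" ms likely also debounces short VBUS
* glitches around cable insertion, which would explain why the delay
* is a per-SoC driver parameter.
*/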
/*
* platform functions
*/
static const struct of_device_id usbhs_of_match[] = {
{
.compatible = "renesas,usbhs-r8a774c0",
.data = &usbhs_rcar_gen3_with_pll_plat_info,
},
{
.compatible = "renesas,usbhs-r8a7790",
.data = &usbhs_rcar_gen2_plat_info,
},
{
.compatible = "renesas,usbhs-r8a7791",
.data = &usbhs_rcar_gen2_plat_info,
},
{
.compatible = "renesas,usbhs-r8a7794",
.data = &usbhs_rcar_gen2_plat_info,
},
{
.compatible = "renesas,usbhs-r8a7795",
.data = &usbhs_rcar_gen3_plat_info,
},
{
.compatible = "renesas,usbhs-r8a7796",
.data = &usbhs_rcar_gen3_plat_info,
},
{
.compatible = "renesas,usbhs-r8a77990",
.data = &usbhs_rcar_gen3_with_pll_plat_info,
},
{
.compatible = "renesas,usbhs-r8a77995",
.data = &usbhs_rcar_gen3_with_pll_plat_info,
},
{
.compatible = "renesas,rcar-gen2-usbhs",
.data = &usbhs_rcar_gen2_plat_info,
},
{
.compatible = "renesas,rcar-gen3-usbhs",
.data = &usbhs_rcar_gen3_plat_info,
},
{
.compatible = "renesas,rza1-usbhs",
.data = &usbhs_rza1_plat_info,
},
{
.compatible = "renesas,rza2-usbhs",
.data = &usbhs_rza2_plat_info,
},
{ },
};
MODULE_DEVICE_TABLE(of, usbhs_of_match);
static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
struct device *dev = &pdev->dev;
struct gpio_desc *gpiod;
int ret;
u32 tmp;
int irq;
/* check device node */
if (dev_of_node(dev))
info = of_device_get_match_data(dev);
else
info = renesas_usbhs_get_info(pdev);
/* check platform information */
if (!info) {
dev_err(dev, "no platform information\n");
return -EINVAL;
}
/* platform data */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* usb private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
if (of_property_read_bool(dev_of_node(dev), "extcon")) {
priv->edev = extcon_get_edev_by_phandle(dev, 0);
if (IS_ERR(priv->edev))
return PTR_ERR(priv->edev);
}
priv->rsts = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(priv->rsts))
return PTR_ERR(priv->rsts);
/*
* apply platform info
*/
priv->dparam = info->driver_param;
if (!info->platform_callback.get_id) {
dev_err(dev, "no platform callbacks\n");
return -EINVAL;
}
priv->pfunc = &info->platform_callback;
/* set default params if the platform doesn't provide them */
if (usbhs_get_dparam(priv, has_new_pipe_configs)) {
priv->dparam.pipe_configs = usbhsc_new_pipe;
priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
} else if (!priv->dparam.pipe_configs) {
priv->dparam.pipe_configs = usbhsc_default_pipe;
priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_default_pipe);
}
if (!priv->dparam.pio_dma_border)
priv->dparam.pio_dma_border = 64; /* 64byte */
if (!of_property_read_u32(dev_of_node(dev), "renesas,buswait", &tmp))
priv->dparam.buswait_bwait = tmp;
gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
/* FIXME */
/* runtime power control ? */
if (priv->pfunc->get_vbus)
usbhs_get_dparam(priv, runtime_pwctrl) = 1;
/*
* priv settings
*/
priv->irq = irq;
priv->pdev = pdev;
INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
spin_lock_init(usbhs_priv_to_lock(priv));
/* call pipe and module init */
ret = usbhs_pipe_probe(priv);
if (ret < 0)
return ret;
ret = usbhs_fifo_probe(priv);
if (ret < 0)
goto probe_end_pipe_exit;
ret = usbhs_mod_probe(priv);
if (ret < 0)
goto probe_end_fifo_exit;
/* platform_set_drvdata() should be called after usbhs_mod_probe() */
platform_set_drvdata(pdev, priv);
ret = reset_control_deassert(priv->rsts);
if (ret)
goto probe_fail_rst;
ret = usbhsc_clk_get(dev, priv);
if (ret)
goto probe_fail_clks;
/*
* device reset here because
* the USB device might have been used by the boot loader.
*/
usbhs_sys_clock_ctrl(priv, 0);
/* check GPIO determining if USB function should be enabled */
if (gpiod) {
ret = !gpiod_get_value(gpiod);
if (ret) {
dev_warn(dev, "USB function not selected (GPIO)\n");
ret = -ENOTSUPP;
goto probe_end_mod_exit;
}
}
/*
* platform call
*
* USB phy setup might depend on CPU/Board.
* If platform has its callback functions,
* call it here.
*/
ret = usbhs_platform_call(priv, hardware_init, pdev);
if (ret < 0) {
dev_err(dev, "platform init failed.\n");
goto probe_end_mod_exit;
}
/* reset phy for connection */
usbhs_platform_call(priv, phy_reset, pdev);
/* power control */
pm_runtime_enable(dev);
if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
usbhsc_power_ctrl(priv, 1);
usbhs_mod_autonomy_mode(priv);
} else {
usbhs_mod_non_autonomy_mode(priv);
}
/*
* manual call notify_hotplug for cold plug
*/
usbhsc_schedule_notify_hotplug(pdev);
dev_info(dev, "probed\n");
return ret;
probe_end_mod_exit:
usbhsc_clk_put(priv);
probe_fail_clks:
reset_control_assert(priv->rsts);
probe_fail_rst:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
usbhs_fifo_remove(priv);
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
dev_info(dev, "probe failed (%d)\n", ret);
return ret;
}
static void usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
dev_dbg(&pdev->dev, "usb remove\n");
/* power off */
if (!usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
pm_runtime_disable(&pdev->dev);
usbhs_platform_call(priv, hardware_exit, pdev);
usbhsc_clk_put(priv);
reset_control_assert(priv->rsts);
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
}
static __maybe_unused int usbhsc_suspend(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
if (mod) {
usbhs_mod_call(priv, stop, priv);
usbhs_mod_change(priv, -1);
}
if (mod || !usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
return 0;
}
static __maybe_unused int usbhsc_resume(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
usbhsc_power_ctrl(priv, 1);
usbhs_mod_autonomy_mode(priv);
}
usbhs_platform_call(priv, phy_reset, pdev);
usbhsc_schedule_notify_hotplug(pdev);
return 0;
}
static SIMPLE_DEV_PM_OPS(usbhsc_pm_ops, usbhsc_suspend, usbhsc_resume);
static struct platform_driver renesas_usbhs_driver = {
.driver = {
.name = "renesas_usbhs",
.pm = &usbhsc_pm_ops,
.of_match_table = usbhs_of_match,
},
.probe = usbhs_probe,
.remove_new = usbhs_remove,
};
module_platform_driver(renesas_usbhs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas USB driver");
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
| linux-master | drivers/usb/renesas_usbhs/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB driver RZ/A2 initialization and power control
*
* Copyright (C) 2019 Chris Brandt
* Copyright (C) 2019 Renesas Electronics Corporation
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/phy/phy.h>
#include "common.h"
#include "rza.h"
static int usbhs_rza2_hardware_init(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
struct phy *phy = phy_get(&pdev->dev, "usb");
if (IS_ERR(phy))
return PTR_ERR(phy);
priv->phy = phy;
return 0;
}
static int usbhs_rza2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
return 0;
}
static int usbhs_rza2_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int retval = 0;
if (!priv->phy)
return -ENODEV;
if (enable) {
retval = phy_init(priv->phy);
usbhs_bset(priv, SUSPMODE, SUSPM, SUSPM);
udelay(100); /* Wait for PLL to become stable */
if (!retval)
retval = phy_power_on(priv->phy);
} else {
usbhs_bset(priv, SUSPMODE, SUSPM, 0);
phy_power_off(priv->phy);
phy_exit(priv->phy);
}
return retval;
}
const struct renesas_usbhs_platform_info usbhs_rza2_plat_info = {
.platform_callback = {
.hardware_init = usbhs_rza2_hardware_init,
.hardware_exit = usbhs_rza2_hardware_exit,
.power_ctrl = usbhs_rza2_power_ctrl,
.get_id = usbhs_get_id_as_gadget,
},
.driver_param = {
.has_cnen = 1,
.cfifo_byte_addr = 1,
.has_new_pipe_configs = 1,
},
};
| linux-master | drivers/usb/renesas_usbhs/rza2.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <[email protected]>
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "common.h"
#include "pipe.h"
/*
* macros
*/
#define usbhsp_addr_offset(p) ((usbhs_pipe_number(p) - 1) * 2)
#define usbhsp_flags_set(p, f) ((p)->flags |= USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_clr(p, f) ((p)->flags &= ~USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_has(p, f) ((p)->flags & USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_init(p) do {(p)->flags = 0; } while (0)
/*
* for debug
*/
static char *usbhsp_pipe_name[] = {
[USB_ENDPOINT_XFER_CONTROL] = "DCP",
[USB_ENDPOINT_XFER_BULK] = "BULK",
[USB_ENDPOINT_XFER_INT] = "INT",
[USB_ENDPOINT_XFER_ISOC] = "ISO",
};
char *usbhs_pipe_name(struct usbhs_pipe *pipe)
{
return usbhsp_pipe_name[usbhs_pipe_type(pipe)];
}
static struct renesas_usbhs_driver_pipe_config
*usbhsp_get_pipe_config(struct usbhs_priv *priv, int pipe_num)
{
struct renesas_usbhs_driver_pipe_config *pipe_configs =
usbhs_get_dparam(priv, pipe_configs);
return &pipe_configs[pipe_num];
}
/*
* DCPCTR/PIPEnCTR functions
*/
static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int offset = usbhsp_addr_offset(pipe);
if (usbhs_pipe_is_dcp(pipe))
usbhs_bset(priv, DCPCTR, mask, val);
else
usbhs_bset(priv, PIPEnCTR + offset, mask, val);
}
static u16 usbhsp_pipectrl_get(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int offset = usbhsp_addr_offset(pipe);
if (usbhs_pipe_is_dcp(pipe))
return usbhs_read(priv, DCPCTR);
else
return usbhs_read(priv, PIPEnCTR + offset);
}
/*
* DCP/PIPE functions
*/
static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
u16 dcp_reg, u16 pipe_reg,
u16 mask, u16 val)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
if (usbhs_pipe_is_dcp(pipe))
usbhs_bset(priv, dcp_reg, mask, val);
else
usbhs_bset(priv, pipe_reg, mask, val);
}
static u16 __usbhsp_pipe_xxx_get(struct usbhs_pipe *pipe,
u16 dcp_reg, u16 pipe_reg)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
if (usbhs_pipe_is_dcp(pipe))
return usbhs_read(priv, dcp_reg);
else
return usbhs_read(priv, pipe_reg);
}
/*
* DCPCFG/PIPECFG functions
*/
static void usbhsp_pipe_cfg_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
__usbhsp_pipe_xxx_set(pipe, DCPCFG, PIPECFG, mask, val);
}
static u16 usbhsp_pipe_cfg_get(struct usbhs_pipe *pipe)
{
return __usbhsp_pipe_xxx_get(pipe, DCPCFG, PIPECFG);
}
/*
* PIPEnTRN/PIPEnTRE functions
*/
static void usbhsp_pipe_trn_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int num = usbhs_pipe_number(pipe);
u16 reg;
/*
* The register address cannot be calculated,
* since the PIPEnTRN addresses are not mapped contiguously.
*/
#define CASE_PIPExTRN(a) \
case 0x ## a: \
reg = PIPE ## a ## TRN; \
break;
switch (num) {
CASE_PIPExTRN(1);
CASE_PIPExTRN(2);
CASE_PIPExTRN(3);
CASE_PIPExTRN(4);
CASE_PIPExTRN(5);
CASE_PIPExTRN(B);
CASE_PIPExTRN(C);
CASE_PIPExTRN(D);
CASE_PIPExTRN(E);
CASE_PIPExTRN(F);
CASE_PIPExTRN(9);
CASE_PIPExTRN(A);
default:
dev_err(dev, "unknown pipe (%d)\n", num);
return;
}
__usbhsp_pipe_xxx_set(pipe, 0, reg, mask, val);
}
static void usbhsp_pipe_tre_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int num = usbhs_pipe_number(pipe);
u16 reg;
/*
* The register address cannot be calculated,
* since the PIPEnTRE addresses are not mapped contiguously.
*/
#define CASE_PIPExTRE(a) \
case 0x ## a: \
reg = PIPE ## a ## TRE; \
break;
switch (num) {
CASE_PIPExTRE(1);
CASE_PIPExTRE(2);
CASE_PIPExTRE(3);
CASE_PIPExTRE(4);
CASE_PIPExTRE(5);
CASE_PIPExTRE(B);
CASE_PIPExTRE(C);
CASE_PIPExTRE(D);
CASE_PIPExTRE(E);
CASE_PIPExTRE(F);
CASE_PIPExTRE(9);
CASE_PIPExTRE(A);
default:
dev_err(dev, "unknown pipe (%d)\n", num);
return;
}
__usbhsp_pipe_xxx_set(pipe, 0, reg, mask, val);
}
/*
* PIPEBUF
*/
static void usbhsp_pipe_buf_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
if (usbhs_pipe_is_dcp(pipe))
return;
__usbhsp_pipe_xxx_set(pipe, 0, PIPEBUF, mask, val);
}
/*
* DCPMAXP/PIPEMAXP
*/
static void usbhsp_pipe_maxp_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
__usbhsp_pipe_xxx_set(pipe, DCPMAXP, PIPEMAXP, mask, val);
}
/*
* pipe control functions
*/
static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
/*
* Selecting a pipe is necessary before
* accessing the registers below.
*
* PIPESEL : usbhsp_pipe_select
* PIPECFG : usbhsp_pipe_cfg_xxx
* PIPEBUF : usbhsp_pipe_buf_xxx
* PIPEMAXP : usbhsp_pipe_maxp_xxx
* PIPEPERI
*/
/*
* if the pipe is the DCP, no pipe is selected (CURPIPE = 0).
* That is no problem, because the DCP has its own registers.
*/
usbhs_write(priv, PIPESEL, 0xF & usbhs_pipe_number(pipe));
}
static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int timeout = 1024;
u16 mask = usbhs_mod_is_host(priv) ? (CSSTS | PID_MASK) : PID_MASK;
/*
* make sure....
*
* Modify these bits when CSSTS = 0, PID = NAK, and no pipe number is
* specified by the CURPIPE bits.
* When changing the setting of this bit after changing
* the PID bits for the selected pipe from BUF to NAK,
* check that CSSTS = 0 and PBUSY = 0.
*/
/*
* CURPIPE bit = 0
*
* see also
* "Operation"
* - "Pipe Control"
* - "Pipe Control Registers Switching Procedure"
*/
usbhs_write(priv, CFIFOSEL, 0);
usbhs_pipe_disable(pipe);
do {
if (!(usbhsp_pipectrl_get(pipe) & mask))
return 0;
udelay(10);
} while (timeout--);
return -EBUSY;
}
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
{
u16 val;
val = usbhsp_pipectrl_get(pipe);
if (val & BSTS)
return 0;
return -EBUSY;
}
bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
{
u16 val;
/* Not supported for the DCP pipe */
if (usbhs_pipe_is_dcp(pipe))
return false;
val = usbhsp_pipectrl_get(pipe);
if (val & INBUFM)
return true;
return false;
}
/*
* PID ctrl
*/
static void __usbhsp_pid_try_nak_if_stall(struct usbhs_pipe *pipe)
{
u16 pid = usbhsp_pipectrl_get(pipe);
pid &= PID_MASK;
/*
* see
* "Pipe n Control Register" - "PID"
*/
switch (pid) {
case PID_STALL11:
usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL10);
fallthrough;
case PID_STALL10:
usbhsp_pipectrl_set(pipe, PID_MASK, PID_NAK);
}
}
void usbhs_pipe_disable(struct usbhs_pipe *pipe)
{
int timeout = 1024;
u16 val;
/* see "Pipe n Control Register" - "PID" */
__usbhsp_pid_try_nak_if_stall(pipe);
usbhsp_pipectrl_set(pipe, PID_MASK, PID_NAK);
do {
val = usbhsp_pipectrl_get(pipe);
val &= PBUSY;
if (!val)
break;
udelay(10);
} while (timeout--);
}
void usbhs_pipe_enable(struct usbhs_pipe *pipe)
{
/* see "Pipe n Control Register" - "PID" */
__usbhsp_pid_try_nak_if_stall(pipe);
usbhsp_pipectrl_set(pipe, PID_MASK, PID_BUF);
}
void usbhs_pipe_stall(struct usbhs_pipe *pipe)
{
u16 pid = usbhsp_pipectrl_get(pipe);
pid &= PID_MASK;
/*
* see
* "Pipe n Control Register" - "PID"
*/
switch (pid) {
case PID_NAK:
usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL10);
break;
case PID_BUF:
usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL11);
break;
}
}
int usbhs_pipe_is_stall(struct usbhs_pipe *pipe)
{
u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;
return (int)(pid == PID_STALL10 || pid == PID_STALL11);
}
void usbhs_pipe_set_trans_count_if_bulk(struct usbhs_pipe *pipe, int len)
{
if (!usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
return;
/*
* clear and disable transfer counter for IN/OUT pipe
*/
usbhsp_pipe_tre_set(pipe, TRCLR | TRENB, TRCLR);
/*
* Only an IN-direction bulk pipe can use the transfer counter.
* Without this function,
* received data will be corrupted when the data size is large.
* see PIPEnTRN/PIPEnTRE for details
*/
if (usbhs_pipe_is_dir_in(pipe)) {
int maxp = usbhs_pipe_get_maxpacket(pipe);
usbhsp_pipe_trn_set(pipe, 0xffff, DIV_ROUND_UP(len, maxp));
usbhsp_pipe_tre_set(pipe, TRENB, TRENB); /* enable */
}
}
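/*
* For example, with a 512-byte maxpacket bulk IN pipe and len = 1500,
* PIPEnTRN is set to DIV_ROUND_UP(1500, 512) = 3, so the counter
* stops reception after the third packet (see the PIPEnTRN/PIPEnTRE
* description referenced above).
*/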
/*
* pipe setup
*/
static int usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, int is_host,
int dir_in, u16 *pipecfg)
{
u16 type = 0;
u16 bfre = 0;
u16 dblb = 0;
u16 cntmd = 0;
u16 dir = 0;
u16 epnum = 0;
u16 shtnak = 0;
static const u16 type_array[] = {
[USB_ENDPOINT_XFER_BULK] = TYPE_BULK,
[USB_ENDPOINT_XFER_INT] = TYPE_INT,
[USB_ENDPOINT_XFER_ISOC] = TYPE_ISO,
};
if (usbhs_pipe_is_dcp(pipe))
return -EINVAL;
/*
* PIPECFG
*
* see
* - "Register Descriptions" - "PIPECFG" register
* - "Features" - "Pipe configuration"
* - "Operation" - "Pipe Control"
*/
/* TYPE */
type = type_array[usbhs_pipe_type(pipe)];
/* BFRE */
if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
bfre = 0; /* FIXME */
/* DBLB: see usbhs_pipe_config_update() */
/* CNTMD */
if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
cntmd = 0; /* FIXME */
/* DIR */
if (dir_in)
usbhsp_flags_set(pipe, IS_DIR_HOST);
if (!!is_host ^ !!dir_in)
dir |= DIR_OUT;
if (!dir)
usbhsp_flags_set(pipe, IS_DIR_IN);
/* SHTNAK */
if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
!dir)
shtnak = SHTNAK;
/* EPNUM */
epnum = 0; /* see usbhs_pipe_config_update() */
*pipecfg = type |
bfre |
dblb |
cntmd |
dir |
shtnak |
epnum;
return 0;
}
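/*
* Note: DIR_OUT means the controller transmits. Host-mode OUT
* transfers and gadget-mode IN endpoints both transmit, hence the
* "!!is_host ^ !!dir_in" test above.
*/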
static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int pipe_num = usbhs_pipe_number(pipe);
u16 buff_size;
u16 bufnmb;
u16 bufnmb_cnt;
struct renesas_usbhs_driver_pipe_config *pipe_config =
usbhsp_get_pipe_config(priv, pipe_num);
/*
* PIPEBUF
*
* see
* - "Register Descriptions" - "PIPEBUF" register
* - "Features" - "Pipe configuration"
* - "Operation" - "FIFO Buffer Memory"
* - "Operation" - "Pipe Control"
*/
buff_size = pipe_config->bufsize;
bufnmb = pipe_config->bufnum;
/* change buff_size to register value */
bufnmb_cnt = (buff_size / 64) - 1;
dev_dbg(dev, "pipe : %d : buff_size 0x%x: bufnmb 0x%x\n",
pipe_num, buff_size, bufnmb);
return (0x1f & bufnmb_cnt) << 10 |
(0xff & bufnmb) << 0;
}
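/*
* For example, a bulk pipe with bufsize 512 and bufnum 0x28 gives
* bufnmb_cnt = 512 / 64 - 1 = 7, so this returns
* (7 << 10) | 0x28 = 0x1c28.
*/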
void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
u16 epnum, u16 maxp)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int pipe_num = usbhs_pipe_number(pipe);
struct renesas_usbhs_driver_pipe_config *pipe_config =
usbhsp_get_pipe_config(priv, pipe_num);
u16 dblb = pipe_config->double_buf ? DBLB : 0;
if (devsel > 0xA) {
struct device *dev = usbhs_priv_to_dev(priv);
dev_err(dev, "devsel error %d\n", devsel);
devsel = 0;
}
usbhsp_pipe_barrier(pipe);
pipe->maxp = maxp;
usbhsp_pipe_select(pipe);
usbhsp_pipe_maxp_set(pipe, 0xFFFF,
(devsel << 12) |
maxp);
if (!usbhs_pipe_is_dcp(pipe))
usbhsp_pipe_cfg_set(pipe, 0x000F | DBLB, epnum | dblb);
}
/*
* pipe control
*/
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe)
{
/*
* see
* usbhs_pipe_config_update()
* usbhs_dcp_malloc()
*/
return pipe->maxp;
}
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe)
{
return usbhsp_flags_has(pipe, IS_DIR_IN);
}
int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
{
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
{
return usbhsp_flags_has(pipe, IS_RUNNING);
}
void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
{
if (running)
usbhsp_flags_set(pipe, IS_RUNNING);
else
usbhsp_flags_clr(pipe, IS_RUNNING);
}
void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
u16 mask = (SQCLR | SQSET);
u16 val;
/*
* sequence
* 0 : data0
* 1 : data1
* -1 : no change
*/
switch (sequence) {
case 0:
val = SQCLR;
break;
case 1:
val = SQSET;
break;
default:
return;
}
usbhsp_pipectrl_set(pipe, mask, val);
}
static int usbhs_pipe_get_data_sequence(struct usbhs_pipe *pipe)
{
return !!(usbhsp_pipectrl_get(pipe) & SQMON);
}
void usbhs_pipe_clear(struct usbhs_pipe *pipe)
{
if (usbhs_pipe_is_dcp(pipe)) {
usbhs_fifo_clear_dcp(pipe);
} else {
usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
usbhsp_pipectrl_set(pipe, ACLRM, 0);
}
}
/* Should call usbhsp_pipe_select() before */
void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
int needs_bfre, int bfre_enable)
{
int sequence;
usbhsp_pipe_select(pipe);
sequence = usbhs_pipe_get_data_sequence(pipe);
if (needs_bfre)
usbhsp_pipe_cfg_set(pipe, BFRE, bfre_enable ? BFRE : 0);
usbhs_pipe_clear(pipe);
usbhs_pipe_data_sequence(pipe, sequence);
}
void usbhs_pipe_config_change_bfre(struct usbhs_pipe *pipe, int enable)
{
if (usbhs_pipe_is_dcp(pipe))
return;
usbhsp_pipe_select(pipe);
/* check if the driver needs to change the BFRE value */
if (!(enable ^ !!(usbhsp_pipe_cfg_get(pipe) & BFRE)))
return;
usbhs_pipe_clear_without_sequence(pipe, 1, enable);
}
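/*
* The XOR check above returns early when the requested BFRE state
* already matches the hardware, skipping the clear-without-sequence
* dance in that case.
*/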
static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
{
struct usbhs_pipe *pos, *pipe;
int i;
/*
* find target pipe
*/
pipe = NULL;
usbhs_for_each_pipe_with_dcp(pos, priv, i) {
if (!usbhs_pipe_type_is(pos, type))
continue;
if (usbhsp_flags_has(pos, IS_USED))
continue;
pipe = pos;
break;
}
if (!pipe)
return NULL;
/*
* initialize pipe flags
*/
usbhsp_flags_init(pipe);
usbhsp_flags_set(pipe, IS_USED);
return pipe;
}
static void usbhsp_put_pipe(struct usbhs_pipe *pipe)
{
usbhsp_flags_init(pipe);
}
void usbhs_pipe_init(struct usbhs_priv *priv,
int (*dma_map_ctrl)(struct device *dma_dev,
struct usbhs_pkt *pkt, int map))
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_pipe *pipe;
int i;
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
usbhsp_flags_init(pipe);
pipe->fifo = NULL;
pipe->mod_private = NULL;
INIT_LIST_HEAD(&pipe->list);
/* pipe force init */
usbhs_pipe_clear(pipe);
}
info->dma_map_ctrl = dma_map_ctrl;
}
struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
int endpoint_type,
int dir_in)
{
struct device *dev = usbhs_priv_to_dev(priv);
struct usbhs_pipe *pipe;
int is_host = usbhs_mod_is_host(priv);
int ret;
u16 pipecfg, pipebuf;
pipe = usbhsp_get_pipe(priv, endpoint_type);
if (!pipe) {
dev_err(dev, "can't get pipe (%s)\n",
usbhsp_pipe_name[endpoint_type]);
return NULL;
}
INIT_LIST_HEAD(&pipe->list);
usbhs_pipe_disable(pipe);
/* make sure pipe is not busy */
ret = usbhsp_pipe_barrier(pipe);
if (ret < 0) {
dev_err(dev, "pipe setup failed %d\n", usbhs_pipe_number(pipe));
return NULL;
}
if (usbhsp_setup_pipecfg(pipe, is_host, dir_in, &pipecfg)) {
dev_err(dev, "can't setup pipe\n");
return NULL;
}
pipebuf = usbhsp_setup_pipebuff(pipe);
usbhsp_pipe_select(pipe);
usbhsp_pipe_cfg_set(pipe, 0xFFFF, pipecfg);
usbhsp_pipe_buf_set(pipe, 0xFFFF, pipebuf);
usbhs_pipe_clear(pipe);
usbhs_pipe_sequence_data0(pipe);
dev_dbg(dev, "enable pipe %d : %s (%s)\n",
usbhs_pipe_number(pipe),
usbhs_pipe_name(pipe),
usbhs_pipe_is_dir_in(pipe) ? "in" : "out");
/*
* epnum / maxp are not yet set for this pipe.
* call usbhs_pipe_config_update() after this function !!
*/
return pipe;
}
void usbhs_pipe_free(struct usbhs_pipe *pipe)
{
usbhsp_pipe_select(pipe);
usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0);
usbhsp_put_pipe(pipe);
}
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo)
{
if (pipe->fifo)
pipe->fifo->pipe = NULL;
pipe->fifo = fifo;
if (fifo)
fifo->pipe = pipe;
}
/*
* dcp control
*/
struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv)
{
struct usbhs_pipe *pipe;
pipe = usbhsp_get_pipe(priv, USB_ENDPOINT_XFER_CONTROL);
if (!pipe)
return NULL;
INIT_LIST_HEAD(&pipe->list);
/*
* call usbhs_pipe_config_update() after this function !!
*/
return pipe;
}
void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
WARN_ON(!usbhs_pipe_is_dcp(pipe));
usbhs_pipe_enable(pipe);
if (!usbhs_mod_is_host(priv)) /* function mode only */
usbhsp_pipectrl_set(pipe, CCPL, CCPL);
}
void usbhs_dcp_dir_for_host(struct usbhs_pipe *pipe, int dir_out)
{
usbhsp_pipe_cfg_set(pipe, DIR_OUT,
dir_out ? DIR_OUT : 0);
}
/*
* pipe module function
*/
int usbhs_pipe_probe(struct usbhs_priv *priv)
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_pipe *pipe;
struct device *dev = usbhs_priv_to_dev(priv);
struct renesas_usbhs_driver_pipe_config *pipe_configs =
usbhs_get_dparam(priv, pipe_configs);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
/* This driver expects the 1st pipe to be the DCP */
if (pipe_configs[0].type != USB_ENDPOINT_XFER_CONTROL) {
dev_err(dev, "1st PIPE is not DCP\n");
return -EINVAL;
}
info->pipe = kcalloc(pipe_size, sizeof(struct usbhs_pipe),
GFP_KERNEL);
if (!info->pipe)
return -ENOMEM;
info->size = pipe_size;
/*
* init pipe
*/
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
pipe->priv = priv;
usbhs_pipe_type(pipe) =
pipe_configs[i].type & USB_ENDPOINT_XFERTYPE_MASK;
dev_dbg(dev, "pipe %x\t: %s\n",
i, usbhsp_pipe_name[pipe_configs[i].type]);
}
return 0;
}
void usbhs_pipe_remove(struct usbhs_priv *priv)
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
kfree(info->pipe);
}
| linux-master | drivers/usb/renesas_usbhs/pipe.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <[email protected]>
*/
#include <linux/interrupt.h>
#include "common.h"
#include "mod.h"
/*
* autonomy
*
* these functions are used if the platform doesn't have an external phy.
* -> there is no "notify_hotplug" callback from the platform
* -> "notify_hotplug" is called by the driver itself
* -> its own interrupt is used for connect/disconnect
* -> this means the module clock is always ON
* ~~~~~~~~~~~~~~~~~~~~~~~~~
*/
static int usbhsm_autonomy_get_vbus(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
return VBSTS & usbhs_read(priv, INTSTS0);
}
static int usbhsm_autonomy_irq_vbus(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
usbhsc_schedule_notify_hotplug(pdev);
return 0;
}
void usbhs_mod_autonomy_mode(struct usbhs_priv *priv)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
info->irq_vbus = usbhsm_autonomy_irq_vbus;
info->get_vbus = usbhsm_autonomy_get_vbus;
usbhs_irq_callback_update(priv, NULL);
}
void usbhs_mod_non_autonomy_mode(struct usbhs_priv *priv)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
info->get_vbus = priv->pfunc->get_vbus;
}
/*
* host / gadget functions
*
* the renesas_usbhs host/gadget modules register themselves via the
* functions below. These functions are called at probe time.
*
*/
void usbhs_mod_register(struct usbhs_priv *priv, struct usbhs_mod *mod, int id)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
info->mod[id] = mod;
mod->priv = priv;
}
struct usbhs_mod *usbhs_mod_get(struct usbhs_priv *priv, int id)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
struct usbhs_mod *ret = NULL;
switch (id) {
case USBHS_HOST:
case USBHS_GADGET:
ret = info->mod[id];
break;
}
return ret;
}
int usbhs_mod_is_host(struct usbhs_priv *priv)
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
if (!mod)
return -EINVAL;
return info->mod[USBHS_HOST] == mod;
}
struct usbhs_mod *usbhs_mod_get_current(struct usbhs_priv *priv)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
return info->curt;
}
int usbhs_mod_change(struct usbhs_priv *priv, int id)
{
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
struct usbhs_mod *mod = NULL;
int ret = 0;
/* id < 0 means no current mode */
switch (id) {
case USBHS_HOST:
case USBHS_GADGET:
mod = info->mod[id];
break;
default:
ret = -EINVAL;
}
info->curt = mod;
return ret;
}
static irqreturn_t usbhs_interrupt(int irq, void *data);
int usbhs_mod_probe(struct usbhs_priv *priv)
{
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
/*
* install host/gadget driver
*/
ret = usbhs_mod_host_probe(priv);
if (ret < 0)
return ret;
ret = usbhs_mod_gadget_probe(priv);
if (ret < 0)
goto mod_init_host_err;
/* irq settings */
ret = devm_request_irq(dev, priv->irq, usbhs_interrupt,
0, dev_name(dev), priv);
if (ret) {
dev_err(dev, "irq request err\n");
goto mod_init_gadget_err;
}
return ret;
mod_init_gadget_err:
usbhs_mod_gadget_remove(priv);
mod_init_host_err:
usbhs_mod_host_remove(priv);
return ret;
}
void usbhs_mod_remove(struct usbhs_priv *priv)
{
usbhs_mod_host_remove(priv);
usbhs_mod_gadget_remove(priv);
}
/*
* status functions
*/
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
return (int)irq_state->intsts0 & DVSQ_MASK;
}
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state)
{
/*
* return value
*
* IDLE_SETUP_STAGE
* READ_DATA_STAGE
* READ_STATUS_STAGE
* WRITE_DATA_STAGE
* WRITE_STATUS_STAGE
* NODATA_STATUS_STAGE
* SEQUENCE_ERROR
*/
return (int)irq_state->intsts0 & CTSQ_MASK;
}
static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
struct usbhs_irq_state *state)
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
u16 intenb0, intenb1;
unsigned long flags;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
state->intsts0 = usbhs_read(priv, INTSTS0);
intenb0 = usbhs_read(priv, INTENB0);
if (usbhs_mod_is_host(priv)) {
state->intsts1 = usbhs_read(priv, INTSTS1);
intenb1 = usbhs_read(priv, INTENB1);
} else {
state->intsts1 = intenb1 = 0;
}
/* mask */
if (mod) {
state->brdysts = usbhs_read(priv, BRDYSTS);
state->nrdysts = usbhs_read(priv, NRDYSTS);
state->bempsts = usbhs_read(priv, BEMPSTS);
state->bempsts &= mod->irq_bempsts;
state->brdysts &= mod->irq_brdysts;
}
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
return 0;
}
/*
* interrupt
*/
#define INTSTS0_MAGIC 0xF800 /* acknowledge magical interrupt sources */
#define INTSTS1_MAGIC 0xA870 /* acknowledge magical interrupt sources */
static irqreturn_t usbhs_interrupt(int irq, void *data)
{
struct usbhs_priv *priv = data;
struct usbhs_irq_state irq_state;
if (usbhs_status_get_each_irq(priv, &irq_state) < 0)
return IRQ_NONE;
/*
* clear interrupt
*
* The hardware is _very_ picky about clearing interrupt bits.
* Especially INTSTS0_MAGIC, INTSTS1_MAGIC value.
*
* see
* "Operation"
* - "Control Transfer (DCP)"
* - Function :: VALID bit should be 0
*/
usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC);
if (usbhs_mod_is_host(priv))
usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
/*
* The driver must not clear the xxxSTS registers after the
* "call irq callback functions" section below, because each "if"
* statement there may invoke a callback; clearing here first avoids
* any side effects.
*/
if (irq_state.intsts0 & BRDY)
usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
if (irq_state.intsts0 & BEMP)
usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
/*
* call irq callback functions
* see also
* usbhs_irq_setting_update
*/
/* INTSTS0 */
if (irq_state.intsts0 & VBINT)
usbhs_mod_info_call(priv, irq_vbus, priv, &irq_state);
if (irq_state.intsts0 & DVST)
usbhs_mod_call(priv, irq_dev_state, priv, &irq_state);
if (irq_state.intsts0 & CTRT)
usbhs_mod_call(priv, irq_ctrl_stage, priv, &irq_state);
if (irq_state.intsts0 & BEMP)
usbhs_mod_call(priv, irq_empty, priv, &irq_state);
if (irq_state.intsts0 & BRDY)
usbhs_mod_call(priv, irq_ready, priv, &irq_state);
if (usbhs_mod_is_host(priv)) {
/* INTSTS1 */
if (irq_state.intsts1 & ATTCH)
usbhs_mod_call(priv, irq_attch, priv, &irq_state);
if (irq_state.intsts1 & DTCH)
usbhs_mod_call(priv, irq_dtch, priv, &irq_state);
if (irq_state.intsts1 & SIGN)
usbhs_mod_call(priv, irq_sign, priv, &irq_state);
if (irq_state.intsts1 & SACK)
usbhs_mod_call(priv, irq_sack, priv, &irq_state);
}
return IRQ_HANDLED;
}
void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
{
u16 intenb0 = 0;
u16 intenb1 = 0;
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
/*
* BEMPENB/BRDYENB are picky.
* the sequence below is required:
*
* - clear INTENB0
* - update BEMPENB/BRDYENB
* - update INTENB0
*/
usbhs_write(priv, INTENB0, 0);
if (usbhs_mod_is_host(priv))
usbhs_write(priv, INTENB1, 0);
usbhs_write(priv, BEMPENB, 0);
usbhs_write(priv, BRDYENB, 0);
/*
* see also
* usbhs_interrupt
*/
if (info->irq_vbus)
intenb0 |= VBSE;
if (mod) {
/*
* INTSTS0
*/
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
if (mod->irq_dev_state)
intenb0 |= DVSE;
if (mod->irq_empty && mod->irq_bempsts) {
usbhs_write(priv, BEMPENB, mod->irq_bempsts);
intenb0 |= BEMPE;
}
if (mod->irq_ready && mod->irq_brdysts) {
usbhs_write(priv, BRDYENB, mod->irq_brdysts);
intenb0 |= BRDYE;
}
if (usbhs_mod_is_host(priv)) {
/*
* INTSTS1
*/
if (mod->irq_attch)
intenb1 |= ATTCHE;
if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
intenb1 |= SIGNE;
if (mod->irq_sack)
intenb1 |= SACKE;
}
}
if (intenb0)
usbhs_write(priv, INTENB0, intenb0);
if (usbhs_mod_is_host(priv) && intenb1)
usbhs_write(priv, INTENB1, intenb1);
}
| linux-master | drivers/usb/renesas_usbhs/mod.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver R-Car Gen. 2 initialization and power control
*
* Copyright (C) 2014 Ulrich Hecht
* Copyright (C) 2019 Renesas Electronics Corporation
*/
#include <linux/phy/phy.h>
#include "common.h"
#include "rcar2.h"
static int usbhs_rcar2_hardware_init(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
if (IS_ENABLED(CONFIG_GENERIC_PHY)) {
struct phy *phy = phy_get(&pdev->dev, "usb");
if (IS_ERR(phy))
return PTR_ERR(phy);
priv->phy = phy;
return 0;
}
return -ENXIO;
}
static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
if (priv->phy) {
phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
}
return 0;
}
static int usbhs_rcar2_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int retval = -ENODEV;
if (priv->phy) {
if (enable) {
retval = phy_init(priv->phy);
if (!retval)
retval = phy_power_on(priv->phy);
} else {
phy_power_off(priv->phy);
phy_exit(priv->phy);
retval = 0;
}
}
return retval;
}
const struct renesas_usbhs_platform_info usbhs_rcar_gen2_plat_info = {
.platform_callback = {
.hardware_init = usbhs_rcar2_hardware_init,
.hardware_exit = usbhs_rcar2_hardware_exit,
.power_ctrl = usbhs_rcar2_power_ctrl,
.get_id = usbhs_get_id_as_gadget,
},
.driver_param = {
.has_usb_dmac = 1,
.has_new_pipe_configs = 1,
},
};
| linux-master | drivers/usb/renesas_usbhs/rcar2.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <[email protected]>
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"
#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
/*
* packet initialize
*/
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
INIT_LIST_HEAD(&pkt->node);
}
/*
* packet control function
*/
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
struct device *dev = usbhs_priv_to_dev(priv);
dev_err(dev, "null handler\n");
return -EINVAL;
}
static const struct usbhs_pkt_handle usbhsf_null_handler = {
.prepare = usbhsf_null_handle,
.try_run = usbhsf_null_handle,
};
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
void *buf, int len, int zero, int sequence)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
if (!done) {
dev_err(dev, "no done function\n");
return;
}
/******************** spin lock ********************/
usbhs_lock(priv, flags);
if (!pipe->handler) {
dev_err(dev, "no handler function\n");
pipe->handler = &usbhsf_null_handler;
}
list_move_tail(&pkt->node, &pipe->list);
/*
* each pkt must hold its own handler,
* because the handler may change with the situation:
* dma handler -> pio handler.
*/
pkt->pipe = pipe;
pkt->buf = buf;
pkt->handler = pipe->handler;
pkt->length = len;
pkt->zero = zero;
pkt->actual = 0;
pkt->done = done;
pkt->sequence = sequence;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
}
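/*
* Note: list_move_tail() (rather than list_add_tail()) first unlinks
* the node, so re-pushing a packet that is already queued is safe;
* usbhs_pkt_init() makes the node self-linked, so fresh packets are
* safe as well.
*/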
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
list_del_init(&pkt->node);
}
struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
struct usbhs_pkt *pkt);
#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
unsigned long flags;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
usbhs_pipe_disable(pipe);
if (!pkt)
pkt = __usbhsf_pkt_get(pipe);
if (pkt) {
struct dma_chan *chan = NULL;
if (fifo)
chan = usbhsf_dma_chan_get(fifo, pkt);
if (chan) {
dmaengine_terminate_all(chan);
usbhsf_dma_unmap(pkt);
} else {
if (usbhs_pipe_is_dir_in(pipe))
usbhsf_rx_irq_ctrl(pipe, 0);
else
usbhsf_tx_irq_ctrl(pipe, 0);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
if (fifo)
usbhsf_fifo_unselect(pipe, fifo);
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
return pkt;
}
enum {
USBHSF_PKT_PREPARE,
USBHSF_PKT_TRY_RUN,
USBHSF_PKT_DMA_DONE,
};
static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_pkt *pkt;
struct device *dev = usbhs_priv_to_dev(priv);
int (*func)(struct usbhs_pkt *pkt, int *is_done);
unsigned long flags;
int ret = 0;
int is_done = 0;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
pkt = __usbhsf_pkt_get(pipe);
if (!pkt) {
ret = -EINVAL;
goto __usbhs_pkt_handler_end;
}
switch (type) {
case USBHSF_PKT_PREPARE:
func = pkt->handler->prepare;
break;
case USBHSF_PKT_TRY_RUN:
func = pkt->handler->try_run;
break;
case USBHSF_PKT_DMA_DONE:
func = pkt->handler->dma_done;
break;
default:
dev_err(dev, "unknown pkt handler\n");
goto __usbhs_pkt_handler_end;
}
if (likely(func))
ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
__usbhs_pkt_handler_end:
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
if (is_done) {
pkt->done(priv, pkt);
usbhs_pkt_start(pipe);
}
return ret;
}
void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
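/*
* usbhs_pkt_start() kicks the prepare handler for the head-of-queue
* packet; usbhsf_pkt_handler() calls it again from the done path, so
* queued packets are chained automatically.
*/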
/*
* irq enable/disable function
*/
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
({ \
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
u16 status = (1 << usbhs_pipe_number(pipe)); \
if (!mod) \
return; \
if (enable) \
mod->status |= status; \
else \
mod->status &= ~status; \
usbhs_irq_callback_update(priv, mod); \
})
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
/*
* The DCP pipe can NOT use the "ready" interrupt for "send";
* it should use the "empty" interrupt.
* see
* "Operation" - "Interrupt Function" - "BRDY Interrupt"
*
* On the other hand, a normal pipe can use the "ready" interrupt for
* "send" whether it is single or double buffered.
*/
if (usbhs_pipe_is_dcp(pipe))
usbhsf_irq_empty_ctrl(pipe, enable);
else
usbhsf_irq_ready_ctrl(pipe, enable);
}
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
usbhsf_irq_ready_ctrl(pipe, enable);
}
/*
* FIFO ctrl
*/
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}
static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
struct usbhs_fifo *fifo)
{
/* The FIFO port is accessible */
if (usbhs_read(priv, fifo->ctr) & FRDY)
return 0;
return -EBUSY;
}
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int ret = 0;
if (!usbhs_pipe_is_dcp(pipe)) {
/*
* This driver checks the pipe condition first to avoid -EBUSY
* from usbhsf_fifo_barrier() if the pipe is RX direction and
* empty.
*/
if (usbhs_pipe_is_dir_in(pipe))
ret = usbhs_pipe_is_accessible(pipe);
if (!ret)
ret = usbhsf_fifo_barrier(priv, fifo);
}
/*
* for a non-DCP pipe, this driver should set BCLR when
* usbhsf_fifo_barrier() returns 0.
*/
if (!ret)
usbhs_write(priv, fifo->ctr, BCLR);
}
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
struct usbhs_fifo *fifo)
{
return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
usbhs_pipe_select_fifo(pipe, NULL);
usbhs_write(priv, fifo->sel, 0);
}
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo,
int write)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int timeout = 1024;
u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
if (usbhs_pipe_is_busy(pipe) ||
usbhsf_fifo_is_busy(fifo))
return -EBUSY;
if (usbhs_pipe_is_dcp(pipe)) {
base |= (1 == write) << 5; /* ISEL */
if (usbhs_mod_is_host(priv))
usbhs_dcp_dir_for_host(pipe, write);
}
/* "base" will be used below */
usbhs_write(priv, fifo->sel, base | MBW_32);
/* check ISEL and CURPIPE value */
while (timeout--) {
if (base == (mask & usbhs_read(priv, fifo->sel))) {
usbhs_pipe_select_fifo(pipe, fifo);
return 0;
}
udelay(10);
}
dev_err(dev, "fifo select error\n");
return -EIO;
}
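/*
* The polling loop above is needed because CURPIPE/ISEL do not
* necessarily read back the written value immediately; the controller
* evidently takes some time to switch the FIFO over.
*/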
/*
* DCP status stage
*/
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
usbhs_pipe_disable(pipe);
ret = usbhsf_fifo_select(pipe, fifo, 1);
if (ret < 0) {
dev_err(dev, "%s() failed\n", __func__);
return ret;
}
usbhs_pipe_sequence_data1(pipe); /* DATA1 */
usbhsf_fifo_clear(pipe, fifo);
usbhsf_send_terminator(pipe, fifo);
usbhsf_fifo_unselect(pipe, fifo);
usbhsf_tx_irq_ctrl(pipe, 1);
usbhs_pipe_enable(pipe);
return ret;
}
static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
usbhs_pipe_disable(pipe);
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0) {
dev_err(dev, "%s() fail\n", __func__);
return ret;
}
usbhs_pipe_sequence_data1(pipe); /* DATA1 */
usbhsf_fifo_clear(pipe, fifo);
usbhsf_fifo_unselect(pipe, fifo);
usbhsf_rx_irq_ctrl(pipe, 1);
usbhs_pipe_enable(pipe);
return ret;
}
static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
usbhsf_tx_irq_ctrl(pipe, 0);
else
usbhsf_rx_irq_ctrl(pipe, 0);
pkt->actual = pkt->length;
*is_done = 1;
return 0;
}
const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
.prepare = usbhs_dcp_dir_switch_to_write,
.try_run = usbhs_dcp_dir_switch_done,
};
const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
.prepare = usbhs_dcp_dir_switch_to_read,
.try_run = usbhs_dcp_dir_switch_done,
};
/*
* DCP data stage (push)
*/
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
usbhs_pipe_sequence_data1(pipe); /* DATA1 */
/*
* change handler to PIO push
*/
pkt->handler = &usbhs_fifo_pio_push_handler;
return pkt->handler->prepare(pkt, is_done);
}
const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
.prepare = usbhsf_dcp_data_stage_try_push,
};
/*
* DCP data stage (pop)
*/
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
if (usbhs_pipe_is_busy(pipe))
return 0;
/*
* preparing a pop for the DCP requires:
* - change DCP direction,
* - clear fifo
* - DATA1
*/
usbhs_pipe_disable(pipe);
usbhs_pipe_sequence_data1(pipe); /* DATA1 */
usbhsf_fifo_select(pipe, fifo, 0);
usbhsf_fifo_clear(pipe, fifo);
usbhsf_fifo_unselect(pipe, fifo);
/*
* change handler to PIO pop
*/
pkt->handler = &usbhs_fifo_pio_pop_handler;
return pkt->handler->prepare(pkt, is_done);
}
const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
.prepare = usbhsf_dcp_data_stage_prepare_pop,
};
/*
* PIO push handler
*/
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
void __iomem *addr = priv->base + fifo->port;
u8 *buf;
int maxp = usbhs_pipe_get_maxpacket(pipe);
int total_len;
int i, ret, len;
int is_short;
usbhs_pipe_data_sequence(pipe, pkt->sequence);
pkt->sequence = -1; /* -1 sequence will be ignored */
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
ret = usbhsf_fifo_select(pipe, fifo, 1);
if (ret < 0)
return 0;
ret = usbhs_pipe_is_accessible(pipe);
if (ret < 0) {
/* inaccessible pipe is not an error */
ret = 0;
goto usbhs_fifo_write_busy;
}
ret = usbhsf_fifo_barrier(priv, fifo);
if (ret < 0)
goto usbhs_fifo_write_busy;
buf = pkt->buf + pkt->actual;
len = pkt->length - pkt->actual;
len = min(len, maxp);
total_len = len;
is_short = total_len < maxp;
/*
* FIXME
*
* 32-bit access only
*/
if (len >= 4 && !((unsigned long)buf & 0x03)) {
iowrite32_rep(addr, buf, len / 4);
len %= 4;
buf += total_len - len;
}
/* the rest operation */
if (usbhs_get_dparam(priv, cfifo_byte_addr)) {
for (i = 0; i < len; i++)
iowrite8(buf[i], addr + (i & 0x03));
} else {
for (i = 0; i < len; i++)
iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
}
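/*
* The two trailing-byte loops above differ because, on cores without
* cfifo_byte_addr, the CFIFO appears big-endian within each 32-bit
* word, so byte i goes to offset (0x03 - (i & 0x03)); byte-addressable
* cores take ascending offsets instead.
*/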
/*
* variable update
*/
pkt->actual += total_len;
if (pkt->actual < pkt->length)
*is_done = 0; /* there are remainder data */
else if (is_short)
*is_done = 1; /* short packet */
else
*is_done = !pkt->zero; /* send zero packet ? */
/*
* pipe/irq handling
*/
if (is_short)
usbhsf_send_terminator(pipe, fifo);
usbhsf_tx_irq_ctrl(pipe, !*is_done);
usbhs_pipe_running(pipe, !*is_done);
usbhs_pipe_enable(pipe);
dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
usbhsf_fifo_unselect(pipe, fifo);
return 0;
usbhs_fifo_write_busy:
usbhsf_fifo_unselect(pipe, fifo);
/*
* pipe is busy.
* retry from the interrupt handler
*/
usbhsf_tx_irq_ctrl(pipe, 1);
usbhs_pipe_running(pipe, 1);
return ret;
}
static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
if (usbhs_pipe_is_running(pkt->pipe))
return 0;
return usbhsf_pio_try_push(pkt, is_done);
}
const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
.prepare = usbhsf_pio_prepare_push,
.try_run = usbhsf_pio_try_push,
};
/*
* PIO pop handler
*/
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
if (usbhs_pipe_is_busy(pipe))
return 0;
if (usbhs_pipe_is_running(pipe))
return 0;
/*
* enable the pipe to prepare for packet reception
*/
usbhs_pipe_data_sequence(pipe, pkt->sequence);
pkt->sequence = -1; /* -1 sequence will be ignored */
if (usbhs_pipe_is_dcp(pipe))
usbhsf_fifo_clear(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
usbhs_pipe_enable(pipe);
usbhs_pipe_running(pipe, 1);
usbhsf_rx_irq_ctrl(pipe, 1);
return 0;
}
static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
void __iomem *addr = priv->base + fifo->port;
u8 *buf;
u32 data = 0;
int maxp = usbhs_pipe_get_maxpacket(pipe);
int rcv_len, len;
int i, ret;
int total_len = 0;
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0)
return 0;
ret = usbhsf_fifo_barrier(priv, fifo);
if (ret < 0)
goto usbhs_fifo_read_busy;
rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
buf = pkt->buf + pkt->actual;
len = pkt->length - pkt->actual;
len = min(len, rcv_len);
total_len = len;
/*
* update the actual length here first, to decide whether to disable
* the pipe. If this pipe keeps BUF status and all data has been
* popped, the next interrupt/token will be issued again.
*/
pkt->actual += total_len;
if ((pkt->actual == pkt->length) || /* receive all data */
(total_len < maxp)) { /* short packet */
*is_done = 1;
usbhsf_rx_irq_ctrl(pipe, 0);
usbhs_pipe_running(pipe, 0);
/*
* If function mode, since this controller is possible to enter
* Control Write status stage at this timing, this driver
* should not disable the pipe. If such a case happens, this
* controller is not able to complete the status stage.
*/
if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
usbhs_pipe_disable(pipe); /* disable pipe first */
}
/*
* Buffer clear if Zero-Length packet
*
* see
* "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
*/
if (0 == rcv_len) {
pkt->zero = 1;
usbhsf_fifo_clear(pipe, fifo);
goto usbhs_fifo_read_end;
}
/*
* FIXME
*
* 32-bit access only
*/
if (len >= 4 && !((unsigned long)buf & 0x03)) {
ioread32_rep(addr, buf, len / 4);
len %= 4;
buf += total_len - len;
}
/* the rest operation */
for (i = 0; i < len; i++) {
if (!(i & 0x03))
data = ioread32(addr);
buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
}
usbhs_fifo_read_end:
dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
usbhs_fifo_read_busy:
usbhsf_fifo_unselect(pipe, fifo);
return ret;
}
const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
.prepare = usbhsf_prepare_pop,
.try_run = usbhsf_pio_try_pop,
};
/*
* DCP control stage handler
*/
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
usbhs_dcp_control_transfer_done(pkt->pipe);
*is_done = 1;
return 0;
}
const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
.prepare = usbhsf_ctrl_stage_end,
.try_run = usbhsf_ctrl_stage_end,
};
/*
* DMA fifo functions
*/
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
struct usbhs_pkt *pkt)
{
if (&usbhs_fifo_dma_push_handler == pkt->handler)
return fifo->tx_chan;
if (&usbhs_fifo_dma_pop_handler == pkt->handler)
return fifo->rx_chan;
return NULL;
}
static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
struct usbhs_fifo *fifo;
int i;
usbhs_for_each_dfifo(priv, fifo, i) {
if (usbhsf_dma_chan_get(fifo, pkt) &&
!usbhsf_fifo_is_busy(fifo))
return fifo;
}
return NULL;
}
#define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo,
u16 dreqe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
return info->dma_map_ctrl(chan->device->dev, pkt, map);
}
static void usbhsf_dma_complete(void *arg,
const struct dmaengine_result *result);
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
dma_cookie_t cookie;
fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)
return;
chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return;
desc->callback_result = usbhsf_dma_complete;
desc->callback_param = pkt;
cookie = dmaengine_submit(desc);
if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return;
}
dev_dbg(dev, " %s %d (%d/ %d)\n",
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
}
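/*
 * Workqueue wrapper for usbhsf_dma_xfer_preparing(): takes the driver
 * lock and prepares the transfer from process context.
 */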
static void xfer_work(struct work_struct *work)
{
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
unsigned long flags;
usbhs_lock(priv, flags);
usbhsf_dma_xfer_preparing(pkt);
usbhs_unlock(priv, flags);
}
/*
* DMA push handler
*/
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo;
int len = pkt->length - pkt->actual;
int ret;
uintptr_t align_mask;
if (usbhs_pipe_is_busy(pipe))
return 0;
	/* use PIO if the packet is smaller than pio_dma_border or the pipe is ISOC */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
	/* check data length if this driver doesn't use USB-DMAC */
if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
goto usbhsf_pio_prepare_push;
/* check buffer alignment */
align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
goto usbhsf_pio_prepare_push;
/* return at this time if the pipe is running */
if (usbhs_pipe_is_running(pipe))
return 0;
	/* get an available DMA fifo */
fifo = usbhsf_get_dma_fifo(priv, pkt);
if (!fifo)
goto usbhsf_pio_prepare_push;
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0)
goto usbhsf_pio_prepare_push;
if (usbhsf_dma_map(pkt) < 0)
goto usbhsf_pio_prepare_push_unselect;
pkt->trans = len;
usbhsf_tx_irq_ctrl(pipe, 0);
	/* FIXME: Workaround so the USB-DMAC driver can be used in atomic context */
if (usbhs_get_dparam(priv, has_usb_dmac)) {
usbhsf_dma_xfer_preparing(pkt);
} else {
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
}
return 0;
usbhsf_pio_prepare_push_unselect:
usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
/*
* change handler to PIO
*/
pkt->handler = &usbhs_fifo_pio_push_handler;
return pkt->handler->prepare(pkt, is_done);
}
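/*
 * DMA push completion: account for the transferred bytes, then decide
 * whether a short or zero-length packet terminates the transfer. Any
 * remaining data is handed back to the PIO push handler.
 */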
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
pkt->actual += pkt->trans;
if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
else if (is_short)
*is_done = 1; /* short packet */
else
*is_done = !pkt->zero; /* send zero packet? */
usbhs_pipe_running(pipe, !*is_done);
usbhsf_dma_stop(pipe, pipe->fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
if (!*is_done) {
/* change handler to PIO */
pkt->handler = &usbhs_fifo_pio_push_handler;
return pkt->handler->try_run(pkt, is_done);
}
return 0;
}
const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
.prepare = usbhsf_dma_prepare_push,
.dma_done = usbhsf_dma_push_done,
};
/*
* DMA pop handler
*/
static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
int *is_done)
{
return usbhsf_prepare_pop(pkt, is_done);
}
static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo;
int ret;
if (usbhs_pipe_is_busy(pipe))
return 0;
	/* use PIO if the packet is smaller than pio_dma_border or the pipe is ISOC */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
if (!fifo)
goto usbhsf_pio_prepare_pop;
if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
goto usbhsf_pio_prepare_pop;
/* return at this time if the pipe is running */
if (usbhs_pipe_is_running(pipe))
return 0;
usbhs_pipe_config_change_bfre(pipe, 1);
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0)
goto usbhsf_pio_prepare_pop;
if (usbhsf_dma_map(pkt) < 0)
goto usbhsf_pio_prepare_pop_unselect;
/* DMA */
	/*
	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq to get
	 * here, but it is no longer needed for DMA. Disable it.
	 */
usbhsf_rx_irq_ctrl(pipe, 0);
pkt->trans = pkt->length;
usbhsf_dma_xfer_preparing(pkt);
return 0;
usbhsf_pio_prepare_pop_unselect:
usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:
/*
* change handler to PIO
*/
pkt->handler = &usbhs_fifo_pio_pop_handler;
usbhs_pipe_config_change_bfre(pipe, 0);
return pkt->handler->prepare(pkt, is_done);
}
static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
if (usbhs_get_dparam(priv, has_usb_dmac))
return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
else
return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
}
static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo;
int len, ret;
if (usbhs_pipe_is_busy(pipe))
return 0;
if (usbhs_pipe_is_dcp(pipe))
goto usbhsf_pio_prepare_pop;
	/* get an available DMA fifo */
fifo = usbhsf_get_dma_fifo(priv, pkt);
if (!fifo)
goto usbhsf_pio_prepare_pop;
if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_pop;
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0)
goto usbhsf_pio_prepare_pop;
/* use PIO if packet is less than pio_dma_border */
len = usbhsf_fifo_rcv_len(priv, fifo);
len = min(pkt->length - pkt->actual, len);
if (len & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_pop_unselect;
if (len < usbhs_get_dparam(priv, pio_dma_border))
goto usbhsf_pio_prepare_pop_unselect;
ret = usbhsf_fifo_barrier(priv, fifo);
if (ret < 0)
goto usbhsf_pio_prepare_pop_unselect;
if (usbhsf_dma_map(pkt) < 0)
goto usbhsf_pio_prepare_pop_unselect;
/* DMA */
	/*
	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq to get
	 * here, but it is no longer needed for DMA. Disable it.
	 */
usbhsf_rx_irq_ctrl(pipe, 0);
pkt->trans = len;
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
return 0;
usbhsf_pio_prepare_pop_unselect:
usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:
/*
* change handler to PIO
*/
pkt->handler = &usbhs_fifo_pio_pop_handler;
return pkt->handler->try_run(pkt, is_done);
}
static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));
return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
}
static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
int maxp = usbhs_pipe_get_maxpacket(pipe);
usbhsf_dma_stop(pipe, pipe->fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
pkt->actual += pkt->trans;
if ((pkt->actual == pkt->length) || /* receive all data */
(pkt->trans < maxp)) { /* short packet */
*is_done = 1;
usbhs_pipe_running(pipe, 0);
} else {
/* re-enable */
usbhs_pipe_running(pipe, 0);
usbhsf_prepare_pop(pkt, is_done);
}
return 0;
}
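/*
 * The USB-DMAC reports its residue in transfer-size units, so when DTLN
 * is non-zero the final short packet is not fully reflected there: back
 * off one transfer unit, round down to a max-packet boundary, and add
 * the byte count (DTLN) of the short packet instead.
 */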
static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
struct dma_chan *chan, int dtln)
{
struct usbhs_pipe *pipe = pkt->pipe;
size_t received_size;
int maxp = usbhs_pipe_get_maxpacket(pipe);
received_size = pkt->length - pkt->dma_result->residue;
if (dtln) {
received_size -= USBHS_USB_DMAC_XFER_SIZE;
received_size &= ~(maxp - 1);
received_size += dtln;
}
return received_size;
}
static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
int rcv_len;
/*
* Since the driver disables rx_irq in DMA mode, the interrupt handler
	 * cannot clear BRDYSTS. So, the function clears it here because the
* driver may use PIO mode next time.
*/
usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));
rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
usbhsf_fifo_clear(pipe, fifo);
pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
usbhs_pipe_running(pipe, 0);
usbhsf_dma_stop(pipe, fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
/* The driver can assume the rx transaction is always "done" */
*is_done = 1;
return 0;
}
static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
if (usbhs_get_dparam(priv, has_usb_dmac))
return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
else
return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
}
const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
.prepare = usbhsf_dma_prepare_pop,
.try_run = usbhsf_dma_try_pop,
.dma_done = usbhsf_dma_pop_done
};
/*
* DMA setting
*/
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
struct sh_dmae_slave *slave = param;
/*
* FIXME
*
	 * usbhs doesn't recognize id = 0 as a valid DMA channel
*/
if (0 == slave->shdma_slave.slave_id)
return false;
chan->private = slave;
return true;
}
static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
if (fifo->tx_chan)
dma_release_channel(fifo->tx_chan);
if (fifo->rx_chan)
dma_release_channel(fifo->rx_chan);
fifo->tx_chan = NULL;
fifo->rx_chan = NULL;
}
static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
&fifo->tx_slave);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
&fifo->rx_slave);
}
static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
int channel)
{
char name[16];
/*
	 * To avoid complex handling for DnFIFOs, the driver uses each
	 * DnFIFO in a single direction, TX or RX (not bi-directional).
* So, the driver uses odd channels for TX, even channels for RX.
*/
snprintf(name, sizeof(name), "ch%d", channel);
if (channel & 1) {
fifo->tx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->tx_chan))
fifo->tx_chan = NULL;
} else {
fifo->rx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->rx_chan))
fifo->rx_chan = NULL;
}
}
static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
int channel)
{
struct device *dev = usbhs_priv_to_dev(priv);
if (dev_of_node(dev))
usbhsf_dma_init_dt(dev, fifo, channel);
else
usbhsf_dma_init_pdev(fifo);
if (fifo->tx_chan || fifo->rx_chan)
dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
fifo->name,
fifo->tx_chan ? "[TX]" : " ",
fifo->rx_chan ? "[RX]" : " ");
}
/*
* irq functions
*/
static int usbhsf_irq_empty(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhs_pipe *pipe;
struct device *dev = usbhs_priv_to_dev(priv);
int i, ret;
if (!irq_state->bempsts) {
dev_err(dev, "debug %s !!\n", __func__);
return -EIO;
}
dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
/*
	 * search for the interrupted "pipe",
	 * not the "uep".
*/
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
if (!(irq_state->bempsts & (1 << i)))
continue;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
if (ret < 0)
dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
}
return 0;
}
static int usbhsf_irq_ready(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhs_pipe *pipe;
struct device *dev = usbhs_priv_to_dev(priv);
int i, ret;
if (!irq_state->brdysts) {
dev_err(dev, "debug %s !!\n", __func__);
return -EIO;
}
dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
/*
	 * search for the interrupted "pipe",
	 * not the "uep".
*/
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
if (!(irq_state->brdysts & (1 << i)))
continue;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
if (ret < 0)
dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
}
return 0;
}
static void usbhsf_dma_complete(void *arg,
const struct dmaengine_result *result)
{
struct usbhs_pkt *pkt = arg;
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
pkt->dma_result = result;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
if (ret < 0)
dev_err(dev, "dma_complete run_error %d : %d\n",
usbhs_pipe_number(pipe), ret);
}
void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
/* clear DCP FIFO of transmission */
if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
return;
usbhsf_fifo_clear(pipe, fifo);
usbhsf_fifo_unselect(pipe, fifo);
/* clear DCP FIFO of reception */
if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
return;
usbhsf_fifo_clear(pipe, fifo);
usbhsf_fifo_unselect(pipe, fifo);
}
/*
* fifo init
*/
void usbhs_fifo_init(struct usbhs_priv *priv)
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
struct usbhs_fifo *dfifo;
int i;
mod->irq_empty = usbhsf_irq_empty;
mod->irq_ready = usbhsf_irq_ready;
mod->irq_bempsts = 0;
mod->irq_brdysts = 0;
cfifo->pipe = NULL;
usbhs_for_each_dfifo(priv, dfifo, i)
dfifo->pipe = NULL;
}
void usbhs_fifo_quit(struct usbhs_priv *priv)
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
mod->irq_empty = NULL;
mod->irq_ready = NULL;
mod->irq_bempsts = 0;
mod->irq_brdysts = 0;
}
#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port) \
do { \
fifo = usbhsf_get_dnfifo(priv, channel); \
fifo->name = "D"#channel"FIFO"; \
fifo->port = fifo_port; \
fifo->sel = D##channel##FIFOSEL; \
fifo->ctr = D##channel##FIFOCTR; \
fifo->tx_slave.shdma_slave.slave_id = \
usbhs_get_dparam(priv, d##channel##_tx_id); \
fifo->rx_slave.shdma_slave.slave_id = \
usbhs_get_dparam(priv, d##channel##_rx_id); \
usbhsf_dma_init(priv, fifo, channel); \
} while (0)
#define USBHS_DFIFO_INIT(priv, fifo, channel) \
__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel) \
__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
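/*
 * For example, USBHS_DFIFO_INIT(priv, fifo, 1) initializes D1FIFO from
 * the D1FIFOSEL/D1FIFOCTR registers and the D1FIFO port, and picks up
 * the d1_tx_id/d1_rx_id DMA slave ids via token pasting.
 */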
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
struct usbhs_fifo *fifo;
/* CFIFO */
fifo = usbhsf_get_cfifo(priv);
fifo->name = "CFIFO";
fifo->port = CFIFO;
fifo->sel = CFIFOSEL;
fifo->ctr = CFIFOCTR;
/* DFIFO */
USBHS_DFIFO_INIT(priv, fifo, 0);
USBHS_DFIFO_INIT(priv, fifo, 1);
USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);
return 0;
}
void usbhs_fifo_remove(struct usbhs_priv *priv)
{
struct usbhs_fifo *fifo;
int i;
usbhs_for_each_dfifo(priv, fifo, i)
usbhsf_dma_quit(priv, fifo);
}
| linux-master | drivers/usb/renesas_usbhs/fifo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB driver R-Car Gen. 3 initialization and power control
*
* Copyright (C) 2016-2019 Renesas Electronics Corporation
*/
#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
#define LPSTS 0x102
#define UGCTRL 0x180 /* 32-bit register */
#define UGCTRL2 0x184 /* 32-bit register */
#define UGSTS 0x188 /* 32-bit register */
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
/* R-Car D3 only: USB General control register (UGCTRL) */
#define UGCTRL_PLLRESET 0x00000001
#define UGCTRL_CONNECT 0x00000004
/*
* USB General control register 2 (UGCTRL2)
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
/* R-Car D3 only: USB General status register (UGSTS) */
#define UGSTS_LOCK 0x00000100
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
iowrite32(data, priv->base + reg);
}
static u32 usbhs_read32(struct usbhs_priv *priv, u32 reg)
{
return ioread32(priv->base + reg);
}
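/*
 * Each UGCTRL2 write must keep bit[3:0] = B'0001, so UGCTRL2_RESERVED_3
 * is OR'ed in here; e.g. OTG with VBUS select results in a write of
 * 0x431 (UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL | UGCTRL2_RESERVED_3).
 */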
static void usbhs_rcar3_set_ugctrl2(struct usbhs_priv *priv, u32 val)
{
usbhs_write32(priv, UGCTRL2, val | UGCTRL2_RESERVED_3);
}
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
/* The controller on R-Car Gen3 needs to wait up to 45 usec */
usleep_range(45, 90);
} else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
}
return 0;
}
/* R-Car D3 needs to release UGCTRL.PLLRESET */
static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
u32 val;
int timeout = 1000;
if (enable) {
usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
usbhs_rcar3_set_ugctrl2(priv,
UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
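		/* poll UGSTS until the PLL reports lock, for up to ~1 ms */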
do {
val = usbhs_read32(priv, UGSTS);
udelay(1);
} while (!(val & UGSTS_LOCK) && timeout--);
usbhs_write32(priv, UGCTRL, UGCTRL_CONNECT);
} else {
usbhs_write32(priv, UGCTRL, 0);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
usbhs_write32(priv, UGCTRL, UGCTRL_PLLRESET);
}
return 0;
}
const struct renesas_usbhs_platform_info usbhs_rcar_gen3_plat_info = {
.platform_callback = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_get_id_as_gadget,
},
.driver_param = {
.has_usb_dmac = 1,
.multi_clks = 1,
.has_new_pipe_configs = 1,
},
};
const struct renesas_usbhs_platform_info usbhs_rcar_gen3_with_pll_plat_info = {
.platform_callback = {
.power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
.get_id = usbhs_get_id_as_gadget,
},
.driver_param = {
.has_usb_dmac = 1,
.multi_clks = 1,
.has_new_pipe_configs = 1,
},
};
| linux-master | drivers/usb/renesas_usbhs/rcar3.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <[email protected]>
*/
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "common.h"
/*
*** HARDWARE LIMITATION ***
*
* 1) renesas_usbhs has a limited number of controllable devices.
 *    it can generally control only 9 devices.
 *    see DEVADDn / DCPMAXP / PIPEMAXP.
 *
 * 2) the number of renesas_usbhs pipes is limited.
 *    pipes will be re-used across devices.
 *    so, software should control the DATA0/1 sequence of each device.
*/
/*
* image of mod_host
*
* +--------+
* | udev 0 | --> it is used when set address
* +--------+
*
* +--------+ pipes are reused for each uep.
* | udev 1 |-+- [uep 0 (dcp) ] --+ pipe will be switched when
* +--------+ | | other device requested
* +- [uep 1 (bulk)] --|---+ +--------------+
* | +--------------> | pipe0 (dcp) |
* +- [uep 2 (bulk)] -@ | +--------------+
* | | pipe1 (isoc) |
* +--------+ | +--------------+
* | udev 2 |-+- [uep 0 (dcp) ] -@ +----------> | pipe2 (bulk) |
* +--------+ | +--------------+
* +- [uep 1 (int) ] ----+ +------> | pipe3 (bulk) |
* | | +--------------+
* +--------+ +-----|------> | pipe4 (int) |
* | udev 3 |-+- [uep 0 (dcp) ] -@ | +--------------+
* +--------+ | | | .... |
* +- [uep 1 (bulk)] -@ | | .... |
* | |
* +- [uep 2 (bulk)]-----------+
*
 * @ : uep requested a free pipe, but all of them are in use.
 *     it is now waiting for a free pipe
*/
/*
* struct
*/
struct usbhsh_request {
struct urb *urb;
struct usbhs_pkt pkt;
};
struct usbhsh_device {
struct usb_device *usbv;
struct list_head ep_list_head; /* list of usbhsh_ep */
};
struct usbhsh_ep {
struct usbhs_pipe *pipe; /* attached pipe */
struct usbhsh_device *udev; /* attached udev */
struct usb_host_endpoint *ep;
struct list_head ep_list; /* list to usbhsh_device */
unsigned int counter; /* pipe attach counter */
};
#define USBHSH_DEVICE_MAX 10 /* see DEVADDn / DCPMAXP / PIPEMAXP */
#define USBHSH_PORT_MAX 7 /* see DEVADDn :: HUBPORT */
struct usbhsh_hpriv {
struct usbhs_mod mod;
struct usbhs_pipe *dcp;
struct usbhsh_device udev[USBHSH_DEVICE_MAX];
u32 port_stat; /* USB_PORT_STAT_xxx */
struct completion setup_ack_done;
};
static const char usbhsh_hcd_name[] = "renesas_usbhs host";
/*
* macro
*/
#define usbhsh_priv_to_hpriv(priv) \
container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
#define __usbhsh_for_each_udev(start, pos, h, i) \
for ((i) = start; \
((i) < USBHSH_DEVICE_MAX) && ((pos) = (h)->udev + (i)); \
(i)++)
#define usbhsh_for_each_udev(pos, hpriv, i) \
__usbhsh_for_each_udev(1, pos, hpriv, i)
#define usbhsh_for_each_udev_with_dev0(pos, hpriv, i) \
__usbhsh_for_each_udev(0, pos, hpriv, i)
#define usbhsh_hcd_to_hpriv(h) (struct usbhsh_hpriv *)((h)->hcd_priv)
#define usbhsh_hcd_to_dev(h) ((h)->self.controller)
#define usbhsh_hpriv_to_priv(h) ((h)->mod.priv)
#define usbhsh_hpriv_to_dcp(h) ((h)->dcp)
#define usbhsh_hpriv_to_hcd(h) \
container_of((void *)h, struct usb_hcd, hcd_priv)
#define usbhsh_ep_to_uep(u) ((u)->hcpriv)
#define usbhsh_uep_to_pipe(u) ((u)->pipe)
#define usbhsh_uep_to_udev(u) ((u)->udev)
#define usbhsh_uep_to_ep(u) ((u)->ep)
#define usbhsh_urb_to_ureq(u) ((u)->hcpriv)
#define usbhsh_urb_to_usbv(u) ((u)->dev)
#define usbhsh_usbv_to_udev(d) dev_get_drvdata(&(d)->dev)
#define usbhsh_udev_to_usbv(h) ((h)->usbv)
#define usbhsh_udev_is_used(h) usbhsh_udev_to_usbv(h)
#define usbhsh_pipe_to_uep(p) ((p)->mod_private)
#define usbhsh_device_parent(d) (usbhsh_usbv_to_udev((d)->usbv->parent))
#define usbhsh_device_hubport(d) ((d)->usbv->portnum)
#define usbhsh_device_number(h, d) ((int)((d) - (h)->udev))
#define usbhsh_device_nth(h, d) ((h)->udev + d)
#define usbhsh_device0(h) usbhsh_device_nth(h, 0)
#define usbhsh_port_stat_init(h) ((h)->port_stat = 0)
#define usbhsh_port_stat_set(h, s) ((h)->port_stat |= (s))
#define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s))
#define usbhsh_port_stat_get(h) ((h)->port_stat)
#define usbhsh_pkt_to_ureq(p) \
container_of((void *)p, struct usbhsh_request, pkt)
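/*
 * Most of the macros above are plain field accessors; the conversions
 * between the usb core objects (hcd/urb/ep) and this driver's private
 * structures are built on container_of() and device drvdata.
 */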
/*
* req alloc/free
*/
static struct usbhsh_request *usbhsh_ureq_alloc(struct usbhsh_hpriv *hpriv,
struct urb *urb,
gfp_t mem_flags)
{
struct usbhsh_request *ureq;
ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
if (!ureq)
return NULL;
usbhs_pkt_init(&ureq->pkt);
ureq->urb = urb;
usbhsh_urb_to_ureq(urb) = ureq;
return ureq;
}
static void usbhsh_ureq_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_request *ureq)
{
usbhsh_urb_to_ureq(ureq->urb) = NULL;
ureq->urb = NULL;
kfree(ureq);
}
/*
* status
*/
static int usbhsh_is_running(struct usbhsh_hpriv *hpriv)
{
/*
	 * whether a device is attached or not can be decided
* by checking mod.irq_attch
* see
* usbhsh_irq_attch()
* usbhsh_irq_dtch()
*/
return (hpriv->mod.irq_attch == NULL);
}
/*
* pipe control
*/
static void usbhsh_endpoint_sequence_save(struct usbhsh_hpriv *hpriv,
struct urb *urb,
struct usbhs_pkt *pkt)
{
int len = urb->actual_length;
int maxp = usb_endpoint_maxp(&urb->ep->desc);
int t = 0;
/* DCP is out of sequence control */
if (usb_pipecontrol(urb->pipe))
return;
/*
	 * renesas_usbhs has a limited number of pipes.
	 * So, the driver should re-use the limited pipes for each
	 * device/endpoint. The DATA0/1 sequence should be saved for that.
* see [image of mod_host]
* [HARDWARE LIMITATION]
*/
/*
* next sequence depends on actual_length
*
* ex) actual_length = 1147, maxp = 512
* data0 : 512
* data1 : 512
* data0 : 123
* data1 is the next sequence
*/
t = len / maxp;
if (len % maxp)
t++;
if (pkt->zero)
t++;
t %= 2;
if (t)
usb_dotoggle(urb->dev,
usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
}
static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
struct urb *urb);
static int usbhsh_pipe_attach(struct usbhsh_hpriv *hpriv,
struct urb *urb)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
struct usbhs_pipe *pipe;
struct usb_endpoint_descriptor *desc = &urb->ep->desc;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
int dir_in_req = !!usb_pipein(urb->pipe);
int is_dcp = usb_endpoint_xfer_control(desc);
int i, dir_in;
int ret = -EBUSY;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/*
	 * if the uep has already been attached to a pipe,
* reuse it
*/
if (usbhsh_uep_to_pipe(uep)) {
ret = 0;
goto usbhsh_pipe_attach_done;
}
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
/* check pipe type */
if (!usbhs_pipe_type_is(pipe, usb_endpoint_type(desc)))
continue;
/* check pipe direction if normal pipe */
if (!is_dcp) {
dir_in = !!usbhs_pipe_is_dir_in(pipe);
if (0 != (dir_in - dir_in_req))
continue;
}
/* check pipe is free */
if (usbhsh_pipe_to_uep(pipe))
continue;
/*
* attach pipe to uep
*
* usbhs_pipe_config_update() should be called after
* usbhs_set_device_config()
* see
* DCPMAXP/PIPEMAXP
*/
usbhsh_uep_to_pipe(uep) = pipe;
usbhsh_pipe_to_uep(pipe) = uep;
usbhs_pipe_config_update(pipe,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(desc),
usb_endpoint_maxp(desc));
dev_dbg(dev, "%s [%d-%d(%s:%s)]\n", __func__,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(desc),
usbhs_pipe_name(pipe),
dir_in_req ? "in" : "out");
ret = 0;
break;
}
usbhsh_pipe_attach_done:
if (0 == ret)
uep->counter++;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
return ret;
}
static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
struct usbhsh_ep *uep)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usbhs_pipe *pipe;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
if (unlikely(!uep)) {
dev_err(dev, "no uep\n");
return;
}
/******************** spin lock ********************/
usbhs_lock(priv, flags);
pipe = usbhsh_uep_to_pipe(uep);
if (unlikely(!pipe)) {
dev_err(dev, "uep doesn't have pipe\n");
} else if (1 == uep->counter--) { /* last user */
struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep);
struct usbhsh_device *udev = usbhsh_uep_to_udev(uep);
/* detach pipe from uep */
usbhsh_uep_to_pipe(uep) = NULL;
usbhsh_pipe_to_uep(pipe) = NULL;
dev_dbg(dev, "%s [%d-%d(%s)]\n", __func__,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(&ep->desc),
usbhs_pipe_name(pipe));
}
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
}
/*
* endpoint control
*/
static int usbhsh_endpoint_attach(struct usbhsh_hpriv *hpriv,
struct urb *urb,
gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
struct usb_host_endpoint *ep = urb->ep;
struct usbhsh_ep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
struct usb_endpoint_descriptor *desc = &ep->desc;
unsigned long flags;
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep)
return -ENOMEM;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/*
* init endpoint
*/
uep->counter = 0;
INIT_LIST_HEAD(&uep->ep_list);
list_add_tail(&uep->ep_list, &udev->ep_list_head);
usbhsh_uep_to_udev(uep) = udev;
usbhsh_uep_to_ep(uep) = ep;
usbhsh_ep_to_uep(ep) = uep;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
dev_dbg(dev, "%s [%d-%d]\n", __func__,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(desc));
return 0;
}
static void usbhsh_endpoint_detach(struct usbhsh_hpriv *hpriv,
struct usb_host_endpoint *ep)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
unsigned long flags;
if (!uep)
return;
dev_dbg(dev, "%s [%d-%d]\n", __func__,
usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
usb_endpoint_num(&ep->desc));
if (usbhsh_uep_to_pipe(uep))
usbhsh_pipe_detach(hpriv, uep);
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/* remove this endpoint from udev */
list_del_init(&uep->ep_list);
usbhsh_uep_to_udev(uep) = NULL;
usbhsh_uep_to_ep(uep) = NULL;
usbhsh_ep_to_uep(ep) = NULL;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
kfree(uep);
}
static void usbhsh_endpoint_detach_all(struct usbhsh_hpriv *hpriv,
struct usbhsh_device *udev)
{
struct usbhsh_ep *uep, *next;
list_for_each_entry_safe(uep, next, &udev->ep_list_head, ep_list)
usbhsh_endpoint_detach(hpriv, usbhsh_uep_to_ep(uep));
}
/*
* device control
*/
static int usbhsh_connected_to_rhdev(struct usb_hcd *hcd,
struct usbhsh_device *udev)
{
struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
return hcd->self.root_hub == usbv->parent;
}
static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
{
return !list_empty(&udev->ep_list_head);
}
static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
struct urb *urb)
{
struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
	/* usbhsh_device_attach() has not been called yet */
if (!udev)
return NULL;
/* if it is device0, return it */
if (0 == usb_pipedevice(urb->pipe))
return usbhsh_device0(hpriv);
/* return attached device */
return udev;
}
static struct usbhsh_device *usbhsh_device_attach(struct usbhsh_hpriv *hpriv,
struct urb *urb)
{
struct usbhsh_device *udev = NULL;
struct usbhsh_device *udev0 = usbhsh_device0(hpriv);
struct usbhsh_device *pos;
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct device *dev = usbhsh_hcd_to_dev(hcd);
struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
unsigned long flags;
u16 upphub, hubport;
int i;
/*
* This function should be called only while urb is pointing to device0.
	 * It will attach an unused usbhsh_device to the urb (usbv),
* and initialize device0.
* You can use usbhsh_device_get() to get "current" udev,
* and usbhsh_usbv_to_udev() is for "attached" udev.
*/
if (0 != usb_pipedevice(urb->pipe)) {
dev_err(dev, "%s fail: urb isn't pointing device0\n", __func__);
return NULL;
}
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/*
* find unused device
*/
usbhsh_for_each_udev(pos, hpriv, i) {
if (usbhsh_udev_is_used(pos))
continue;
udev = pos;
break;
}
if (udev) {
/*
* usbhsh_usbv_to_udev()
* usbhsh_udev_to_usbv()
		 * will be enabled
*/
dev_set_drvdata(&usbv->dev, udev);
udev->usbv = usbv;
}
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
if (!udev) {
dev_err(dev, "no free usbhsh_device\n");
return NULL;
}
if (usbhsh_device_has_endpoint(udev)) {
dev_warn(dev, "udev have old endpoint\n");
usbhsh_endpoint_detach_all(hpriv, udev);
}
if (usbhsh_device_has_endpoint(udev0)) {
dev_warn(dev, "udev0 have old endpoint\n");
usbhsh_endpoint_detach_all(hpriv, udev0);
}
/* uep will be attached */
INIT_LIST_HEAD(&udev0->ep_list_head);
INIT_LIST_HEAD(&udev->ep_list_head);
/*
* set device0 config
*/
usbhs_set_device_config(priv,
0, 0, 0, usbv->speed);
/*
* set new device config
*/
upphub = 0;
hubport = 0;
if (!usbhsh_connected_to_rhdev(hcd, udev)) {
/* if udev is not connected to rhdev, it means parent is Hub */
struct usbhsh_device *parent = usbhsh_device_parent(udev);
upphub = usbhsh_device_number(hpriv, parent);
hubport = usbhsh_device_hubport(udev);
dev_dbg(dev, "%s connected to Hub [%d:%d](%p)\n", __func__,
upphub, hubport, parent);
}
usbhs_set_device_config(priv,
usbhsh_device_number(hpriv, udev),
upphub, hubport, usbv->speed);
dev_dbg(dev, "%s [%d](%p)\n", __func__,
usbhsh_device_number(hpriv, udev), udev);
return udev;
}
static void usbhsh_device_detach(struct usbhsh_hpriv *hpriv,
struct usbhsh_device *udev)
{
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhsh_hcd_to_dev(hcd);
struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
unsigned long flags;
dev_dbg(dev, "%s [%d](%p)\n", __func__,
usbhsh_device_number(hpriv, udev), udev);
if (usbhsh_device_has_endpoint(udev)) {
dev_warn(dev, "udev still have endpoint\n");
usbhsh_endpoint_detach_all(hpriv, udev);
}
/*
* There is nothing to do if it is device0.
* see
* usbhsh_device_attach()
* usbhsh_device_get()
*/
if (0 == usbhsh_device_number(hpriv, udev))
return;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/*
* usbhsh_usbv_to_udev()
* usbhsh_udev_to_usbv()
	 * will be disabled
*/
dev_set_drvdata(&usbv->dev, NULL);
udev->usbv = NULL;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
}
/*
* queue push/pop
*/
static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct urb *urb = ureq->urb;
struct device *dev = usbhs_priv_to_dev(priv);
int status = 0;
dev_dbg(dev, "%s\n", __func__);
if (!urb) {
dev_warn(dev, "pkt doesn't have urb\n");
return;
}
if (!usbhsh_is_running(hpriv))
status = -ESHUTDOWN;
urb->actual_length = pkt->actual;
usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
usbhsh_ureq_free(hpriv, ureq);
usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
usb_hcd_giveback_urb(hcd, urb, status);
}
static int usbhsh_queue_push(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
struct usbhsh_request *ureq;
void *buf;
int len, sequence;
if (usb_pipeisoc(urb->pipe)) {
dev_err(dev, "pipe iso is not supported now\n");
return -EIO;
}
/* this ureq will be freed on usbhsh_queue_done() */
ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
if (unlikely(!ureq)) {
dev_err(dev, "ureq alloc fail\n");
return -ENOMEM;
}
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_fifo_dma_pop_handler;
else
pipe->handler = &usbhs_fifo_dma_push_handler;
buf = (void *)(urb->transfer_buffer + urb->actual_length);
len = urb->transfer_buffer_length - urb->actual_length;
sequence = usb_gettoggle(urb->dev,
usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
dev_dbg(dev, "%s\n", __func__);
usbhs_pkt_push(pipe, &ureq->pkt, usbhsh_queue_done,
buf, len, (urb->transfer_flags & URB_ZERO_PACKET),
sequence);
usbhs_pkt_start(pipe);
return 0;
}
static void usbhsh_queue_force_pop(struct usbhs_priv *priv,
struct usbhs_pipe *pipe)
{
struct usbhs_pkt *pkt;
while (1) {
pkt = usbhs_pkt_pop(pipe, NULL);
if (!pkt)
break;
/*
		 * if all packets are gone, usbhsh_endpoint_disable()
		 * will be called.
		 * then, the attached device/endpoint/pipe will be detached
*/
usbhsh_queue_done(priv, pkt);
}
}
static void usbhsh_queue_force_pop_all(struct usbhs_priv *priv)
{
struct usbhs_pipe *pos;
int i;
usbhs_for_each_pipe_with_dcp(pos, priv, i)
usbhsh_queue_force_pop(priv, pos);
}
/*
* DCP setup stage
*/
static int usbhsh_is_request_address(struct urb *urb)
{
struct usb_ctrlrequest *req;
req = (struct usb_ctrlrequest *)urb->setup_packet;
if ((DeviceOutRequest == req->bRequestType << 8) &&
(USB_REQ_SET_ADDRESS == req->bRequest))
return 1;
else
return 0;
}
static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usb_ctrlrequest req;
struct device *dev = usbhs_priv_to_dev(priv);
/*
* wait setup packet ACK
* see
* usbhsh_irq_setup_ack()
* usbhsh_irq_setup_err()
*/
init_completion(&hpriv->setup_ack_done);
/* copy original request */
memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
/*
	 * renesas_usbhs cannot use the original usb address.
	 * see HARDWARE LIMITATION.
	 * modify the usb address here to use the attached device.
* see usbhsh_device_attach()
*/
if (usbhsh_is_request_address(urb)) {
struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
		/* udev is an attached device */
req.wValue = usbhsh_device_number(hpriv, udev);
dev_dbg(dev, "create new address - %d\n", req.wValue);
}
/* set request */
usbhs_usbreq_set_val(priv, &req);
/*
* wait setup packet ACK
*/
wait_for_completion(&hpriv->setup_ack_done);
dev_dbg(dev, "%s done\n", __func__);
}
/*
* DCP data stage
*/
static void usbhsh_data_stage_packet_done(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
/* this ureq was connected to urb when usbhsh_urb_enqueue() */
usbhsh_ureq_free(hpriv, ureq);
}
static int usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
struct usbhs_pipe *pipe,
gfp_t mem_flags)
{
struct usbhsh_request *ureq;
/* this ureq will be freed on usbhsh_data_stage_packet_done() */
ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
if (unlikely(!ureq))
return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_data_stage_in_handler;
else
pipe->handler = &usbhs_dcp_data_stage_out_handler;
usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_data_stage_packet_done,
urb->transfer_buffer,
urb->transfer_buffer_length,
(urb->transfer_flags & URB_ZERO_PACKET),
-1);
return 0;
}
/*
* DCP status stage
*/
static int usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
struct usbhs_pipe *pipe,
gfp_t mem_flags)
{
struct usbhsh_request *ureq;
/* This ureq will be freed on usbhsh_queue_done() */
ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
if (unlikely(!ureq))
return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_status_stage_in_handler;
else
pipe->handler = &usbhs_dcp_status_stage_out_handler;
usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_queue_done,
NULL,
urb->transfer_buffer_length,
0, -1);
return 0;
}
static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mflags)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
int ret;
dev_dbg(dev, "%s\n", __func__);
/*
* setup stage
*
	 * usbhsh_setup_stage_packet_push() waits for SACK/SIGN
*/
usbhsh_setup_stage_packet_push(hpriv, urb, pipe);
/*
* data stage
*
	 * It is pushed only when the urb has a buffer.
*/
if (urb->transfer_buffer_length) {
ret = usbhsh_data_stage_packet_push(hpriv, urb, pipe, mflags);
if (ret < 0) {
dev_err(dev, "data stage failed\n");
return ret;
}
}
/*
* status stage
*/
ret = usbhsh_status_stage_packet_push(hpriv, urb, pipe, mflags);
if (ret < 0) {
dev_err(dev, "status stage failed\n");
return ret;
}
/*
* start pushed packets
*/
usbhs_pkt_start(pipe);
return 0;
}
/*
* dma map functions
*/
static int usbhsh_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
int map)
{
if (map) {
struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct urb *urb = ureq->urb;
/* it can not use scatter/gather */
if (urb->num_sgs)
return -EINVAL;
pkt->dma = urb->transfer_dma;
if (!pkt->dma)
return -EINVAL;
}
return 0;
}
/*
* for hc_driver
*/
static int usbhsh_host_start(struct usb_hcd *hcd)
{
return 0;
}
static void usbhsh_host_stop(struct usb_hcd *hcd)
{
}
static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
struct usb_host_endpoint *ep = urb->ep;
struct usbhsh_device *new_udev = NULL;
int is_dir_in = usb_pipein(urb->pipe);
int ret;
dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
if (!usbhsh_is_running(hpriv)) {
ret = -EIO;
dev_err(dev, "host is not running\n");
goto usbhsh_urb_enqueue_error_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret) {
dev_err(dev, "urb link failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
}
/*
* attach udev if needed
* see [image of mod_host]
*/
if (!usbhsh_device_get(hpriv, urb)) {
new_udev = usbhsh_device_attach(hpriv, urb);
if (!new_udev) {
ret = -EIO;
dev_err(dev, "device attach failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
}
}
/*
* attach endpoint if needed
* see [image of mod_host]
*/
if (!usbhsh_ep_to_uep(ep)) {
ret = usbhsh_endpoint_attach(hpriv, urb, mem_flags);
if (ret < 0) {
dev_err(dev, "endpoint attach failed\n");
goto usbhsh_urb_enqueue_error_free_device;
}
}
/*
* attach pipe to endpoint
* see [image of mod_host]
*/
ret = usbhsh_pipe_attach(hpriv, urb);
if (ret < 0) {
dev_err(dev, "pipe attach failed\n");
goto usbhsh_urb_enqueue_error_free_endpoint;
}
/*
* push packet
*/
if (usb_pipecontrol(urb->pipe))
ret = usbhsh_dcp_queue_push(hcd, urb, mem_flags);
else
ret = usbhsh_queue_push(hcd, urb, mem_flags);
return ret;
usbhsh_urb_enqueue_error_free_endpoint:
usbhsh_endpoint_detach(hpriv, ep);
usbhsh_urb_enqueue_error_free_device:
if (new_udev)
usbhsh_device_detach(hpriv, new_udev);
usbhsh_urb_enqueue_error_not_linked:
dev_dbg(dev, "%s error\n", __func__);
return ret;
}
static int usbhsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
if (ureq) {
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usbhs_pkt *pkt = &ureq->pkt;
usbhs_pkt_pop(pkt->pipe, pkt);
usbhsh_queue_done(priv, pkt);
}
return 0;
}
static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
struct usbhsh_device *udev;
struct usbhsh_hpriv *hpriv;
/*
	 * this function might be called many times for the same hcd/ep.
* in-endpoint == out-endpoint if ep == dcp.
*/
if (!uep)
return;
udev = usbhsh_uep_to_udev(uep);
hpriv = usbhsh_hcd_to_hpriv(hcd);
usbhsh_endpoint_detach(hpriv, ep);
/*
* if there is no endpoint,
* free device
*/
if (!usbhsh_device_has_endpoint(udev))
usbhsh_device_detach(hpriv, udev);
}
static int usbhsh_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
int roothub_id = 1; /* only 1 root hub */
/*
	 * was the port status changed?
* check USB_PORT_STAT_C_xxx << 16
*/
if (usbhsh_port_stat_get(hpriv) & 0xFFFF0000)
*buf = (1 << roothub_id);
else
*buf = 0;
return !!(*buf);
}
static int __usbhsh_hub_hub_feature(struct usbhsh_hpriv *hpriv,
u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
dev_dbg(dev, "%s :: C_HUB_xx\n", __func__);
return 0;
}
return -EPIPE;
}
static int __usbhsh_hub_port_feature(struct usbhsh_hpriv *hpriv,
u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
int enable = (typeReq == SetPortFeature);
int speed, i, timeout = 128;
int roothub_id = 1; /* only 1 root hub */
/* common error */
if (wIndex > roothub_id || wLength != 0)
return -EPIPE;
/* check wValue */
switch (wValue) {
case USB_PORT_FEAT_POWER:
usbhs_vbus_ctrl(priv, enable);
dev_dbg(dev, "%s :: USB_PORT_FEAT_POWER\n", __func__);
break;
case USB_PORT_FEAT_ENABLE:
case USB_PORT_FEAT_SUSPEND:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_SUSPEND:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_RESET:
dev_dbg(dev, "%s :: USB_PORT_FEAT_xxx\n", __func__);
break;
case USB_PORT_FEAT_RESET:
if (!enable)
break;
usbhsh_port_stat_clear(hpriv,
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_LOW_SPEED);
usbhsh_queue_force_pop_all(priv);
usbhs_bus_send_reset(priv);
msleep(20);
usbhs_bus_send_sof_enable(priv);
for (i = 0; i < timeout ; i++) {
switch (usbhs_bus_get_speed(priv)) {
case USB_SPEED_LOW:
speed = USB_PORT_STAT_LOW_SPEED;
goto got_usb_bus_speed;
case USB_SPEED_HIGH:
speed = USB_PORT_STAT_HIGH_SPEED;
goto got_usb_bus_speed;
case USB_SPEED_FULL:
speed = 0;
goto got_usb_bus_speed;
}
msleep(20);
}
return -EPIPE;
got_usb_bus_speed:
usbhsh_port_stat_set(hpriv, speed);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_ENABLE);
dev_dbg(dev, "%s :: USB_PORT_FEAT_RESET (speed = %d)\n",
__func__, speed);
/* status change is not needed */
return 0;
default:
return -EPIPE;
}
/* set/clear status */
if (enable)
usbhsh_port_stat_set(hpriv, (1 << wValue));
else
usbhsh_port_stat_clear(hpriv, (1 << wValue));
return 0;
}
static int __usbhsh_hub_get_status(struct usbhsh_hpriv *hpriv,
u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct usb_hub_descriptor *desc = (struct usb_hub_descriptor *)buf;
struct device *dev = usbhs_priv_to_dev(priv);
int roothub_id = 1; /* only 1 root hub */
switch (typeReq) {
case GetHubStatus:
dev_dbg(dev, "%s :: GetHubStatus\n", __func__);
*buf = 0x00;
break;
case GetPortStatus:
if (wIndex != roothub_id)
return -EPIPE;
dev_dbg(dev, "%s :: GetPortStatus\n", __func__);
*(__le32 *)buf = cpu_to_le32(usbhsh_port_stat_get(hpriv));
break;
case GetHubDescriptor:
desc->bDescriptorType = USB_DT_HUB;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = roothub_id;
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics =
cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM);
desc->u.hs.DeviceRemovable[0] = (roothub_id << 1);
desc->u.hs.DeviceRemovable[1] = ~0;
dev_dbg(dev, "%s :: GetHubDescriptor\n", __func__);
break;
}
return 0;
}
static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
int ret = -EPIPE;
switch (typeReq) {
/* Hub Feature */
case ClearHubFeature:
case SetHubFeature:
ret = __usbhsh_hub_hub_feature(hpriv, typeReq,
wValue, wIndex, buf, wLength);
break;
/* Port Feature */
case SetPortFeature:
case ClearPortFeature:
ret = __usbhsh_hub_port_feature(hpriv, typeReq,
wValue, wIndex, buf, wLength);
break;
/* Get status */
case GetHubStatus:
case GetPortStatus:
case GetHubDescriptor:
ret = __usbhsh_hub_get_status(hpriv, typeReq,
wValue, wIndex, buf, wLength);
break;
}
dev_dbg(dev, "typeReq = %x, ret = %d, port_stat = %x\n",
typeReq, ret, usbhsh_port_stat_get(hpriv));
return ret;
}
static int usbhsh_bus_nop(struct usb_hcd *hcd)
{
/* nothing to do */
return 0;
}
static const struct hc_driver usbhsh_driver = {
.description = usbhsh_hcd_name,
.hcd_priv_size = sizeof(struct usbhsh_hpriv),
/*
* generic hardware linkage
*/
.flags = HCD_DMA | HCD_USB2,
.start = usbhsh_host_start,
.stop = usbhsh_host_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = usbhsh_urb_enqueue,
.urb_dequeue = usbhsh_urb_dequeue,
.endpoint_disable = usbhsh_endpoint_disable,
/*
* root hub
*/
.hub_status_data = usbhsh_hub_status_data,
.hub_control = usbhsh_hub_control,
.bus_suspend = usbhsh_bus_nop,
.bus_resume = usbhsh_bus_nop,
};
/*
* interrupt functions
*/
static int usbhsh_irq_attch(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct device *dev = usbhs_priv_to_dev(priv);
dev_dbg(dev, "device attached\n");
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
/*
	 * the attch interrupt might fire endlessly on some devices
	 * (on a self-powered USB hub?)
	 * disable it here.
	 *
	 * usbhsh_is_running() becomes effective
	 * as a result of this.
* see
* usbhsh_is_running()
* usbhsh_urb_enqueue()
*/
hpriv->mod.irq_attch = NULL;
usbhs_irq_callback_update(priv, &hpriv->mod);
return 0;
}
static int usbhsh_irq_dtch(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct device *dev = usbhs_priv_to_dev(priv);
dev_dbg(dev, "device detached\n");
usbhsh_port_stat_clear(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
/*
* enable attch interrupt again
*
* usbhsh_is_running() becomes invalid
	 * as a result of this.
* see
* usbhsh_is_running()
* usbhsh_urb_enqueue()
*/
hpriv->mod.irq_attch = usbhsh_irq_attch;
usbhs_irq_callback_update(priv, &hpriv->mod);
/*
* usbhsh_queue_force_pop_all() should be called
* after usbhsh_is_running() becomes invalid.
*/
usbhsh_queue_force_pop_all(priv);
return 0;
}
static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct device *dev = usbhs_priv_to_dev(priv);
dev_dbg(dev, "setup packet OK\n");
complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct device *dev = usbhs_priv_to_dev(priv);
dev_dbg(dev, "setup packet Err\n");
complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
/*
* module start/stop
*/
static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usbhs_pipe *pipe;
struct renesas_usbhs_driver_pipe_config *pipe_configs =
usbhs_get_dparam(priv, pipe_configs);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
int old_type, dir_in, i;
	/* init all pipes */
old_type = USB_ENDPOINT_XFER_CONTROL;
for (i = 0; i < pipe_size; i++) {
/*
* data "output" will be finished as soon as possible,
		 * but there is no guarantee in the data "input" case.
		 *
		 * "input" needs a "standby" pipe.
		 * So, having more "input" direction pipes than "output"
		 * direction pipes is a good idea.
		 *
		 * the 1st USB_ENDPOINT_XFER_xxx will be the output direction,
		 * and the others will be the input direction here.
*
* ex)
* ...
* USB_ENDPOINT_XFER_ISOC -> dir out
* USB_ENDPOINT_XFER_ISOC -> dir in
* USB_ENDPOINT_XFER_BULK -> dir out
* USB_ENDPOINT_XFER_BULK -> dir in
* USB_ENDPOINT_XFER_BULK -> dir in
* ...
*/
dir_in = (pipe_configs[i].type == old_type);
old_type = pipe_configs[i].type;
if (USB_ENDPOINT_XFER_CONTROL == pipe_configs[i].type) {
pipe = usbhs_dcp_malloc(priv);
usbhsh_hpriv_to_dcp(hpriv) = pipe;
} else {
pipe = usbhs_pipe_malloc(priv,
pipe_configs[i].type,
dir_in);
}
pipe->mod_private = NULL;
}
}
static int usbhsh_start(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
/* add hcd */
ret = usb_add_hcd(hcd, 0, 0);
if (ret < 0)
		return ret;
device_wakeup_enable(hcd->self.controller);
/*
* pipe initialize and enable DCP
*/
usbhs_fifo_init(priv);
usbhs_pipe_init(priv,
usbhsh_dma_map_ctrl);
usbhsh_pipe_init_for_host(priv);
/*
	 * system config enable
* - HI speed
* - host
* - usb module
*/
usbhs_sys_host_ctrl(priv, 1);
/*
* enable irq callback
*/
mod->irq_attch = usbhsh_irq_attch;
mod->irq_dtch = usbhsh_irq_dtch;
mod->irq_sack = usbhsh_irq_setup_ack;
mod->irq_sign = usbhsh_irq_setup_err;
usbhs_irq_callback_update(priv, mod);
dev_dbg(dev, "start host\n");
return ret;
}
static int usbhsh_stop(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
/*
* disable irq callback
*/
mod->irq_attch = NULL;
mod->irq_dtch = NULL;
mod->irq_sack = NULL;
mod->irq_sign = NULL;
usbhs_irq_callback_update(priv, mod);
usb_remove_hcd(hcd);
/* disable sys */
usbhs_sys_host_ctrl(priv, 0);
dev_dbg(dev, "quit host\n");
return 0;
}
int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
struct usbhsh_device *udev;
struct device *dev = usbhs_priv_to_dev(priv);
int i;
/* initialize hcd */
hcd = usb_create_hcd(&usbhsh_driver, dev, usbhsh_hcd_name);
if (!hcd) {
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
hcd->has_tt = 1; /* for low/full speed */
/*
* CAUTION
*
* There is no guarantee that it is possible to access usb module here.
	 * Don't access it.
	 * Access will be enabled after "usbhsh_start"
*/
hpriv = usbhsh_hcd_to_hpriv(hcd);
/*
* register itself
*/
usbhs_mod_register(priv, &hpriv->mod, USBHS_HOST);
/* init hpriv */
hpriv->mod.name = "host";
hpriv->mod.start = usbhsh_start;
hpriv->mod.stop = usbhsh_stop;
usbhsh_port_stat_init(hpriv);
	/* init all devices */
usbhsh_for_each_udev_with_dev0(udev, hpriv, i) {
udev->usbv = NULL;
INIT_LIST_HEAD(&udev->ep_list_head);
}
dev_info(dev, "host probed\n");
return 0;
}
int usbhs_mod_host_remove(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
usb_put_hcd(hcd);
return 0;
}
| linux-master | drivers/usb/renesas_usbhs/mod_host.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include "common.h"
/*
* struct
*/
struct usbhsg_request {
struct usb_request req;
struct usbhs_pkt pkt;
};
#define EP_NAME_SIZE 8
struct usbhsg_gpriv;
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
struct usbhsg_gpriv *gpriv;
};
struct usbhsg_gpriv {
struct usb_gadget gadget;
struct usbhs_mod mod;
struct usbhsg_uep *uep;
int uep_size;
struct usb_gadget_driver *driver;
struct usb_phy *transceiver;
bool vbus_active;
u32 status;
#define USBHSG_STATUS_STARTED (1 << 0)
#define USBHSG_STATUS_REGISTERD (1 << 1)
#define USBHSG_STATUS_WEDGE (1 << 2)
#define USBHSG_STATUS_SELF_POWERED (1 << 3)
#define USBHSG_STATUS_SOFT_CONNECT (1 << 4)
};
struct usbhsg_recip_handle {
char *name;
int (*device)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl);
int (*interface)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl);
int (*endpoint)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl);
};
/*
* macro
*/
#define usbhsg_priv_to_gpriv(priv) \
container_of( \
usbhs_mod_get(priv, USBHS_GADGET), \
struct usbhsg_gpriv, mod)
#define __usbhsg_for_each_uep(start, pos, g, i) \
for ((i) = start; \
((i) < (g)->uep_size) && ((pos) = (g)->uep + (i)); \
(i)++)
#define usbhsg_for_each_uep(pos, gpriv, i) \
__usbhsg_for_each_uep(1, pos, gpriv, i)
#define usbhsg_for_each_uep_with_dcp(pos, gpriv, i) \
__usbhsg_for_each_uep(0, pos, gpriv, i)
#define usbhsg_gadget_to_gpriv(g)\
container_of(g, struct usbhsg_gpriv, gadget)
#define usbhsg_req_to_ureq(r)\
container_of(r, struct usbhsg_request, req)
#define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep)
#define usbhsg_gpriv_to_dev(gp) usbhs_priv_to_dev((gp)->mod.priv)
#define usbhsg_gpriv_to_priv(gp) ((gp)->mod.priv)
#define usbhsg_gpriv_to_dcp(gp) ((gp)->uep)
#define usbhsg_gpriv_to_nth_uep(gp, i) ((gp)->uep + i)
#define usbhsg_uep_to_gpriv(u) ((u)->gpriv)
#define usbhsg_uep_to_pipe(u) ((u)->pipe)
#define usbhsg_pipe_to_uep(p) ((p)->mod_private)
#define usbhsg_is_dcp(u) ((u) == usbhsg_gpriv_to_dcp((u)->gpriv))
#define usbhsg_ureq_to_pkt(u) (&(u)->pkt)
#define usbhsg_pkt_to_ureq(i) \
container_of(i, struct usbhsg_request, pkt)
#define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN)
/* status */
#define usbhsg_status_init(gp) do {(gp)->status = 0; } while (0)
#define usbhsg_status_set(gp, b) (gp->status |= b)
#define usbhsg_status_clr(gp, b) (gp->status &= ~b)
#define usbhsg_status_has(gp, b) (gp->status & b)
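/*
 * Illustrative note: the conversion macros above rely on container_of(),
 * so they are valid only for objects really embedded in this driver's
 * private structures, e.g.
 *
 * struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
 * struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
 *
 * which is why usbhsg_ep_alloc_request() below allocates a whole
 * struct usbhsg_request and hands back only its embedded usb_request.
 */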
/*
* queue push/pop
*/
static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
struct usbhsg_request *ureq,
int status)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (pipe)
dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
ureq->req.status = status;
spin_unlock(usbhs_priv_to_lock(priv));
usb_gadget_giveback_request(&uep->ep, &ureq->req);
spin_lock(usbhs_priv_to_lock(priv));
}
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
struct usbhsg_request *ureq,
int status)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
unsigned long flags;
usbhs_lock(priv, flags);
__usbhsg_queue_pop(uep, ureq, status);
usbhs_unlock(priv, flags);
}
static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
unsigned long flags;
ureq->req.actual = pkt->actual;
usbhs_lock(priv, flags);
if (uep)
__usbhsg_queue_pop(uep, ureq, 0);
usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,
struct usbhsg_request *ureq)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
struct usb_request *req = &ureq->req;
req->actual = 0;
req->status = -EINPROGRESS;
usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
req->buf, req->length, req->zero, -1);
usbhs_pkt_start(pipe);
dev_dbg(dev, "pipe %d : queue push (%d)\n",
usbhs_pipe_number(pipe),
req->length);
}
/*
* dma map/unmap
*/
static int usbhsg_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
int map)
{
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
struct usb_request *req = &ureq->req;
struct usbhs_pipe *pipe = pkt->pipe;
enum dma_data_direction dir;
int ret = 0;
dir = usbhs_pipe_is_dir_host(pipe);
if (map) {
/* it can not use scatter/gather */
WARN_ON(req->num_sgs);
ret = usb_gadget_map_request_by_dev(dma_dev, req, dir);
if (ret < 0)
return ret;
pkt->dma = req->dma;
} else {
usb_gadget_unmap_request_by_dev(dma_dev, req, dir);
}
return ret;
}
/*
* USB_TYPE_STANDARD / clear feature functions
*/
static int usbhsg_recip_handler_std_control_done(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
usbhs_dcp_control_transfer_done(pipe);
return 0;
}
static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) {
usbhs_pipe_disable(pipe);
usbhs_pipe_sequence_data0(pipe);
usbhs_pipe_enable(pipe);
}
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
usbhs_pkt_start(pipe);
return 0;
}
static struct usbhsg_recip_handle req_clear_feature = {
.name = "clear feature",
.device = usbhsg_recip_handler_std_control_done,
.interface = usbhsg_recip_handler_std_control_done,
.endpoint = usbhsg_recip_handler_std_clear_endpoint,
};
/*
* USB_TYPE_STANDARD / set feature functions
*/
static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
switch (le16_to_cpu(ctrl->wValue)) {
case USB_DEVICE_TEST_MODE:
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
udelay(100);
usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
break;
default:
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
break;
}
return 0;
}
static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
usbhs_pipe_stall(pipe);
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
return 0;
}
static struct usbhsg_recip_handle req_set_feature = {
.name = "set feature",
.device = usbhsg_recip_handler_std_set_device,
.interface = usbhsg_recip_handler_std_control_done,
.endpoint = usbhsg_recip_handler_std_set_endpoint,
};
/*
* USB_TYPE_STANDARD / get status functions
*/
static void __usbhsg_recip_send_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
/* free allocated recip-buffer/usb_request */
kfree(ureq->pkt.buf);
usb_ep_free_request(ep, req);
}
static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
unsigned short status)
{
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usb_request *req;
__le16 *buf;
/* alloc new usb_request for recip */
req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
if (!req) {
dev_err(dev, "recip request allocation fail\n");
return;
}
/* alloc recip data buffer */
buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
if (!buf) {
usb_ep_free_request(&dcp->ep, req);
return;
}
/* recip data is status */
*buf = cpu_to_le16(status);
/* allocated usb_request/buffer will be freed */
req->complete = __usbhsg_recip_send_complete;
req->buf = buf;
req->length = sizeof(*buf);
req->zero = 0;
/* push packet */
pipe->handler = &usbhs_fifo_pio_push_handler;
usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
}
static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
unsigned short status = 0;
if (usbhsg_status_has(gpriv, USBHSG_STATUS_SELF_POWERED))
status = 1 << USB_DEVICE_SELF_POWERED;
__usbhsg_recip_send_status(gpriv, status);
return 0;
}
static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
unsigned short status = 0;
__usbhsg_recip_send_status(gpriv, status);
return 0;
}
static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
unsigned short status = 0;
if (usbhs_pipe_is_stall(pipe))
status = 1 << USB_ENDPOINT_HALT;
__usbhsg_recip_send_status(gpriv, status);
return 0;
}
static struct usbhsg_recip_handle req_get_status = {
.name = "get status",
.device = usbhsg_recip_handler_std_get_device,
.interface = usbhsg_recip_handler_std_get_interface,
.endpoint = usbhsg_recip_handler_std_get_endpoint,
};
/*
* USB_TYPE handler
*/
static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
struct usbhsg_recip_handle *handler,
struct usb_ctrlrequest *ctrl)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhsg_uep *uep;
struct usbhs_pipe *pipe;
int recip = ctrl->bRequestType & USB_RECIP_MASK;
int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
int ret = 0;
int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl);
char *msg;
uep = usbhsg_gpriv_to_nth_uep(gpriv, nth);
pipe = usbhsg_uep_to_pipe(uep);
if (!pipe) {
dev_err(dev, "wrong recip request\n");
return -EINVAL;
}
switch (recip) {
case USB_RECIP_DEVICE:
msg = "DEVICE";
func = handler->device;
break;
case USB_RECIP_INTERFACE:
msg = "INTERFACE";
func = handler->interface;
break;
case USB_RECIP_ENDPOINT:
msg = "ENDPOINT";
func = handler->endpoint;
break;
default:
dev_warn(dev, "unsupported RECIP(%d)\n", recip);
func = NULL;
ret = -EINVAL;
}
if (func) {
dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
ret = func(priv, uep, ctrl);
}
return ret;
}
/*
* irq functions
*
* it will be called from usbhs_interrupt
*/
static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
(state & SUSPENDED_STATE)) {
if (gpriv->driver && gpriv->driver->suspend)
gpriv->driver->suspend(&gpriv->gadget);
usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
}
return 0;
}
static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usb_ctrlrequest ctrl;
struct usbhsg_recip_handle *recip_handler = NULL;
int stage = usbhs_status_get_ctrl_stage(irq_state);
int ret = 0;
dev_dbg(dev, "stage = %d\n", stage);
/*
* see Manual
*
* "Operation"
* - "Interrupt Function"
* - "Control Transfer Stage Transition Interrupt"
* - Fig. "Control Transfer Stage Transitions"
*/
switch (stage) {
case READ_DATA_STAGE:
pipe->handler = &usbhs_fifo_pio_push_handler;
break;
case WRITE_DATA_STAGE:
pipe->handler = &usbhs_fifo_pio_pop_handler;
break;
case NODATA_STATUS_STAGE:
pipe->handler = &usbhs_ctrl_stage_end_handler;
break;
case READ_STATUS_STAGE:
case WRITE_STATUS_STAGE:
usbhs_dcp_control_transfer_done(pipe);
fallthrough;
default:
return ret;
}
/*
* get usb request
*/
usbhs_usbreq_get_val(priv, &ctrl);
switch (ctrl.bRequestType & USB_TYPE_MASK) {
case USB_TYPE_STANDARD:
switch (ctrl.bRequest) {
case USB_REQ_CLEAR_FEATURE:
recip_handler = &req_clear_feature;
break;
case USB_REQ_SET_FEATURE:
recip_handler = &req_set_feature;
break;
case USB_REQ_GET_STATUS:
recip_handler = &req_get_status;
break;
}
}
/*
* setup stage / run recip
*/
if (recip_handler)
ret = usbhsg_recip_run_handle(priv, recip_handler, &ctrl);
else
ret = gpriv->driver->setup(&gpriv->gadget, &ctrl);
if (ret < 0)
usbhs_pipe_stall(pipe);
return ret;
}
/*
*
* usb_dcp_ops
*
*/
static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
{
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhs_pkt *pkt;
while (1) {
pkt = usbhs_pkt_pop(pipe, NULL);
if (!pkt)
break;
usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ESHUTDOWN);
}
usbhs_pipe_disable(pipe);
return 0;
}
/*
*
* usb_ep_ops
*
*/
static int usbhsg_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
int ret = -EIO;
unsigned long flags;
usbhs_lock(priv, flags);
/*
* if it already has a pipe,
* just reinitialize it
*/
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
usbhs_pipe_sequence_data0(uep->pipe);
ret = 0;
goto usbhsg_ep_enable_end;
}
pipe = usbhs_pipe_malloc(priv,
usb_endpoint_type(desc),
usb_endpoint_dir_in(desc));
if (pipe) {
uep->pipe = pipe;
pipe->mod_private = uep;
/* set epnum / maxp */
usbhs_pipe_config_update(pipe, 0,
usb_endpoint_num(desc),
usb_endpoint_maxp(desc));
/*
* usbhs_fifo_dma_push/pop_handler try to
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
} else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
usbhs_xxxsts_clear(priv, BRDYSTS,
usbhs_pipe_number(pipe));
}
ret = 0;
}
usbhsg_ep_enable_end:
usbhs_unlock(priv, flags);
return ret;
}
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe;
unsigned long flags;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
if (!pipe)
goto out;
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
out:
spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
gfp_t gfp_flags)
{
struct usbhsg_request *ureq;
ureq = kzalloc(sizeof *ureq, gfp_flags);
if (!ureq)
return NULL;
usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq));
return &ureq->req;
}
static void usbhsg_ep_free_request(struct usb_ep *ep,
struct usb_request *req)
{
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
WARN_ON(!list_empty(&ureq->pkt.node));
kfree(ureq);
}
static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
/* param check */
if (usbhsg_is_not_connected(gpriv) ||
unlikely(!gpriv->driver) ||
unlikely(!pipe))
return -ESHUTDOWN;
usbhsg_queue_push(uep, ureq);
return 0;
}
static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
struct usbhs_pipe *pipe;
unsigned long flags;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
/*
* To dequeue a request, this driver must call usbhsg_queue_pop()
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
unsigned long flags;
int ret = 0;
dev_dbg(dev, "set halt %d (pipe %d)\n",
halt, usbhs_pipe_number(pipe));
/******************** spin lock ********************/
usbhs_lock(priv, flags);
/*
* According to usb_ep_set_halt()'s description, this function should
* return -EAGAIN if the IN endpoint has any queued requests or pending
* data. Note that usbhs_pipe_is_dir_in() returns false if the pipe is
* an IN endpoint in gadget mode.
*/
if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
usbhs_pipe_contains_transmittable_data(pipe))) {
ret = -EAGAIN;
goto out;
}
if (halt)
usbhs_pipe_stall(pipe);
else
usbhs_pipe_disable(pipe);
if (halt && wedge)
usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
out:
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
return ret;
}
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
{
return __usbhsg_ep_set_halt_wedge(ep, value, 0);
}
static int usbhsg_ep_set_wedge(struct usb_ep *ep)
{
return __usbhsg_ep_set_halt_wedge(ep, 1, 1);
}
static const struct usb_ep_ops usbhsg_ep_ops = {
.enable = usbhsg_ep_enable,
.disable = usbhsg_ep_disable,
.alloc_request = usbhsg_ep_alloc_request,
.free_request = usbhsg_ep_free_request,
.queue = usbhsg_ep_queue,
.dequeue = usbhsg_ep_dequeue,
.set_halt = usbhsg_ep_set_halt,
.set_wedge = usbhsg_ep_set_wedge,
};
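/*
 * Illustrative sketch (not part of this driver): a gadget function
 * driver reaches the ops above only through the gadget core, roughly
 * as follows (error handling omitted; assumes ep->desc was already set
 * up, e.g. by config_ep_by_speed(); my_complete is hypothetical):
 *
 * usb_ep_enable(ep);                           -> usbhsg_ep_enable()
 * req = usb_ep_alloc_request(ep, GFP_KERNEL);  -> usbhsg_ep_alloc_request()
 * req->buf      = buf;
 * req->length   = len;
 * req->complete = my_complete;
 * usb_ep_queue(ep, req, GFP_ATOMIC);           -> usbhsg_ep_queue()
 */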
/*
* pullup control
*/
static int usbhsg_can_pullup(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
return gpriv->driver &&
usbhsg_status_has(gpriv, USBHSG_STATUS_SOFT_CONNECT);
}
static void usbhsg_update_pullup(struct usbhs_priv *priv)
{
if (usbhsg_can_pullup(priv))
usbhs_sys_function_pullup(priv, 1);
else
usbhs_sys_function_pullup(priv, 0);
}
/*
* usb module start/end
*/
static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
int ret = 0;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
usbhsg_status_set(gpriv, status);
if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)))
ret = -1; /* not ready */
usbhs_unlock(priv, flags);
/******************** spin unlock ********************/
if (ret < 0)
return 0; /* not ready is not error */
/*
* enable interrupts and systems if ready
*/
dev_dbg(dev, "start gadget\n");
/*
* initialize pipes and enable the DCP
*/
usbhs_fifo_init(priv);
usbhs_pipe_init(priv,
usbhsg_dma_map_ctrl);
/* dcp init instead of usbhsg_ep_enable() */
dcp->pipe = usbhs_dcp_malloc(priv);
dcp->pipe->mod_private = dcp;
usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
/*
* system config enable
* - Hi-Speed
* - function
* - usb module
*/
usbhs_sys_function_ctrl(priv, 1);
usbhsg_update_pullup(priv);
/*
* enable irq callback
*/
mod->irq_dev_state = usbhsg_irq_dev_state;
mod->irq_ctrl_stage = usbhsg_irq_ctrl_stage;
usbhs_irq_callback_update(priv, mod);
return 0;
}
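/*
 * Illustrative summary of the gating in usbhsg_try_start(): the
 * hardware is brought up only once both flags are set, one from the
 * module start path and one from udc_start:
 *
 * STARTED REGISTERD -> hardware
 * 0       0         -> off
 * 1       0         -> off (waiting for a gadget driver)
 * 0       1         -> off (waiting for module start)
 * 1       1         -> on
 */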
static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
usbhsg_status_clr(gpriv, status);
if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
!usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))
ret = -1; /* already done */
usbhs_unlock(priv, flags);
/******************** spin unlock ********************/
if (ret < 0)
return 0; /* already done is not error */
/*
* disable interrupts and systems on the 1st try
*/
usbhs_fifo_quit(priv);
/* disable all irq */
mod->irq_dev_state = NULL;
mod->irq_ctrl_stage = NULL;
usbhs_irq_callback_update(priv, mod);
gpriv->gadget.speed = USB_SPEED_UNKNOWN;
/* disable sys */
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
/* disable all eps */
usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
return 0;
}
/*
* VBUS provided by the PHY
*/
static int usbhsm_phy_get_vbus(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
return gpriv->vbus_active;
}
static void usbhs_mod_phy_mode(struct usbhs_priv *priv)
{
struct usbhs_mod_info *info = &priv->mod_info;
info->irq_vbus = NULL;
info->get_vbus = usbhsm_phy_get_vbus;
usbhs_irq_callback_update(priv, NULL);
}
/*
*
* linux usb function
*
*/
static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
if (!driver ||
!driver->setup ||
driver->max_speed < USB_SPEED_FULL)
return -EINVAL;
/* connect to bus through transceiver */
if (!IS_ERR_OR_NULL(gpriv->transceiver)) {
ret = otg_set_peripheral(gpriv->transceiver->otg,
&gpriv->gadget);
if (ret) {
dev_err(dev, "%s: can't bind to transceiver\n",
gpriv->gadget.name);
return ret;
}
/* get vbus using phy versions */
usbhs_mod_phy_mode(priv);
}
/* first hook up the driver ... */
gpriv->driver = driver;
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
if (!IS_ERR_OR_NULL(gpriv->transceiver))
otg_set_peripheral(gpriv->transceiver->otg, NULL);
gpriv->driver = NULL;
return 0;
}
/*
* usb gadget ops
*/
static int usbhsg_get_frame(struct usb_gadget *gadget)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
return usbhs_frame_get_num(priv);
}
static int usbhsg_pullup(struct usb_gadget *gadget, int is_on)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
unsigned long flags;
usbhs_lock(priv, flags);
if (is_on)
usbhsg_status_set(gpriv, USBHSG_STATUS_SOFT_CONNECT);
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_SOFT_CONNECT);
usbhsg_update_pullup(priv);
usbhs_unlock(priv, flags);
return 0;
}
static int usbhsg_set_selfpowered(struct usb_gadget *gadget, int is_self)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
if (is_self)
usbhsg_status_set(gpriv, USBHSG_STATUS_SELF_POWERED);
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_SELF_POWERED);
gadget->is_selfpowered = (is_self != 0);
return 0;
}
static int usbhsg_vbus_session(struct usb_gadget *gadget, int is_active)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
gpriv->vbus_active = !!is_active;
usbhsc_schedule_notify_hotplug(pdev);
return 0;
}
static const struct usb_gadget_ops usbhsg_gadget_ops = {
.get_frame = usbhsg_get_frame,
.set_selfpowered = usbhsg_set_selfpowered,
.udc_start = usbhsg_gadget_start,
.udc_stop = usbhsg_gadget_stop,
.pullup = usbhsg_pullup,
.vbus_session = usbhsg_vbus_session,
};
static int usbhsg_start(struct usbhs_priv *priv)
{
return usbhsg_try_start(priv, USBHSG_STATUS_STARTED);
}
static int usbhsg_stop(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
/* cable disconnect */
if (gpriv->driver &&
gpriv->driver->disconnect)
gpriv->driver->disconnect(&gpriv->gadget);
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv;
struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
struct renesas_usbhs_driver_pipe_config *pipe_configs =
usbhs_get_dparam(priv, pipe_configs);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
int ret;
gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL);
if (!gpriv)
return -ENOMEM;
uep = kcalloc(pipe_size, sizeof(struct usbhsg_uep), GFP_KERNEL);
if (!uep) {
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
!IS_ERR(gpriv->transceiver) ? "" : "no ");
/*
* CAUTION
*
* There is no guarantee that the USB module is accessible here.
* Don't access it.
* Access becomes possible only after "usbhsg_start".
*/
/*
* register itself
*/
usbhs_mod_register(priv, &gpriv->mod, USBHS_GADGET);
/* init gpriv */
gpriv->mod.name = "gadget";
gpriv->mod.start = usbhsg_start;
gpriv->mod.stop = usbhsg_stop;
gpriv->uep = uep;
gpriv->uep_size = pipe_size;
usbhsg_status_init(gpriv);
/*
* init gadget
*/
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.max_speed = USB_SPEED_HIGH;
gpriv->gadget.quirk_avoids_skb_reserve = usbhs_get_dparam(priv,
has_usb_dmac);
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
/*
* init usb_ep
*/
usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
uep->gpriv = gpriv;
uep->pipe = NULL;
snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);
uep->ep.name = uep->ep_name;
uep->ep.ops = &usbhsg_ep_ops;
INIT_LIST_HEAD(&uep->ep.ep_list);
spin_lock_init(&uep->lock);
/* init DCP */
if (usbhsg_is_dcp(uep)) {
gpriv->gadget.ep0 = &uep->ep;
usb_ep_set_maxpacket_limit(&uep->ep, 64);
uep->ep.caps.type_control = true;
} else {
/* init normal pipe */
if (pipe_configs[i].type == USB_ENDPOINT_XFER_ISOC)
uep->ep.caps.type_iso = true;
if (pipe_configs[i].type == USB_ENDPOINT_XFER_BULK)
uep->ep.caps.type_bulk = true;
if (pipe_configs[i].type == USB_ENDPOINT_XFER_INT)
uep->ep.caps.type_int = true;
usb_ep_set_maxpacket_limit(&uep->ep,
pipe_configs[i].bufsize);
list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list);
}
uep->ep.caps.dir_in = true;
uep->ep.caps.dir_out = true;
}
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
goto err_add_udc;
dev_info(dev, "gadget probed\n");
return 0;
err_add_udc:
kfree(gpriv->uep);
usbhs_mod_gadget_probe_err_gpriv:
kfree(gpriv);
return ret;
}
void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
usb_del_gadget_udc(&gpriv->gadget);
kfree(gpriv->uep);
kfree(gpriv);
}
| linux-master | drivers/usb/renesas_usbhs/mod_gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB driver RZ/A initialization and power control
*
* Copyright (C) 2018 Chris Brandt
* Copyright (C) 2018-2019 Renesas Electronics Corporation
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include "common.h"
#include "rza.h"
static int usbhs_rza1_hardware_init(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
struct device_node *usb_x1_clk, *extal_clk;
u32 freq_usb = 0, freq_extal = 0;
/* Input Clock Selection (NOTE: ch0 controls both ch0 and ch1) */
usb_x1_clk = of_find_node_by_name(NULL, "usb_x1");
extal_clk = of_find_node_by_name(NULL, "extal");
of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
of_node_put(usb_x1_clk);
of_node_put(extal_clk);
if (freq_usb == 0) {
if (freq_extal == 12000000) {
/* Select 12MHz XTAL */
usbhs_bset(priv, SYSCFG, UCKSEL, UCKSEL);
} else {
dev_err(usbhs_priv_to_dev(priv), "A 48MHz USB clock or 12MHz main clock is required.\n");
return -EIO;
}
}
/* Enable USB PLL (NOTE: ch0 controls both ch0 and ch1) */
usbhs_bset(priv, SYSCFG, UPLLE, UPLLE);
usleep_range(1000, 2000);
usbhs_bset(priv, SUSPMODE, SUSPM, SUSPM);
return 0;
}
const struct renesas_usbhs_platform_info usbhs_rza1_plat_info = {
.platform_callback = {
.hardware_init = usbhs_rza1_hardware_init,
.get_id = usbhs_get_id_as_gadget,
},
.driver_param = {
.has_new_pipe_configs = 1,
},
};
| linux-master | drivers/usb/renesas_usbhs/rza.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* This is a text format reader.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include "usb_mon.h"
/*
* No, we do not want arbitrarily long data strings.
* Use the binary interface if you want to capture bulk data!
*/
#define DATA_MAX 32
/*
* Defined by USB 2.0 clause 9.3, table 9.2.
*/
#define SETUP_MAX 8
/*
* This limit exists to prevent OOMs when the user process stops reading.
* If usbmon were available to unprivileged processes, it might be open
* to a local DoS. But we have to keep to root in order to prevent
* password sniffing from HID devices.
*/
#define EVENT_MAX (4*PAGE_SIZE / sizeof(struct mon_event_text))
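/*
 * Illustrative arithmetic (assuming 4 KiB pages and a struct
 * mon_event_text of roughly 200 bytes): EVENT_MAX is about
 * 16384 / 200, i.e. on the order of 80 pending events per reader.
 */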
/*
* Potentially unlimited number; we limit it for similar allocations.
* The usbfs limits this to 128, but we're not quite as generous.
*/
#define ISODESC_MAX 5
#define PRINTF_DFL 250 /* with 5 ISOs segs */
struct mon_iso_desc {
int status;
unsigned int offset;
unsigned int length; /* Unsigned here, signed in URB. Historic. */
};
struct mon_event_text {
struct list_head e_link;
int type; /* submit, complete, etc. */
unsigned long id; /* From pointer, most of the time */
unsigned int tstamp;
int busnum;
char devnum;
char epnum;
char is_in;
char xfertype;
int length; /* Depends on type: xfer length or act length */
int status;
int interval;
int start_frame;
int error_count;
char setup_flag;
char data_flag;
int numdesc; /* Full number */
struct mon_iso_desc isodesc[ISODESC_MAX];
unsigned char setup[SETUP_MAX];
unsigned char data[DATA_MAX];
};
#define SLAB_NAME_SZ 30
struct mon_reader_text {
struct kmem_cache *e_slab;
int nevents;
struct list_head e_list;
struct mon_reader r; /* In C, parent class can be placed anywhere */
wait_queue_head_t wait;
int printf_size;
size_t printf_offset;
size_t printf_togo;
char *printf_buf;
struct mutex printf_lock;
char slab_name[SLAB_NAME_SZ];
};
static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */
static void mon_text_ctor(void *);
struct mon_text_ptr {
int cnt, limit;
char *pbuf;
};
static struct mon_event_text *
mon_text_read_wait(struct mon_reader_text *rp, struct file *file);
static void mon_text_read_head_t(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_head_u(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_statset(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_intstat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_isostat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_isodesc(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
static void mon_text_read_data(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep);
/*
* mon_text_submit
* mon_text_complete
*
* May be called from an interrupt.
*
* This is called with the whole mon_bus locked, so no additional lock.
*/
static inline char mon_text_get_setup(struct mon_event_text *ep,
struct urb *urb, char ev_type, struct mon_bus *mbus)
{
if (ep->xfertype != USB_ENDPOINT_XFER_CONTROL || ev_type != 'S')
return '-';
if (urb->setup_packet == NULL)
return 'Z'; /* '0' would not be as pretty. */
memcpy(ep->setup, urb->setup_packet, SETUP_MAX);
return 0;
}
static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
int len, char ev_type, struct mon_bus *mbus)
{
void *src;
if (len <= 0)
return 'L';
if (len >= DATA_MAX)
len = DATA_MAX;
if (ep->is_in) {
if (ev_type != 'C')
return '<';
} else {
if (ev_type != 'S')
return '>';
}
if (urb->num_sgs == 0) {
src = urb->transfer_buffer;
if (src == NULL)
return 'Z'; /* '0' would not be as pretty. */
} else {
struct scatterlist *sg = urb->sg;
if (PageHighMem(sg_page(sg)))
return 'D';
/* For the text interface we copy only the first sg buffer */
len = min_t(int, sg->length, len);
src = sg_virt(sg);
}
memcpy(ep->data, src, len);
return 0;
}
static inline unsigned int mon_get_timestamp(void)
{
struct timespec64 now;
unsigned int stamp;
ktime_get_ts64(&now);
stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC;
return stamp;
}
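/*
 * Illustrative example: with tv_sec = 4097 and tv_nsec = 500000,
 * stamp = (4097 & 0xFFF) * 1000000 + 500 = 1000500, i.e. the counter
 * restarts every 4096 seconds and readers must treat it as wrapping.
 */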
static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
char ev_type, int status)
{
struct mon_event_text *ep;
unsigned int stamp;
struct usb_iso_packet_descriptor *fp;
struct mon_iso_desc *dp;
int i, ndesc;
stamp = mon_get_timestamp();
if (rp->nevents >= EVENT_MAX ||
(ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
rp->r.m_bus->cnt_text_lost++;
return;
}
ep->type = ev_type;
ep->id = (unsigned long) urb;
ep->busnum = urb->dev->bus->busnum;
ep->devnum = urb->dev->devnum;
ep->epnum = usb_endpoint_num(&urb->ep->desc);
ep->xfertype = usb_endpoint_type(&urb->ep->desc);
ep->is_in = usb_urb_dir_in(urb);
ep->tstamp = stamp;
ep->length = (ev_type == 'S') ?
urb->transfer_buffer_length : urb->actual_length;
/* Collecting status makes debugging sense for submits, too */
ep->status = status;
if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
ep->interval = urb->interval;
} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
ep->interval = urb->interval;
ep->start_frame = urb->start_frame;
ep->error_count = urb->error_count;
}
ep->numdesc = urb->number_of_packets;
if (ep->xfertype == USB_ENDPOINT_XFER_ISOC &&
urb->number_of_packets > 0) {
if ((ndesc = urb->number_of_packets) > ISODESC_MAX)
ndesc = ISODESC_MAX;
fp = urb->iso_frame_desc;
dp = ep->isodesc;
for (i = 0; i < ndesc; i++) {
dp->status = fp->status;
dp->offset = fp->offset;
dp->length = (ev_type == 'S') ?
fp->length : fp->actual_length;
fp++;
dp++;
}
/* Wasteful, but simple to understand: ISO 'C' is sparse. */
if (ev_type == 'C')
ep->length = urb->transfer_buffer_length;
}
ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus);
ep->data_flag = mon_text_get_data(ep, urb, ep->length, ev_type,
rp->r.m_bus);
rp->nevents++;
list_add_tail(&ep->e_link, &rp->e_list);
wake_up(&rp->wait);
}
static void mon_text_submit(void *data, struct urb *urb)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, urb, 'S', -EINPROGRESS);
}
static void mon_text_complete(void *data, struct urb *urb, int status)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, urb, 'C', status);
}
static void mon_text_error(void *data, struct urb *urb, int error)
{
struct mon_reader_text *rp = data;
struct mon_event_text *ep;
if (rp->nevents >= EVENT_MAX ||
(ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
rp->r.m_bus->cnt_text_lost++;
return;
}
ep->type = 'E';
ep->id = (unsigned long) urb;
ep->busnum = urb->dev->bus->busnum;
ep->devnum = urb->dev->devnum;
ep->epnum = usb_endpoint_num(&urb->ep->desc);
ep->xfertype = usb_endpoint_type(&urb->ep->desc);
ep->is_in = usb_urb_dir_in(urb);
ep->tstamp = mon_get_timestamp();
ep->length = 0;
ep->status = error;
ep->setup_flag = '-';
ep->data_flag = 'E';
rp->nevents++;
list_add_tail(&ep->e_link, &rp->e_list);
wake_up(&rp->wait);
}
/*
* Fetch next event from the circular buffer.
*/
static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp,
struct mon_bus *mbus)
{
struct list_head *p;
unsigned long flags;
spin_lock_irqsave(&mbus->lock, flags);
if (list_empty(&rp->e_list)) {
spin_unlock_irqrestore(&mbus->lock, flags);
return NULL;
}
p = rp->e_list.next;
list_del(p);
--rp->nevents;
spin_unlock_irqrestore(&mbus->lock, flags);
return list_entry(p, struct mon_event_text, e_link);
}
/*
*/
static int mon_text_open(struct inode *inode, struct file *file)
{
struct mon_bus *mbus;
struct mon_reader_text *rp;
int rc;
mutex_lock(&mon_lock);
mbus = inode->i_private;
rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL);
if (rp == NULL) {
rc = -ENOMEM;
goto err_alloc;
}
INIT_LIST_HEAD(&rp->e_list);
init_waitqueue_head(&rp->wait);
mutex_init(&rp->printf_lock);
rp->printf_size = PRINTF_DFL;
rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL);
if (rp->printf_buf == NULL) {
rc = -ENOMEM;
goto err_alloc_pr;
}
rp->r.m_bus = mbus;
rp->r.r_data = rp;
rp->r.rnf_submit = mon_text_submit;
rp->r.rnf_error = mon_text_error;
rp->r.rnf_complete = mon_text_complete;
snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
mon_text_ctor);
if (rp->e_slab == NULL) {
rc = -ENOMEM;
goto err_slab;
}
mon_reader_add(mbus, &rp->r);
file->private_data = rp;
mutex_unlock(&mon_lock);
return 0;
// err_busy:
// kmem_cache_destroy(rp->e_slab);
err_slab:
kfree(rp->printf_buf);
err_alloc_pr:
kfree(rp);
err_alloc:
mutex_unlock(&mon_lock);
return rc;
}
static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
char __user * const buf, const size_t nbytes)
{
const size_t togo = min(nbytes, rp->printf_togo);
if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
return -EFAULT;
rp->printf_togo -= togo;
rp->printf_offset += togo;
return togo;
}
/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_t(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
ssize_t ret;
mutex_lock(&rp->printf_lock);
if (rp->printf_togo == 0) {
ep = mon_text_read_wait(rp, file);
if (IS_ERR(ep)) {
mutex_unlock(&rp->printf_lock);
return PTR_ERR(ep);
}
ptr.cnt = 0;
ptr.pbuf = rp->printf_buf;
ptr.limit = rp->printf_size;
mon_text_read_head_t(rp, &ptr, ep);
mon_text_read_statset(rp, &ptr, ep);
ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
" %d", ep->length);
mon_text_read_data(rp, &ptr, ep);
rp->printf_togo = ptr.cnt;
rp->printf_offset = 0;
kmem_cache_free(rp->e_slab, ep);
}
ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
return ret;
}
/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_u(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
ssize_t ret;
mutex_lock(&rp->printf_lock);
if (rp->printf_togo == 0) {
ep = mon_text_read_wait(rp, file);
if (IS_ERR(ep)) {
mutex_unlock(&rp->printf_lock);
return PTR_ERR(ep);
}
ptr.cnt = 0;
ptr.pbuf = rp->printf_buf;
ptr.limit = rp->printf_size;
mon_text_read_head_u(rp, &ptr, ep);
if (ep->type == 'E') {
mon_text_read_statset(rp, &ptr, ep);
} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
mon_text_read_isostat(rp, &ptr, ep);
mon_text_read_isodesc(rp, &ptr, ep);
} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
mon_text_read_intstat(rp, &ptr, ep);
} else {
mon_text_read_statset(rp, &ptr, ep);
}
ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
" %d", ep->length);
mon_text_read_data(rp, &ptr, ep);
rp->printf_togo = ptr.cnt;
rp->printf_offset = 0;
kmem_cache_free(rp->e_slab, ep);
}
ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
return ret;
}
static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
struct file *file)
{
struct mon_bus *mbus = rp->r.m_bus;
DECLARE_WAITQUEUE(waita, current);
struct mon_event_text *ep;
add_wait_queue(&rp->wait, &waita);
set_current_state(TASK_INTERRUPTIBLE);
while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
if (file->f_flags & O_NONBLOCK) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->wait, &waita);
return ERR_PTR(-EWOULDBLOCK);
}
/*
* We do not count nwaiters, because ->release is supposed
* to be called only when all openers are gone.
*/
schedule();
if (signal_pending(current)) {
remove_wait_queue(&rp->wait, &waita);
return ERR_PTR(-EINTR);
}
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->wait, &waita);
return ep;
}
static void mon_text_read_head_t(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
char udir, utype;
udir = (ep->is_in ? 'i' : 'o');
switch (ep->xfertype) {
case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break;
case USB_ENDPOINT_XFER_INT: utype = 'I'; break;
case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
default: /* PIPE_BULK */ utype = 'B';
}
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"%lx %u %c %c%c:%03u:%02u",
ep->id, ep->tstamp, ep->type,
utype, udir, ep->devnum, ep->epnum);
}
static void mon_text_read_head_u(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
char udir, utype;
udir = (ep->is_in ? 'i' : 'o');
switch (ep->xfertype) {
case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break;
case USB_ENDPOINT_XFER_INT: utype = 'I'; break;
case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
default: /* PIPE_BULK */ utype = 'B';
}
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"%lx %u %c %c%c:%d:%03u:%u",
ep->id, ep->tstamp, ep->type,
utype, udir, ep->busnum, ep->devnum, ep->epnum);
}
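/*
 * Illustrative output (values made up): for a control IN submission on
 * bus 1, device 1, ep0 the two heads above produce something like
 *
 * 't' file: c0c8d220 3003000 S Ci:001:00 ...
 * 'u' file: c0c8d220 3003000 S Ci:1:001:0 ...
 *
 * i.e. the legacy 't' format omits the bus number.
 */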
static void mon_text_read_statset(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
if (ep->setup_flag == 0) { /* Setup packet is present and captured */
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" s %02x %02x %04x %04x %04x",
ep->setup[0],
ep->setup[1],
(ep->setup[3] << 8) | ep->setup[2],
(ep->setup[5] << 8) | ep->setup[4],
(ep->setup[7] << 8) | ep->setup[6]);
} else if (ep->setup_flag != '-') { /* Unable to capture setup packet */
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %c __ __ ____ ____ ____", ep->setup_flag);
} else { /* No setup for this kind of URB */
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d", ep->status);
}
}
static void mon_text_read_intstat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d", ep->status, ep->interval);
}
static void mon_text_read_isostat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
if (ep->type == 'S') {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d:%d", ep->status, ep->interval, ep->start_frame);
} else {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d:%d:%d",
ep->status, ep->interval, ep->start_frame, ep->error_count);
}
}
static void mon_text_read_isodesc(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
int ndesc; /* Display this many */
int i;
const struct mon_iso_desc *dp;
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d", ep->numdesc);
ndesc = ep->numdesc;
if (ndesc > ISODESC_MAX)
ndesc = ISODESC_MAX;
if (ndesc < 0)
ndesc = 0;
dp = ep->isodesc;
for (i = 0; i < ndesc; i++) {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%u:%u", dp->status, dp->offset, dp->length);
dp++;
}
}
static void mon_text_read_data(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
int data_len, i;
if ((data_len = ep->length) > 0) {
if (ep->data_flag == 0) {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" =");
if (data_len >= DATA_MAX)
data_len = DATA_MAX;
for (i = 0; i < data_len; i++) {
if (i % 4 == 0) {
p->cnt += scnprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
" ");
}
p->cnt += scnprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
"%02x", ep->data[i]);
}
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"\n");
} else {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %c\n", ep->data_flag);
}
} else {
p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n");
}
}
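/*
 * Illustrative example: with data_flag == 0 and 8 captured bytes
 * 01 02 03 04 05 06 07 08, mon_text_read_data() appends
 *
 * " = 01020304 05060708\n"
 *
 * i.e. hex bytes in groups of four.
 */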
static int mon_text_release(struct inode *inode, struct file *file)
{
struct mon_reader_text *rp = file->private_data;
struct mon_bus *mbus;
/* unsigned long flags; */
struct list_head *p;
struct mon_event_text *ep;
mutex_lock(&mon_lock);
mbus = inode->i_private;
if (mbus->nreaders <= 0) {
printk(KERN_ERR TAG ": consistency error on close\n");
mutex_unlock(&mon_lock);
return 0;
}
mon_reader_del(mbus, &rp->r);
/*
* In theory, e_list is protected by mbus->lock. However,
* after mon_reader_del has finished, the following is the case:
* - we are not on reader list anymore, so new events won't be added;
* - whole mbus may be dropped if it was orphaned.
* So, we better not touch mbus.
*/
/* spin_lock_irqsave(&mbus->lock, flags); */
while (!list_empty(&rp->e_list)) {
p = rp->e_list.next;
ep = list_entry(p, struct mon_event_text, e_link);
list_del(p);
--rp->nevents;
kmem_cache_free(rp->e_slab, ep);
}
/* spin_unlock_irqrestore(&mbus->lock, flags); */
kmem_cache_destroy(rp->e_slab);
kfree(rp->printf_buf);
kfree(rp);
mutex_unlock(&mon_lock);
return 0;
}
static const struct file_operations mon_fops_text_t = {
.owner = THIS_MODULE,
.open = mon_text_open,
.llseek = no_llseek,
.read = mon_text_read_t,
.release = mon_text_release,
};
static const struct file_operations mon_fops_text_u = {
.owner = THIS_MODULE,
.open = mon_text_open,
.llseek = no_llseek,
.read = mon_text_read_u,
.release = mon_text_release,
};
int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
enum { NAMESZ = 10 };
char name[NAMESZ];
int busnum = ubus? ubus->busnum: 0;
int rc;
if (mon_dir == NULL)
return 0;
if (ubus != NULL) {
rc = snprintf(name, NAMESZ, "%dt", busnum);
if (rc <= 0 || rc >= NAMESZ)
goto err_print_t;
mbus->dent_t = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_text_t);
}
rc = snprintf(name, NAMESZ, "%du", busnum);
if (rc <= 0 || rc >= NAMESZ)
goto err_print_u;
mbus->dent_u = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_text_u);
rc = snprintf(name, NAMESZ, "%ds", busnum);
if (rc <= 0 || rc >= NAMESZ)
goto err_print_s;
mbus->dent_s = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_stat);
return 1;
err_print_s:
debugfs_remove(mbus->dent_u);
mbus->dent_u = NULL;
err_print_u:
if (ubus != NULL) {
debugfs_remove(mbus->dent_t);
mbus->dent_t = NULL;
}
err_print_t:
return 0;
}
void mon_text_del(struct mon_bus *mbus)
{
debugfs_remove(mbus->dent_u);
debugfs_remove(mbus->dent_t);
debugfs_remove(mbus->dent_s);
}
/*
* Slab interface: constructor.
*/
static void mon_text_ctor(void *mem)
{
/*
* Nothing to initialize. No, really!
* So, we fill it with garbage to emulate a reused object.
*/
memset(mem, 0xe5, sizeof(struct mon_event_text));
}
int __init mon_text_init(void)
{
mon_dir = debugfs_create_dir("usbmon", usb_debug_root);
return 0;
}
void mon_text_exit(void)
{
debugfs_remove(mon_dir);
}
| linux-master | drivers/usb/mon/mon_text.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* mon_main.c: Main file, module initialization and exit, registrations, etc.
*
* Copyright (C) 2005 Pete Zaitcev ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include "usb_mon.h"
static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
static void mon_bus_drop(struct kref *r);
static void mon_bus_init(struct usb_bus *ubus);
DEFINE_MUTEX(mon_lock);
struct mon_bus mon_bus0; /* Pseudo bus meaning "all buses" */
static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
/*
* Link a reader into the bus.
*
* This must be called with mon_lock taken because of mbus->ref.
*/
void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r)
{
unsigned long flags;
struct list_head *p;
spin_lock_irqsave(&mbus->lock, flags);
if (mbus->nreaders == 0) {
if (mbus == &mon_bus0) {
list_for_each (p, &mon_buses) {
struct mon_bus *m1;
m1 = list_entry(p, struct mon_bus, bus_link);
m1->u_bus->monitored = 1;
}
} else {
mbus->u_bus->monitored = 1;
}
}
mbus->nreaders++;
list_add_tail(&r->r_link, &mbus->r_list);
spin_unlock_irqrestore(&mbus->lock, flags);
kref_get(&mbus->ref);
}
/*
* Unlink reader from the bus.
*
* This is called with mon_lock taken, so we can decrement mbus->ref.
*/
void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r)
{
unsigned long flags;
spin_lock_irqsave(&mbus->lock, flags);
list_del(&r->r_link);
--mbus->nreaders;
if (mbus->nreaders == 0)
mon_stop(mbus);
spin_unlock_irqrestore(&mbus->lock, flags);
kref_put(&mbus->ref, mon_bus_drop);
}
/*
*/
static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_submit(r->r_data, urb);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_submit(struct usb_bus *ubus, struct urb *urb)
{
struct mon_bus *mbus;
mbus = ubus->mon_bus;
if (mbus != NULL)
mon_bus_submit(mbus, urb);
mon_bus_submit(&mon_bus0, urb);
}
/*
*/
static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_error(r->r_data, urb, error);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
{
struct mon_bus *mbus;
mbus = ubus->mon_bus;
if (mbus != NULL)
mon_bus_submit_error(mbus, urb, error);
mon_bus_submit_error(&mon_bus0, urb, error);
}
/*
*/
static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_complete(r->r_data, urb, status);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status)
{
struct mon_bus *mbus;
mbus = ubus->mon_bus;
if (mbus != NULL)
mon_bus_complete(mbus, urb, status);
mon_bus_complete(&mon_bus0, urb, status);
}
/* int (*unlink_urb) (struct urb *urb, int status); */
/*
* Stop monitoring.
*/
static void mon_stop(struct mon_bus *mbus)
{
struct usb_bus *ubus;
struct list_head *p;
if (mbus == &mon_bus0) {
list_for_each (p, &mon_buses) {
mbus = list_entry(p, struct mon_bus, bus_link);
/*
* We do not change nreaders here, so rely on mon_lock.
*/
if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL)
ubus->monitored = 0;
}
} else {
/*
* A stop can be called for a dissolved mon_bus in case of
* a reader staying across an rmmod foo_hcd, so test ->u_bus.
*/
if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) {
ubus->monitored = 0;
mb();
}
}
}
/*
* Add a USB bus (usually by a modprobe foo-hcd)
*
* This does not return an error code because the core cannot care less
* if monitoring is not established.
*/
static void mon_bus_add(struct usb_bus *ubus)
{
mon_bus_init(ubus);
mutex_lock(&mon_lock);
if (mon_bus0.nreaders != 0)
ubus->monitored = 1;
mutex_unlock(&mon_lock);
}
/*
* Remove a USB bus (either from rmmod foo-hcd or from a hot-remove event).
*/
static void mon_bus_remove(struct usb_bus *ubus)
{
struct mon_bus *mbus = ubus->mon_bus;
mutex_lock(&mon_lock);
list_del(&mbus->bus_link);
if (mbus->text_inited)
mon_text_del(mbus);
if (mbus->bin_inited)
mon_bin_del(mbus);
mon_dissolve(mbus, ubus);
kref_put(&mbus->ref, mon_bus_drop);
mutex_unlock(&mon_lock);
}
static int mon_notify(struct notifier_block *self, unsigned long action,
void *dev)
{
switch (action) {
case USB_BUS_ADD:
mon_bus_add(dev);
break;
case USB_BUS_REMOVE:
mon_bus_remove(dev);
}
return NOTIFY_OK;
}
static struct notifier_block mon_nb = {
.notifier_call = mon_notify,
};
/*
* Ops
*/
static const struct usb_mon_operations mon_ops_0 = {
.urb_submit = mon_submit,
.urb_submit_error = mon_submit_error,
.urb_complete = mon_complete,
};
/*
* Tear usb_bus and mon_bus apart.
*/
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus)
{
if (ubus->monitored) {
ubus->monitored = 0;
mb();
}
ubus->mon_bus = NULL;
mbus->u_bus = NULL;
mb();
/* We want synchronize_irq() here, but that needs an argument. */
}
/*
*/
static void mon_bus_drop(struct kref *r)
{
struct mon_bus *mbus = container_of(r, struct mon_bus, ref);
kfree(mbus);
}
/*
* Initialize a bus for us:
* - allocate mon_bus
* - refcount USB bus struct
* - link
*/
static void mon_bus_init(struct usb_bus *ubus)
{
struct mon_bus *mbus;
mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL);
if (mbus == NULL)
goto err_alloc;
kref_init(&mbus->ref);
spin_lock_init(&mbus->lock);
INIT_LIST_HEAD(&mbus->r_list);
/*
* We don't need to take a reference to ubus, because we receive
* a notification if the bus is about to be removed.
*/
mbus->u_bus = ubus;
ubus->mon_bus = mbus;
mbus->text_inited = mon_text_add(mbus, ubus);
mbus->bin_inited = mon_bin_add(mbus, ubus);
mutex_lock(&mon_lock);
list_add_tail(&mbus->bus_link, &mon_buses);
mutex_unlock(&mon_lock);
return;
err_alloc:
return;
}
static void mon_bus0_init(void)
{
struct mon_bus *mbus = &mon_bus0;
kref_init(&mbus->ref);
spin_lock_init(&mbus->lock);
INIT_LIST_HEAD(&mbus->r_list);
mbus->text_inited = mon_text_add(mbus, NULL);
mbus->bin_inited = mon_bin_add(mbus, NULL);
}
/*
* Search a USB bus by number. Notice that USB bus numbers start from one,
* which we may later use to identify "all" with zero.
*
* This function must be called with mon_lock held.
*
* This is obviously inefficient and may be revised in the future.
*/
struct mon_bus *mon_bus_lookup(unsigned int num)
{
struct list_head *p;
struct mon_bus *mbus;
if (num == 0) {
return &mon_bus0;
}
list_for_each (p, &mon_buses) {
mbus = list_entry(p, struct mon_bus, bus_link);
if (mbus->u_bus->busnum == num) {
return mbus;
}
}
return NULL;
}
static int __init mon_init(void)
{
struct usb_bus *ubus;
int rc, id;
if ((rc = mon_text_init()) != 0)
goto err_text;
if ((rc = mon_bin_init()) != 0)
goto err_bin;
mon_bus0_init();
if (usb_mon_register(&mon_ops_0) != 0) {
printk(KERN_NOTICE TAG ": unable to register with the core\n");
rc = -ENODEV;
goto err_reg;
}
// MOD_INC_USE_COUNT(which_module?);
mutex_lock(&usb_bus_idr_lock);
idr_for_each_entry(&usb_bus_idr, ubus, id)
mon_bus_init(ubus);
usb_register_notify(&mon_nb);
mutex_unlock(&usb_bus_idr_lock);
return 0;
err_reg:
mon_bin_exit();
err_bin:
mon_text_exit();
err_text:
return rc;
}
static void __exit mon_exit(void)
{
struct mon_bus *mbus;
struct list_head *p;
usb_unregister_notify(&mon_nb);
usb_mon_deregister();
mutex_lock(&mon_lock);
while (!list_empty(&mon_buses)) {
p = mon_buses.next;
mbus = list_entry(p, struct mon_bus, bus_link);
list_del(p);
if (mbus->text_inited)
mon_text_del(mbus);
if (mbus->bin_inited)
mon_bin_del(mbus);
/*
* This never happens, because the open/close paths in
* file level maintain module use counters and so rmmod fails
* before reaching here. However, better be safe...
*/
if (mbus->nreaders) {
printk(KERN_ERR TAG
": Outstanding opens (%d) on usb%d, leaking...\n",
mbus->nreaders, mbus->u_bus->busnum);
kref_get(&mbus->ref); /* Force leak */
}
mon_dissolve(mbus, mbus->u_bus);
kref_put(&mbus->ref, mon_bus_drop);
}
mbus = &mon_bus0;
if (mbus->text_inited)
mon_text_del(mbus);
if (mbus->bin_inited)
mon_bin_del(mbus);
mutex_unlock(&mon_lock);
mon_text_exit();
mon_bin_exit();
}
module_init(mon_init);
module_exit(mon_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/mon/mon_main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* This is the 's' or 'stat' reader which debugs usbmon itself.
* Note that this code blows through locks, so make sure that
* /dbg/usbmon/0s is well protected from non-root users.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "usb_mon.h"
#define STAT_BUF_SIZE 80
struct snap {
int slen;
char str[STAT_BUF_SIZE];
};
static int mon_stat_open(struct inode *inode, struct file *file)
{
struct mon_bus *mbus;
struct snap *sp;
sp = kmalloc(sizeof(struct snap), GFP_KERNEL);
if (sp == NULL)
return -ENOMEM;
mbus = inode->i_private;
sp->slen = snprintf(sp->str, STAT_BUF_SIZE,
"nreaders %d events %u text_lost %u\n",
mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);
file->private_data = sp;
return 0;
}
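/*
 * Illustrative output of the snapshot built above (values made up):
 *
 * nreaders 1 events 5213 text_lost 0
 */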
static ssize_t mon_stat_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct snap *sp = file->private_data;
return simple_read_from_buffer(buf, nbytes, ppos, sp->str, sp->slen);
}
static int mon_stat_release(struct inode *inode, struct file *file)
{
struct snap *sp = file->private_data;
file->private_data = NULL;
kfree(sp);
return 0;
}
const struct file_operations mon_fops_stat = {
.owner = THIS_MODULE,
.open = mon_stat_open,
.llseek = no_llseek,
.read = mon_stat_read,
/* .write = mon_stat_write, */
/* .poll = mon_stat_poll, */
/* .unlocked_ioctl = mon_stat_ioctl, */
.release = mon_stat_release,
};
| linux-master | drivers/usb/mon/mon_stat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* This is a binary format reader.
*
* Copyright (C) 2006 Paolo Abeni ([email protected])
* Copyright (C) 2006,2007 Pete Zaitcev ([email protected])
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>
#include <linux/uaccess.h>
#include "usb_mon.h"
/*
* Defined by USB 2.0 clause 9.3, table 9.2.
*/
#define SETUP_LEN 8
/* ioctl macros */
#define MON_IOC_MAGIC 0x92
#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)
#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif
/*
* Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
* But it's all right. Just use a simple way to make sure the chunk is never
* smaller than a page.
*
* N.B. An application does not know our chunk size.
*
 * Whoops, get_zeroed_page() returns a single page. I guess we're stuck with
* page-sized chunks for the time being.
*/
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
/*
* The magic limit was calculated so that it allows the monitoring
* application to pick data once in two ticks. This way, another application,
* which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
* enormous overhead built into the bus protocol, so we need about 1000 KB.
*
* This is still too much for most cases, where we just snoop a few
* descriptor fetches for enumeration. So, the default is a "reasonable"
* amount for systems with HZ=250 and incomplete bus saturation.
*
* XXX What about multi-megabyte URBs which take minutes to transfer?
*/
#define BUFF_MAX CHUNK_ALIGN(1200*1024)
#define BUFF_DFL CHUNK_ALIGN(300*1024)
#define BUFF_MIN CHUNK_ALIGN(8*1024)
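/*
 * The arithmetic behind the figures above, spelled out: 480 Mbit/s is
 * 60 MB/s of raw bus bandwidth; taking that as 60 x 1024 KB/s and dividing
 * by HZ=100 gives 614.4 KB per jiffy. Picking data up once in two ticks
 * would need about 2 x 614 KB at full wire speed; protocol overhead brings
 * the realistic requirement down to roughly 1000 KB, and BUFF_MAX sits a
 * little above that.
 */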
/*
* The per-event API header (2 per URB).
*
* This structure is seen in userland as defined by the documentation.
*/
struct mon_bin_hdr {
u64 id; /* URB ID - from submission to callback */
unsigned char type; /* Same as in text API; extensible. */
unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
unsigned char epnum; /* Endpoint number and transfer direction */
unsigned char devnum; /* Device address */
unsigned short busnum; /* Bus number */
char flag_setup;
char flag_data;
s64 ts_sec; /* ktime_get_real_ts64 */
s32 ts_usec; /* ktime_get_real_ts64 */
int status;
unsigned int len_urb; /* Length of data (submitted or actual) */
unsigned int len_cap; /* Delivered length */
union {
unsigned char setup[SETUP_LEN]; /* Only for Control S-type */
struct iso_rec {
int error_count;
int numdesc;
} iso;
} s;
int interval;
int start_frame;
unsigned int xfer_flags;
unsigned int ndesc; /* Actual number of ISO descriptors */
};
/*
* ISO vector, packed into the head of data stream.
* This has to take 16 bytes to make sure that the end of buffer
* wrap is not happening in the middle of a descriptor.
*/
struct mon_bin_isodesc {
int iso_status;
unsigned int iso_off;
unsigned int iso_len;
u32 _pad;
};
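/*
 * A size sanity check (a sketch only; the driver does not assert this):
 * with 4-byte int and unsigned int, the structure above is 4 * 4 = 16
 * bytes, and PKT_ALIGN (64) is a multiple of it, which is what keeps a
 * descriptor from being split across the end-of-buffer wrap. Something
 * like
 *
 *	BUILD_BUG_ON(sizeof(struct mon_bin_isodesc) != 16);
 *
 * in an init path would make the assumption explicit.
 */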
/* per file statistic */
struct mon_bin_stats {
u32 queued;
u32 dropped;
};
struct mon_bin_get {
struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. */
void __user *data;
size_t alloc; /* Length of data (can be zero) */
};
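/*
 * Illustrative only -- a minimal userspace sketch of MON_IOCX_GETX,
 * assuming the definitions above are mirrored in a userspace header
 * (no __user annotations there). It fetches one event: an API 1 header
 * plus up to "alloc" bytes of payload. Error handling is elided.
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb = {
 *		.hdr = &hdr, .data = data, .alloc = sizeof(data),
 *	};
 *	int fd = open("/dev/usbmon0", O_RDONLY);
 *	ioctl(fd, MON_IOCX_GETX, &getb);
 *	printf("urb %llx type %c len %u\n",
 *	       (unsigned long long)hdr.id, hdr.type, hdr.len_cap);
 */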
struct mon_bin_mfetch {
u32 __user *offvec; /* Vector of events fetched */
u32 nfetch; /* Number of events to fetch (out: fetched) */
u32 nflush; /* Number of events to flush */
};
#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
u32 hdr32;
u32 data32;
u32 alloc32;
};
struct mon_bin_mfetch32 {
u32 offvec32;
u32 nfetch32;
u32 nflush32;
};
#endif
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN 64
#define PKT_SIZE 64
#define PKT_SZ_API0 48 /* API 0 (2.6.20) size */
#define PKT_SZ_API1 64 /* API 1 size: extra fields */
#define ISODESC_MAX 128 /* Same number as usbfs allows, 2048 bytes. */
/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128
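/*
 * The expression (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1), used throughout
 * this file, rounds size up to the next multiple of PKT_ALIGN. Worked
 * example: size = 100 gives (100 + 63) & ~63 = 128. Since PKT_SIZE equals
 * PKT_ALIGN, every header starts on such a boundary and thus never wraps
 * around the end of the ring.
 */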
/*
* The buffer: map of used pages.
*/
struct mon_pgmap {
struct page *pg;
unsigned char *ptr; /* XXX just use page_to_virt everywhere? */
};
/*
* This gets associated with an open file struct.
*/
struct mon_reader_bin {
/* The buffer: one per open. */
spinlock_t b_lock; /* Protect b_cnt, b_in */
unsigned int b_size; /* Current size of the buffer - bytes */
unsigned int b_cnt; /* Bytes used */
unsigned int b_in, b_out; /* Offsets into buffer - bytes */
unsigned int b_read; /* Amount of read data in curr. pkt. */
struct mon_pgmap *b_vec; /* The map array */
wait_queue_head_t b_wait; /* Wait for data here */
struct mutex fetch_lock; /* Protect b_read, b_out */
int mmap_active;
/* A list of these is needed for "bus 0". Some time later. */
struct mon_reader r;
/* Stats */
unsigned int cnt_lost;
};
static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
unsigned int offset)
{
return (struct mon_bin_hdr *)
(rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}
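/*
 * Example of the chunk arithmetic above (assuming 4 KB pages): offset 5000
 * lands in chunk 5000 / 4096 = 1, at byte 5000 % 4096 = 904 within it.
 */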
#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
static unsigned char xfer_to_pipe[4] = {
PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};
static const struct class mon_bin_class = {
.name = "usbmon",
};
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
* This is a "chunked memcpy". It does not manipulate any counters.
*/
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
unsigned int off, const unsigned char *from, unsigned int length)
{
unsigned int step_len;
unsigned char *buf;
unsigned int in_page;
while (length) {
/*
* Determine step_len.
*/
step_len = length;
in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
if (in_page < step_len)
step_len = in_page;
/*
* Copy data and advance pointers.
*/
buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
memcpy(buf, from, step_len);
if ((off += step_len) >= this->b_size) off = 0;
from += step_len;
length -= step_len;
}
return off;
}
/*
* This is a little worse than the above because it's "chunked copy_to_user".
* The return value is an error code, not an offset.
*/
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
char __user *to, int length)
{
unsigned int step_len;
unsigned char *buf;
unsigned int in_page;
while (length) {
/*
* Determine step_len.
*/
step_len = length;
in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
if (in_page < step_len)
step_len = in_page;
/*
* Copy data and advance pointers.
*/
buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
if (copy_to_user(to, buf, step_len))
return -EINVAL;
if ((off += step_len) >= this->b_size) off = 0;
to += step_len;
length -= step_len;
}
return 0;
}
/*
* Allocate an (aligned) area in the buffer.
* This is called under b_lock.
* Returns ~0 on failure.
*/
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
unsigned int size)
{
unsigned int offset;
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
if (rp->b_cnt + size > rp->b_size)
return ~0;
offset = rp->b_in;
rp->b_cnt += size;
if ((rp->b_in += size) >= rp->b_size)
rp->b_in -= rp->b_size;
return offset;
}
/*
* This is the same thing as mon_buff_area_alloc, only it does not allow
* buffers to wrap. This is needed by applications which pass references
* into mmap-ed buffers up their stacks (libpcap can do that).
*
* Currently, we always have the header stuck with the data, although
* it is not strictly speaking necessary.
*
* When a buffer would wrap, we place a filler packet to mark the space.
*/
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
unsigned int size)
{
unsigned int offset;
unsigned int fill_size;
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
if (rp->b_cnt + size > rp->b_size)
return ~0;
if (rp->b_in + size > rp->b_size) {
/*
* This would wrap. Find if we still have space after
* skipping to the end of the buffer. If we do, place
* a filler packet and allocate a new packet.
*/
fill_size = rp->b_size - rp->b_in;
if (rp->b_cnt + size + fill_size > rp->b_size)
return ~0;
mon_buff_area_fill(rp, rp->b_in, fill_size);
offset = 0;
rp->b_in = size;
rp->b_cnt += size + fill_size;
} else if (rp->b_in + size == rp->b_size) {
offset = rp->b_in;
rp->b_in = 0;
rp->b_cnt += size;
} else {
offset = rp->b_in;
rp->b_in += size;
rp->b_cnt += size;
}
return offset;
}
/*
* Return a few (kilo-)bytes to the head of the buffer.
* This is used if a data fetch fails.
*/
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{
/* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */
rp->b_cnt -= size;
if (rp->b_in < size)
rp->b_in += rp->b_size;
rp->b_in -= size;
}
/*
* This has to be called under both b_lock and fetch_lock, because
* it accesses both b_cnt and b_out.
*/
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
rp->b_cnt -= size;
if ((rp->b_out += size) >= rp->b_size)
rp->b_out -= rp->b_size;
}
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
unsigned int offset, unsigned int size)
{
struct mon_bin_hdr *ep;
ep = MON_OFF2HDR(rp, offset);
memset(ep, 0, PKT_SIZE);
ep->type = '@';
ep->len_cap = size - PKT_SIZE;
}
static inline char mon_bin_get_setup(unsigned char *setupb,
const struct urb *urb, char ev_type)
{
if (urb->setup_packet == NULL)
return 'Z';
memcpy(setupb, urb->setup_packet, SETUP_LEN);
return 0;
}
static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
unsigned int offset, struct urb *urb, unsigned int length,
char *flag)
{
int i;
struct scatterlist *sg;
unsigned int this_len;
*flag = 0;
if (urb->num_sgs == 0) {
if (urb->transfer_buffer == NULL) {
*flag = 'Z';
return length;
}
mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
length = 0;
} else {
/* If IOMMU coalescing occurred, we cannot trust sg_page */
if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
*flag = 'D';
return length;
}
/* Copy up to the first non-addressable segment */
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
if (length == 0 || PageHighMem(sg_page(sg)))
break;
this_len = min_t(unsigned int, sg->length, length);
offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
this_len);
length -= this_len;
}
if (i == 0)
*flag = 'D';
}
return length;
}
/*
* This is the look-ahead pass in case of 'C Zi', when actual_length cannot
* be used to determine the length of the whole contiguous buffer.
*/
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
struct urb *urb, unsigned int ndesc)
{
struct usb_iso_packet_descriptor *fp;
unsigned int length;
length = 0;
fp = urb->iso_frame_desc;
while (ndesc-- != 0) {
if (fp->actual_length != 0) {
if (fp->offset + fp->actual_length > length)
length = fp->offset + fp->actual_length;
}
fp++;
}
return length;
}
static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
struct mon_bin_isodesc *dp;
struct usb_iso_packet_descriptor *fp;
fp = urb->iso_frame_desc;
while (ndesc-- != 0) {
dp = (struct mon_bin_isodesc *)
(rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
dp->iso_status = fp->status;
dp->iso_off = fp->offset;
dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
dp->_pad = 0;
if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
offset = 0;
fp++;
}
}
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
char ev_type, int status)
{
const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
struct timespec64 ts;
unsigned long flags;
unsigned int urb_length;
unsigned int offset;
unsigned int length;
unsigned int delta;
unsigned int ndesc, lendesc;
unsigned char dir;
struct mon_bin_hdr *ep;
char data_tag = 0;
ktime_get_real_ts64(&ts);
spin_lock_irqsave(&rp->b_lock, flags);
/*
* Find the maximum allowable length, then allocate space.
*/
urb_length = (ev_type == 'S') ?
urb->transfer_buffer_length : urb->actual_length;
length = urb_length;
if (usb_endpoint_xfer_isoc(epd)) {
if (urb->number_of_packets < 0) {
ndesc = 0;
} else if (urb->number_of_packets >= ISODESC_MAX) {
ndesc = ISODESC_MAX;
} else {
ndesc = urb->number_of_packets;
}
if (ev_type == 'C' && usb_urb_dir_in(urb))
length = mon_bin_collate_isodesc(rp, urb, ndesc);
} else {
ndesc = 0;
}
lendesc = ndesc*sizeof(struct mon_bin_isodesc);
/* not an issue unless there's a subtle bug in a HCD somewhere */
if (length >= urb->transfer_buffer_length)
length = urb->transfer_buffer_length;
if (length >= rp->b_size/5)
length = rp->b_size/5;
if (usb_urb_dir_in(urb)) {
if (ev_type == 'S') {
length = 0;
data_tag = '<';
}
/* Cannot rely on endpoint number in case of control ep.0 */
dir = USB_DIR_IN;
} else {
if (ev_type == 'C') {
length = 0;
data_tag = '>';
}
dir = 0;
}
if (rp->mmap_active) {
offset = mon_buff_area_alloc_contiguous(rp,
length + PKT_SIZE + lendesc);
} else {
offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
}
if (offset == ~0) {
rp->cnt_lost++;
spin_unlock_irqrestore(&rp->b_lock, flags);
return;
}
ep = MON_OFF2HDR(rp, offset);
if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;
/*
* Fill the allocated area.
*/
memset(ep, 0, PKT_SIZE);
ep->type = ev_type;
ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
ep->epnum = dir | usb_endpoint_num(epd);
ep->devnum = urb->dev->devnum;
ep->busnum = urb->dev->bus->busnum;
ep->id = (unsigned long) urb;
ep->ts_sec = ts.tv_sec;
ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
ep->status = status;
ep->len_urb = urb_length;
ep->len_cap = length + lendesc;
ep->xfer_flags = urb->transfer_flags;
if (usb_endpoint_xfer_int(epd)) {
ep->interval = urb->interval;
} else if (usb_endpoint_xfer_isoc(epd)) {
ep->interval = urb->interval;
ep->start_frame = urb->start_frame;
ep->s.iso.error_count = urb->error_count;
ep->s.iso.numdesc = urb->number_of_packets;
}
if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
} else {
ep->flag_setup = '-';
}
if (ndesc != 0) {
ep->ndesc = ndesc;
mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
if ((offset += lendesc) >= rp->b_size)
offset -= rp->b_size;
}
if (length != 0) {
length = mon_bin_get_data(rp, offset, urb, length,
&ep->flag_data);
if (length > 0) {
delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
ep->len_cap -= length;
delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
mon_buff_area_shrink(rp, delta);
}
} else {
ep->flag_data = data_tag;
}
spin_unlock_irqrestore(&rp->b_lock, flags);
wake_up(&rp->b_wait);
}
static void mon_bin_submit(void *data, struct urb *urb)
{
struct mon_reader_bin *rp = data;
mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}
static void mon_bin_complete(void *data, struct urb *urb, int status)
{
struct mon_reader_bin *rp = data;
mon_bin_event(rp, urb, 'C', status);
}
static void mon_bin_error(void *data, struct urb *urb, int error)
{
struct mon_reader_bin *rp = data;
struct timespec64 ts;
unsigned long flags;
unsigned int offset;
struct mon_bin_hdr *ep;
ktime_get_real_ts64(&ts);
spin_lock_irqsave(&rp->b_lock, flags);
offset = mon_buff_area_alloc(rp, PKT_SIZE);
if (offset == ~0) {
/* Not incrementing cnt_lost. Just because. */
spin_unlock_irqrestore(&rp->b_lock, flags);
return;
}
ep = MON_OFF2HDR(rp, offset);
memset(ep, 0, PKT_SIZE);
ep->type = 'E';
ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
ep->epnum |= usb_endpoint_num(&urb->ep->desc);
ep->devnum = urb->dev->devnum;
ep->busnum = urb->dev->bus->busnum;
ep->id = (unsigned long) urb;
ep->ts_sec = ts.tv_sec;
ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
ep->status = error;
ep->flag_setup = '-';
ep->flag_data = 'E';
spin_unlock_irqrestore(&rp->b_lock, flags);
wake_up(&rp->b_wait);
}
static int mon_bin_open(struct inode *inode, struct file *file)
{
struct mon_bus *mbus;
struct mon_reader_bin *rp;
size_t size;
int rc;
mutex_lock(&mon_lock);
mbus = mon_bus_lookup(iminor(inode));
if (mbus == NULL) {
mutex_unlock(&mon_lock);
return -ENODEV;
}
if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
printk(KERN_ERR TAG ": consistency error on open\n");
mutex_unlock(&mon_lock);
return -ENODEV;
}
rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
if (rp == NULL) {
rc = -ENOMEM;
goto err_alloc;
}
spin_lock_init(&rp->b_lock);
init_waitqueue_head(&rp->b_wait);
mutex_init(&rp->fetch_lock);
rp->b_size = BUFF_DFL;
size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
rc = -ENOMEM;
goto err_allocvec;
}
if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
goto err_allocbuff;
rp->r.m_bus = mbus;
rp->r.r_data = rp;
rp->r.rnf_submit = mon_bin_submit;
rp->r.rnf_error = mon_bin_error;
rp->r.rnf_complete = mon_bin_complete;
mon_reader_add(mbus, &rp->r);
file->private_data = rp;
mutex_unlock(&mon_lock);
return 0;
err_allocbuff:
kfree(rp->b_vec);
err_allocvec:
kfree(rp);
err_alloc:
mutex_unlock(&mon_lock);
return rc;
}
/*
* Extract an event from buffer and copy it to user space.
* Wait if there is no event ready.
* Returns zero or error.
*/
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
void __user *data, unsigned int nbytes)
{
unsigned long flags;
struct mon_bin_hdr *ep;
size_t step_len;
unsigned int offset;
int rc;
mutex_lock(&rp->fetch_lock);
if ((rc = mon_bin_wait_event(file, rp)) < 0) {
mutex_unlock(&rp->fetch_lock);
return rc;
}
ep = MON_OFF2HDR(rp, rp->b_out);
if (copy_to_user(hdr, ep, hdrbytes)) {
mutex_unlock(&rp->fetch_lock);
return -EFAULT;
}
step_len = min(ep->len_cap, nbytes);
if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;
if (copy_from_buf(rp, offset, data, step_len)) {
mutex_unlock(&rp->fetch_lock);
return -EFAULT;
}
spin_lock_irqsave(&rp->b_lock, flags);
mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
spin_unlock_irqrestore(&rp->b_lock, flags);
rp->b_read = 0;
mutex_unlock(&rp->fetch_lock);
return 0;
}
static int mon_bin_release(struct inode *inode, struct file *file)
{
struct mon_reader_bin *rp = file->private_data;
struct mon_bus* mbus = rp->r.m_bus;
mutex_lock(&mon_lock);
if (mbus->nreaders <= 0) {
printk(KERN_ERR TAG ": consistency error on close\n");
mutex_unlock(&mon_lock);
return 0;
}
mon_reader_del(mbus, &rp->r);
mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
kfree(rp->b_vec);
kfree(rp);
mutex_unlock(&mon_lock);
return 0;
}
static ssize_t mon_bin_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct mon_reader_bin *rp = file->private_data;
unsigned int hdrbytes = PKT_SZ_API0;
unsigned long flags;
struct mon_bin_hdr *ep;
unsigned int offset;
size_t step_len;
char *ptr;
ssize_t done = 0;
int rc;
mutex_lock(&rp->fetch_lock);
if ((rc = mon_bin_wait_event(file, rp)) < 0) {
mutex_unlock(&rp->fetch_lock);
return rc;
}
ep = MON_OFF2HDR(rp, rp->b_out);
if (rp->b_read < hdrbytes) {
step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
ptr = ((char *)ep) + rp->b_read;
if (step_len && copy_to_user(buf, ptr, step_len)) {
mutex_unlock(&rp->fetch_lock);
return -EFAULT;
}
nbytes -= step_len;
buf += step_len;
rp->b_read += step_len;
done += step_len;
}
if (rp->b_read >= hdrbytes) {
step_len = ep->len_cap;
step_len -= rp->b_read - hdrbytes;
if (step_len > nbytes)
step_len = nbytes;
offset = rp->b_out + PKT_SIZE;
offset += rp->b_read - hdrbytes;
if (offset >= rp->b_size)
offset -= rp->b_size;
if (copy_from_buf(rp, offset, buf, step_len)) {
mutex_unlock(&rp->fetch_lock);
return -EFAULT;
}
nbytes -= step_len;
buf += step_len;
rp->b_read += step_len;
done += step_len;
}
/*
* Check if whole packet was read, and if so, jump to the next one.
*/
if (rp->b_read >= hdrbytes + ep->len_cap) {
spin_lock_irqsave(&rp->b_lock, flags);
mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
spin_unlock_irqrestore(&rp->b_lock, flags);
rp->b_read = 0;
}
mutex_unlock(&rp->fetch_lock);
return done;
}
/*
* Remove at most nevents from chunked buffer.
* Returns the number of removed events.
*/
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
unsigned long flags;
struct mon_bin_hdr *ep;
int i;
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
for (i = 0; i < nevents; ++i) {
if (MON_RING_EMPTY(rp))
break;
ep = MON_OFF2HDR(rp, rp->b_out);
mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
}
spin_unlock_irqrestore(&rp->b_lock, flags);
rp->b_read = 0;
mutex_unlock(&rp->fetch_lock);
return i;
}
/*
* Fetch at most max event offsets into the buffer and put them into vec.
* The events are usually freed later with mon_bin_flush.
* Return the effective number of events fetched.
*/
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
u32 __user *vec, unsigned int max)
{
unsigned int cur_out;
unsigned int bytes, avail;
unsigned int size;
unsigned int nevents;
struct mon_bin_hdr *ep;
unsigned long flags;
int rc;
mutex_lock(&rp->fetch_lock);
if ((rc = mon_bin_wait_event(file, rp)) < 0) {
mutex_unlock(&rp->fetch_lock);
return rc;
}
spin_lock_irqsave(&rp->b_lock, flags);
avail = rp->b_cnt;
spin_unlock_irqrestore(&rp->b_lock, flags);
cur_out = rp->b_out;
nevents = 0;
bytes = 0;
while (bytes < avail) {
if (nevents >= max)
break;
ep = MON_OFF2HDR(rp, cur_out);
if (put_user(cur_out, &vec[nevents])) {
mutex_unlock(&rp->fetch_lock);
return -EFAULT;
}
nevents++;
size = ep->len_cap + PKT_SIZE;
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
if ((cur_out += size) >= rp->b_size)
cur_out -= rp->b_size;
bytes += size;
}
mutex_unlock(&rp->fetch_lock);
return nevents;
}
/*
* Count events. This is almost the same as the above mon_bin_fetch,
* only we do not store offsets into user vector, and we have no limit.
*/
static int mon_bin_queued(struct mon_reader_bin *rp)
{
unsigned int cur_out;
unsigned int bytes, avail;
unsigned int size;
unsigned int nevents;
struct mon_bin_hdr *ep;
unsigned long flags;
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
avail = rp->b_cnt;
spin_unlock_irqrestore(&rp->b_lock, flags);
cur_out = rp->b_out;
nevents = 0;
bytes = 0;
while (bytes < avail) {
ep = MON_OFF2HDR(rp, cur_out);
nevents++;
size = ep->len_cap + PKT_SIZE;
size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
if ((cur_out += size) >= rp->b_size)
cur_out -= rp->b_size;
bytes += size;
}
mutex_unlock(&rp->fetch_lock);
return nevents;
}
/*
*/
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct mon_reader_bin *rp = file->private_data;
// struct mon_bus* mbus = rp->r.m_bus;
int ret = 0;
struct mon_bin_hdr *ep;
unsigned long flags;
switch (cmd) {
case MON_IOCQ_URB_LEN:
/*
* N.B. This only returns the size of data, without the header.
*/
spin_lock_irqsave(&rp->b_lock, flags);
if (!MON_RING_EMPTY(rp)) {
ep = MON_OFF2HDR(rp, rp->b_out);
ret = ep->len_cap;
}
spin_unlock_irqrestore(&rp->b_lock, flags);
break;
case MON_IOCQ_RING_SIZE:
mutex_lock(&rp->fetch_lock);
ret = rp->b_size;
mutex_unlock(&rp->fetch_lock);
break;
case MON_IOCT_RING_SIZE:
/*
	 * Changing the buffer size will flush its contents; the new
* buffer is allocated before releasing the old one to be sure
* the device will stay functional also in case of memory
* pressure.
*/
{
int size;
struct mon_pgmap *vec;
if (arg < BUFF_MIN || arg > BUFF_MAX)
return -EINVAL;
size = CHUNK_ALIGN(arg);
vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap),
GFP_KERNEL);
if (vec == NULL) {
ret = -ENOMEM;
break;
}
ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
if (ret < 0) {
kfree(vec);
break;
}
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
if (rp->mmap_active) {
mon_free_buff(vec, size/CHUNK_SIZE);
kfree(vec);
ret = -EBUSY;
} else {
mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
kfree(rp->b_vec);
rp->b_vec = vec;
rp->b_size = size;
rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
rp->cnt_lost = 0;
}
spin_unlock_irqrestore(&rp->b_lock, flags);
mutex_unlock(&rp->fetch_lock);
}
break;
case MON_IOCH_MFLUSH:
ret = mon_bin_flush(rp, arg);
break;
case MON_IOCX_GET:
case MON_IOCX_GETX:
{
struct mon_bin_get getb;
if (copy_from_user(&getb, (void __user *)arg,
sizeof(struct mon_bin_get)))
return -EFAULT;
if (getb.alloc > 0x10000000) /* Want to cast to u32 */
return -EINVAL;
ret = mon_bin_get_event(file, rp, getb.hdr,
(cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
getb.data, (unsigned int)getb.alloc);
}
break;
case MON_IOCX_MFETCH:
{
struct mon_bin_mfetch mfetch;
struct mon_bin_mfetch __user *uptr;
uptr = (struct mon_bin_mfetch __user *)arg;
if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
return -EFAULT;
if (mfetch.nflush) {
ret = mon_bin_flush(rp, mfetch.nflush);
if (ret < 0)
return ret;
if (put_user(ret, &uptr->nflush))
return -EFAULT;
}
ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
if (ret < 0)
return ret;
if (put_user(ret, &uptr->nfetch))
return -EFAULT;
ret = 0;
}
break;
case MON_IOCG_STATS: {
struct mon_bin_stats __user *sp;
unsigned int nevents;
unsigned int ndropped;
spin_lock_irqsave(&rp->b_lock, flags);
ndropped = rp->cnt_lost;
rp->cnt_lost = 0;
spin_unlock_irqrestore(&rp->b_lock, flags);
nevents = mon_bin_queued(rp);
sp = (struct mon_bin_stats __user *)arg;
if (put_user(ndropped, &sp->dropped))
return -EFAULT;
if (put_user(nevents, &sp->queued))
return -EFAULT;
}
break;
default:
return -ENOTTY;
}
return ret;
}
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct mon_reader_bin *rp = file->private_data;
int ret;
switch (cmd) {
case MON_IOCX_GET32:
case MON_IOCX_GETX32:
{
struct mon_bin_get32 getb;
if (copy_from_user(&getb, (void __user *)arg,
sizeof(struct mon_bin_get32)))
return -EFAULT;
ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
(cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
compat_ptr(getb.data32), getb.alloc32);
if (ret < 0)
return ret;
}
return 0;
case MON_IOCX_MFETCH32:
{
struct mon_bin_mfetch32 mfetch;
struct mon_bin_mfetch32 __user *uptr;
uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);
if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
return -EFAULT;
if (mfetch.nflush32) {
ret = mon_bin_flush(rp, mfetch.nflush32);
if (ret < 0)
return ret;
if (put_user(ret, &uptr->nflush32))
return -EFAULT;
}
ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
mfetch.nfetch32);
if (ret < 0)
return ret;
if (put_user(ret, &uptr->nfetch32))
return -EFAULT;
}
return 0;
case MON_IOCG_STATS:
return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
case MON_IOCQ_URB_LEN:
case MON_IOCQ_RING_SIZE:
case MON_IOCT_RING_SIZE:
case MON_IOCH_MFLUSH:
return mon_bin_ioctl(file, cmd, arg);
default:
;
}
return -ENOTTY;
}
#endif /* CONFIG_COMPAT */
static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
struct mon_reader_bin *rp = file->private_data;
__poll_t mask = 0;
unsigned long flags;
if (file->f_mode & FMODE_READ)
poll_wait(file, &rp->b_wait, wait);
spin_lock_irqsave(&rp->b_lock, flags);
if (!MON_RING_EMPTY(rp))
mask |= EPOLLIN | EPOLLRDNORM; /* readable */
spin_unlock_irqrestore(&rp->b_lock, flags);
return mask;
}
/*
* open and close: just keep track of how many times the device is
* mapped, to use the proper memory allocation function.
*/
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
struct mon_reader_bin *rp = vma->vm_private_data;
unsigned long flags;
spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active++;
spin_unlock_irqrestore(&rp->b_lock, flags);
}
static void mon_bin_vma_close(struct vm_area_struct *vma)
{
unsigned long flags;
struct mon_reader_bin *rp = vma->vm_private_data;
spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active--;
spin_unlock_irqrestore(&rp->b_lock, flags);
}
/*
* Map ring pages to user space.
*/
static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
{
struct mon_reader_bin *rp = vmf->vma->vm_private_data;
unsigned long offset, chunk_idx;
struct page *pageptr;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rp->b_size)
return VM_FAULT_SIGBUS;
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
vmf->page = pageptr;
return 0;
}
static const struct vm_operations_struct mon_bin_vm_ops = {
.open = mon_bin_vma_open,
.close = mon_bin_vma_close,
.fault = mon_bin_vma_fault,
};
static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
if (vma->vm_flags & VM_WRITE)
return -EPERM;
vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
return 0;
}
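/*
 * Illustrative only -- a hypothetical userspace flow for the mmap
 * interface, with uint32_t standing in for the kernel's u32. The ring is
 * mapped read-only; MON_IOCX_MFETCH fills a vector with byte offsets of
 * events inside the mapping (fetch.nfetch holds the count on return), and
 * the nflush field of a later call releases events already consumed:
 *
 *	size_t len = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
 *	unsigned char *ring = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	uint32_t offs[64];
 *	struct mon_bin_mfetch fetch = { .offvec = offs, .nfetch = 64 };
 *	ioctl(fd, MON_IOCX_MFETCH, &fetch);
 *	struct mon_bin_hdr *hdr = (struct mon_bin_hdr *)(ring + offs[0]);
 */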
static const struct file_operations mon_fops_binary = {
.owner = THIS_MODULE,
.open = mon_bin_open,
.llseek = no_llseek,
.read = mon_bin_read,
/* .write = mon_text_write, */
.poll = mon_bin_poll,
.unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mon_bin_compat_ioctl,
#endif
.release = mon_bin_release,
.mmap = mon_bin_mmap,
};
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
DECLARE_WAITQUEUE(waita, current);
unsigned long flags;
add_wait_queue(&rp->b_wait, &waita);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&rp->b_lock, flags);
while (MON_RING_EMPTY(rp)) {
spin_unlock_irqrestore(&rp->b_lock, flags);
if (file->f_flags & O_NONBLOCK) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->b_wait, &waita);
return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
}
schedule();
if (signal_pending(current)) {
remove_wait_queue(&rp->b_wait, &waita);
return -EINTR;
}
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&rp->b_lock, flags);
}
spin_unlock_irqrestore(&rp->b_lock, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->b_wait, &waita);
return 0;
}
static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
int n;
unsigned long vaddr;
for (n = 0; n < npages; n++) {
vaddr = get_zeroed_page(GFP_KERNEL);
if (vaddr == 0) {
while (n-- != 0)
free_page((unsigned long) map[n].ptr);
return -ENOMEM;
}
map[n].ptr = (unsigned char *) vaddr;
map[n].pg = virt_to_page((void *) vaddr);
}
return 0;
}
static void mon_free_buff(struct mon_pgmap *map, int npages)
{
int n;
for (n = 0; n < npages; n++)
free_page((unsigned long) map[n].ptr);
}
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
struct device *dev;
unsigned minor = ubus? ubus->busnum: 0;
if (minor >= MON_BIN_MAX_MINOR)
return 0;
dev = device_create(&mon_bin_class, ubus ? ubus->controller : NULL,
MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
"usbmon%d", minor);
if (IS_ERR(dev))
return 0;
mbus->classdev = dev;
return 1;
}
void mon_bin_del(struct mon_bus *mbus)
{
device_destroy(&mon_bin_class, mbus->classdev->devt);
}
int __init mon_bin_init(void)
{
int rc;
rc = class_register(&mon_bin_class);
if (rc)
goto err_class;
rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
if (rc < 0)
goto err_dev;
cdev_init(&mon_bin_cdev, &mon_fops_binary);
mon_bin_cdev.owner = THIS_MODULE;
rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
if (rc < 0)
goto err_add;
return 0;
err_add:
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
class_unregister(&mon_bin_class);
err_class:
return rc;
}
void mon_bin_exit(void)
{
cdev_del(&mon_bin_cdev);
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
class_unregister(&mon_bin_class);
}
| linux-master | drivers/usb/mon/mon_bin.c |
// SPDX-License-Identifier: GPL-2.0
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others.
*
* (C) Copyright 2000 John Fremlin <[email protected]>
* (C) Copyright 2000 Oliver Neukum <[email protected]>
*
* Parts shamelessly stolen from usb-storage and copyright by their
* authors. Thanks to Matt Dharm for giving us permission!
*
* This driver implements a SCSI host controller driver and a USB
* device driver. To avoid confusion, all the USB related stuff is
* prefixed by mts_usb_ and all the SCSI stuff by mts_scsi_.
*
* Microtek (www.microtek.com) did not release the specifications for
* their USB protocol to us, so we had to reverse engineer them. We
* don't know for which models they are valid.
*
* The X6 USB has three bulk endpoints, one output (0x1) down which
* commands and outgoing data are sent, and two input: 0x82 from which
* normal data is read from the scanner (in packets of maximum 32
* bytes) and from which the status byte is read, and 0x83 from which
* the results of a scan (or preview) are read in up to 64 * 1024 byte
* chunks by the Windows driver. We don't know how much it is possible
* to read at a time from 0x83.
*
* It seems possible to read (with URB transfers) everything from 0x82
* in one go, without bothering to read in 32 byte chunks.
*
 * There seems to be an optimisation whereby a further READ is implicit
 * if you simply read from 0x83.
*
* Guessed protocol:
*
* Send raw SCSI command to EP 0x1
*
* If there is data to receive:
* If the command was READ datatype=image:
* Read a lot of data from EP 0x83
* Else:
* Read data from EP 0x82
* Else:
* If there is data to transmit:
* Write it to EP 0x1
*
* Read status byte from EP 0x82
*
* References:
*
* The SCSI command set for the scanner is available from
* ftp://ftp.microtek.com/microtek/devpack/
*
* Microtek NV sent us a more up to date version of the document. If
* you want it, just send mail.
*
* Status:
*
* Untested with multiple scanners.
* Untested on SMP.
* Untested on a bigendian machine.
*
* History:
*
* 20000417 starting history
* 20000417 fixed load oops
* 20000417 fixed unload oops
* 20000419 fixed READ IMAGE detection
* 20000424 started conversion to use URBs
* 20000502 handled short transfers as errors
* 20000513 rename and organisation of functions (john)
* 20000513 added IDs for all products supported by Windows driver (john)
* 20000514 Rewrote mts_scsi_queuecommand to use URBs (john)
* 20000514 Version 0.0.8j
* 20000514 Fix reporting of non-existent devices to SCSI layer (john)
* 20000514 Added MTS_DEBUG_INT (john)
* 20000514 Changed "usb-microtek" to "microtek" for consistency (john)
* 20000514 Stupid bug fixes (john)
* 20000514 Version 0.0.9j
* 20000515 Put transfer context and URB in mts_desc (john)
* 20000515 Added prelim turn off debugging support (john)
* 20000515 Version 0.0.10j
* 20000515 Fixed up URB allocation (clear URB on alloc) (john)
* 20000515 Version 0.0.11j
* 20000516 Removed unnecessary spinlock in mts_transfer_context (john)
* 20000516 Removed unnecessary up on instance lock in mts_remove_nolock (john)
* 20000516 Implemented (badly) scsi_abort (john)
* 20000516 Version 0.0.12j
* 20000517 Hopefully removed mts_remove_nolock quasideadlock (john)
* 20000517 Added mts_debug_dump to print ll USB info (john)
* 20000518 Tweaks and documentation updates (john)
* 20000518 Version 0.0.13j
* 20000518 Cleaned up abort handling (john)
* 20000523 Removed scsi_command and various scsi_..._resets (john)
* 20000523 Added unlink URB on scsi_abort, now OHCI supports it (john)
* 20000523 Fixed last tiresome compile warning (john)
* 20000523 Version 0.0.14j (though version 0.1 has come out?)
* 20000602 Added primitive reset
* 20000602 Version 0.2.0
* 20000603 various cosmetic changes
* 20000603 Version 0.2.1
* 20000620 minor cosmetic changes
* 20000620 Version 0.2.2
* 20000822 Hopefully fixed deadlock in mts_remove_nolock()
* 20000822 Fixed minor race in mts_transfer_cleanup()
* 20000822 Fixed deadlock on submission error in queuecommand
* 20000822 Version 0.2.3
* 20000913 Reduced module size if debugging is off
* 20000913 Version 0.2.4
* 20010210 New abort logic
* 20010210 Version 0.3.0
* 20010217 Merged scatter/gather
* 20010218 Version 0.4.0
* 20010218 Cosmetic fixes
* 20010218 Version 0.4.1
* 20010306 Abort while using scatter/gather
* 20010306 Version 0.4.2
* 20010311 Remove all timeouts and tidy up generally (john)
* 20010320 check return value of scsi_register()
* 20010320 Version 0.4.3
* 20010408 Identify version on module load.
* 20011003 Fix multiple requests
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/proc_fs.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "microtek.h"
#define DRIVER_AUTHOR "John Fremlin <[email protected]>, Oliver Neukum <[email protected]>"
#define DRIVER_DESC "Microtek Scanmaker X6 USB scanner driver"
/* Should we do debugging? */
//#define MTS_DO_DEBUG
/* USB layer driver interface */
static int mts_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void mts_usb_disconnect(struct usb_interface *intf);
static const struct usb_device_id mts_usb_ids[];
static struct usb_driver mts_usb_driver = {
.name = "microtekX6",
.probe = mts_usb_probe,
.disconnect = mts_usb_disconnect,
.id_table = mts_usb_ids,
};
/* Internal driver stuff */
#define MTS_VERSION "0.4.3"
#define MTS_NAME "microtek usb (rev " MTS_VERSION "): "
#define MTS_WARNING(x...) \
printk( KERN_WARNING MTS_NAME x )
#define MTS_ERROR(x...) \
printk( KERN_ERR MTS_NAME x )
#define MTS_INT_ERROR(x...) \
MTS_ERROR(x)
#define MTS_MESSAGE(x...) \
printk( KERN_INFO MTS_NAME x )
#if defined MTS_DO_DEBUG
#define MTS_DEBUG(x...) \
printk( KERN_DEBUG MTS_NAME x )
#define MTS_DEBUG_GOT_HERE() \
MTS_DEBUG("got to %s:%d (%s)\n", __FILE__, (int)__LINE__, __func__ )
#define MTS_DEBUG_INT() \
do { MTS_DEBUG_GOT_HERE(); \
MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \
MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \
mts_debug_dump(context->instance);\
} while(0)
#else
#define MTS_NUL_STATEMENT do { } while(0)
#define MTS_DEBUG(x...) MTS_NUL_STATEMENT
#define MTS_DEBUG_GOT_HERE() MTS_NUL_STATEMENT
#define MTS_DEBUG_INT() MTS_NUL_STATEMENT
#endif
#define MTS_INT_INIT()\
struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \
	MTS_DEBUG_INT();
#ifdef MTS_DO_DEBUG
static inline void mts_debug_dump(struct mts_desc* desc) {
MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n",
(int)desc,
(int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0]
);
MTS_DEBUG("ep_out=%x ep_response=%x ep_image=%x\n",
usb_sndbulkpipe(desc->usb_dev,desc->ep_out),
usb_rcvbulkpipe(desc->usb_dev,desc->ep_response),
usb_rcvbulkpipe(desc->usb_dev,desc->ep_image)
);
}
static inline void mts_show_command(struct scsi_cmnd *srb)
{
char *what = NULL;
switch (srb->cmnd[0]) {
case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break;
case REZERO_UNIT: what = "REZERO_UNIT"; break;
case REQUEST_SENSE: what = "REQUEST_SENSE"; break;
case FORMAT_UNIT: what = "FORMAT_UNIT"; break;
case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break;
case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break;
case READ_6: what = "READ_6"; break;
case WRITE_6: what = "WRITE_6"; break;
case SEEK_6: what = "SEEK_6"; break;
case READ_REVERSE: what = "READ_REVERSE"; break;
case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break;
case SPACE: what = "SPACE"; break;
case INQUIRY: what = "INQUIRY"; break;
case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
case MODE_SELECT: what = "MODE_SELECT"; break;
case RESERVE: what = "RESERVE"; break;
case RELEASE: what = "RELEASE"; break;
case COPY: what = "COPY"; break;
case ERASE: what = "ERASE"; break;
case MODE_SENSE: what = "MODE_SENSE"; break;
case START_STOP: what = "START_STOP"; break;
case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break;
case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break;
case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break;
case SET_WINDOW: what = "SET_WINDOW"; break;
case READ_CAPACITY: what = "READ_CAPACITY"; break;
case READ_10: what = "READ_10"; break;
case WRITE_10: what = "WRITE_10"; break;
case SEEK_10: what = "SEEK_10"; break;
case WRITE_VERIFY: what = "WRITE_VERIFY"; break;
case VERIFY: what = "VERIFY"; break;
case SEARCH_HIGH: what = "SEARCH_HIGH"; break;
case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break;
case SEARCH_LOW: what = "SEARCH_LOW"; break;
case SET_LIMITS: what = "SET_LIMITS"; break;
case READ_POSITION: what = "READ_POSITION"; break;
case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break;
case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break;
case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break;
case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break;
case COMPARE: what = "COMPARE"; break;
case COPY_VERIFY: what = "COPY_VERIFY"; break;
case WRITE_BUFFER: what = "WRITE_BUFFER"; break;
case READ_BUFFER: what = "READ_BUFFER"; break;
case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break;
case READ_LONG: what = "READ_LONG"; break;
case WRITE_LONG: what = "WRITE_LONG"; break;
case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break;
case WRITE_SAME: what = "WRITE_SAME"; break;
case READ_TOC: what = "READ_TOC"; break;
case LOG_SELECT: what = "LOG_SELECT"; break;
case LOG_SENSE: what = "LOG_SENSE"; break;
case MODE_SELECT_10: what = "MODE_SELECT_10"; break;
case MODE_SENSE_10: what = "MODE_SENSE_10"; break;
case MOVE_MEDIUM: what = "MOVE_MEDIUM"; break;
case READ_12: what = "READ_12"; break;
case WRITE_12: what = "WRITE_12"; break;
case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break;
case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break;
case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break;
case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break;
case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break;
case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break;
case WRITE_LONG_2: what = "WRITE_LONG_2"; break;
default:
MTS_DEBUG("can't decode command\n");
goto out;
break;
}
MTS_DEBUG( "Command %s (%d bytes)\n", what, srb->cmd_len);
out:
MTS_DEBUG( " %10ph\n", srb->cmnd);
}
#else
static inline void mts_show_command(struct scsi_cmnd * dummy)
{
}
static inline void mts_debug_dump(struct mts_desc* dummy)
{
}
#endif
static inline void mts_urb_abort(struct mts_desc* desc) {
MTS_DEBUG_GOT_HERE();
mts_debug_dump(desc);
usb_kill_urb( desc->urb );
}
static int mts_slave_alloc (struct scsi_device *s)
{
s->inquiry_len = 0x24;
return 0;
}
static int mts_slave_configure (struct scsi_device *s)
{
blk_queue_dma_alignment(s->request_queue, (512 - 1));
return 0;
}
static int mts_scsi_abort(struct scsi_cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
MTS_DEBUG_GOT_HERE();
mts_urb_abort(desc);
return FAILED;
}
static int mts_scsi_host_reset(struct scsi_cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
int result;
MTS_DEBUG_GOT_HERE();
mts_debug_dump(desc);
result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
if (result == 0) {
result = usb_reset_device(desc->usb_dev);
usb_unlock_device(desc->usb_dev);
}
return result ? FAILED : SUCCESS;
}
static int
mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb);
static void mts_transfer_cleanup( struct urb *transfer );
static void mts_do_sg(struct urb * transfer);
static inline
void mts_int_submit_urb (struct urb* transfer,
int pipe,
void* data,
unsigned length,
usb_complete_t callback )
/* Interrupt context! */
/* Holding transfer->context->lock! */
{
int res;
MTS_INT_INIT();
usb_fill_bulk_urb(transfer,
context->instance->usb_dev,
pipe,
data,
length,
callback,
context
);
res = usb_submit_urb( transfer, GFP_ATOMIC );
if ( unlikely(res) ) {
MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res );
set_host_byte(context->srb, DID_ERROR);
mts_transfer_cleanup(transfer);
}
}
static void mts_transfer_cleanup( struct urb *transfer )
/* Interrupt context! */
{
MTS_INT_INIT();
if ( likely(context->final_callback != NULL) )
context->final_callback(context->srb);
}
static void mts_transfer_done( struct urb *transfer )
{
MTS_INT_INIT();
context->srb->result &= MTS_SCSI_ERR_MASK;
context->srb->result |= (unsigned)(*context->scsi_status)<<1;
mts_transfer_cleanup(transfer);
}
static void mts_get_status( struct urb *transfer )
/* Interrupt context! */
{
MTS_INT_INIT();
mts_int_submit_urb(transfer,
usb_rcvbulkpipe(context->instance->usb_dev,
context->instance->ep_response),
context->scsi_status,
1,
mts_transfer_done );
}
static void mts_data_done( struct urb* transfer )
/* Interrupt context! */
{
int status = transfer->status;
MTS_INT_INIT();
if ( context->data_length != transfer->actual_length ) {
scsi_set_resid(context->srb, context->data_length -
transfer->actual_length);
} else if ( unlikely(status) ) {
set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR));
}
mts_get_status(transfer);
}
static void mts_command_done( struct urb *transfer )
/* Interrupt context! */
{
int status = transfer->status;
MTS_INT_INIT();
if ( unlikely(status) ) {
if (status == -ENOENT) {
/* We are being killed */
MTS_DEBUG_GOT_HERE();
set_host_byte(context->srb, DID_ABORT);
} else {
/* A genuine error has occurred */
MTS_DEBUG_GOT_HERE();
set_host_byte(context->srb, DID_ERROR);
}
mts_transfer_cleanup(transfer);
return;
}
if (context->srb->cmnd[0] == REQUEST_SENSE) {
mts_int_submit_urb(transfer,
context->data_pipe,
context->srb->sense_buffer,
context->data_length,
mts_data_done);
} else { if ( context->data ) {
mts_int_submit_urb(transfer,
context->data_pipe,
context->data,
context->data_length,
scsi_sg_count(context->srb) > 1 ?
mts_do_sg : mts_data_done);
} else {
mts_get_status(transfer);
}
}
}
static void mts_do_sg (struct urb* transfer)
{
int status = transfer->status;
MTS_INT_INIT();
MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,
scsi_sg_count(context->srb));
if (unlikely(status)) {
set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR));
mts_transfer_cleanup(transfer);
}
context->curr_sg = sg_next(context->curr_sg);
mts_int_submit_urb(transfer,
context->data_pipe,
sg_virt(context->curr_sg),
context->curr_sg->length,
sg_is_last(context->curr_sg) ?
mts_data_done : mts_do_sg);
}
static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 };
static const u8 mts_read_image_sig_len = 4;
static const unsigned char mts_direction[256/8] = {
0x28, 0x81, 0x14, 0x14, 0x20, 0x01, 0x90, 0x77,
0x0C, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
#define MTS_DIRECTION_IS_IN(x) ((mts_direction[x>>3] >> (x & 7)) & 1)
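/*
 * A worked example of the lookup above: for INQUIRY (0x12) the byte index
 * is 0x12 >> 3 = 2 and the bit is 0x12 & 7 = 2; mts_direction[2] == 0x14 ==
 * 0b00010100 has bit 2 set, so INQUIRY is classified as an input
 * (device-to-host) command.
 */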
static void
mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
{
int pipe;
MTS_DEBUG_GOT_HERE();
desc->context.instance = desc;
desc->context.srb = srb;
if (!scsi_bufflen(srb)) {
desc->context.data = NULL;
desc->context.data_length = 0;
return;
} else {
desc->context.curr_sg = scsi_sglist(srb);
desc->context.data = sg_virt(desc->context.curr_sg);
desc->context.data_length = desc->context.curr_sg->length;
}
/* can't rely on srb->sc_data_direction */
/* Brutally ripped from usb-storage */
if ( !memcmp( srb->cmnd, mts_read_image_sig, mts_read_image_sig_len )
) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image);
MTS_DEBUG( "transferring from desc->ep_image == %d\n",
(int)desc->ep_image );
} else if ( MTS_DIRECTION_IS_IN(srb->cmnd[0]) ) {
pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response);
MTS_DEBUG( "transferring from desc->ep_response == %d\n",
(int)desc->ep_response);
} else {
MTS_DEBUG("transferring to desc->ep_out == %d\n",
(int)desc->ep_out);
pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out);
}
desc->context.data_pipe = pipe;
}
static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb)
{
mts_scsi_cmnd_callback callback = scsi_done;
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
int res;
MTS_DEBUG_GOT_HERE();
mts_show_command(srb);
mts_debug_dump(desc);
if ( srb->device->lun || srb->device->id || srb->device->channel ) {
MTS_DEBUG("Command to LUN=%d ID=%d CHANNEL=%d from SCSI layer\n",(int)srb->device->lun,(int)srb->device->id, (int)srb->device->channel );
MTS_DEBUG("this device doesn't exist\n");
set_host_byte(srb, DID_BAD_TARGET);
if(likely(callback != NULL))
callback(srb);
goto out;
}
usb_fill_bulk_urb(desc->urb,
desc->usb_dev,
usb_sndbulkpipe(desc->usb_dev,desc->ep_out),
srb->cmnd,
srb->cmd_len,
mts_command_done,
&desc->context
);
mts_build_transfer_context( srb, desc );
desc->context.final_callback = callback;
/* here we need ATOMIC as we are called with the iolock */
res=usb_submit_urb(desc->urb, GFP_ATOMIC);
if(unlikely(res)){
MTS_ERROR("error %d submitting URB\n",(int)res);
set_host_byte(srb, DID_ERROR);
if(likely(callback != NULL))
callback(srb);
}
out:
return 0;
}
static DEF_SCSI_QCMD(mts_scsi_queuecommand)
static const struct scsi_host_template mts_scsi_host_template = {
.module = THIS_MODULE,
.name = "microtekX6",
.proc_name = "microtekX6",
.queuecommand = mts_scsi_queuecommand,
.eh_abort_handler = mts_scsi_abort,
.eh_host_reset_handler = mts_scsi_host_reset,
.sg_tablesize = SG_ALL,
.can_queue = 1,
.this_id = -1,
.emulated = 1,
.slave_alloc = mts_slave_alloc,
.slave_configure = mts_slave_configure,
.max_sectors= 256, /* 128 K */
};
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
static const struct usb_device_id mts_usb_ids[] =
{
{ USB_DEVICE(0x4ce, 0x0300) },
{ USB_DEVICE(0x5da, 0x0094) },
{ USB_DEVICE(0x5da, 0x0099) },
{ USB_DEVICE(0x5da, 0x009a) },
{ USB_DEVICE(0x5da, 0x00a0) },
{ USB_DEVICE(0x5da, 0x00a3) },
{ USB_DEVICE(0x5da, 0x80a3) },
{ USB_DEVICE(0x5da, 0x80ac) },
{ USB_DEVICE(0x5da, 0x00b6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, mts_usb_ids);
static int mts_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int i;
int ep_out = -1;
int ep_in_set[3]; /* this will break if we have more than three endpoints
which is why we check */
int *ep_in_current = ep_in_set;
int err_retval = -ENOMEM;
struct mts_desc * new_desc;
struct usb_device *dev = interface_to_usbdev (intf);
/* the current altsetting on the interface we're probing */
struct usb_host_interface *altsetting;
MTS_DEBUG_GOT_HERE();
MTS_DEBUG( "usb-device descriptor at %x\n", (int)dev );
MTS_DEBUG( "product id = 0x%x, vendor id = 0x%x\n",
le16_to_cpu(dev->descriptor.idProduct),
le16_to_cpu(dev->descriptor.idVendor) );
MTS_DEBUG_GOT_HERE();
/* the current altsetting on the interface we're probing */
altsetting = intf->cur_altsetting;
/* Check if the config is sane */
if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) {
MTS_WARNING( "expecting %d got %d endpoints! Bailing out.\n",
(int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints );
return -ENODEV;
}
for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) {
if ((altsetting->endpoint[i].desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) {
MTS_WARNING( "can only deal with bulk endpoints; endpoint %d is not bulk.\n",
(int)altsetting->endpoint[i].desc.bEndpointAddress );
} else {
if (altsetting->endpoint[i].desc.bEndpointAddress &
USB_DIR_IN)
*ep_in_current++
= altsetting->endpoint[i].desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
else {
if ( ep_out != -1 ) {
				MTS_WARNING( "can only deal with one output endpoint. Bailing out." );
return -ENODEV;
}
ep_out = altsetting->endpoint[i].desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
}
}
}
if (ep_in_current != &ep_in_set[2]) {
MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
return -ENODEV;
}
if ( ep_out == -1 ) {
MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
return -ENODEV;
}
new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL);
if (!new_desc)
goto out;
new_desc->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!new_desc->urb)
goto out_kfree;
new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL);
if (!new_desc->context.scsi_status)
goto out_free_urb;
new_desc->usb_dev = dev;
new_desc->usb_intf = intf;
/* endpoints */
new_desc->ep_out = ep_out;
new_desc->ep_response = ep_in_set[0];
new_desc->ep_image = ep_in_set[1];
if ( new_desc->ep_out != MTS_EP_OUT )
MTS_WARNING( "will this work? Command EP is not usually %d\n",
(int)new_desc->ep_out );
if ( new_desc->ep_response != MTS_EP_RESPONSE )
MTS_WARNING( "will this work? Response EP is not usually %d\n",
(int)new_desc->ep_response );
if ( new_desc->ep_image != MTS_EP_IMAGE )
MTS_WARNING( "will this work? Image data EP is not usually %d\n",
(int)new_desc->ep_image );
new_desc->host = scsi_host_alloc(&mts_scsi_host_template,
sizeof(new_desc));
if (!new_desc->host)
goto out_kfree2;
new_desc->host->hostdata[0] = (unsigned long)new_desc;
if (scsi_add_host(new_desc->host, &dev->dev)) {
err_retval = -EIO;
goto out_host_put;
}
scsi_scan_host(new_desc->host);
usb_set_intfdata(intf, new_desc);
return 0;
out_host_put:
scsi_host_put(new_desc->host);
out_kfree2:
kfree(new_desc->context.scsi_status);
out_free_urb:
usb_free_urb(new_desc->urb);
out_kfree:
kfree(new_desc);
out:
return err_retval;
}
static void mts_usb_disconnect (struct usb_interface *intf)
{
struct mts_desc *desc = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
usb_kill_urb(desc->urb);
scsi_remove_host(desc->host);
scsi_host_put(desc->host);
usb_free_urb(desc->urb);
kfree(desc->context.scsi_status);
kfree(desc);
}
module_usb_driver(mts_usb_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/image/microtek.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* copyright (C) 1999/2000 by Henning Zabel <[email protected]>
*/
/*
* USB-Kernel Driver for the Mustek MDC800 Digital Camera
* (c) 1999/2000 Henning Zabel <[email protected]>
*
*
 * The driver brings the USB functions of the MDC800 to Linux: the
 * camera's USB protocol is spoken through the kernel device node.
 * The driver uses a misc device node. Create it with:
 * mknod /dev/mustek c 180 32
*
* The driver supports only one camera.
*
* Fix: mdc800 used sleep_on and slept with io_lock held.
* Converted sleep_on to waitqueues with schedule_timeout and made io_lock
* a semaphore from a spinlock.
* by Oliver Neukum <[email protected]>
* (02/12/2001)
*
* Identify version on module load.
* (08/04/2001) gb
*
* version 0.7.5
* Fixed potential SMP races with Spinlocks.
* Thanks to Oliver Neukum <[email protected]> who
* noticed the race conditions.
* (30/10/2000)
*
* Fixed: Setting urb->dev before submitting urb.
* by Greg KH <[email protected]>
* (13/10/2000)
*
* version 0.7.3
 * bugfix: the mdc800->state field got set to READY after the
 * disconnect function had set it to NOT_CONNECTED. This made the
 * driver behave as if the camera were still connected and caused
 * some hangups.
*
* version 0.7.1
* MOD_INC and MOD_DEC are changed in usb_probe to prevent load/unload
* problems when compiled as Module.
* (04/04/2000)
*
* The mdc800 driver gets assigned the USB Minor 32-47. The Registration
* was updated to use these values.
* (26/03/2000)
*
 * The init and exit module functions are updated.
* (01/03/2000)
*
* version 0.7.0
* Rewrite of the driver : The driver now uses URB's. The old stuff
* has been removed.
*
* version 0.6.0
 * Rewrite of this driver: the emulation of the RS-232 protocol
 * has been removed from the driver. A special executeCommand function
 * for this driver is included in gphoto.
 * The driver supports two kinds of communication with bulk endpoints.
* Either with the dev->bus->ops->bulk... or with callback function.
* (09/11/1999)
*
* version 0.5.0:
* first Version that gets a version number. Most of the needed
* functions work.
* (20/10/1999)
*/
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/fs.h>
/*
* Version Information
*/
#define DRIVER_VERSION "v0.7.5 (30/10/2000)"
#define DRIVER_AUTHOR "Henning Zabel <[email protected]>"
#define DRIVER_DESC "USB Driver for Mustek MDC800 Digital Camera"
/* Vendor and Product Information */
#define MDC800_VENDOR_ID 0x055f
#define MDC800_PRODUCT_ID 0xa800
/* Timeouts (msec) */
#define TO_DOWNLOAD_GET_READY 1500
#define TO_DOWNLOAD_GET_BUSY 1500
#define TO_WRITE_GET_READY 1000
#define TO_DEFAULT_COMMAND 5000
#define TO_READ_FROM_IRQ TO_DEFAULT_COMMAND
#define TO_GET_READY TO_DEFAULT_COMMAND
/* Minor Number of the device (create with mknod /dev/mustek c 180 32) */
#define MDC800_DEVICE_MINOR_BASE 32
/**************************************************************************
Data and structs
***************************************************************************/
typedef enum {
NOT_CONNECTED, READY, WORKING, DOWNLOAD
} mdc800_state;
/* Data for the driver */
struct mdc800_data
{
struct usb_device * dev; // Device Data
mdc800_state state;
unsigned int endpoint [4];
struct urb * irq_urb;
wait_queue_head_t irq_wait;
int irq_woken;
char* irq_urb_buffer;
int camera_busy; // is camera busy ?
int camera_request_ready; // Status to synchronize with irq
char camera_response [8]; // last Bytes send after busy
struct urb * write_urb;
char* write_urb_buffer;
wait_queue_head_t write_wait;
int written;
struct urb * download_urb;
char* download_urb_buffer;
wait_queue_head_t download_wait;
int downloaded;
int download_left; // Bytes left to download ?
/* Device Data */
char out [64]; // Answer Buffer
	int out_ptr; // Index of the first unread byte
int out_count; // Bytes in the buffer
int open; // Camera device open ?
struct mutex io_lock; // I/O lock
char in [8]; // Command Input Buffer
int in_count;
int pic_index; // cache for the image size (-1 if nothing cached)
int pic_len;
int minor;
};
/* Specification of the Endpoints */
static struct usb_endpoint_descriptor mdc800_ed [4] =
{
{
.bLength = 0,
.bDescriptorType = 0,
.bEndpointAddress = 0x01,
.bmAttributes = 0x02,
.wMaxPacketSize = cpu_to_le16(8),
.bInterval = 0,
.bRefresh = 0,
.bSynchAddress = 0,
},
{
.bLength = 0,
.bDescriptorType = 0,
.bEndpointAddress = 0x82,
.bmAttributes = 0x03,
.wMaxPacketSize = cpu_to_le16(8),
.bInterval = 0,
.bRefresh = 0,
.bSynchAddress = 0,
},
{
.bLength = 0,
.bDescriptorType = 0,
.bEndpointAddress = 0x03,
.bmAttributes = 0x02,
.wMaxPacketSize = cpu_to_le16(64),
.bInterval = 0,
.bRefresh = 0,
.bSynchAddress = 0,
},
{
.bLength = 0,
.bDescriptorType = 0,
.bEndpointAddress = 0x84,
.bmAttributes = 0x02,
.wMaxPacketSize = cpu_to_le16(64),
.bInterval = 0,
.bRefresh = 0,
.bSynchAddress = 0,
},
};
/* The Variable used by the driver */
static struct mdc800_data* mdc800;
/***************************************************************************
The USB Part of the driver
****************************************************************************/
static int mdc800_endpoint_equals (struct usb_endpoint_descriptor *a,struct usb_endpoint_descriptor *b)
{
return (
( a->bEndpointAddress == b->bEndpointAddress )
&& ( a->bmAttributes == b->bmAttributes )
&& ( a->wMaxPacketSize == b->wMaxPacketSize )
);
}
/*
* Checks whether the camera reports busy
*/
static int mdc800_isBusy (char* ch)
{
int i=0;
while (i<8)
{
if (ch [i] != (char)0x99)
return 0;
i++;
}
return 1;
}
/*
* Checks whether the Camera is ready
*/
static int mdc800_isReady (char *ch)
{
int i=0;
while (i<8)
{
if (ch [i] != (char)0xbb)
return 0;
i++;
}
return 1;
}
/*
* USB IRQ Handler for InputLine
*/
static void mdc800_usb_irq (struct urb *urb)
{
int data_received=0, wake_up;
unsigned char* b=urb->transfer_buffer;
struct mdc800_data* mdc800=urb->context;
struct device *dev = &mdc800->dev->dev;
int status = urb->status;
if (status >= 0) {
if (mdc800_isBusy (b))
{
if (!mdc800->camera_busy)
{
mdc800->camera_busy=1;
dev_dbg(dev, "gets busy\n");
}
}
else
{
if (mdc800->camera_busy && mdc800_isReady (b))
{
mdc800->camera_busy=0;
dev_dbg(dev, "gets ready\n");
}
}
if (!(mdc800_isBusy (b) || mdc800_isReady (b)))
{
/* Store Data in camera_answer field */
dev_dbg(dev, "%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]);
memcpy (mdc800->camera_response,b,8);
data_received=1;
}
}
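/*
 * camera_request_ready encodes what the waiter is blocked on:
 * 1 = camera ready, 2 = response data, 3 = camera busy.
 * Any URB error wakes the waiter as well.
 */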
wake_up= ( mdc800->camera_request_ready > 0 )
&&
(
((mdc800->camera_request_ready == 1) && (!mdc800->camera_busy))
||
((mdc800->camera_request_ready == 2) && data_received)
||
((mdc800->camera_request_ready == 3) && (mdc800->camera_busy))
||
(status < 0)
);
if (wake_up)
{
mdc800->camera_request_ready=0;
mdc800->irq_woken=1;
wake_up (&mdc800->irq_wait);
}
}
/*
 * Waits a while until the irq handler reports that the camera is ready
 *
 * mode : 0: wait until the camera is ready
 *        1: wait for response data to arrive
 *        2: wait until the camera reports busy
 *
 * msec: time to wait in milliseconds
 *
 * The mode is stored as camera_request_ready = mode + 1 and is
 * consumed by mdc800_usb_irq().
 */
static int mdc800_usb_waitForIRQ (int mode, int msec)
{
mdc800->camera_request_ready=1+mode;
wait_event_timeout(mdc800->irq_wait, mdc800->irq_woken,
msecs_to_jiffies(msec));
mdc800->irq_woken = 0;
if (mdc800->camera_request_ready>0)
{
mdc800->camera_request_ready=0;
dev_err(&mdc800->dev->dev, "timeout waiting for camera.\n");
return -1;
}
if (mdc800->state == NOT_CONNECTED)
{
printk(KERN_WARNING "mdc800: Camera gets disconnected "
"during waiting for irq.\n");
mdc800->camera_request_ready=0;
return -2;
}
return 0;
}
/*
* The write_urb callback function
*/
static void mdc800_usb_write_notify (struct urb *urb)
{
struct mdc800_data* mdc800=urb->context;
int status = urb->status;
if (status != 0)
dev_err(&mdc800->dev->dev,
"writing command fails (status=%i)\n", status);
else
mdc800->state=READY;
mdc800->written = 1;
wake_up (&mdc800->write_wait);
}
/*
* The download_urb callback function
*/
static void mdc800_usb_download_notify (struct urb *urb)
{
struct mdc800_data* mdc800=urb->context;
int status = urb->status;
if (status == 0) {
/* Fill the output buffer with this data */
memcpy (mdc800->out, urb->transfer_buffer, 64);
mdc800->out_count=64;
mdc800->out_ptr=0;
mdc800->download_left-=64;
if (mdc800->download_left == 0)
{
mdc800->state=READY;
}
} else {
dev_err(&mdc800->dev->dev,
"request bytes fails (status:%i)\n", status);
}
mdc800->downloaded = 1;
wake_up (&mdc800->download_wait);
}
/***************************************************************************
Probing for the Camera
***************************************************************************/
static struct usb_driver mdc800_usb_driver;
static const struct file_operations mdc800_device_ops;
static struct usb_class_driver mdc800_class = {
.name = "mdc800%d",
.fops = &mdc800_device_ops,
.minor_base = MDC800_DEVICE_MINOR_BASE,
};
/*
* Probe callback: look for the Mustek MDC800 on the USB bus
*/
static int mdc800_usb_probe (struct usb_interface *intf,
const struct usb_device_id *id)
{
int i,j;
struct usb_host_interface *intf_desc;
struct usb_device *dev = interface_to_usbdev (intf);
int irq_interval=0;
int retval;
dev_dbg(&intf->dev, "(%s) called.\n", __func__);
if (mdc800->dev != NULL)
{
dev_warn(&intf->dev, "only one Mustek MDC800 is supported.\n");
return -ENODEV;
}
if (dev->descriptor.bNumConfigurations != 1)
{
dev_err(&intf->dev,
"probe fails -> wrong Number of Configuration\n");
return -ENODEV;
}
intf_desc = intf->cur_altsetting;
if (
( intf_desc->desc.bInterfaceClass != 0xff )
|| ( intf_desc->desc.bInterfaceSubClass != 0 )
|| ( intf_desc->desc.bInterfaceProtocol != 0 )
|| ( intf_desc->desc.bNumEndpoints != 4)
)
{
dev_err(&intf->dev, "probe fails -> wrong Interface\n");
return -ENODEV;
}
/* Check the Endpoints */
for (i=0; i<4; i++)
{
mdc800->endpoint[i]=-1;
for (j=0; j<4; j++)
{
if (mdc800_endpoint_equals (&intf_desc->endpoint [j].desc,&mdc800_ed [i]))
{
mdc800->endpoint[i]=intf_desc->endpoint [j].desc.bEndpointAddress ;
if (i==1)
{
irq_interval=intf_desc->endpoint [j].desc.bInterval;
}
}
}
if (mdc800->endpoint[i] == -1)
{
dev_err(&intf->dev, "probe fails -> Wrong Endpoints.\n");
return -ENODEV;
}
}
dev_info(&intf->dev, "Found Mustek MDC800 on USB.\n");
mutex_lock(&mdc800->io_lock);
retval = usb_register_dev(intf, &mdc800_class);
if (retval) {
dev_err(&intf->dev, "Not able to get a minor for this device.\n");
mutex_unlock(&mdc800->io_lock);
return -ENODEV;
}
mdc800->dev=dev;
mdc800->open=0;
/* Setup URB Structs */
usb_fill_int_urb (
mdc800->irq_urb,
mdc800->dev,
usb_rcvintpipe (mdc800->dev,mdc800->endpoint [1]),
mdc800->irq_urb_buffer,
8,
mdc800_usb_irq,
mdc800,
irq_interval
);
usb_fill_bulk_urb (
mdc800->write_urb,
mdc800->dev,
usb_sndbulkpipe (mdc800->dev, mdc800->endpoint[0]),
mdc800->write_urb_buffer,
8,
mdc800_usb_write_notify,
mdc800
);
usb_fill_bulk_urb (
mdc800->download_urb,
mdc800->dev,
usb_rcvbulkpipe (mdc800->dev, mdc800->endpoint [3]),
mdc800->download_urb_buffer,
64,
mdc800_usb_download_notify,
mdc800
);
mdc800->state=READY;
mutex_unlock(&mdc800->io_lock);
usb_set_intfdata(intf, mdc800);
return 0;
}
/*
* Disconnect USB device (maybe the MDC800)
*/
static void mdc800_usb_disconnect (struct usb_interface *intf)
{
struct mdc800_data* mdc800 = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "(%s) called\n", __func__);
if (mdc800) {
if (mdc800->state == NOT_CONNECTED)
return;
usb_deregister_dev(intf, &mdc800_class);
/* must be under lock to make sure no URB
is submitted after usb_kill_urb() */
mutex_lock(&mdc800->io_lock);
mdc800->state=NOT_CONNECTED;
usb_kill_urb(mdc800->irq_urb);
usb_kill_urb(mdc800->write_urb);
usb_kill_urb(mdc800->download_urb);
mutex_unlock(&mdc800->io_lock);
mdc800->dev = NULL;
usb_set_intfdata(intf, NULL);
}
dev_info(&intf->dev, "Mustek MDC800 disconnected from USB.\n");
}
/***************************************************************************
The Misc device Part (file_operations)
****************************************************************************/
/*
* This function calculates the answer size for a command.
*/
static int mdc800_getAnswerSize (char command)
{
switch ((unsigned char) command)
{
case 0x2a:
case 0x49:
case 0x51:
case 0x0d:
case 0x20:
case 0x07:
case 0x01:
case 0x25:
case 0x00:
return 8;
case 0x05:
case 0x3e:
return mdc800->pic_len;
case 0x09:
return 4096;
default:
return 0;
}
}
/*
* Open the device: reset the driver state and submit the irq URB
*/
static int mdc800_device_open (struct inode* inode, struct file *file)
{
int retval=0;
int errn=0;
mutex_lock(&mdc800->io_lock);
if (mdc800->state == NOT_CONNECTED)
{
errn=-EBUSY;
goto error_out;
}
if (mdc800->open)
{
errn=-EBUSY;
goto error_out;
}
mdc800->in_count=0;
mdc800->out_count=0;
mdc800->out_ptr=0;
mdc800->pic_index=0;
mdc800->pic_len=-1;
mdc800->download_left=0;
mdc800->camera_busy=0;
mdc800->camera_request_ready=0;
retval=0;
mdc800->irq_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
if (retval) {
dev_err(&mdc800->dev->dev,
"request USB irq fails (submit_retval=%i).\n", retval);
errn = -EIO;
goto error_out;
}
mdc800->open=1;
dev_dbg(&mdc800->dev->dev, "Mustek MDC800 device opened.\n");
error_out:
mutex_unlock(&mdc800->io_lock);
return errn;
}
/*
* Close the camera and kill the outstanding URBs
*/
static int mdc800_device_release (struct inode* inode, struct file *file)
{
int retval=0;
mutex_lock(&mdc800->io_lock);
if (mdc800->open && (mdc800->state != NOT_CONNECTED))
{
usb_kill_urb(mdc800->irq_urb);
usb_kill_urb(mdc800->write_urb);
usb_kill_urb(mdc800->download_urb);
mdc800->open=0;
}
else
{
retval=-EIO;
}
mutex_unlock(&mdc800->io_lock);
return retval;
}
/*
* The Device read callback Function
*/
static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t len, loff_t *pos)
{
size_t left=len, sts=len; /* single transfer size */
char __user *ptr = buf;
int retval;
mutex_lock(&mdc800->io_lock);
if (mdc800->state == NOT_CONNECTED)
{
mutex_unlock(&mdc800->io_lock);
return -EBUSY;
}
if (mdc800->state == WORKING)
{
printk(KERN_WARNING "mdc800: Illegal State \"working\""
"reached during read ?!\n");
mutex_unlock(&mdc800->io_lock);
return -EBUSY;
}
if (!mdc800->open)
{
mutex_unlock(&mdc800->io_lock);
return -EBUSY;
}
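/*
 * Hand the data to userspace in chunks from the 64 byte answer
 * buffer; whenever it runs dry during a download, the next 64
 * bytes are fetched with the download urb.
 */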
while (left)
{
if (signal_pending (current))
{
mutex_unlock(&mdc800->io_lock);
return -EINTR;
}
sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left;
if (sts <= 0)
{
/* Not enough data in the buffer */
if (mdc800->state == DOWNLOAD)
{
mdc800->out_count=0;
mdc800->out_ptr=0;
/* Download -> Request new bytes */
mdc800->download_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->download_urb, GFP_KERNEL);
if (retval) {
dev_err(&mdc800->dev->dev,
"Can't submit download urb "
"(retval=%i)\n", retval);
mutex_unlock(&mdc800->io_lock);
return len-left;
}
wait_event_timeout(mdc800->download_wait,
mdc800->downloaded,
msecs_to_jiffies(TO_DOWNLOAD_GET_READY));
mdc800->downloaded = 0;
if (mdc800->download_urb->status != 0)
{
dev_err(&mdc800->dev->dev,
"request download-bytes fails "
"(status=%i)\n",
mdc800->download_urb->status);
mutex_unlock(&mdc800->io_lock);
return len-left;
}
}
else
{
/* No more bytes -> that's an error */
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
}
else
{
/* Copy Bytes */
if (copy_to_user(ptr, &mdc800->out [mdc800->out_ptr],
sts)) {
mutex_unlock(&mdc800->io_lock);
return -EFAULT;
}
ptr+=sts;
left-=sts;
mdc800->out_ptr+=sts;
}
}
mutex_unlock(&mdc800->io_lock);
return len-left;
}
/*
* The device write callback function.
* If an 8-byte command has been received, it is sent to the camera.
* After that the driver either requests the answer or just waits
* until the camera becomes ready.
*/
static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos)
{
size_t i=0;
int retval;
mutex_lock(&mdc800->io_lock);
if (mdc800->state != READY)
{
mutex_unlock(&mdc800->io_lock);
return -EBUSY;
}
if (!mdc800->open )
{
mutex_unlock(&mdc800->io_lock);
return -EBUSY;
}
while (i<len)
{
unsigned char c;
if (signal_pending (current))
{
mutex_unlock(&mdc800->io_lock);
return -EINTR;
}
if(get_user(c, buf+i))
{
mutex_unlock(&mdc800->io_lock);
return -EFAULT;
}
/* check for command start */
if (c == 0x55)
{
mdc800->in_count=0;
mdc800->out_count=0;
mdc800->out_ptr=0;
mdc800->download_left=0;
}
/* save command byte */
if (mdc800->in_count < 8)
{
mdc800->in[mdc800->in_count] = c;
mdc800->in_count++;
}
else
{
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
/* Command buffer full? -> send it to the camera */
if (mdc800->in_count == 8)
{
int answersize;
if (mdc800_usb_waitForIRQ (0,TO_GET_READY))
{
dev_err(&mdc800->dev->dev,
"Camera didn't get ready.\n");
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
answersize=mdc800_getAnswerSize (mdc800->in[1]);
mdc800->state=WORKING;
memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8);
mdc800->write_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL);
if (retval) {
dev_err(&mdc800->dev->dev,
"submitting write urb fails "
"(retval=%i)\n", retval);
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
wait_event_timeout(mdc800->write_wait, mdc800->written,
msecs_to_jiffies(TO_WRITE_GET_READY));
mdc800->written = 0;
if (mdc800->state == WORKING)
{
usb_kill_urb(mdc800->write_urb);
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
switch ((unsigned char) mdc800->in[1])
{
case 0x05: /* Download Image */
case 0x3e: /* Take shot in Fine Mode (WCam Mode) */
if (mdc800->pic_len < 0)
{
dev_err(&mdc800->dev->dev,
"call 0x07 before "
"0x05,0x3e\n");
mdc800->state=READY;
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
mdc800->pic_len=-1;
fallthrough;
case 0x09: /* Download Thumbnail */
mdc800->download_left=answersize+64;
mdc800->state=DOWNLOAD;
mdc800_usb_waitForIRQ (0,TO_DOWNLOAD_GET_BUSY);
break;
default:
if (answersize)
{
if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ))
{
dev_err(&mdc800->dev->dev, "requesting answer from irq fails\n");
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
/* Write dummy data (this is ugly but part of the USB protocol */
/* if you use endpoint 1 as bulk and not as irq) */
memcpy (mdc800->out, mdc800->camera_response,8);
/* This is the interpreted answer */
memcpy (&mdc800->out[8], mdc800->camera_response,8);
mdc800->out_ptr=0;
mdc800->out_count=16;
/* Cache the Imagesize, if command was getImageSize */
if (mdc800->in [1] == (char) 0x07)
{
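/* The camera returns the image size as a 24-bit big-endian
   value in the first three response bytes. */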
mdc800->pic_len = 65536 * (unsigned char) mdc800->camera_response[0]
	+ 256 * (unsigned char) mdc800->camera_response[1]
	+ (unsigned char) mdc800->camera_response[2];
dev_dbg(&mdc800->dev->dev, "cached imagesize = %i\n", mdc800->pic_len);
}
}
else
{
if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND))
{
dev_err(&mdc800->dev->dev, "Command Timeout.\n");
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
}
mdc800->state=READY;
break;
}
}
i++;
}
mutex_unlock(&mdc800->io_lock);
return i;
}
/***************************************************************************
Init and Cleanup this driver (Structs and types)
****************************************************************************/
/* File Operations of this drivers */
static const struct file_operations mdc800_device_ops =
{
.owner = THIS_MODULE,
.read = mdc800_device_read,
.write = mdc800_device_write,
.open = mdc800_device_open,
.release = mdc800_device_release,
.llseek = noop_llseek,
};
static const struct usb_device_id mdc800_table[] = {
{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, mdc800_table);
/*
* USB Driver Struct for this device
*/
static struct usb_driver mdc800_usb_driver =
{
.name = "mdc800",
.probe = mdc800_usb_probe,
.disconnect = mdc800_usb_disconnect,
.id_table = mdc800_table
};
/************************************************************************
Init and Cleanup this driver (Main Functions)
*************************************************************************/
static int __init usb_mdc800_init (void)
{
int retval = -ENODEV;
/* Allocate Memory */
mdc800=kzalloc (sizeof (struct mdc800_data), GFP_KERNEL);
if (!mdc800)
goto cleanup_on_fail;
mdc800->dev = NULL;
mdc800->state=NOT_CONNECTED;
mutex_init (&mdc800->io_lock);
init_waitqueue_head (&mdc800->irq_wait);
init_waitqueue_head (&mdc800->write_wait);
init_waitqueue_head (&mdc800->download_wait);
mdc800->irq_woken = 0;
mdc800->downloaded = 0;
mdc800->written = 0;
mdc800->irq_urb_buffer=kmalloc (8, GFP_KERNEL);
if (!mdc800->irq_urb_buffer)
goto cleanup_on_fail;
mdc800->write_urb_buffer=kmalloc (8, GFP_KERNEL);
if (!mdc800->write_urb_buffer)
goto cleanup_on_fail;
mdc800->download_urb_buffer=kmalloc (64, GFP_KERNEL);
if (!mdc800->download_urb_buffer)
goto cleanup_on_fail;
mdc800->irq_urb=usb_alloc_urb (0, GFP_KERNEL);
if (!mdc800->irq_urb)
goto cleanup_on_fail;
mdc800->download_urb=usb_alloc_urb (0, GFP_KERNEL);
if (!mdc800->download_urb)
goto cleanup_on_fail;
mdc800->write_urb=usb_alloc_urb (0, GFP_KERNEL);
if (!mdc800->write_urb)
goto cleanup_on_fail;
/* Register the driver */
retval = usb_register(&mdc800_usb_driver);
if (retval)
goto cleanup_on_fail;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
return 0;
/* Clean up the driver when something fails */
cleanup_on_fail:
if (mdc800 != NULL)
{
printk(KERN_ERR "mdc800: can't alloc memory!\n");
kfree(mdc800->download_urb_buffer);
kfree(mdc800->write_urb_buffer);
kfree(mdc800->irq_urb_buffer);
usb_free_urb(mdc800->write_urb);
usb_free_urb(mdc800->download_urb);
usb_free_urb(mdc800->irq_urb);
kfree (mdc800);
}
mdc800 = NULL;
return retval;
}
static void __exit usb_mdc800_cleanup (void)
{
usb_deregister (&mdc800_usb_driver);
usb_free_urb (mdc800->irq_urb);
usb_free_urb (mdc800->download_urb);
usb_free_urb (mdc800->write_urb);
kfree (mdc800->irq_urb_buffer);
kfree (mdc800->write_urb_buffer);
kfree (mdc800->download_urb_buffer);
kfree (mdc800);
mdc800 = NULL;
}
module_init (usb_mdc800_init);
module_exit (usb_mdc800_cleanup);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/image/mdc800.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Generic ULPI USB transceiver support
*
* Copyright (C) 2009 Daniel Mack <[email protected]>
*
* Based on sources from
*
* Sascha Hauer <[email protected]>
* Freescale Semiconductors
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
struct ulpi_info {
unsigned int id;
char *name;
};
#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
#define ULPI_INFO(_id, _name) \
{ \
.id = (_id), \
.name = (_name), \
}
/* ULPI hardcoded IDs, used for probing */
static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
ULPI_INFO(ULPI_ID(0x0424, 0x0009), "SMSC USB334x"),
ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
static int ulpi_set_otg_flags(struct usb_phy *phy)
{
unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN |
ULPI_OTG_CTRL_DM_PULLDOWN;
if (phy->flags & ULPI_OTG_ID_PULLUP)
flags |= ULPI_OTG_CTRL_ID_PULLUP;
/*
* ULPI Specification rev.1.1 default
* for Dp/DmPulldown is enabled.
*/
if (phy->flags & ULPI_OTG_DP_PULLDOWN_DIS)
flags &= ~ULPI_OTG_CTRL_DP_PULLDOWN;
if (phy->flags & ULPI_OTG_DM_PULLDOWN_DIS)
flags &= ~ULPI_OTG_CTRL_DM_PULLDOWN;
if (phy->flags & ULPI_OTG_EXTVBUSIND)
flags |= ULPI_OTG_CTRL_EXTVBUSIND;
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
static int ulpi_set_fc_flags(struct usb_phy *phy)
{
unsigned int flags = 0;
/*
* ULPI Specification rev.1.1 default
* for XcvrSelect is Full Speed.
*/
if (phy->flags & ULPI_FC_HS)
flags |= ULPI_FUNC_CTRL_HIGH_SPEED;
else if (phy->flags & ULPI_FC_LS)
flags |= ULPI_FUNC_CTRL_LOW_SPEED;
else if (phy->flags & ULPI_FC_FS4LS)
flags |= ULPI_FUNC_CTRL_FS4LS;
else
flags |= ULPI_FUNC_CTRL_FULL_SPEED;
if (phy->flags & ULPI_FC_TERMSEL)
flags |= ULPI_FUNC_CTRL_TERMSELECT;
/*
* ULPI Specification rev.1.1 default
* for OpMode is Normal Operation.
*/
if (phy->flags & ULPI_FC_OP_NODRV)
flags |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
else if (phy->flags & ULPI_FC_OP_DIS_NRZI)
flags |= ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI;
else if (phy->flags & ULPI_FC_OP_NSYNC_NEOP)
flags |= ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP;
else
flags |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
/*
* ULPI Specification rev.1.1 default
* for SuspendM is Powered.
*/
flags |= ULPI_FUNC_CTRL_SUSPENDM;
return usb_phy_io_write(phy, flags, ULPI_FUNC_CTRL);
}
static int ulpi_set_ic_flags(struct usb_phy *phy)
{
unsigned int flags = 0;
if (phy->flags & ULPI_IC_AUTORESUME)
flags |= ULPI_IFC_CTRL_AUTORESUME;
if (phy->flags & ULPI_IC_EXTVBUS_INDINV)
flags |= ULPI_IFC_CTRL_EXTERNAL_VBUS;
if (phy->flags & ULPI_IC_IND_PASSTHRU)
flags |= ULPI_IFC_CTRL_PASSTHRU;
if (phy->flags & ULPI_IC_PROTECT_DIS)
flags |= ULPI_IFC_CTRL_PROTECT_IFC_DISABLE;
return usb_phy_io_write(phy, flags, ULPI_IFC_CTRL);
}
static int ulpi_set_flags(struct usb_phy *phy)
{
int ret;
ret = ulpi_set_otg_flags(phy);
if (ret)
return ret;
ret = ulpi_set_ic_flags(phy);
if (ret)
return ret;
return ulpi_set_fc_flags(phy);
}
static int ulpi_check_integrity(struct usb_phy *phy)
{
int ret, i;
unsigned int val = 0x55;
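/*
 * Write the alternating bit patterns 0x55 and 0xaa to the scratch
 * register and read them back; a transceiver with a broken
 * interface won't return them intact.
 */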
for (i = 0; i < 2; i++) {
ret = usb_phy_io_write(phy, val, ULPI_SCRATCH);
if (ret < 0)
return ret;
ret = usb_phy_io_read(phy, ULPI_SCRATCH);
if (ret < 0)
return ret;
if (ret != val) {
pr_err("ULPI integrity check: failed!");
return -ENODEV;
}
val = val << 1;
}
pr_info("ULPI integrity check: passed.\n");
return 0;
}
static int ulpi_init(struct usb_phy *phy)
{
int i, vid, pid, ret;
u32 ulpi_id = 0;
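/*
 * The ID registers are read from ULPI_PRODUCT_ID_HIGH downwards,
 * so the product ID ends up in the upper 16 bits of ulpi_id and
 * the vendor ID in the lower 16 bits.
 */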
for (i = 0; i < 4; i++) {
ret = usb_phy_io_read(phy, ULPI_PRODUCT_ID_HIGH - i);
if (ret < 0)
return ret;
ulpi_id = (ulpi_id << 8) | ret;
}
vid = ulpi_id & 0xffff;
pid = ulpi_id >> 16;
pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) {
if (ulpi_ids[i].id == ULPI_ID(vid, pid)) {
pr_info("Found %s ULPI transceiver.\n",
ulpi_ids[i].name);
break;
}
}
ret = ulpi_check_integrity(phy);
if (ret)
return ret;
return ulpi_set_flags(phy);
}
static int ulpi_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct usb_phy *phy = otg->usb_phy;
unsigned int flags = usb_phy_io_read(phy, ULPI_IFC_CTRL);
if (!host) {
otg->host = NULL;
return 0;
}
otg->host = host;
flags &= ~(ULPI_IFC_CTRL_6_PIN_SERIAL_MODE |
ULPI_IFC_CTRL_3_PIN_SERIAL_MODE |
ULPI_IFC_CTRL_CARKITMODE);
if (phy->flags & ULPI_IC_6PIN_SERIAL)
flags |= ULPI_IFC_CTRL_6_PIN_SERIAL_MODE;
else if (phy->flags & ULPI_IC_3PIN_SERIAL)
flags |= ULPI_IFC_CTRL_3_PIN_SERIAL_MODE;
else if (phy->flags & ULPI_IC_CARKIT)
flags |= ULPI_IFC_CTRL_CARKITMODE;
return usb_phy_io_write(phy, flags, ULPI_IFC_CTRL);
}
static int ulpi_set_vbus(struct usb_otg *otg, bool on)
{
struct usb_phy *phy = otg->usb_phy;
unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL);
flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
if (on) {
if (phy->flags & ULPI_OTG_DRVVBUS)
flags |= ULPI_OTG_CTRL_DRVVBUS;
if (phy->flags & ULPI_OTG_DRVVBUS_EXT)
flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
}
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
struct usb_phy_io_ops *ops,
unsigned int flags)
{
phy->label = "ULPI";
phy->flags = flags;
phy->io_ops = ops;
phy->otg = otg;
phy->init = ulpi_init;
otg->usb_phy = phy;
otg->set_host = ulpi_set_host;
otg->set_vbus = ulpi_set_vbus;
}
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
{
struct usb_phy *phy;
struct usb_otg *otg;
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
if (!phy)
return NULL;
otg = kzalloc(sizeof(*otg), GFP_KERNEL);
if (!otg) {
kfree(phy);
return NULL;
}
otg_ulpi_init(phy, otg, ops, flags);
return phy;
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
struct usb_phy *
devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
{
struct usb_phy *phy;
struct usb_otg *otg;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return NULL;
otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
if (!otg) {
devm_kfree(dev, phy);
return NULL;
}
otg_ulpi_init(phy, otg, ops, flags);
return phy;
}
EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
| linux-master | drivers/usb/phy/phy-ulpi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <[email protected]>
* on behalf of DENX Software Engineering GmbH
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/stmp_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/iopoll.h>
#define DRIVER_NAME "mxs_phy"
/* Register Macro */
#define HW_USBPHY_PWD 0x00
#define HW_USBPHY_TX 0x10
#define HW_USBPHY_CTRL 0x30
#define HW_USBPHY_CTRL_SET 0x34
#define HW_USBPHY_CTRL_CLR 0x38
#define HW_USBPHY_DEBUG_SET 0x54
#define HW_USBPHY_DEBUG_CLR 0x58
#define HW_USBPHY_IP 0x90
#define HW_USBPHY_IP_SET 0x94
#define HW_USBPHY_IP_CLR 0x98
#define GM_USBPHY_TX_TXCAL45DP(x) (((x) & 0xf) << 16)
#define GM_USBPHY_TX_TXCAL45DN(x) (((x) & 0xf) << 8)
#define GM_USBPHY_TX_D_CAL(x) (((x) & 0xf) << 0)
/* imx7ulp */
#define HW_USBPHY_PLL_SIC 0xa0
#define HW_USBPHY_PLL_SIC_SET 0xa4
#define HW_USBPHY_PLL_SIC_CLR 0xa8
#define BM_USBPHY_CTRL_SFTRST BIT(31)
#define BM_USBPHY_CTRL_CLKGATE BIT(30)
#define BM_USBPHY_CTRL_OTG_ID_VALUE BIT(27)
#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
#define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22)
#define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21)
#define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20)
#define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19)
#define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18)
#define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15)
#define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14)
#define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1)
#define BM_USBPHY_IP_FIX (BIT(17) | BIT(18))
#define BM_USBPHY_DEBUG_CLKGATE BIT(30)
/* imx7ulp */
#define BM_USBPHY_PLL_LOCK BIT(31)
#define BM_USBPHY_PLL_REG_ENABLE BIT(21)
#define BM_USBPHY_PLL_BYPASS BIT(16)
#define BM_USBPHY_PLL_POWER BIT(12)
#define BM_USBPHY_PLL_EN_USB_CLKS BIT(6)
/* Anatop Registers */
#define ANADIG_ANA_MISC0 0x150
#define ANADIG_ANA_MISC0_SET 0x154
#define ANADIG_ANA_MISC0_CLR 0x158
#define ANADIG_USB1_CHRG_DETECT_SET 0x1b4
#define ANADIG_USB1_CHRG_DETECT_CLR 0x1b8
#define ANADIG_USB2_CHRG_DETECT_SET 0x214
#define ANADIG_USB1_CHRG_DETECT_EN_B BIT(20)
#define ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B BIT(19)
#define ANADIG_USB1_CHRG_DETECT_CHK_CONTACT BIT(18)
#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
#define ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
#define ANADIG_USB1_CHRG_DET_STAT 0x1d0
#define ANADIG_USB1_CHRG_DET_STAT_DM_STATE BIT(2)
#define ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED BIT(1)
#define ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT BIT(0)
#define ANADIG_USB2_VBUS_DET_STAT 0x220
#define ANADIG_USB1_LOOPBACK_SET 0x1e4
#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
#define ANADIG_USB1_LOOPBACK_UTMI_TESTSTART BIT(0)
#define ANADIG_USB2_LOOPBACK_SET 0x244
#define ANADIG_USB2_LOOPBACK_CLR 0x248
#define ANADIG_USB1_MISC 0x1f0
#define ANADIG_USB2_MISC 0x250
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
#define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3)
#define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2)
#define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5)
#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
#define BM_ANADIG_USB1_MISC_RX_VPIN_FS BIT(29)
#define BM_ANADIG_USB1_MISC_RX_VMIN_FS BIT(28)
#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
/* Disconnect the PHY from the controller when there is no vbus */
#define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0)
/*
* The PHY will be in a messy state if there is a wakeup after
* putting the bus into suspend (setting portsc.suspendM) but before
* putting the PHY into low power mode (setting portsc.phcd).
*/
#define MXS_PHY_ABNORMAL_IN_SUSPEND BIT(1)
/*
* SOFs are sent too soon after resuming, which causes a
* disconnection between the host and a high-speed device.
*/
#define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2)
/*
* The IC has bug-fix logic covering MXS_PHY_ABNORMAL_IN_SUSPEND
* and MXS_PHY_SENDING_SOF_TOO_FAST, which are described by the
* flags above; the RTL handles them depending on the IC version.
*/
#define MXS_PHY_NEED_IP_FIX BIT(3)
/* Minimum and maximum values for device tree entries */
#define MXS_PHY_TX_CAL45_MIN 35
#define MXS_PHY_TX_CAL45_MAX 54
#define MXS_PHY_TX_D_CAL_MIN 79
#define MXS_PHY_TX_D_CAL_MAX 119
struct mxs_phy_data {
unsigned int flags;
};
static const struct mxs_phy_data imx23_phy_data = {
.flags = MXS_PHY_ABNORMAL_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST,
};
static const struct mxs_phy_data imx6q_phy_data = {
.flags = MXS_PHY_SENDING_SOF_TOO_FAST |
MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
MXS_PHY_NEED_IP_FIX,
};
static const struct mxs_phy_data imx6sl_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
MXS_PHY_NEED_IP_FIX,
};
static const struct mxs_phy_data vf610_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
MXS_PHY_NEED_IP_FIX,
};
static const struct mxs_phy_data imx6sx_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
};
static const struct mxs_phy_data imx6ul_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
};
static const struct mxs_phy_data imx7ulp_phy_data = {
};
static const struct of_device_id mxs_phy_dt_ids[] = {
{ .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
{ .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
{ .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
{ .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
{ .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, },
{ .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, },
{ .compatible = "fsl,imx7ulp-usbphy", .data = &imx7ulp_phy_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
struct mxs_phy {
struct usb_phy phy;
struct clk *clk;
const struct mxs_phy_data *data;
struct regmap *regmap_anatop;
int port_id;
u32 tx_reg_set;
u32 tx_reg_mask;
};
static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
{
return mxs_phy->data == &imx6q_phy_data;
}
static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy)
{
return mxs_phy->data == &imx6sl_phy_data;
}
static inline bool is_imx7ulp_phy(struct mxs_phy *mxs_phy)
{
return mxs_phy->data == &imx7ulp_phy_data;
}
/*
* The PHY needs a few 32 kHz cycles to switch from the 32 kHz
* clock to the bus clock (AHB/AXI, etc.).
*/
static void mxs_phy_clock_switch_delay(void)
{
usleep_range(300, 400);
}
static void mxs_phy_tx_init(struct mxs_phy *mxs_phy)
{
void __iomem *base = mxs_phy->phy.io_priv;
u32 phytx;
/* Update TX register if there is anything to write */
if (mxs_phy->tx_reg_mask) {
phytx = readl(base + HW_USBPHY_TX);
phytx &= ~mxs_phy->tx_reg_mask;
phytx |= mxs_phy->tx_reg_set;
writel(phytx, base + HW_USBPHY_TX);
}
}
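/*
 * Power the i.MX7ULP PHY PLL up or down. The power-up path enables
 * the PLL regulator, takes the PLL out of bypass, powers it on,
 * waits for the lock bit, and finally ungates the USB clocks;
 * power-down reverses the sequence.
 */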
static int mxs_phy_pll_enable(void __iomem *base, bool enable)
{
int ret = 0;
if (enable) {
u32 value;
writel(BM_USBPHY_PLL_REG_ENABLE, base + HW_USBPHY_PLL_SIC_SET);
writel(BM_USBPHY_PLL_BYPASS, base + HW_USBPHY_PLL_SIC_CLR);
writel(BM_USBPHY_PLL_POWER, base + HW_USBPHY_PLL_SIC_SET);
ret = readl_poll_timeout(base + HW_USBPHY_PLL_SIC,
value, (value & BM_USBPHY_PLL_LOCK) != 0,
100, 10000);
if (ret)
return ret;
writel(BM_USBPHY_PLL_EN_USB_CLKS, base +
HW_USBPHY_PLL_SIC_SET);
} else {
writel(BM_USBPHY_PLL_EN_USB_CLKS, base +
HW_USBPHY_PLL_SIC_CLR);
writel(BM_USBPHY_PLL_POWER, base + HW_USBPHY_PLL_SIC_CLR);
writel(BM_USBPHY_PLL_BYPASS, base + HW_USBPHY_PLL_SIC_SET);
writel(BM_USBPHY_PLL_REG_ENABLE, base + HW_USBPHY_PLL_SIC_CLR);
}
return ret;
}
static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
{
int ret;
void __iomem *base = mxs_phy->phy.io_priv;
if (is_imx7ulp_phy(mxs_phy)) {
ret = mxs_phy_pll_enable(base, true);
if (ret)
return ret;
}
ret = stmp_reset_block(base + HW_USBPHY_CTRL);
if (ret)
goto disable_pll;
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
/*
* USB PHY Ctrl Setting
* - Auto clock/power on
* - Enable full/low speed support
*/
writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
BM_USBPHY_CTRL_ENAUTO_PWRON_PLL |
BM_USBPHY_CTRL_ENUTMILEVEL2 |
BM_USBPHY_CTRL_ENUTMILEVEL3,
base + HW_USBPHY_CTRL_SET);
if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
if (mxs_phy->regmap_anatop) {
unsigned int reg = mxs_phy->port_id ?
ANADIG_USB1_CHRG_DETECT_SET :
ANADIG_USB2_CHRG_DETECT_SET;
/*
* The external charger detector needs to be disabled,
* or the signal at DP will be poor
*/
regmap_write(mxs_phy->regmap_anatop, reg,
ANADIG_USB1_CHRG_DETECT_EN_B |
ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
}
mxs_phy_tx_init(mxs_phy);
return 0;
disable_pll:
if (is_imx7ulp_phy(mxs_phy))
mxs_phy_pll_enable(base, false);
return ret;
}
/* Return true if vbus is present */
static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
{
unsigned int vbus_value = 0;
if (!mxs_phy->regmap_anatop)
return false;
if (mxs_phy->port_id == 0)
regmap_read(mxs_phy->regmap_anatop,
ANADIG_USB1_VBUS_DET_STAT,
&vbus_value);
else if (mxs_phy->port_id == 1)
regmap_read(mxs_phy->regmap_anatop,
ANADIG_USB2_VBUS_DET_STAT,
&vbus_value);
if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)
return true;
else
return false;
}
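/*
 * Force (or release) a disconnection between the PHY and the
 * controller by toggling the UTMI loopback/test bits; the DEBUG
 * clock gate is opened while the test bits are active.
 */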
static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
{
void __iomem *base = mxs_phy->phy.io_priv;
u32 reg;
if (disconnect)
writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
base + HW_USBPHY_DEBUG_CLR);
if (mxs_phy->port_id == 0) {
reg = disconnect ? ANADIG_USB1_LOOPBACK_SET
: ANADIG_USB1_LOOPBACK_CLR;
regmap_write(mxs_phy->regmap_anatop, reg,
BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 |
BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN);
} else if (mxs_phy->port_id == 1) {
reg = disconnect ? ANADIG_USB2_LOOPBACK_SET
: ANADIG_USB2_LOOPBACK_CLR;
regmap_write(mxs_phy->regmap_anatop, reg,
BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 |
BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN);
}
if (!disconnect)
writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
base + HW_USBPHY_DEBUG_SET);
/* Delay a bit so the controller sees the line state as SE0 */
if (disconnect)
usleep_range(500, 1000);
}
static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
{
return IS_ENABLED(CONFIG_USB_OTG) &&
mxs_phy->phy.last_event == USB_EVENT_ID;
}
static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
{
bool vbus_is_on = false;
enum usb_phy_events last_event = mxs_phy->phy.last_event;
/* If the SoCs don't need to disconnect line without vbus, quit */
if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
return;
/* If the SoCs don't have anatop, quit */
if (!mxs_phy->regmap_anatop)
return;
vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
if (on && ((!vbus_is_on && !mxs_phy_is_otg_host(mxs_phy))
|| (last_event == USB_EVENT_VBUS)))
__mxs_phy_disconnect_line(mxs_phy, true);
else
__mxs_phy_disconnect_line(mxs_phy, false);
}
static int mxs_phy_init(struct usb_phy *phy)
{
int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
return mxs_phy_hw_init(mxs_phy);
}
static void mxs_phy_shutdown(struct usb_phy *phy)
{
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
BM_USBPHY_CTRL_ENIDCHG_WKUP |
BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
BM_USBPHY_CTRL_ENAUTO_PWRON_PLL;
writel(value, phy->io_priv + HW_USBPHY_CTRL_CLR);
writel(0xffffffff, phy->io_priv + HW_USBPHY_PWD);
writel(BM_USBPHY_CTRL_CLKGATE,
phy->io_priv + HW_USBPHY_CTRL_SET);
if (is_imx7ulp_phy(mxs_phy))
mxs_phy_pll_enable(phy->io_priv, false);
clk_disable_unprepare(mxs_phy->clk);
}
static bool mxs_phy_is_low_speed_connection(struct mxs_phy *mxs_phy)
{
unsigned int line_state;
/* bit definition is the same for all controllers */
unsigned int dp_bit = BM_ANADIG_USB1_MISC_RX_VPIN_FS,
dm_bit = BM_ANADIG_USB1_MISC_RX_VMIN_FS;
unsigned int reg = ANADIG_USB1_MISC;
/* If the SoCs don't have anatop, quit */
if (!mxs_phy->regmap_anatop)
return false;
if (mxs_phy->port_id == 0)
reg = ANADIG_USB1_MISC;
else if (mxs_phy->port_id == 1)
reg = ANADIG_USB2_MISC;
regmap_read(mxs_phy->regmap_anatop, reg, &line_state);
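/* Only D- pulled high with D+ low indicates a low-speed device */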
if ((line_state & (dp_bit | dm_bit)) == dm_bit)
return true;
else
return false;
}
static int mxs_phy_suspend(struct usb_phy *x, int suspend)
{
int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(x);
bool low_speed_connection, vbus_is_on;
low_speed_connection = mxs_phy_is_low_speed_connection(mxs_phy);
vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
if (suspend) {
/*
* FIXME: Do not power down the RXPWD1PT1 bit for a low-speed
* connection. Low-speed connections can have problems in very
* rare cases during the USB suspend and resume process.
*/
if (low_speed_connection && vbus_is_on) {
/*
* If the value written to the PWD register is not 0xffffffff,
* several 32 kHz cycles are needed.
*/
mxs_phy_clock_switch_delay();
writel(0xffbfffff, x->io_priv + HW_USBPHY_PWD);
} else {
writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
}
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_SET);
clk_disable_unprepare(mxs_phy->clk);
} else {
mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_CLR);
writel(0, x->io_priv + HW_USBPHY_PWD);
}
return 0;
}
static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled)
{
struct mxs_phy *mxs_phy = to_mxs_phy(x);
u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
BM_USBPHY_CTRL_ENIDCHG_WKUP;
if (enabled) {
mxs_phy_disconnect_line(mxs_phy, true);
writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET);
} else {
writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR);
mxs_phy_disconnect_line(mxs_phy, false);
}
return 0;
}
static int mxs_phy_on_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
dev_dbg(phy->dev, "%s device has connected\n",
(speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
if (speed == USB_SPEED_HIGH)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
phy->io_priv + HW_USBPHY_CTRL_SET);
return 0;
}
static int mxs_phy_on_disconnect(struct usb_phy *phy,
enum usb_device_speed speed)
{
dev_dbg(phy->dev, "%s device has disconnected\n",
(speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
/* Sometimes, the speed is not high speed when the error occurs */
if (readl(phy->io_priv + HW_USBPHY_CTRL) &
BM_USBPHY_CTRL_ENHOSTDISCONDETECT)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
phy->io_priv + HW_USBPHY_CTRL_CLR);
return 0;
}
#define MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT 100
static int mxs_charger_data_contact_detect(struct mxs_phy *x)
{
struct regmap *regmap = x->regmap_anatop;
int i, stable_contact_count = 0;
u32 val;
/* Check if vbus is valid */
regmap_read(regmap, ANADIG_USB1_VBUS_DET_STAT, &val);
if (!(val & ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)) {
dev_err(x->phy.dev, "vbus is not valid\n");
return -EINVAL;
}
/* Enable charger detector */
regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
ANADIG_USB1_CHRG_DETECT_EN_B);
/*
* - Do not check whether a charger is connected to the USB port
* - Check whether the USB plug's data pins have made contact
*/
regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
/* Check if plug is connected */
for (i = 0; i < MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT; i++) {
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (val & ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT) {
stable_contact_count++;
if (stable_contact_count > 5)
/* Data pin makes contact */
break;
else
usleep_range(5000, 10000);
} else {
stable_contact_count = 0;
usleep_range(5000, 6000);
}
}
if (i == MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT) {
dev_err(x->phy.dev,
"Data pin can't make good contact.\n");
/* Disable charger detector */
regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
ANADIG_USB1_CHRG_DETECT_EN_B |
ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
return -ENXIO;
}
return 0;
}
static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
{
struct regmap *regmap = x->regmap_anatop;
enum usb_charger_type chgr_type = UNKNOWN_TYPE;
u32 val;
/*
* - Check whether a charger is connected to the USB port
* - Do not check whether the USB plug's data pins have made
* contact
*/
regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
msleep(100);
/* Check if it is a charger */
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
chgr_type = SDP_TYPE;
dev_dbg(x->phy.dev, "It is a standard downstream port\n");
}
/* Disable charger detector */
regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
ANADIG_USB1_CHRG_DETECT_EN_B |
ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
return chgr_type;
}
/*
* This must be called after DP has been pulled up; the DM state
* then differentiates a DCP from a CDP.
*/
static enum usb_charger_type mxs_charger_secondary_detection(struct mxs_phy *x)
{
struct regmap *regmap = x->regmap_anatop;
int val;
msleep(80);
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (val & ANADIG_USB1_CHRG_DET_STAT_DM_STATE) {
dev_dbg(x->phy.dev, "It is a dedicate charging port\n");
return DCP_TYPE;
} else {
dev_dbg(x->phy.dev, "It is a charging downstream port\n");
return CDP_TYPE;
}
}
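/*
 * Charger detection loosely follows the USB Battery Charging
 * specification: data contact detection first, then primary
 * detection to tell an SDP from a charging port, then secondary
 * detection to tell a DCP from a CDP.
 */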
static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
{
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
struct regmap *regmap = mxs_phy->regmap_anatop;
void __iomem *base = phy->io_priv;
enum usb_charger_type chgr_type = UNKNOWN_TYPE;
if (!regmap)
return UNKNOWN_TYPE;
if (mxs_charger_data_contact_detect(mxs_phy))
return chgr_type;
chgr_type = mxs_charger_primary_detection(mxs_phy);
if (chgr_type != SDP_TYPE) {
/* Pull up DP via the UTMI test mode */
writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
base + HW_USBPHY_DEBUG_CLR);
regmap_write(regmap, ANADIG_USB1_LOOPBACK_SET,
ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
chgr_type = mxs_charger_secondary_detection(mxs_phy);
/* Stop the test */
regmap_write(regmap, ANADIG_USB1_LOOPBACK_CLR,
ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
base + HW_USBPHY_DEBUG_SET);
}
return chgr_type;
}
static int mxs_phy_probe(struct platform_device *pdev)
{
void __iomem *base;
struct clk *clk;
struct mxs_phy *mxs_phy;
int ret;
struct device_node *np = pdev->dev.of_node;
u32 val;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev,
"can't get the clock, err=%ld", PTR_ERR(clk));
return PTR_ERR(clk);
}
mxs_phy = devm_kzalloc(&pdev->dev, sizeof(*mxs_phy), GFP_KERNEL);
if (!mxs_phy)
return -ENOMEM;
/* Some SoCs don't have anatop registers */
if (of_property_present(np, "fsl,anatop")) {
mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle
(np, "fsl,anatop");
if (IS_ERR(mxs_phy->regmap_anatop)) {
dev_dbg(&pdev->dev,
"failed to find regmap for anatop\n");
return PTR_ERR(mxs_phy->regmap_anatop);
}
}
/* Precompute which bits of the TX register are to be updated, if any */
if (!of_property_read_u32(np, "fsl,tx-cal-45-dn-ohms", &val) &&
val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
/* Scale to a 4-bit value */
val = (MXS_PHY_TX_CAL45_MAX - val) * 0xF
/ (MXS_PHY_TX_CAL45_MAX - MXS_PHY_TX_CAL45_MIN);
mxs_phy->tx_reg_mask |= GM_USBPHY_TX_TXCAL45DN(~0);
mxs_phy->tx_reg_set |= GM_USBPHY_TX_TXCAL45DN(val);
}
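/*
 * E.g. a requested 45 ohms maps to (54 - 45) * 0xF / (54 - 35)
 * = 135 / 19 = 7 with integer division, so 0x7 is written into
 * the TXCAL45DN field.
 */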
if (!of_property_read_u32(np, "fsl,tx-cal-45-dp-ohms", &val) &&
val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
/* Scale to a 4-bit value. */
val = (MXS_PHY_TX_CAL45_MAX - val) * 0xF
/ (MXS_PHY_TX_CAL45_MAX - MXS_PHY_TX_CAL45_MIN);
mxs_phy->tx_reg_mask |= GM_USBPHY_TX_TXCAL45DP(~0);
mxs_phy->tx_reg_set |= GM_USBPHY_TX_TXCAL45DP(val);
}
if (!of_property_read_u32(np, "fsl,tx-d-cal", &val) &&
val >= MXS_PHY_TX_D_CAL_MIN && val <= MXS_PHY_TX_D_CAL_MAX) {
/* Scale to a 4-bit value. Round up the values and heavily
* weight the rounding by adding 2/3 of the denominator.
*/
val = ((MXS_PHY_TX_D_CAL_MAX - val) * 0xF
+ (MXS_PHY_TX_D_CAL_MAX - MXS_PHY_TX_D_CAL_MIN) * 2/3)
/ (MXS_PHY_TX_D_CAL_MAX - MXS_PHY_TX_D_CAL_MIN);
mxs_phy->tx_reg_mask |= GM_USBPHY_TX_D_CAL(~0);
mxs_phy->tx_reg_set |= GM_USBPHY_TX_D_CAL(val);
}
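/*
 * Worked example for the rounding above: fsl,tx-d-cal = 100 gives
 * ((119 - 100) * 0xF + (119 - 79) * 2/3) / (119 - 79)
 * = (285 + 26) / 40 = 7; for 118 it gives (15 + 26) / 40 = 1
 * instead of the truncated 0, i.e. the 2/3 bias rounds up.
 */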
ret = of_alias_get_id(np, "usbphy");
if (ret < 0)
dev_dbg(&pdev->dev, "failed to get alias id, errno %d\n", ret);
mxs_phy->port_id = ret;
mxs_phy->phy.io_priv = base;
mxs_phy->phy.dev = &pdev->dev;
mxs_phy->phy.label = DRIVER_NAME;
mxs_phy->phy.init = mxs_phy_init;
mxs_phy->phy.shutdown = mxs_phy_shutdown;
mxs_phy->phy.set_suspend = mxs_phy_suspend;
mxs_phy->phy.notify_connect = mxs_phy_on_connect;
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
mxs_phy->phy.charger_detect = mxs_phy_charger_detect;
mxs_phy->clk = clk;
mxs_phy->data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mxs_phy);
device_set_wakeup_capable(&pdev->dev, true);
return usb_add_phy_dev(&mxs_phy->phy);
}
static void mxs_phy_remove(struct platform_device *pdev)
{
struct mxs_phy *mxs_phy = platform_get_drvdata(pdev);
usb_remove_phy(&mxs_phy->phy);
}
#ifdef CONFIG_PM_SLEEP
static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
{
unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
/* If the SoCs don't have anatop, quit */
if (!mxs_phy->regmap_anatop)
return;
if (is_imx6q_phy(mxs_phy))
regmap_write(mxs_phy->regmap_anatop, reg,
BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
else if (is_imx6sl_phy(mxs_phy))
regmap_write(mxs_phy->regmap_anatop,
reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
}
static int mxs_phy_system_suspend(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
return 0;
}
static int mxs_phy_system_resume(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend,
mxs_phy_system_resume);
static struct platform_driver mxs_phy_driver = {
.probe = mxs_phy_probe,
.remove_new = mxs_phy_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_phy_dt_ids,
.pm = &mxs_phy_pm,
},
};
static int __init mxs_phy_module_init(void)
{
return platform_driver_register(&mxs_phy_driver);
}
postcore_initcall(mxs_phy_module_init);
static void __exit mxs_phy_module_exit(void)
{
platform_driver_unregister(&mxs_phy_driver);
}
module_exit(mxs_phy_module_exit);
MODULE_ALIAS("platform:mxs-usb-phy");
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_AUTHOR("Richard Zhao <[email protected]>");
MODULE_DESCRIPTION("Freescale MXS USB PHY driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-mxs-usb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB transceiver driver for AB8500 family chips
*
* Copyright (C) 2010-2013 ST-Ericsson AB
* Mian Yousaf Kaukab <[email protected]>
* Avinash Kumar <[email protected]>
* Thirupathi Chippakurthy <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/otg.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/usb/musb-ux500.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
/* Bank AB8500_SYS_CTRL2_BLOCK */
#define AB8500_MAIN_WD_CTRL_REG 0x01
/* Bank AB8500_USB */
#define AB8500_USB_LINE_STAT_REG 0x80
#define AB8505_USB_LINE_STAT_REG 0x94
#define AB8500_USB_PHY_CTRL_REG 0x8A
/* Bank AB8500_DEVELOPMENT */
#define AB8500_BANK12_ACCESS 0x00
/* Bank AB8500_DEBUG */
#define AB8500_USB_PHY_TUNE1 0x05
#define AB8500_USB_PHY_TUNE2 0x06
#define AB8500_USB_PHY_TUNE3 0x07
/* Bank AB8500_INTERRUPT */
#define AB8500_IT_SOURCE2_REG 0x01
#define AB8500_BIT_OTG_STAT_ID (1 << 0)
#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
#define AB8500_BIT_SOURCE2_VBUSDET (1 << 7)
#define AB8500_WD_KICK_DELAY_US 100 /* usec */
#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
#define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec */
/* Usb line status register */
enum ab8500_usb_link_status {
USB_LINK_NOT_CONFIGURED_8500 = 0,
USB_LINK_STD_HOST_NC_8500,
USB_LINK_STD_HOST_C_NS_8500,
USB_LINK_STD_HOST_C_S_8500,
USB_LINK_HOST_CHG_NM_8500,
USB_LINK_HOST_CHG_HS_8500,
USB_LINK_HOST_CHG_HS_CHIRP_8500,
USB_LINK_DEDICATED_CHG_8500,
USB_LINK_ACA_RID_A_8500,
USB_LINK_ACA_RID_B_8500,
USB_LINK_ACA_RID_C_NM_8500,
USB_LINK_ACA_RID_C_HS_8500,
USB_LINK_ACA_RID_C_HS_CHIRP_8500,
USB_LINK_HM_IDGND_8500,
USB_LINK_RESERVED_8500,
USB_LINK_NOT_VALID_LINK_8500,
};
enum ab8505_usb_link_status {
USB_LINK_NOT_CONFIGURED_8505 = 0,
USB_LINK_STD_HOST_NC_8505,
USB_LINK_STD_HOST_C_NS_8505,
USB_LINK_STD_HOST_C_S_8505,
USB_LINK_CDP_8505,
USB_LINK_RESERVED0_8505,
USB_LINK_RESERVED1_8505,
USB_LINK_DEDICATED_CHG_8505,
USB_LINK_ACA_RID_A_8505,
USB_LINK_ACA_RID_B_8505,
USB_LINK_ACA_RID_C_NM_8505,
USB_LINK_RESERVED2_8505,
USB_LINK_RESERVED3_8505,
USB_LINK_HM_IDGND_8505,
USB_LINK_CHARGERPORT_NOT_OK_8505,
USB_LINK_CHARGER_DM_HIGH_8505,
USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8505,
USB_LINK_STD_UPSTREAM_NO_IDGNG_NO_VBUS_8505,
USB_LINK_STD_UPSTREAM_8505,
USB_LINK_CHARGER_SE1_8505,
USB_LINK_CARKIT_CHGR_1_8505,
USB_LINK_CARKIT_CHGR_2_8505,
USB_LINK_ACA_DOCK_CHGR_8505,
USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_8505,
USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_8505,
USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505,
USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505,
USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8505,
};
enum ab8500_usb_mode {
USB_IDLE = 0,
USB_PERIPHERAL,
USB_HOST,
USB_DEDICATED_CHG,
USB_UART
};
/* Register USB_LINK_STATUS interrupt */
#define AB8500_USB_FLAG_USE_LINK_STATUS_IRQ (1 << 0)
/* Register ID_WAKEUP_F interrupt */
#define AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ (1 << 1)
/* Register VBUS_DET_F interrupt */
#define AB8500_USB_FLAG_USE_VBUS_DET_IRQ (1 << 2)
/* Driver is using the ab-iddet driver */
#define AB8500_USB_FLAG_USE_AB_IDDET (1 << 3)
/* Enable setting regulators voltage */
#define AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE (1 << 4)
struct ab8500_usb {
struct usb_phy phy;
struct device *dev;
struct ab8500 *ab8500;
unsigned vbus_draw;
struct work_struct phy_dis_work;
enum ab8500_usb_mode mode;
struct clk *sysclk;
struct regulator *v_ape;
struct regulator *v_musb;
struct regulator *v_ulpi;
int saved_v_ulpi;
int previous_link_status_state;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_sleep;
bool enabled_charging_detection;
unsigned int flags;
};
static inline struct ab8500_usb *phy_to_ab(struct usb_phy *x)
{
return container_of(x, struct ab8500_usb, phy);
}
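/*
 * Kick the AB8500 main watchdog once: enable it, kick it, then
 * disable it again, with the delays the hardware requires between
 * the steps.
 */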
static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
{
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
AB8500_MAIN_WD_CTRL_REG,
AB8500_BIT_WD_CTRL_ENABLE);
udelay(AB8500_WD_KICK_DELAY_US);
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
AB8500_MAIN_WD_CTRL_REG,
(AB8500_BIT_WD_CTRL_ENABLE
| AB8500_BIT_WD_CTRL_KICK));
udelay(AB8500_WD_V11_DISABLE_DELAY_US);
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
AB8500_MAIN_WD_CTRL_REG,
0);
}
static void ab8500_usb_regulator_enable(struct ab8500_usb *ab)
{
int ret, volt;
ret = regulator_enable(ab->v_ape);
if (ret)
dev_err(ab->dev, "Failed to enable v-ape\n");
if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
ab->saved_v_ulpi = regulator_get_voltage(ab->v_ulpi);
if (ab->saved_v_ulpi < 0)
dev_err(ab->dev, "Failed to get v_ulpi voltage\n");
ret = regulator_set_voltage(ab->v_ulpi, 1300000, 1350000);
if (ret < 0)
dev_err(ab->dev, "Failed to set the Vintcore to 1.3V, ret=%d\n",
ret);
ret = regulator_set_load(ab->v_ulpi, 28000);
if (ret < 0)
dev_err(ab->dev, "Failed to set optimum mode (ret=%d)\n",
ret);
}
ret = regulator_enable(ab->v_ulpi);
if (ret)
dev_err(ab->dev, "Failed to enable vddulpivio18\n");
if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
volt = regulator_get_voltage(ab->v_ulpi);
if ((volt != 1300000) && (volt != 1350000))
dev_err(ab->dev, "Vintcore is not set to 1.3V volt=%d\n",
volt);
}
ret = regulator_enable(ab->v_musb);
if (ret)
dev_err(ab->dev, "Failed to enable musb_1v8\n");
}
static void ab8500_usb_regulator_disable(struct ab8500_usb *ab)
{
int ret;
regulator_disable(ab->v_musb);
regulator_disable(ab->v_ulpi);
/* USB is not the only consumer of Vintcore, restore old settings */
if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
if (ab->saved_v_ulpi > 0) {
ret = regulator_set_voltage(ab->v_ulpi,
ab->saved_v_ulpi, ab->saved_v_ulpi);
if (ret < 0)
dev_err(ab->dev, "Failed to set the Vintcore to %duV, ret=%d\n",
ab->saved_v_ulpi, ret);
}
ret = regulator_set_load(ab->v_ulpi, 0);
if (ret < 0)
dev_err(ab->dev, "Failed to set optimum mode (ret=%d)\n",
ret);
}
regulator_disable(ab->v_ape);
}
static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab, u8 bit)
{
/* Workaround for v2.0 bug # 31952 */
if (is_ab8500_2p0(ab->ab8500)) {
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
bit, bit);
udelay(AB8500_V20_31952_DISABLE_DELAY_US);
}
}
static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
{
u8 bit;
bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
AB8500_BIT_PHY_CTRL_DEVICE_EN;
/* mux and configure USB pins to DEFAULT state */
ab->pinctrl = pinctrl_get_select(ab->dev, PINCTRL_STATE_DEFAULT);
if (IS_ERR(ab->pinctrl))
dev_err(ab->dev, "could not get/set default pinstate\n");
if (clk_prepare_enable(ab->sysclk))
dev_err(ab->dev, "can't prepare/enable clock\n");
ab8500_usb_regulator_enable(ab);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
bit, bit);
}
static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
{
u8 bit;
bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
AB8500_BIT_PHY_CTRL_DEVICE_EN;
ab8500_usb_wd_linkstatus(ab, bit);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
bit, 0);
/* Needed to disable the phy. */
ab8500_usb_wd_workaround(ab);
clk_disable_unprepare(ab->sysclk);
ab8500_usb_regulator_disable(ab);
if (!IS_ERR(ab->pinctrl)) {
/* configure USB pins to SLEEP state */
ab->pins_sleep = pinctrl_lookup_state(ab->pinctrl,
PINCTRL_STATE_SLEEP);
if (IS_ERR(ab->pins_sleep))
dev_dbg(ab->dev, "could not get sleep pinstate\n");
else if (pinctrl_select_state(ab->pinctrl, ab->pins_sleep))
dev_err(ab->dev, "could not set pins to sleep state\n");
/*
* as USB pins are shared with iddet, release them to allow
* iddet to request them
*/
pinctrl_put(ab->pinctrl);
}
}
#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true)
#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true)
#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false)
#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false)
static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
enum ab8505_usb_link_status lsts)
{
enum ux500_musb_vbus_id_status event = 0;
dev_dbg(ab->dev, "ab8505_usb_link_status_update %d\n", lsts);
/*
* Spurious link_status interrupts are seen when a device in the
* RIDA state is disconnected
*/
if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_8505 &&
(lsts == USB_LINK_STD_HOST_NC_8505))
return 0;
ab->previous_link_status_state = lsts;
switch (lsts) {
case USB_LINK_ACA_RID_B_8505:
event = UX500_MUSB_RIDB;
fallthrough;
case USB_LINK_NOT_CONFIGURED_8505:
case USB_LINK_RESERVED0_8505:
case USB_LINK_RESERVED1_8505:
case USB_LINK_RESERVED2_8505:
case USB_LINK_RESERVED3_8505:
ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
if (event != UX500_MUSB_RIDB)
event = UX500_MUSB_NONE;
/*
* Fallback to default B_IDLE as nothing
* is connected
*/
ab->phy.otg->state = OTG_STATE_B_IDLE;
usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_8505:
event = UX500_MUSB_RIDC;
fallthrough;
case USB_LINK_STD_HOST_NC_8505:
case USB_LINK_STD_HOST_C_NS_8505:
case USB_LINK_STD_HOST_C_S_8505:
case USB_LINK_CDP_8505:
if (ab->mode == USB_IDLE) {
ab->mode = USB_PERIPHERAL;
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
break;
case USB_LINK_ACA_RID_A_8505:
case USB_LINK_ACA_DOCK_CHGR_8505:
event = UX500_MUSB_RIDA;
fallthrough;
case USB_LINK_HM_IDGND_8505:
if (ab->mode == USB_IDLE) {
ab->mode = USB_HOST;
ab8500_usb_host_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
}
ab->phy.otg->default_a = true;
if (event != UX500_MUSB_RIDA)
event = UX500_MUSB_ID;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
break;
case USB_LINK_DEDICATED_CHG_8505:
ab->mode = USB_DEDICATED_CHG;
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
/*
* FIXME: For now we rely on the boot firmware to set up the necessary
* PHY/pin configuration for UART mode.
*
* AB8505 does not seem to report any status change for UART cables,
* possibly because it cannot detect them autonomously.
* We may need to measure the ID resistance manually to reliably
* detect UART cables after bootup.
*/
case USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505:
case USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505:
if (ab->mode == USB_IDLE) {
ab->mode = USB_UART;
ab8500_usb_peri_phy_en(ab);
}
break;
default:
break;
}
return 0;
}
static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
enum ab8500_usb_link_status lsts)
{
enum ux500_musb_vbus_id_status event = 0;
dev_dbg(ab->dev, "ab8500_usb_link_status_update %d\n", lsts);
/*
* Spurious link_status interrupts are seen when a device is
* disconnected in the IDGND or RIDA state.
*/
if (ab->previous_link_status_state == USB_LINK_HM_IDGND_8500 &&
(lsts == USB_LINK_STD_HOST_C_NS_8500 ||
lsts == USB_LINK_STD_HOST_NC_8500))
return 0;
if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_8500 &&
lsts == USB_LINK_STD_HOST_NC_8500)
return 0;
ab->previous_link_status_state = lsts;
switch (lsts) {
case USB_LINK_ACA_RID_B_8500:
event = UX500_MUSB_RIDB;
fallthrough;
case USB_LINK_NOT_CONFIGURED_8500:
case USB_LINK_NOT_VALID_LINK_8500:
ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
if (event != UX500_MUSB_RIDB)
event = UX500_MUSB_NONE;
/* Fall back to the default B_IDLE state as nothing is connected */
ab->phy.otg->state = OTG_STATE_B_IDLE;
usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_8500:
case USB_LINK_ACA_RID_C_HS_8500:
case USB_LINK_ACA_RID_C_HS_CHIRP_8500:
event = UX500_MUSB_RIDC;
fallthrough;
case USB_LINK_STD_HOST_NC_8500:
case USB_LINK_STD_HOST_C_NS_8500:
case USB_LINK_STD_HOST_C_S_8500:
case USB_LINK_HOST_CHG_NM_8500:
case USB_LINK_HOST_CHG_HS_8500:
case USB_LINK_HOST_CHG_HS_CHIRP_8500:
if (ab->mode == USB_IDLE) {
ab->mode = USB_PERIPHERAL;
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
break;
case USB_LINK_ACA_RID_A_8500:
event = UX500_MUSB_RIDA;
fallthrough;
case USB_LINK_HM_IDGND_8500:
if (ab->mode == USB_IDLE) {
ab->mode = USB_HOST;
ab8500_usb_host_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
}
ab->phy.otg->default_a = true;
if (event != UX500_MUSB_RIDA)
event = UX500_MUSB_ID;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
break;
case USB_LINK_DEDICATED_CHG_8500:
ab->mode = USB_DEDICATED_CHG;
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
case USB_LINK_RESERVED_8500:
break;
}
return 0;
}
/*
* Connection Sequence:
* 1. Link Status Interrupt
* 2. Enable AB clock
* 3. Enable AB regulators
* 4. Enable USB phy
* 5. Reset the musb controller
* 6. Switch the ULPI GPIO pins to function mode
* 7. Enable the musb Peripheral5 clock
* 8. Restore MUSB context
*/
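/*
* Decode sketch for the link-status register reads below (the example
* value is hypothetical): on AB8500 the 4-bit field at bits [6:3] is
* extracted as lsts = (reg >> 3) & 0x0F, so reg = 0x28 yields lsts = 5.
* AB8505 uses a wider 5-bit field, hence its 0x1F mask.
*/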
static int abx500_usb_link_status_update(struct ab8500_usb *ab)
{
u8 reg;
int ret = 0;
if (is_ab8500(ab->ab8500)) {
enum ab8500_usb_link_status lsts;
ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_LINE_STAT_REG, &reg);
if (ret < 0)
return ret;
lsts = (reg >> 3) & 0x0F;
ret = ab8500_usb_link_status_update(ab, lsts);
} else if (is_ab8505(ab->ab8500)) {
enum ab8505_usb_link_status lsts;
ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg);
if (ret < 0)
return ret;
lsts = (reg >> 3) & 0x1F;
ret = ab8505_usb_link_status_update(ab, lsts);
}
return ret;
}
/*
* Disconnection Sequence:
* 1. Disconnect Interrupt
* 2. Disable regulators
* 3. Disable AB clock
* 4. Disable the Phy
* 5. Link Status Interrupt
* 6. Disable Musb Clock
*/
static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
enum usb_phy_events event = USB_EVENT_NONE;
/* Link status will not be updated until the PHY is disabled. */
if (ab->mode == USB_HOST) {
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
ab8500_usb_host_phy_dis(ab);
ab->mode = USB_IDLE;
}
if (ab->mode == USB_PERIPHERAL) {
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
ab8500_usb_peri_phy_dis(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_CLEAN, &ab->vbus_draw);
ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
}
if (ab->mode == USB_UART) {
ab8500_usb_peri_phy_dis(ab);
ab->mode = USB_IDLE;
}
if (is_ab8500_2p0(ab->ab8500)) {
if (ab->mode == USB_DEDICATED_CHG) {
ab8500_usb_wd_linkstatus(ab,
AB8500_BIT_PHY_CTRL_DEVICE_EN);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
AB8500_BIT_PHY_CTRL_DEVICE_EN, 0);
}
}
return IRQ_HANDLED;
}
static irqreturn_t ab8500_usb_link_status_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *)data;
abx500_usb_link_status_update(ab);
return IRQ_HANDLED;
}
static void ab8500_usb_phy_disable_work(struct work_struct *work)
{
struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
phy_dis_work);
if (!ab->phy.otg->host)
ab8500_usb_host_phy_dis(ab);
if (!ab->phy.otg->gadget)
ab8500_usb_peri_phy_dis(ab);
}
static int ab8500_usb_set_suspend(struct usb_phy *x, int suspend)
{
/* TODO */
return 0;
}
static int ab8500_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct ab8500_usb *ab;
if (!otg)
return -ENODEV;
ab = phy_to_ab(otg->usb_phy);
ab->phy.otg->gadget = gadget;
/*
* Some drivers call this function in atomic context. Do not
* update the ab8500 registers directly until this is fixed.
*/
if ((ab->mode != USB_IDLE) && !gadget) {
ab->mode = USB_IDLE;
schedule_work(&ab->phy_dis_work);
}
return 0;
}
static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct ab8500_usb *ab;
if (!otg)
return -ENODEV;
ab = phy_to_ab(otg->usb_phy);
ab->phy.otg->host = host;
/*
* Some drivers call this function in atomic context. Do not
* update the ab8500 registers directly until this is fixed.
*/
if ((ab->mode != USB_IDLE) && !host) {
ab->mode = USB_IDLE;
schedule_work(&ab->phy_dis_work);
}
return 0;
}
static void ab8500_usb_restart_phy(struct ab8500_usb *ab)
{
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
AB8500_BIT_PHY_CTRL_DEVICE_EN,
AB8500_BIT_PHY_CTRL_DEVICE_EN);
udelay(100);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
AB8500_BIT_PHY_CTRL_DEVICE_EN,
0);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
AB8500_BIT_PHY_CTRL_HOST_EN,
AB8500_BIT_PHY_CTRL_HOST_EN);
udelay(100);
abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_PHY_CTRL_REG,
AB8500_BIT_PHY_CTRL_HOST_EN,
0);
}
static int ab8500_usb_regulator_get(struct ab8500_usb *ab)
{
int err;
ab->v_ape = devm_regulator_get(ab->dev, "v-ape");
if (IS_ERR(ab->v_ape)) {
dev_err(ab->dev, "Could not get v-ape supply\n");
err = PTR_ERR(ab->v_ape);
return err;
}
ab->v_ulpi = devm_regulator_get(ab->dev, "vddulpivio18");
if (IS_ERR(ab->v_ulpi)) {
dev_err(ab->dev, "Could not get vddulpivio18 supply\n");
err = PTR_ERR(ab->v_ulpi);
return err;
}
ab->v_musb = devm_regulator_get(ab->dev, "musb_1v8");
if (IS_ERR(ab->v_musb)) {
dev_err(ab->dev, "Could not get musb_1v8 supply\n");
err = PTR_ERR(ab->v_musb);
return err;
}
return 0;
}
static int ab8500_usb_irq_setup(struct platform_device *pdev,
struct ab8500_usb *ab)
{
int err;
int irq;
if (ab->flags & AB8500_USB_FLAG_USE_LINK_STATUS_IRQ) {
irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
if (irq < 0)
return irq;
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_link_status_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-link-status", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for link status irq\n");
return err;
}
}
if (ab->flags & AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ) {
irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
if (irq < 0)
return irq;
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-id-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for ID fall irq\n");
return err;
}
}
if (ab->flags & AB8500_USB_FLAG_USE_VBUS_DET_IRQ) {
irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
if (irq < 0)
return irq;
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-vbus-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
return err;
}
}
return 0;
}
static void ab8500_usb_set_ab8500_tuning_values(struct ab8500_usb *ab)
{
int err;
/* Enable the PBT/Bank 0x12 access */
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01);
if (err < 0)
dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
err);
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
err);
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x00);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
err);
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
err);
/* Switch to normal mode/disable Bank 0x12 access */
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00);
if (err < 0)
dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
err);
}
static void ab8500_usb_set_ab8505_tuning_values(struct ab8500_usb *ab)
{
int err;
/* Enable the PBT/Bank 0x12 access */
err = abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
0x01, 0x01);
if (err < 0)
dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
err);
err = abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE1,
0xC8, 0xC8);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
err);
err = abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE2,
0x60, 0x60);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
err);
err = abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE3,
0xFC, 0x80);
if (err < 0)
dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
err);
/* Switch to normal mode/disable Bank 0x12 access */
err = abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
0x00, 0x00);
if (err < 0)
dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
err);
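/*
* Note (an observation, not from the vendor docs): the zero bitmask in
* the call above leaves the register unchanged, so bank 0x12 access is
* not actually disabled here.
*/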
}
static int ab8500_usb_probe(struct platform_device *pdev)
{
struct ab8500_usb *ab;
struct ab8500 *ab8500;
struct usb_otg *otg;
int err;
int rev;
ab8500 = dev_get_drvdata(pdev->dev.parent);
rev = abx500_get_chip_id(&pdev->dev);
if (is_ab8500_1p1_or_earlier(ab8500)) {
dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev);
return -ENODEV;
}
ab = devm_kzalloc(&pdev->dev, sizeof(*ab), GFP_KERNEL);
if (!ab)
return -ENOMEM;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
ab->dev = &pdev->dev;
ab->ab8500 = ab8500;
ab->phy.dev = ab->dev;
ab->phy.otg = otg;
ab->phy.label = "ab8500";
ab->phy.set_suspend = ab8500_usb_set_suspend;
ab->phy.otg->state = OTG_STATE_UNDEFINED;
otg->usb_phy = &ab->phy;
otg->set_host = ab8500_usb_set_host;
otg->set_peripheral = ab8500_usb_set_peripheral;
if (is_ab8500(ab->ab8500)) {
ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ |
AB8500_USB_FLAG_USE_VBUS_DET_IRQ |
AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
} else if (is_ab8505(ab->ab8500)) {
ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ |
AB8500_USB_FLAG_USE_VBUS_DET_IRQ |
AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
}
/* Disable regulator voltage setting for AB8500 <= v2.0 */
if (is_ab8500_2p0_or_earlier(ab->ab8500))
ab->flags &= ~AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
platform_set_drvdata(pdev, ab);
/* Disable the PHY when called from set_host() and set_peripheral() */
INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
err = ab8500_usb_regulator_get(ab);
if (err)
return err;
ab->sysclk = devm_clk_get(ab->dev, "sysclk");
if (IS_ERR(ab->sysclk)) {
dev_err(ab->dev, "Could not get sysclk.\n");
return PTR_ERR(ab->sysclk);
}
err = ab8500_usb_irq_setup(pdev, ab);
if (err < 0)
return err;
err = usb_add_phy(&ab->phy, USB_PHY_TYPE_USB2);
if (err) {
dev_err(&pdev->dev, "Can't register transceiver\n");
return err;
}
if (is_ab8500(ab->ab8500) && !is_ab8500_2p0_or_earlier(ab->ab8500))
/* Phy tuning values for AB8500 > v2.0 */
ab8500_usb_set_ab8500_tuning_values(ab);
else if (is_ab8505(ab->ab8500))
/* Phy tuning values for AB8505 */
ab8500_usb_set_ab8505_tuning_values(ab);
/* Needed to enable ID detection. */
ab8500_usb_wd_workaround(ab);
/*
* This is required for usb-link-status to work properly when a
* cable is connected at boot time.
*/
ab8500_usb_restart_phy(ab);
abx500_usb_link_status_update(ab);
dev_info(&pdev->dev, "revision 0x%02x driver initialized\n", rev);
return 0;
}
static void ab8500_usb_remove(struct platform_device *pdev)
{
struct ab8500_usb *ab = platform_get_drvdata(pdev);
cancel_work_sync(&ab->phy_dis_work);
usb_remove_phy(&ab->phy);
if (ab->mode == USB_HOST)
ab8500_usb_host_phy_dis(ab);
else if (ab->mode == USB_PERIPHERAL)
ab8500_usb_peri_phy_dis(ab);
}
static const struct platform_device_id ab8500_usb_devtype[] = {
{ .name = "ab8500-usb", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, ab8500_usb_devtype);
static struct platform_driver ab8500_usb_driver = {
.probe = ab8500_usb_probe,
.remove_new = ab8500_usb_remove,
.id_table = ab8500_usb_devtype,
.driver = {
.name = "abx5x0-usb",
},
};
static int __init ab8500_usb_init(void)
{
return platform_driver_register(&ab8500_usb_driver);
}
subsys_initcall(ab8500_usb_init);
static void __exit ab8500_usb_exit(void)
{
platform_driver_unregister(&ab8500_usb_driver);
}
module_exit(ab8500_usb_exit);
MODULE_AUTHOR("ST-Ericsson AB");
MODULE_DESCRIPTION("AB8500 family usb transceiver driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-ab8500-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2013 NVIDIA Corporation
*
* Author:
* Erik Gilling <[email protected]>
* Benoit Goby <[email protected]>
* Venu Byravarasu <[email protected]>
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/of.h>
#include <linux/usb/tegra_usb_phy.h>
#include <linux/usb/ulpi.h>
#define ULPI_VIEWPORT 0x170
/* PORTSC PTS/PHCD bits, Tegra20 only */
#define TEGRA_USB_PORTSC1 0x184
#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
#define TEGRA_USB_PORTSC1_PHCD BIT(23)
/* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
#define TEGRA_USB_HOSTPC1_DEVLC_PHCD BIT(22)
/* Bits of PORTSC1 that are cleared by writing 1 to them (write-1-to-clear) */
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
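/*
* Worked example of why the RWC bits are masked (hypothetical value):
* if PORTSC1 reads back with PORT_CSC set, writing that value back
* unmodified would acknowledge and clear the connect-status change;
* clearing the RWC bits in the cached value first avoids losing the
* event on an unrelated read-modify-write.
*/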
#define USB_SUSP_CTRL 0x400
#define USB_WAKE_ON_RESUME_EN BIT(2)
#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
#define USB_SUSP_CLR BIT(5)
#define USB_PHY_CLK_VALID BIT(7)
#define UTMIP_RESET BIT(11)
#define UHSIC_RESET BIT(11)
#define UTMIP_PHY_ENABLE BIT(12)
#define ULPI_PHY_ENABLE BIT(13)
#define USB_SUSP_SET BIT(14)
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
#define USB_PHY_VBUS_SENSORS 0x404
#define B_SESS_VLD_WAKEUP_EN BIT(14)
#define A_SESS_VLD_WAKEUP_EN BIT(22)
#define A_VBUS_VLD_WAKEUP_EN BIT(30)
#define USB_PHY_VBUS_WAKEUP_ID 0x408
#define ID_INT_EN BIT(0)
#define ID_CHG_DET BIT(1)
#define VBUS_WAKEUP_INT_EN BIT(8)
#define VBUS_WAKEUP_CHG_DET BIT(9)
#define VBUS_WAKEUP_STS BIT(10)
#define VBUS_WAKEUP_WAKEUP_EN BIT(30)
#define USB1_LEGACY_CTRL 0x410
#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
(1 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
#define ULPI_TIMING_CTRL_0 0x424
#define ULPI_OUTPUT_PINMUX_BYP BIT(10)
#define ULPI_CLKOUT_PINMUX_BYP BIT(11)
#define ULPI_TIMING_CTRL_1 0x428
#define ULPI_DATA_TRIMMER_LOAD BIT(0)
#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
#define ULPI_STPDIRNXT_TRIMMER_LOAD BIT(16)
#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
#define ULPI_DIR_TRIMMER_LOAD BIT(24)
#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
#define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
#define UTMIP_FORCE_PD_POWERDOWN BIT(14)
#define UTMIP_FORCE_PD2_POWERDOWN BIT(16)
#define UTMIP_FORCE_PDZI_POWERDOWN BIT(18)
#define UTMIP_XCVR_LSBIAS_SEL BIT(21)
#define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4)
#define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25)
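/*
* Worked example (a sketch): the 7-bit xcvr_setup value is split across
* two register fields. For xcvr_setup = 0x5A (binary 101 1010):
*   UTMIP_XCVR_SETUP(0x5A)     = (0x5A & 0xf) << 0          -> bits [3:0]
*   UTMIP_XCVR_SETUP_MSB(0x5A) = ((0x5A & 0x70) >> 4) << 22 -> bits [24:22]
*/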
#define UTMIP_BIAS_CFG0 0x80c
#define UTMIP_OTGPD BIT(11)
#define UTMIP_BIASPD BIT(10)
#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
#define UTMIP_HSRX_CFG0 0x810
#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
#define UTMIP_HSRX_CFG1 0x814
#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
#define UTMIP_TX_CFG0 0x820
#define UTMIP_FS_PREABMLE_J BIT(19)
#define UTMIP_HS_DISCON_DISABLE BIT(8)
#define UTMIP_MISC_CFG0 0x824
#define UTMIP_DPDM_OBSERVE BIT(26)
#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
#define UTMIP_SUSPEND_EXIT_ON_EDGE BIT(22)
#define UTMIP_MISC_CFG1 0x828
#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
#define UTMIP_DEBOUNCE_CFG0 0x82c
#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
#define UTMIP_BAT_CHRG_CFG0 0x830
#define UTMIP_PD_CHRG BIT(0)
#define UTMIP_SPARE_CFG0 0x834
#define FUSE_SETUP_SEL BIT(3)
#define UTMIP_XCVR_CFG1 0x838
#define UTMIP_FORCE_PDDISC_POWERDOWN BIT(0)
#define UTMIP_FORCE_PDCHRP_POWERDOWN BIT(2)
#define UTMIP_FORCE_PDDR_POWERDOWN BIT(4)
#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
#define UTMIP_BIAS_CFG1 0x83c
#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
/* Tegra30 and above only; the address is different on Tegra20 */
#define USB_USBMODE 0x1f8
#define USB_USBMODE_MASK (3 << 0)
#define USB_USBMODE_HOST (3 << 0)
#define USB_USBMODE_DEVICE (2 << 0)
#define PMC_USB_AO 0xf0
#define VBUS_WAKEUP_PD_P0 BIT(2)
#define ID_PD_P0 BIT(3)
static DEFINE_SPINLOCK(utmip_pad_lock);
static unsigned int utmip_pad_count;
struct tegra_xtal_freq {
unsigned int freq;
u8 enable_delay;
u8 stable_count;
u8 active_delay;
u8 xtal_freq_count;
u16 debounce;
};
static const struct tegra_xtal_freq tegra_freq_table[] = {
{
.freq = 12000000,
.enable_delay = 0x02,
.stable_count = 0x2F,
.active_delay = 0x04,
.xtal_freq_count = 0x76,
.debounce = 0x7530,
},
{
.freq = 13000000,
.enable_delay = 0x02,
.stable_count = 0x33,
.active_delay = 0x05,
.xtal_freq_count = 0x7F,
.debounce = 0x7EF4,
},
{
.freq = 19200000,
.enable_delay = 0x03,
.stable_count = 0x4B,
.active_delay = 0x06,
.xtal_freq_count = 0xBB,
.debounce = 0xBB80,
},
{
.freq = 26000000,
.enable_delay = 0x04,
.stable_count = 0x66,
.active_delay = 0x09,
.xtal_freq_count = 0xFE,
.debounce = 0xFDE8,
},
};
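/*
* Observation derived from the table above (not from the TRM): each
* debounce value is roughly 2.5 ms worth of crystal cycles, e.g.
* 12 MHz * 2.5 ms = 30000 = 0x7530 and 26 MHz * 2.5 ms = 65000 = 0xFDE8.
*/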
static inline struct tegra_usb_phy *to_tegra_usb_phy(struct usb_phy *u_phy)
{
return container_of(u_phy, struct tegra_usb_phy, u_phy);
}
static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
{
void __iomem *base = phy->regs;
u32 val;
if (phy->soc_config->has_hostpc) {
val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
val = readl_relaxed(base + TEGRA_USB_PORTSC1);
val &= ~TEGRA_PORTSC1_RWC_BITS;
val &= ~TEGRA_USB_PORTSC1_PTS(~0);
val |= TEGRA_USB_PORTSC1_PTS(pts_val);
writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
static void set_phcd(struct tegra_usb_phy *phy, bool enable)
{
void __iomem *base = phy->regs;
u32 val;
if (phy->soc_config->has_hostpc) {
val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
if (enable)
val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
else
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
val = readl_relaxed(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
if (enable)
val |= TEGRA_USB_PORTSC1_PHCD;
else
val &= ~TEGRA_USB_PORTSC1_PHCD;
writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
int ret;
ret = clk_prepare_enable(phy->pad_clk);
if (ret) {
dev_err(phy->u_phy.dev,
"Failed to enable UTMI-pads clock: %d\n", ret);
return ret;
}
spin_lock(&utmip_pad_lock);
ret = reset_control_deassert(phy->pad_rst);
if (ret) {
dev_err(phy->u_phy.dev,
"Failed to initialize UTMI-pads reset: %d\n", ret);
goto unlock;
}
ret = reset_control_assert(phy->pad_rst);
if (ret) {
dev_err(phy->u_phy.dev,
"Failed to assert UTMI-pads reset: %d\n", ret);
goto unlock;
}
udelay(1);
ret = reset_control_deassert(phy->pad_rst);
if (ret)
dev_err(phy->u_phy.dev,
"Failed to deassert UTMI-pads reset: %d\n", ret);
unlock:
spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
return ret;
}
static int utmip_pad_close(struct tegra_usb_phy *phy)
{
int ret;
ret = clk_prepare_enable(phy->pad_clk);
if (ret) {
dev_err(phy->u_phy.dev,
"Failed to enable UTMI-pads clock: %d\n", ret);
return ret;
}
ret = reset_control_assert(phy->pad_rst);
if (ret)
dev_err(phy->u_phy.dev,
"Failed to assert UTMI-pads reset: %d\n", ret);
udelay(1);
clk_disable_unprepare(phy->pad_clk);
return ret;
}
static int utmip_pad_power_on(struct tegra_usb_phy *phy)
{
struct tegra_utmip_config *config = phy->config;
void __iomem *base = phy->pad_regs;
u32 val;
int err;
err = clk_prepare_enable(phy->pad_clk);
if (err)
return err;
spin_lock(&utmip_pad_lock);
if (utmip_pad_count++ == 0) {
val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
if (phy->soc_config->requires_extra_tuning_parameters) {
val &= ~(UTMIP_HSSQUELCH_LEVEL(~0) |
UTMIP_HSDISCON_LEVEL(~0) |
UTMIP_HSDISCON_LEVEL_MSB(~0));
val |= UTMIP_HSSQUELCH_LEVEL(config->hssquelch_level);
val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
}
writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
if (phy->pad_wakeup) {
phy->pad_wakeup = false;
utmip_pad_count--;
}
spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
return 0;
}
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->pad_regs;
u32 val;
int ret;
ret = clk_prepare_enable(phy->pad_clk);
if (ret)
return ret;
spin_lock(&utmip_pad_lock);
if (!utmip_pad_count) {
dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
ret = -EINVAL;
goto ulock;
}
/*
* In accordance with the TRM, the OTG and bias pad circuits could be
* turned off to save power if wake is enabled, but the VBUS-change
* detection method is board-specific and these circuits may need to
* stay enabled to generate the wakeup event, hence we just keep them
* both enabled.
*/
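/*
* Refcount sketch: with wakeup enabled, the increment below cancels the
* decrement that follows, so the pads stay powered across suspend; the
* extra reference is dropped in utmip_pad_power_on() once pad_wakeup is
* consumed.
*/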
if (phy->wakeup_enabled) {
phy->pad_wakeup = true;
utmip_pad_count++;
}
if (--utmip_pad_count == 0) {
val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
ulock:
spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
return ret;
}
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
u32 tmp;
return readl_relaxed_poll_timeout(reg, tmp, (tmp & mask) == result,
2000, 6000);
}
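/*
* Usage sketch: utmi_wait_register() polls every 2 ms for up to 6 ms and
* returns 0 once (readl(reg) & mask) == result, or -ETIMEDOUT otherwise:
*
*   if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
*           ...handle the timeout...
*/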
static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
/*
* The USB driver may have already initiated the PHY clock
* disable, so wait to see if the clock turns off; if not,
* proceed with gating the clock.
*/
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
return;
if (phy->is_legacy_phy) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
writel_relaxed(val, base + USB_SUSP_CTRL);
usleep_range(10, 100);
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
writel_relaxed(val, base + USB_SUSP_CTRL);
} else {
set_phcd(phy, true);
}
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on disable\n");
}
static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
/*
* The USB driver may have already initiated the PHY clock
* enable, so wait to see if the clock turns on; if not,
* proceed with ungating the clock.
*/
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
USB_PHY_CLK_VALID) == 0)
return;
if (phy->is_legacy_phy) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel_relaxed(val, base + USB_SUSP_CTRL);
usleep_range(10, 100);
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
writel_relaxed(val, base + USB_SUSP_CTRL);
} else {
set_phcd(phy, false);
}
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
USB_PHY_CLK_VALID))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on enable\n");
}
static int utmi_phy_power_on(struct tegra_usb_phy *phy)
{
struct tegra_utmip_config *config = phy->config;
void __iomem *base = phy->regs;
u32 val;
int err;
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
val = readl_relaxed(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
writel_relaxed(val, base + USB1_LEGACY_CTRL);
}
val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_FS_PREABMLE_J;
writel_relaxed(val, base + UTMIP_TX_CFG0);
val = readl_relaxed(base + UTMIP_HSRX_CFG0);
val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
writel_relaxed(val, base + UTMIP_HSRX_CFG0);
val = readl_relaxed(base + UTMIP_HSRX_CFG1);
val &= ~UTMIP_HS_SYNC_START_DLY(~0);
val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
writel_relaxed(val, base + UTMIP_HSRX_CFG1);
val = readl_relaxed(base + UTMIP_DEBOUNCE_CFG0);
val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
writel_relaxed(val, base + UTMIP_DEBOUNCE_CFG0);
val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
writel_relaxed(val, base + UTMIP_MISC_CFG0);
if (!phy->soc_config->utmi_pll_config_in_car_module) {
val = readl_relaxed(base + UTMIP_MISC_CFG1);
val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
UTMIP_PLLU_STABLE_COUNT(~0));
val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
writel_relaxed(val, base + UTMIP_MISC_CFG1);
val = readl_relaxed(base + UTMIP_PLL_CFG1);
val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_WAKE_ON_RESUME_EN;
writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->mode != USB_DR_MODE_HOST) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
writel_relaxed(val, base + USB_SUSP_CTRL);
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val &= ~VBUS_WAKEUP_WAKEUP_EN;
val &= ~(ID_CHG_DET | VBUS_WAKEUP_CHG_DET);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
val &= ~(B_SESS_VLD_WAKEUP_EN);
writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
} else {
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
}
err = utmip_pad_power_on(phy);
if (err)
return err;
val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0));
if (!config->xcvr_setup_use_fuses) {
val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
val |= UTMIP_XCVR_SETUP_MSB(config->xcvr_setup);
}
val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew);
val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew);
if (phy->soc_config->requires_extra_tuning_parameters) {
val &= ~(UTMIP_XCVR_HSSLEW(~0) | UTMIP_XCVR_HSSLEW_MSB(~0));
val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
}
writel_relaxed(val, base + UTMIP_XCVR_CFG0);
val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
writel_relaxed(val, base + UTMIP_XCVR_CFG1);
val = readl_relaxed(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
writel_relaxed(val, base + UTMIP_BIAS_CFG1);
val = readl_relaxed(base + UTMIP_SPARE_CFG0);
if (config->xcvr_setup_use_fuses)
val |= FUSE_SETUP_SEL;
else
val &= ~FUSE_SETUP_SEL;
writel_relaxed(val, base + UTMIP_SPARE_CFG0);
if (!phy->is_legacy_phy) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
writel_relaxed(val, base + USB_SUSP_CTRL);
}
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~UTMIP_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
val = readl_relaxed(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
writel_relaxed(val, base + USB1_LEGACY_CTRL);
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
writel_relaxed(val, base + USB_SUSP_CTRL);
}
utmi_phy_clk_enable(phy);
if (phy->soc_config->requires_usbmode_setup) {
val = readl_relaxed(base + USB_USBMODE);
val &= ~USB_USBMODE_MASK;
if (phy->mode == USB_DR_MODE_HOST)
val |= USB_USBMODE_HOST;
else
val |= USB_USBMODE_DEVICE;
writel_relaxed(val, base + USB_USBMODE);
}
if (!phy->is_legacy_phy)
set_pts(phy, 0);
return 0;
}
static int utmi_phy_power_off(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
/*
* Give the hardware time to settle down after VBUS disconnection,
* otherwise the PHY will immediately wake up from suspend.
*/
if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST)
readl_relaxed_poll_timeout(base + USB_PHY_VBUS_WAKEUP_ID,
val, !(val & VBUS_WAKEUP_STS),
5000, 100000);
utmi_phy_clk_disable(phy);
/* PHY won't resume if reset is asserted */
if (!phy->wakeup_enabled) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
}
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
if (!phy->wakeup_enabled) {
val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN;
writel_relaxed(val, base + UTMIP_XCVR_CFG0);
}
val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
writel_relaxed(val, base + UTMIP_XCVR_CFG1);
if (phy->wakeup_enabled) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
val |= USB_WAKEUP_DEBOUNCE_COUNT(5);
val |= USB_WAKE_ON_RESUME_EN;
writel_relaxed(val, base + USB_SUSP_CTRL);
/*
* Ask the VBUS sensor to generate a wake event once a cable is
* connected.
*/
if (phy->mode != USB_DR_MODE_HOST) {
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val |= VBUS_WAKEUP_WAKEUP_EN;
val &= ~(ID_CHG_DET | VBUS_WAKEUP_CHG_DET);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
val |= A_VBUS_VLD_WAKEUP_EN;
writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
}
}
return utmip_pad_power_off(phy);
}
static void utmi_phy_preresume(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_HS_DISCON_DISABLE;
writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_postresume(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
val = readl_relaxed(base + UTMIP_TX_CFG0);
val &= ~UTMIP_HS_DISCON_DISABLE;
writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
void __iomem *base = phy->regs;
u32 val;
val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
else
val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
writel_relaxed(val, base + UTMIP_MISC_CFG0);
usleep_range(1, 10);
val = readl_relaxed(base + UTMIP_MISC_CFG0);
val |= UTMIP_DPDM_OBSERVE;
writel_relaxed(val, base + UTMIP_MISC_CFG0);
usleep_range(10, 100);
}
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE;
writel_relaxed(val, base + UTMIP_MISC_CFG0);
usleep_range(10, 100);
}
static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
{
void __iomem *base = phy->regs;
u32 val;
int err;
gpiod_set_value_cansleep(phy->reset_gpio, 1);
err = clk_prepare_enable(phy->clk);
if (err)
return err;
usleep_range(5000, 6000);
gpiod_set_value_cansleep(phy->reset_gpio, 0);
usleep_range(1000, 2000);
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UHSIC_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
val = readl_relaxed(base + ULPI_TIMING_CTRL_0);
val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
writel_relaxed(val, base + ULPI_TIMING_CTRL_0);
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= ULPI_PHY_ENABLE;
writel_relaxed(val, base + USB_SUSP_CTRL);
val = 0;
writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
val |= ULPI_DATA_TRIMMER_SEL(4);
val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
val |= ULPI_DIR_TRIMMER_SEL(4);
writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
usleep_range(10, 100);
val |= ULPI_DATA_TRIMMER_LOAD;
val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
val |= ULPI_DIR_TRIMMER_LOAD;
writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
/* Fix VbusInvalid due to floating VBUS */
err = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
if (err) {
dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
goto disable_clk;
}
err = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
if (err) {
dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
goto disable_clk;
}
val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel_relaxed(val, base + USB_SUSP_CTRL);
usleep_range(100, 1000);
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
writel_relaxed(val, base + USB_SUSP_CTRL);
return 0;
disable_clk:
clk_disable_unprepare(phy->clk);
return err;
}
static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
gpiod_set_value_cansleep(phy->reset_gpio, 1);
usleep_range(5000, 6000);
clk_disable_unprepare(phy->clk);
/*
* Wakeup is currently unimplemented for ULPI, thus the PHY needs
* to be force-resumed.
*/
if (WARN_ON_ONCE(phy->wakeup_enabled)) {
ulpi_phy_power_on(phy);
return -EOPNOTSUPP;
}
return 0;
}
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
int err;
if (phy->powered_on)
return 0;
if (phy->is_ulpi_phy)
err = ulpi_phy_power_on(phy);
else
err = utmi_phy_power_on(phy);
if (err)
return err;
phy->powered_on = true;
/* Let PHY settle down */
usleep_range(2000, 2500);
return 0;
}
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
int err;
if (!phy->powered_on)
return 0;
if (phy->is_ulpi_phy)
err = ulpi_phy_power_off(phy);
else
err = utmi_phy_power_off(phy);
if (err)
return err;
phy->powered_on = false;
return 0;
}
static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (WARN_ON(!phy->freq))
return;
usb_phy_set_wakeup(u_phy, false);
tegra_usb_phy_power_off(phy);
if (!phy->is_ulpi_phy)
utmip_pad_close(phy);
regulator_disable(phy->vbus);
clk_disable_unprepare(phy->pll_u);
phy->freq = NULL;
}
static irqreturn_t tegra_usb_phy_isr(int irq, void *data)
{
u32 val, int_mask = ID_CHG_DET | VBUS_WAKEUP_CHG_DET;
struct tegra_usb_phy *phy = data;
void __iomem *base = phy->regs;
/*
* The PHY interrupt also wakes the USB controller driver since
* the interrupt is shared. We don't do anything in the PHY
* driver itself, so just clear the interrupt.
*/
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
return val & int_mask ? IRQ_HANDLED : IRQ_NONE;
}
static int tegra_usb_phy_set_wakeup(struct usb_phy *u_phy, bool enable)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
void __iomem *base = phy->regs;
int ret = 0;
u32 val;
if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST &&
phy->irq > 0) {
disable_irq(phy->irq);
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val &= ~(ID_INT_EN | VBUS_WAKEUP_INT_EN);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
enable_irq(phy->irq);
free_irq(phy->irq, phy);
phy->wakeup_enabled = false;
}
if (enable && phy->mode != USB_DR_MODE_HOST && phy->irq > 0) {
ret = request_irq(phy->irq, tegra_usb_phy_isr, IRQF_SHARED,
dev_name(phy->u_phy.dev), phy);
if (!ret) {
disable_irq(phy->irq);
/*
* The USB clock will be resumed once a wake event is
* generated. The ID-change event requires interrupts to be
* enabled, otherwise it won't be generated.
*/
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val |= ID_INT_EN | VBUS_WAKEUP_INT_EN;
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
enable_irq(phy->irq);
} else {
dev_err(phy->u_phy.dev,
"Failed to request interrupt: %d", ret);
enable = false;
}
}
phy->wakeup_enabled = enable;
return ret;
}
static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
int ret;
if (WARN_ON(!phy->freq))
return -EINVAL;
/*
* The PHY shares its IRQ with the CI driver, hence here we
* disable the interrupt either for both PHY and CI or for CI
* only. The interrupt needs to be disabled while the hardware is
* reprogrammed because the interrupt handler touches the
* programmed registers, and thus there could be a race condition.
*/
if (phy->irq > 0)
disable_irq(phy->irq);
if (suspend)
ret = tegra_usb_phy_power_off(phy);
else
ret = tegra_usb_phy_power_on(phy);
if (phy->irq > 0)
enable_irq(phy->irq);
return ret;
}
static int tegra_usb_phy_configure_pmc(struct tegra_usb_phy *phy)
{
int err, val = 0;
/* Older device trees don't have a PMC regmap */
if (!phy->pmc_regmap)
return 0;
/*
* Tegra20 has a different layout of the PMC USB register bits, and
* AO is enabled by default after system reset on Tegra20, so there
* is nothing to do here on Tegra20.
*/
if (!phy->soc_config->requires_pmc_ao_power_up)
return 0;
/* enable VBUS wake-up detector */
if (phy->mode != USB_DR_MODE_HOST)
val |= VBUS_WAKEUP_PD_P0 << phy->instance * 4;
/* enable ID-pin ACC detector for OTG mode switching */
if (phy->mode == USB_DR_MODE_OTG)
val |= ID_PD_P0 << phy->instance * 4;
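/*
* Example (a sketch): each controller instance owns a 4-bit group in
* PMC_USB_AO, so for instance 1 the detectors map to
* VBUS_WAKEUP_PD_P0 << 4 = BIT(6) and ID_PD_P0 << 4 = BIT(7).
*/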
/* disable detectors to reset them */
err = regmap_set_bits(phy->pmc_regmap, PMC_USB_AO, val);
if (err) {
dev_err(phy->u_phy.dev, "Failed to disable PMC AO: %d\n", err);
return err;
}
usleep_range(10, 100);
/* enable detectors */
err = regmap_clear_bits(phy->pmc_regmap, PMC_USB_AO, val);
if (err) {
dev_err(phy->u_phy.dev, "Failed to enable PMC AO: %d\n", err);
return err;
}
/* detectors start to work after 10 ms */
usleep_range(10000, 15000);
return 0;
}
static int tegra_usb_phy_init(struct usb_phy *u_phy)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
unsigned long parent_rate;
unsigned int i;
int err;
if (WARN_ON(phy->freq))
return 0;
err = clk_prepare_enable(phy->pll_u);
if (err)
return err;
parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
if (tegra_freq_table[i].freq == parent_rate) {
phy->freq = &tegra_freq_table[i];
break;
}
}
if (!phy->freq) {
dev_err(phy->u_phy.dev, "Invalid pll_u parent rate %ld\n",
parent_rate);
err = -EINVAL;
goto disable_clk;
}
err = regulator_enable(phy->vbus);
if (err) {
dev_err(phy->u_phy.dev,
"Failed to enable USB VBUS regulator: %d\n", err);
goto disable_clk;
}
if (!phy->is_ulpi_phy) {
err = utmip_pad_open(phy);
if (err)
goto disable_vbus;
}
err = tegra_usb_phy_configure_pmc(phy);
if (err)
goto close_phy;
err = tegra_usb_phy_power_on(phy);
if (err)
goto close_phy;
return 0;
close_phy:
if (!phy->is_ulpi_phy)
utmip_pad_close(phy);
disable_vbus:
regulator_disable(phy->vbus);
disable_clk:
clk_disable_unprepare(phy->pll_u);
phy->freq = NULL;
return err;
}
void tegra_usb_phy_preresume(struct usb_phy *u_phy)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
void tegra_usb_phy_postresume(struct usb_phy *u_phy)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
void tegra_ehci_phy_restore_start(struct usb_phy *u_phy,
enum tegra_usb_phy_port_speed port_speed)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
void tegra_ehci_phy_restore_end(struct usb_phy *u_phy)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end);
static int read_utmi_param(struct platform_device *pdev, const char *param,
u8 *dest)
{
u32 value;
int err;
err = of_property_read_u32(pdev->dev.of_node, param, &value);
if (err)
dev_err(&pdev->dev,
"Failed to read USB UTMI parameter %s: %d\n",
param, err);
else
*dest = value;
return err;
}
static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
struct platform_device *pdev)
{
struct tegra_utmip_config *config;
struct resource *res;
int err;
tegra_phy->is_ulpi_phy = false;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(&pdev->dev, "Failed to get UTMI pad regs\n");
return -ENXIO;
}
/*
* Note that UTMI pad registers are shared by all PHYs, therefore
* devm_platform_ioremap_resource() can't be used here.
*/
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI pad regs\n");
return -ENOMEM;
}
tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
GFP_KERNEL);
if (!tegra_phy->config)
return -ENOMEM;
config = tegra_phy->config;
err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
&config->hssync_start_delay);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,elastic-limit",
&config->elastic_limit);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
&config->idle_wait_delay);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,term-range-adj",
&config->term_range_adj);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
&config->xcvr_lsfslew);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
&config->xcvr_lsrslew);
if (err)
return err;
if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
&config->xcvr_hsslew);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hssquelch-level",
&config->hssquelch_level);
if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
&config->hsdiscon_level);
if (err)
return err;
}
config->xcvr_setup_use_fuses = of_property_read_bool(
pdev->dev.of_node, "nvidia,xcvr-setup-use-fuses");
if (!config->xcvr_setup_use_fuses) {
err = read_utmi_param(pdev, "nvidia,xcvr-setup",
&config->xcvr_setup);
if (err)
return err;
}
return 0;
}
static void tegra_usb_phy_put_pmc_device(void *dev)
{
put_device(dev);
}
static int tegra_usb_phy_parse_pmc(struct device *dev,
struct tegra_usb_phy *phy)
{
struct platform_device *pmc_pdev;
struct of_phandle_args args;
int err;
err = of_parse_phandle_with_fixed_args(dev->of_node, "nvidia,pmc",
1, 0, &args);
if (err) {
if (err != -ENOENT)
return err;
dev_warn_once(dev, "nvidia,pmc is missing, please update your device-tree\n");
return 0;
}
pmc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!pmc_pdev)
return -ENODEV;
err = devm_add_action_or_reset(dev, tegra_usb_phy_put_pmc_device,
&pmc_pdev->dev);
if (err)
return err;
if (!platform_get_drvdata(pmc_pdev))
return -EPROBE_DEFER;
phy->pmc_regmap = dev_get_regmap(&pmc_pdev->dev, "usb_sleepwalk");
if (!phy->pmc_regmap)
return -EINVAL;
phy->instance = args.args[0];
return 0;
}
static const struct tegra_phy_soc_config tegra20_soc_config = {
.utmi_pll_config_in_car_module = false,
.has_hostpc = false,
.requires_usbmode_setup = false,
.requires_extra_tuning_parameters = false,
.requires_pmc_ao_power_up = false,
};
static const struct tegra_phy_soc_config tegra30_soc_config = {
.utmi_pll_config_in_car_module = true,
.has_hostpc = true,
.requires_usbmode_setup = true,
.requires_extra_tuning_parameters = true,
.requires_pmc_ao_power_up = true,
};
static const struct of_device_id tegra_usb_phy_id_table[] = {
{ .compatible = "nvidia,tegra30-usb-phy", .data = &tegra30_soc_config },
{ .compatible = "nvidia,tegra20-usb-phy", .data = &tegra20_soc_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
static int tegra_usb_phy_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct tegra_usb_phy *tegra_phy;
enum usb_phy_interface phy_type;
struct reset_control *reset;
struct gpio_desc *gpiod;
struct resource *res;
struct usb_phy *phy;
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
if (!tegra_phy)
return -ENOMEM;
tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
tegra_phy->irq = platform_get_irq_optional(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get I/O memory\n");
return -ENXIO;
}
/*
* Note that PHY and USB controller are using shared registers,
* therefore devm_platform_ioremap_resource() can't be used here.
*/
tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!tegra_phy->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
return -ENOMEM;
}
tegra_phy->is_legacy_phy =
of_property_read_bool(np, "nvidia,has-legacy-mode");
if (of_property_present(np, "dr_mode"))
tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
else
tegra_phy->mode = USB_DR_MODE_HOST;
if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
dev_err(&pdev->dev, "dr_mode is invalid\n");
return -EINVAL;
}
/* On some boards, the VBUS regulator doesn't need to be controlled */
tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
if (IS_ERR(tegra_phy->vbus))
return PTR_ERR(tegra_phy->vbus);
tegra_phy->pll_u = devm_clk_get(&pdev->dev, "pll_u");
err = PTR_ERR_OR_ZERO(tegra_phy->pll_u);
if (err) {
dev_err(&pdev->dev, "Failed to get pll_u clock: %d\n", err);
return err;
}
err = tegra_usb_phy_parse_pmc(&pdev->dev, tegra_phy);
if (err) {
dev_err_probe(&pdev->dev, err, "Failed to get PMC regmap\n");
return err;
}
phy_type = of_usb_get_phy_mode(np);
switch (phy_type) {
case USBPHY_INTERFACE_MODE_UTMI:
err = utmi_phy_probe(tegra_phy, pdev);
if (err)
return err;
tegra_phy->pad_clk = devm_clk_get(&pdev->dev, "utmi-pads");
err = PTR_ERR_OR_ZERO(tegra_phy->pad_clk);
if (err) {
dev_err(&pdev->dev,
"Failed to get UTMIP pad clock: %d\n", err);
return err;
}
reset = devm_reset_control_get_optional_shared(&pdev->dev,
"utmi-pads");
err = PTR_ERR_OR_ZERO(reset);
if (err) {
dev_err(&pdev->dev,
"Failed to get UTMI-pads reset: %d\n", err);
return err;
}
tegra_phy->pad_rst = reset;
break;
case USBPHY_INTERFACE_MODE_ULPI:
tegra_phy->is_ulpi_phy = true;
tegra_phy->clk = devm_clk_get(&pdev->dev, "ulpi-link");
err = PTR_ERR_OR_ZERO(tegra_phy->clk);
if (err) {
dev_err(&pdev->dev,
"Failed to get ULPI clock: %d\n", err);
return err;
}
gpiod = devm_gpiod_get(&pdev->dev, "nvidia,phy-reset",
GPIOD_OUT_HIGH);
err = PTR_ERR_OR_ZERO(gpiod);
if (err) {
dev_err(&pdev->dev,
"Request failed for reset GPIO: %d\n", err);
return err;
}
err = gpiod_set_consumer_name(gpiod, "ulpi_phy_reset_b");
if (err) {
dev_err(&pdev->dev,
"Failed to set up reset GPIO name: %d\n", err);
return err;
}
tegra_phy->reset_gpio = gpiod;
phy = devm_otg_ulpi_create(&pdev->dev,
&ulpi_viewport_access_ops, 0);
if (!phy) {
dev_err(&pdev->dev, "Failed to create ULPI OTG\n");
return -ENOMEM;
}
tegra_phy->ulpi = phy;
tegra_phy->ulpi->io_priv = tegra_phy->regs + ULPI_VIEWPORT;
break;
default:
dev_err(&pdev->dev, "phy_type %u is invalid or unsupported\n",
phy_type);
return -EINVAL;
}
tegra_phy->u_phy.dev = &pdev->dev;
tegra_phy->u_phy.init = tegra_usb_phy_init;
tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
tegra_phy->u_phy.set_wakeup = tegra_usb_phy_set_wakeup;
tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
return usb_add_phy_dev(&tegra_phy->u_phy);
}
static void tegra_usb_phy_remove(struct platform_device *pdev)
{
struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
usb_remove_phy(&tegra_phy->u_phy);
}
static struct platform_driver tegra_usb_phy_driver = {
.probe = tegra_usb_phy_probe,
.remove_new = tegra_usb_phy_remove,
.driver = {
.name = "tegra-phy",
.of_match_table = tegra_usb_phy_id_table,
},
};
module_platform_driver(tegra_usb_phy_driver);
MODULE_DESCRIPTION("Tegra USB PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/phy/phy-tegra-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NXP ISP1301 USB transceiver driver
*
* Copyright (C) 2012 Roland Stigge
*
* Author: Roland Stigge <[email protected]>
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/usb/phy.h>
#include <linux/usb/isp1301.h>
#define DRV_NAME "isp1301"
struct isp1301 {
struct usb_phy phy;
struct mutex mutex;
struct i2c_client *client;
};
#define phy_to_isp(p) (container_of((p), struct isp1301, phy))
static const struct i2c_device_id isp1301_id[] = {
{ "isp1301", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, isp1301_id);
static const struct of_device_id isp1301_of_match[] = {
{.compatible = "nxp,isp1301" },
{ },
};
MODULE_DEVICE_TABLE(of, isp1301_of_match);
static struct i2c_client *isp1301_i2c_client;
static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
{
return i2c_smbus_write_byte_data(isp->client, reg | clear, value);
}
static int isp1301_write(struct isp1301 *isp, u8 reg, u8 value)
{
return __isp1301_write(isp, reg, value, 0);
}
static int isp1301_clear(struct isp1301 *isp, u8 reg, u8 value)
{
return __isp1301_write(isp, reg, value, ISP1301_I2C_REG_CLEAR_ADDR);
}
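/*
* Usage sketch: the ISP1301 exposes set/clear register pairs, so bits
* are set by writing to the base address and cleared by writing the
* same value to (reg | ISP1301_I2C_REG_CLEAR_ADDR), e.g.:
*
*   isp1301_write(isp, ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
*   isp1301_clear(isp, ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
*/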
static int isp1301_phy_init(struct usb_phy *phy)
{
struct isp1301 *isp = phy_to_isp(phy);
/* Disable transparent UART mode first */
isp1301_clear(isp, ISP1301_I2C_MODE_CONTROL_1, MC1_UART_EN);
isp1301_clear(isp, ISP1301_I2C_MODE_CONTROL_1, ~MC1_SPEED_REG);
isp1301_write(isp, ISP1301_I2C_MODE_CONTROL_1, MC1_SPEED_REG);
isp1301_clear(isp, ISP1301_I2C_MODE_CONTROL_2, ~0);
isp1301_write(isp, ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_PSW_EN
| MC2_SPD_SUSP_CTRL));
isp1301_clear(isp, ISP1301_I2C_OTG_CONTROL_1, ~0);
isp1301_write(isp, ISP1301_I2C_MODE_CONTROL_1, MC1_DAT_SE0);
isp1301_write(isp, ISP1301_I2C_OTG_CONTROL_1, (OTG1_DM_PULLDOWN
| OTG1_DP_PULLDOWN));
isp1301_clear(isp, ISP1301_I2C_OTG_CONTROL_1, (OTG1_DM_PULLUP
| OTG1_DP_PULLUP));
/* mask all interrupts */
isp1301_clear(isp, ISP1301_I2C_INTERRUPT_LATCH, ~0);
isp1301_clear(isp, ISP1301_I2C_INTERRUPT_FALLING, ~0);
isp1301_clear(isp, ISP1301_I2C_INTERRUPT_RISING, ~0);
return 0;
}
static int isp1301_phy_set_vbus(struct usb_phy *phy, int on)
{
struct isp1301 *isp = phy_to_isp(phy);
if (on)
isp1301_write(isp, ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
else
isp1301_clear(isp, ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
return 0;
}
static int isp1301_probe(struct i2c_client *client)
{
struct isp1301 *isp;
struct usb_phy *phy;
isp = devm_kzalloc(&client->dev, sizeof(*isp), GFP_KERNEL);
if (!isp)
return -ENOMEM;
isp->client = client;
mutex_init(&isp->mutex);
phy = &isp->phy;
phy->dev = &client->dev;
phy->label = DRV_NAME;
phy->init = isp1301_phy_init;
phy->set_vbus = isp1301_phy_set_vbus;
phy->type = USB_PHY_TYPE_USB2;
i2c_set_clientdata(client, isp);
usb_add_phy_dev(phy);
isp1301_i2c_client = client;
return 0;
}
static void isp1301_remove(struct i2c_client *client)
{
struct isp1301 *isp = i2c_get_clientdata(client);
usb_remove_phy(&isp->phy);
isp1301_i2c_client = NULL;
}
static struct i2c_driver isp1301_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = isp1301_of_match,
},
.probe = isp1301_probe,
.remove = isp1301_remove,
.id_table = isp1301_id,
};
module_i2c_driver(isp1301_driver);
struct i2c_client *isp1301_get_client(struct device_node *node)
{
struct i2c_client *client;
/* DT case: look up the ISP1301 referenced by its I2C device node */
client = of_find_i2c_device_by_node(node);
if (client)
return client;
/* non-DT: only one ISP1301 chip supported */
return isp1301_i2c_client;
}
EXPORT_SYMBOL_GPL(isp1301_get_client);
MODULE_AUTHOR("Roland Stigge <[email protected]>");
MODULE_DESCRIPTION("NXP ISP1301 USB transceiver driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-isp1301.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver.
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Hema HK <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/usb/musb.h>
#include <linux/usb/phy_companion.h>
#include <linux/phy/omap_usb.h>
#include <linux/mfd/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/of.h>
/* usb register definitions */
#define USB_VENDOR_ID_LSB 0x00
#define USB_VENDOR_ID_MSB 0x01
#define USB_PRODUCT_ID_LSB 0x02
#define USB_PRODUCT_ID_MSB 0x03
#define USB_VBUS_CTRL_SET 0x04
#define USB_VBUS_CTRL_CLR 0x05
#define USB_ID_CTRL_SET 0x06
#define USB_ID_CTRL_CLR 0x07
#define USB_VBUS_INT_SRC 0x08
#define USB_VBUS_INT_LATCH_SET 0x09
#define USB_VBUS_INT_LATCH_CLR 0x0A
#define USB_VBUS_INT_EN_LO_SET 0x0B
#define USB_VBUS_INT_EN_LO_CLR 0x0C
#define USB_VBUS_INT_EN_HI_SET 0x0D
#define USB_VBUS_INT_EN_HI_CLR 0x0E
#define USB_ID_INT_SRC 0x0F
#define USB_ID_INT_LATCH_SET 0x10
#define USB_ID_INT_LATCH_CLR 0x11
#define USB_ID_INT_EN_LO_SET 0x12
#define USB_ID_INT_EN_LO_CLR 0x13
#define USB_ID_INT_EN_HI_SET 0x14
#define USB_ID_INT_EN_HI_CLR 0x15
#define USB_OTG_ADP_CTRL 0x16
#define USB_OTG_ADP_HIGH 0x17
#define USB_OTG_ADP_LOW 0x18
#define USB_OTG_ADP_RISE 0x19
#define USB_OTG_REVISION 0x1A
/* to be moved to LDO */
#define TWL6030_MISC2 0xE5
#define TWL6030_CFG_LDO_PD2 0xF5
#define TWL6030_BACKUP_REG 0xFA
/* In module TWL6030_MODULE_PM_MASTER */
#define STS_HW_CONDITIONS 0x21
#define STS_USB_ID BIT(2)
/* In module TWL6030_MODULE_PM_RECEIVER */
#define VUSB_CFG_TRANS 0x71
#define VUSB_CFG_STATE 0x72
#define VUSB_CFG_VOLTAGE 0x73
/* in module TWL6030_MODULE_MAIN_CHARGE */
#define CHARGERUSB_CTRL1 0x8
#define CONTROLLER_STAT1 0x03
#define VBUS_DET BIT(2)
struct twl6030_usb {
struct phy_companion comparator;
struct device *dev;
/* for vbus reporting with irqs disabled */
spinlock_t lock;
struct regulator *usb3v3;
/* used to check initial cable status after probe */
struct delayed_work get_status_work;
/* used to set vbus, in atomic path */
struct work_struct set_vbus_work;
int irq1;
int irq2;
enum musb_vbus_id_status linkstat;
u8 asleep;
bool vbus_enable;
};
#define comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator)
/*-------------------------------------------------------------------------*/
static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
u8 data, u8 address)
{
int ret = 0;
ret = twl_i2c_write_u8(module, data, address);
if (ret < 0)
dev_err(twl->dev,
"Write[0x%x] Error %d\n", address, ret);
return ret;
}
static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
{
u8 data;
int ret;
ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
ret = data;
else
dev_err(twl->dev,
"readb[0x%x,0x%x] Error %d\n",
module, address, ret);
return ret;
}
static int twl6030_start_srp(struct phy_companion *comparator)
{
struct twl6030_usb *twl = comparator_to_twl(comparator);
twl6030_writeb(twl, TWL_MODULE_USB, 0x24, USB_VBUS_CTRL_SET);
twl6030_writeb(twl, TWL_MODULE_USB, 0x84, USB_VBUS_CTRL_SET);
mdelay(100);
twl6030_writeb(twl, TWL_MODULE_USB, 0xa0, USB_VBUS_CTRL_CLR);
return 0;
}
static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
{
/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_BACKUP_REG);
/* Program CFG_LDO_PD2 register and set VUSB bit */
twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_CFG_LDO_PD2);
/* Program MISC2 register and set bit VUSB_IN_VBAT */
twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
twl->usb3v3 = regulator_get(twl->dev, "usb");
if (IS_ERR(twl->usb3v3))
return -ENODEV;
/* Program the USB_VBUS_CTRL_SET and set VBUS_ACT_COMP bit */
twl6030_writeb(twl, TWL_MODULE_USB, 0x4, USB_VBUS_CTRL_SET);
/*
* Program the USB_ID_CTRL_SET register to enable GND drive
* and the ID comparators
*/
twl6030_writeb(twl, TWL_MODULE_USB, 0x14, USB_ID_CTRL_SET);
return 0;
}
static ssize_t vbus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct twl6030_usb *twl = dev_get_drvdata(dev);
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&twl->lock, flags);
switch (twl->linkstat) {
case MUSB_VBUS_VALID:
ret = snprintf(buf, PAGE_SIZE, "vbus\n");
break;
case MUSB_ID_GROUND:
ret = snprintf(buf, PAGE_SIZE, "id\n");
break;
case MUSB_VBUS_OFF:
ret = snprintf(buf, PAGE_SIZE, "none\n");
break;
default:
ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
}
spin_unlock_irqrestore(&twl->lock, flags);
return ret;
}
static DEVICE_ATTR_RO(vbus);
static struct attribute *twl6030_attrs[] = {
&dev_attr_vbus.attr,
NULL,
};
ATTRIBUTE_GROUPS(twl6030);
static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
{
struct twl6030_usb *twl = _twl;
enum musb_vbus_id_status status = MUSB_UNKNOWN;
u8 vbus_state, hw_state;
int ret;
hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
vbus_state = twl6030_readb(twl, TWL_MODULE_MAIN_CHARGE,
CONTROLLER_STAT1);
if (!(hw_state & STS_USB_ID)) {
if (vbus_state & VBUS_DET) {
ret = regulator_enable(twl->usb3v3);
if (ret)
dev_err(twl->dev, "Failed to enable usb3v3\n");
twl->asleep = 1;
status = MUSB_VBUS_VALID;
twl->linkstat = status;
ret = musb_mailbox(status);
if (ret)
twl->linkstat = MUSB_UNKNOWN;
} else {
if (twl->linkstat != MUSB_UNKNOWN) {
status = MUSB_VBUS_OFF;
twl->linkstat = status;
ret = musb_mailbox(status);
if (ret)
twl->linkstat = MUSB_UNKNOWN;
if (twl->asleep) {
regulator_disable(twl->usb3v3);
twl->asleep = 0;
}
}
}
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
return IRQ_HANDLED;
}
static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
{
struct twl6030_usb *twl = _twl;
enum musb_vbus_id_status status = MUSB_UNKNOWN;
u8 hw_state;
int ret;
hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
if (hw_state & STS_USB_ID) {
ret = regulator_enable(twl->usb3v3);
if (ret)
dev_err(twl->dev, "Failed to enable usb3v3\n");
twl->asleep = 1;
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = MUSB_ID_GROUND;
twl->linkstat = status;
ret = musb_mailbox(status);
if (ret)
twl->linkstat = MUSB_UNKNOWN;
} else {
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
}
twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
return IRQ_HANDLED;
}
static void twl6030_status_work(struct work_struct *work)
{
struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
get_status_work.work);
twl6030_usb_irq(twl->irq2, twl);
twl6030_usbotg_irq(twl->irq1, twl);
}
static int twl6030_enable_irq(struct twl6030_usb *twl)
{
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
REG_INT_MSK_STS_C);
return 0;
}
static void otg_set_vbus_work(struct work_struct *data)
{
struct twl6030_usb *twl = container_of(data, struct twl6030_usb,
set_vbus_work);
/*
* Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1
* register. This enables boost mode.
*/
if (twl->vbus_enable)
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x40,
CHARGERUSB_CTRL1);
else
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x00,
CHARGERUSB_CTRL1);
}
static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled)
{
struct twl6030_usb *twl = comparator_to_twl(comparator);
twl->vbus_enable = enabled;
schedule_work(&twl->set_vbus_work);
return 0;
}
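/*
 * Editor's note: twl6030_set_vbus() may be called from atomic context (see
 * the "used to set vbus, in atomic path" comment on struct twl6030_usb),
 * while the I2C write can sleep; hence the deferral to set_vbus_work.
 */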
static int twl6030_usb_probe(struct platform_device *pdev)
{
	int ret;
struct twl6030_usb *twl;
int status, err;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
if (!np) {
dev_err(dev, "no DT info\n");
return -EINVAL;
}
twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
twl->dev = &pdev->dev;
twl->irq1 = platform_get_irq(pdev, 0);
twl->irq2 = platform_get_irq(pdev, 1);
twl->linkstat = MUSB_UNKNOWN;
if (twl->irq1 < 0)
return twl->irq1;
if (twl->irq2 < 0)
return twl->irq2;
twl->comparator.set_vbus = twl6030_set_vbus;
twl->comparator.start_srp = twl6030_start_srp;
ret = omap_usb2_set_comparator(&twl->comparator);
if (ret == -ENODEV) {
		dev_info(&pdev->dev, "phy not ready, deferring probe\n");
return -EPROBE_DEFER;
}
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
err = twl6030_usb_ldo_init(twl);
if (err) {
dev_err(&pdev->dev, "ldo init failed\n");
return err;
}
platform_set_drvdata(pdev, twl);
INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"twl6030_usb", twl);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq1, status);
goto err_put_regulator;
}
status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"twl6030_usb", twl);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq2, status);
goto err_free_irq1;
}
twl->asleep = 0;
twl6030_enable_irq(twl);
schedule_delayed_work(&twl->get_status_work, HZ);
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
err_free_irq1:
free_irq(twl->irq1, twl);
err_put_regulator:
regulator_put(twl->usb3v3);
return status;
}
static void twl6030_usb_remove(struct platform_device *pdev)
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_STS_C);
free_irq(twl->irq1, twl);
free_irq(twl->irq2, twl);
regulator_put(twl->usb3v3);
cancel_work_sync(&twl->set_vbus_work);
}
static const struct of_device_id twl6030_usb_id_table[] = {
{ .compatible = "ti,twl6030-usb" },
{}
};
MODULE_DEVICE_TABLE(of, twl6030_usb_id_table);
static struct platform_driver twl6030_usb_driver = {
.probe = twl6030_usb_probe,
.remove_new = twl6030_usb_remove,
.driver = {
.name = "twl6030_usb",
.of_match_table = of_match_ptr(twl6030_usb_id_table),
.dev_groups = twl6030_groups,
},
};
static int __init twl6030_usb_init(void)
{
return platform_driver_register(&twl6030_usb_driver);
}
subsys_initcall(twl6030_usb_init);
static void __exit twl6030_usb_exit(void)
{
platform_driver_unregister(&twl6030_usb_driver);
}
module_exit(twl6030_usb_exit);
MODULE_ALIAS("platform:twl6030_usb");
MODULE_AUTHOR("Hema HK <[email protected]>");
MODULE_DESCRIPTION("TWL6030 USB transceiver driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-twl6030-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* gpio-vbus.c - simple GPIO VBUS sensing driver for B peripheral devices
*
* Copyright (c) 2008 Philipp Zabel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
/*
* A simple GPIO VBUS sensing driver for B peripheral only devices
* with internal transceivers. It can control a D+ pullup GPIO and
* a regulator to limit the current drawn from VBUS.
*
* Needs to be loaded before the UDC driver that will use it.
*/
struct gpio_vbus_data {
struct gpio_desc *vbus_gpiod;
struct gpio_desc *pullup_gpiod;
struct usb_phy phy;
struct device *dev;
struct regulator *vbus_draw;
int vbus_draw_enabled;
unsigned mA;
struct delayed_work work;
int vbus;
int irq;
};
/*
* This driver relies on "both edges" triggering. VBUS has 100 msec to
* stabilize, so the peripheral controller driver may need to cope with
* some bouncing due to current surges (e.g. charging local capacitance)
* and contact chatter.
*
* REVISIT in desperate straits, toggling between rising and falling
* edges might be workable.
*/
#define VBUS_IRQ_FLAGS \
(IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
/* interface to regulator framework */
static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
{
struct regulator *vbus_draw = gpio_vbus->vbus_draw;
int enabled;
int ret;
if (!vbus_draw)
return;
enabled = gpio_vbus->vbus_draw_enabled;
if (mA) {
regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
if (!enabled) {
ret = regulator_enable(vbus_draw);
if (ret < 0)
return;
gpio_vbus->vbus_draw_enabled = 1;
}
} else {
if (enabled) {
ret = regulator_disable(vbus_draw);
if (ret < 0)
return;
gpio_vbus->vbus_draw_enabled = 0;
}
}
gpio_vbus->mA = mA;
}
static int is_vbus_powered(struct gpio_vbus_data *gpio_vbus)
{
return gpiod_get_value(gpio_vbus->vbus_gpiod);
}
static void gpio_vbus_work(struct work_struct *work)
{
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work.work);
int status, vbus;
if (!gpio_vbus->phy.otg->gadget)
return;
vbus = is_vbus_powered(gpio_vbus);
if ((vbus ^ gpio_vbus->vbus) == 0)
return;
gpio_vbus->vbus = vbus;
/* Peripheral controllers which manage the pullup themselves won't have
* a pullup GPIO configured here. If it's configured here, we'll do
* what isp1301_omap::b_peripheral() does and enable the pullup here...
* although that may complicate usb_gadget_{,dis}connect() support.
*/
if (vbus) {
status = USB_EVENT_VBUS;
gpio_vbus->phy.otg->state = OTG_STATE_B_PERIPHERAL;
gpio_vbus->phy.last_event = status;
usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
/* drawing a "unit load" is *always* OK, except for OTG */
set_vbus_draw(gpio_vbus, 100);
/* optionally enable D+ pullup */
if (gpio_vbus->pullup_gpiod)
gpiod_set_value(gpio_vbus->pullup_gpiod, 1);
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
} else {
/* optionally disable D+ pullup */
if (gpio_vbus->pullup_gpiod)
gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
status = USB_EVENT_NONE;
gpio_vbus->phy.otg->state = OTG_STATE_B_IDLE;
gpio_vbus->phy.last_event = status;
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_NONE);
}
}
/* VBUS change IRQ handler */
static irqreturn_t gpio_vbus_irq(int irq, void *data)
{
struct platform_device *pdev = data;
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
struct usb_otg *otg = gpio_vbus->phy.otg;
dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
is_vbus_powered(gpio_vbus) ? "supplied" : "inactive",
otg->gadget ? otg->gadget->name : "none");
if (otg->gadget)
schedule_delayed_work(&gpio_vbus->work, msecs_to_jiffies(100));
return IRQ_HANDLED;
}
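/*
 * Editor's note: the msecs_to_jiffies(100) delay above implements the
 * 100 msec VBUS stabilization window described next to VBUS_IRQ_FLAGS,
 * debouncing contact chatter before the gadget is (dis)connected.
 */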
/* OTG transceiver interface */
/* bind/unbind the peripheral controller */
static int gpio_vbus_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct gpio_vbus_data *gpio_vbus;
struct platform_device *pdev;
gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
if (!gadget) {
dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
otg->gadget->name);
/* optionally disable D+ pullup */
if (gpio_vbus->pullup_gpiod)
gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
usb_gadget_vbus_disconnect(otg->gadget);
otg->state = OTG_STATE_UNDEFINED;
otg->gadget = NULL;
return 0;
}
otg->gadget = gadget;
dev_dbg(&pdev->dev, "registered gadget '%s'\n", gadget->name);
/* initialize connection state */
gpio_vbus->vbus = 0; /* start with disconnected */
gpio_vbus_irq(gpio_vbus->irq, pdev);
return 0;
}
/* effective for B devices, ignored for A-peripheral */
static int gpio_vbus_set_power(struct usb_phy *phy, unsigned mA)
{
struct gpio_vbus_data *gpio_vbus;
gpio_vbus = container_of(phy, struct gpio_vbus_data, phy);
if (phy->otg->state == OTG_STATE_B_PERIPHERAL)
set_vbus_draw(gpio_vbus, mA);
return 0;
}
/* for non-OTG B devices: set/clear transceiver suspend mode */
static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
{
struct gpio_vbus_data *gpio_vbus;
gpio_vbus = container_of(phy, struct gpio_vbus_data, phy);
/* draw max 0 mA from vbus in suspend mode; or the previously
* recorded amount of current if not suspended
*
* NOTE: high powered configs (mA > 100) may draw up to 2.5 mA
* if they're wake-enabled ... we don't handle that yet.
*/
return gpio_vbus_set_power(phy, suspend ? 0 : gpio_vbus->mA);
}
/* platform driver interface */
static int gpio_vbus_probe(struct platform_device *pdev)
{
struct gpio_vbus_data *gpio_vbus;
struct resource *res;
struct device *dev = &pdev->dev;
int err, irq;
unsigned long irqflags;
gpio_vbus = devm_kzalloc(&pdev->dev, sizeof(struct gpio_vbus_data),
GFP_KERNEL);
if (!gpio_vbus)
return -ENOMEM;
gpio_vbus->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!gpio_vbus->phy.otg)
return -ENOMEM;
platform_set_drvdata(pdev, gpio_vbus);
gpio_vbus->dev = &pdev->dev;
gpio_vbus->phy.label = "gpio-vbus";
gpio_vbus->phy.dev = gpio_vbus->dev;
gpio_vbus->phy.set_power = gpio_vbus_set_power;
gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
gpio_vbus->phy.otg->state = OTG_STATE_UNDEFINED;
gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
/* Look up the VBUS sensing GPIO */
gpio_vbus->vbus_gpiod = devm_gpiod_get(dev, "vbus", GPIOD_IN);
if (IS_ERR(gpio_vbus->vbus_gpiod)) {
err = PTR_ERR(gpio_vbus->vbus_gpiod);
dev_err(&pdev->dev, "can't request vbus gpio, err: %d\n", err);
return err;
}
gpiod_set_consumer_name(gpio_vbus->vbus_gpiod, "vbus_detect");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res) {
irq = res->start;
irqflags = (res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
} else {
irq = gpiod_to_irq(gpio_vbus->vbus_gpiod);
irqflags = VBUS_IRQ_FLAGS;
}
gpio_vbus->irq = irq;
/*
* The VBUS sensing GPIO should have a pulldown, which will normally be
* part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
* value the GPIO detects as active. Some systems will use comparators.
* Get the optional D+ or D- pullup GPIO. If the data line pullup is
 * in use, initialize it to "not pulling up".
*/
gpio_vbus->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup",
GPIOD_OUT_LOW);
if (IS_ERR(gpio_vbus->pullup_gpiod)) {
err = PTR_ERR(gpio_vbus->pullup_gpiod);
dev_err(&pdev->dev, "can't request pullup gpio, err: %d\n",
err);
return err;
}
if (gpio_vbus->pullup_gpiod)
gpiod_set_consumer_name(gpio_vbus->pullup_gpiod, "udc_pullup");
err = devm_request_irq(&pdev->dev, irq, gpio_vbus_irq, irqflags,
"vbus_detect", pdev);
if (err) {
dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
irq, err);
return err;
}
INIT_DELAYED_WORK(&gpio_vbus->work, gpio_vbus_work);
gpio_vbus->vbus_draw = devm_regulator_get(&pdev->dev, "vbus_draw");
if (IS_ERR(gpio_vbus->vbus_draw)) {
dev_dbg(&pdev->dev, "can't get vbus_draw regulator, err: %ld\n",
PTR_ERR(gpio_vbus->vbus_draw));
gpio_vbus->vbus_draw = NULL;
}
/* only active when a gadget is registered */
err = usb_add_phy(&gpio_vbus->phy, USB_PHY_TYPE_USB2);
if (err) {
dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
err);
return err;
}
/* TODO: wakeup could be enabled here with device_init_wakeup(dev, 1) */
return 0;
}
static void gpio_vbus_remove(struct platform_device *pdev)
{
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
device_init_wakeup(&pdev->dev, 0);
cancel_delayed_work_sync(&gpio_vbus->work);
usb_remove_phy(&gpio_vbus->phy);
}
#ifdef CONFIG_PM
static int gpio_vbus_pm_suspend(struct device *dev)
{
struct gpio_vbus_data *gpio_vbus = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(gpio_vbus->irq);
return 0;
}
static int gpio_vbus_pm_resume(struct device *dev)
{
struct gpio_vbus_data *gpio_vbus = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(gpio_vbus->irq);
return 0;
}
static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
.suspend = gpio_vbus_pm_suspend,
.resume = gpio_vbus_pm_resume,
};
#endif
MODULE_ALIAS("platform:gpio-vbus");
/*
* NOTE: this driver matches against "gpio-usb-b-connector" for
* devices that do NOT support role switch.
*/
static const struct of_device_id gpio_vbus_of_match[] = {
{
.compatible = "gpio-usb-b-connector",
},
{},
};
static struct platform_driver gpio_vbus_driver = {
.driver = {
.name = "gpio-vbus",
#ifdef CONFIG_PM
.pm = &gpio_vbus_dev_pm_ops,
#endif
.of_match_table = gpio_vbus_of_match,
},
.probe = gpio_vbus_probe,
.remove_new = gpio_vbus_remove,
};
module_platform_driver(gpio_vbus_driver);
MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
MODULE_AUTHOR("Philipp Zabel");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-gpio-vbus-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OMAP OTG controller driver
*
* Based on code from tahvo-usb.c and isp1301_omap.c drivers.
*
* Copyright (C) 2005-2006 Nokia Corporation
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
*/
#include <linux/io.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap1.h>
struct otg_device {
void __iomem *base;
bool id;
bool vbus;
struct extcon_dev *extcon;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
};
#define OMAP_OTG_CTRL 0x0c
#define OMAP_OTG_ASESSVLD (1 << 20)
#define OMAP_OTG_BSESSEND (1 << 19)
#define OMAP_OTG_BSESSVLD (1 << 18)
#define OMAP_OTG_VBUSVLD (1 << 17)
#define OMAP_OTG_ID (1 << 16)
#define OMAP_OTG_XCEIV_OUTPUTS \
(OMAP_OTG_ASESSVLD | OMAP_OTG_BSESSEND | OMAP_OTG_BSESSVLD | \
OMAP_OTG_VBUSVLD | OMAP_OTG_ID)
static void omap_otg_ctrl(struct otg_device *otg_dev, u32 outputs)
{
u32 l;
l = readl(otg_dev->base + OMAP_OTG_CTRL);
l &= ~OMAP_OTG_XCEIV_OUTPUTS;
l |= outputs;
writel(l, otg_dev->base + OMAP_OTG_CTRL);
}
static void omap_otg_set_mode(struct otg_device *otg_dev)
{
if (!otg_dev->id && otg_dev->vbus)
/* Set B-session valid. */
omap_otg_ctrl(otg_dev, OMAP_OTG_ID | OMAP_OTG_BSESSVLD);
else if (otg_dev->vbus)
/* Set A-session valid. */
omap_otg_ctrl(otg_dev, OMAP_OTG_ASESSVLD);
else if (!otg_dev->id)
/* Set B-session end to indicate no VBUS. */
omap_otg_ctrl(otg_dev, OMAP_OTG_ID | OMAP_OTG_BSESSEND);
}
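/*
 * Editor's summary of the mapping above (derived from the code; "id"
 * mirrors EXTCON_USB_HOST, "vbus" mirrors EXTCON_USB):
 *
 *	id=0 vbus=1: B-device with VBUS -> ID high, B-session valid
 *	id=1 vbus=1: A-device (host)    -> A-session valid
 *	id=0 vbus=0: B-device, no VBUS  -> ID high, B-session end
 *	id=1 vbus=0: control register left unchanged
 */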
static int omap_otg_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct otg_device *otg_dev = container_of(nb, struct otg_device, id_nb);
otg_dev->id = event;
omap_otg_set_mode(otg_dev);
return NOTIFY_DONE;
}
static int omap_otg_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct otg_device *otg_dev = container_of(nb, struct otg_device,
vbus_nb);
otg_dev->vbus = event;
omap_otg_set_mode(otg_dev);
return NOTIFY_DONE;
}
static int omap_otg_probe(struct platform_device *pdev)
{
const struct omap_usb_config *config = pdev->dev.platform_data;
struct otg_device *otg_dev;
struct extcon_dev *extcon;
int ret;
u32 rev;
if (!config || !config->extcon)
return -ENODEV;
extcon = extcon_get_extcon_dev(config->extcon);
if (IS_ERR(extcon))
return PTR_ERR(extcon);
otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
if (!otg_dev)
return -ENOMEM;
otg_dev->base = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
if (IS_ERR(otg_dev->base))
return PTR_ERR(otg_dev->base);
otg_dev->extcon = extcon;
otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
ret = devm_extcon_register_notifier(&pdev->dev, extcon,
EXTCON_USB_HOST, &otg_dev->id_nb);
if (ret)
return ret;
ret = devm_extcon_register_notifier(&pdev->dev, extcon,
EXTCON_USB, &otg_dev->vbus_nb);
	if (ret)
		return ret;
otg_dev->id = extcon_get_state(extcon, EXTCON_USB_HOST);
otg_dev->vbus = extcon_get_state(extcon, EXTCON_USB);
omap_otg_set_mode(otg_dev);
rev = readl(otg_dev->base);
dev_info(&pdev->dev,
"OMAP USB OTG controller rev %d.%d (%s, id=%d, vbus=%d)\n",
(rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
otg_dev->vbus);
platform_set_drvdata(pdev, otg_dev);
return 0;
}
static struct platform_driver omap_otg_driver = {
.probe = omap_otg_probe,
.driver = {
.name = "omap_otg",
},
};
module_platform_driver(omap_otg_driver);
MODULE_DESCRIPTION("OMAP USB OTG controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | drivers/usb/phy/phy-omap-otg.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* phy.c -- USB phy handling
*
* Copyright (C) 2004-2013 Texas Instruments
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/usb/phy.h>
/* Default current range by charger type. */
#define DEFAULT_SDP_CUR_MIN 2
#define DEFAULT_SDP_CUR_MAX 500
#define DEFAULT_SDP_CUR_MIN_SS 150
#define DEFAULT_SDP_CUR_MAX_SS 900
#define DEFAULT_DCP_CUR_MIN 500
#define DEFAULT_DCP_CUR_MAX 5000
#define DEFAULT_CDP_CUR_MIN 1500
#define DEFAULT_CDP_CUR_MAX 5000
#define DEFAULT_ACA_CUR_MIN 1500
#define DEFAULT_ACA_CUR_MAX 5000
static LIST_HEAD(phy_list);
static DEFINE_SPINLOCK(phy_lock);
struct phy_devm {
struct usb_phy *phy;
struct notifier_block *nb;
};
static const char *const usb_chger_type[] = {
[UNKNOWN_TYPE] = "USB_CHARGER_UNKNOWN_TYPE",
[SDP_TYPE] = "USB_CHARGER_SDP_TYPE",
[CDP_TYPE] = "USB_CHARGER_CDP_TYPE",
[DCP_TYPE] = "USB_CHARGER_DCP_TYPE",
[ACA_TYPE] = "USB_CHARGER_ACA_TYPE",
};
static const char *const usb_chger_state[] = {
[USB_CHARGER_DEFAULT] = "USB_CHARGER_DEFAULT",
[USB_CHARGER_PRESENT] = "USB_CHARGER_PRESENT",
[USB_CHARGER_ABSENT] = "USB_CHARGER_ABSENT",
};
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
struct usb_phy *phy = NULL;
list_for_each_entry(phy, list, head) {
if (phy->type != type)
continue;
return phy;
}
return ERR_PTR(-ENODEV);
}
static struct usb_phy *__of_usb_find_phy(struct device_node *node)
{
struct usb_phy *phy;
if (!of_device_is_available(node))
return ERR_PTR(-ENODEV);
list_for_each_entry(phy, &phy_list, head) {
if (node != phy->dev->of_node)
continue;
return phy;
}
return ERR_PTR(-EPROBE_DEFER);
}
static struct usb_phy *__device_to_usb_phy(const struct device *dev)
{
struct usb_phy *usb_phy;
list_for_each_entry(usb_phy, &phy_list, head) {
if (usb_phy->dev == dev)
return usb_phy;
}
return NULL;
}
static void usb_phy_set_default_current(struct usb_phy *usb_phy)
{
usb_phy->chg_cur.sdp_min = DEFAULT_SDP_CUR_MIN;
usb_phy->chg_cur.sdp_max = DEFAULT_SDP_CUR_MAX;
usb_phy->chg_cur.dcp_min = DEFAULT_DCP_CUR_MIN;
usb_phy->chg_cur.dcp_max = DEFAULT_DCP_CUR_MAX;
usb_phy->chg_cur.cdp_min = DEFAULT_CDP_CUR_MIN;
usb_phy->chg_cur.cdp_max = DEFAULT_CDP_CUR_MAX;
usb_phy->chg_cur.aca_min = DEFAULT_ACA_CUR_MIN;
usb_phy->chg_cur.aca_max = DEFAULT_ACA_CUR_MAX;
}
/**
* usb_phy_notify_charger_work - notify the USB charger state
* @work: the charger work to notify the USB charger state
*
 * This work can be issued when the USB charger state or the USB charger
 * current has changed; it then notifies power users of the current that
 * can be drawn and reports the charger state to userspace.
 *
 * If we get the charger type from the extcon subsystem, the charger state
 * is reported to power users automatically by usb_phy_get_charger_type(),
 * which is invoked from the extcon subsystem.
 *
 * If we get the charger type from ->charger_detect() instead of extcon,
 * the usb phy driver should call usb_phy_set_charger_state() to update
 * the charger state whenever it changes.
*/
static void usb_phy_notify_charger_work(struct work_struct *work)
{
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
unsigned int min, max;
switch (usb_phy->chg_state) {
case USB_CHARGER_PRESENT:
usb_phy_get_charger_current(usb_phy, &min, &max);
atomic_notifier_call_chain(&usb_phy->notifier, max, usb_phy);
break;
case USB_CHARGER_ABSENT:
usb_phy_set_default_current(usb_phy);
atomic_notifier_call_chain(&usb_phy->notifier, 0, usb_phy);
break;
default:
dev_warn(usb_phy->dev, "Unknown USB charger state: %d\n",
usb_phy->chg_state);
return;
}
kobject_uevent(&usb_phy->dev->kobj, KOBJ_CHANGE);
}
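/*
 * Illustrative sketch (editor's addition): a power-supply driver could
 * consume these notifications roughly as below; the notifier value is the
 * maximum current (in mA) that may be drawn and @data is the usb_phy.
 * Names here are hypothetical.
 */
#if 0
static int example_charger_notify(struct notifier_block *nb,
				  unsigned long max_ma, void *data)
{
	struct usb_phy *phy = data;

	/* reconfigure the charging current limit to max_ma here */
	return NOTIFY_OK;
}
/* registered earlier with: usb_register_notifier(phy, &example_nb); */
#endif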
static int usb_phy_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct usb_phy *usb_phy;
char uchger_state[50] = { 0 };
char uchger_type[50] = { 0 };
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
usb_phy = __device_to_usb_phy(dev);
spin_unlock_irqrestore(&phy_lock, flags);
if (!usb_phy)
return -ENODEV;
snprintf(uchger_state, ARRAY_SIZE(uchger_state),
"USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]);
snprintf(uchger_type, ARRAY_SIZE(uchger_type),
"USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
if (add_uevent_var(env, uchger_state))
return -ENOMEM;
if (add_uevent_var(env, uchger_type))
return -ENOMEM;
return 0;
}
static void __usb_phy_get_charger_type(struct usb_phy *usb_phy)
{
if (extcon_get_state(usb_phy->edev, EXTCON_CHG_USB_SDP) > 0) {
usb_phy->chg_type = SDP_TYPE;
usb_phy->chg_state = USB_CHARGER_PRESENT;
} else if (extcon_get_state(usb_phy->edev, EXTCON_CHG_USB_CDP) > 0) {
usb_phy->chg_type = CDP_TYPE;
usb_phy->chg_state = USB_CHARGER_PRESENT;
} else if (extcon_get_state(usb_phy->edev, EXTCON_CHG_USB_DCP) > 0) {
usb_phy->chg_type = DCP_TYPE;
usb_phy->chg_state = USB_CHARGER_PRESENT;
} else if (extcon_get_state(usb_phy->edev, EXTCON_CHG_USB_ACA) > 0) {
usb_phy->chg_type = ACA_TYPE;
usb_phy->chg_state = USB_CHARGER_PRESENT;
} else {
usb_phy->chg_type = UNKNOWN_TYPE;
usb_phy->chg_state = USB_CHARGER_ABSENT;
}
schedule_work(&usb_phy->chg_work);
}
/**
* usb_phy_get_charger_type - get charger type from extcon subsystem
* @nb: the notifier block to determine charger type
* @state: the cable state
* @data: private data
*
 * Determine the charger type from the extcon subsystem, which also means
 * the charger state has changed; we should then notify this event.
*/
static int usb_phy_get_charger_type(struct notifier_block *nb,
unsigned long state, void *data)
{
struct usb_phy *usb_phy = container_of(nb, struct usb_phy, type_nb);
__usb_phy_get_charger_type(usb_phy);
return NOTIFY_OK;
}
/**
* usb_phy_set_charger_current - set the USB charger current
* @usb_phy: the USB phy to be used
* @mA: the current need to be set
*
 * Usually we only change the charger's default current when USB has finished
 * enumeration as an SDP charger. For an SDP charger, usb_phy_set_power()
 * calls this function to change the charger current after setting the USB
 * configuration, or on USB suspend/resume. For other charger types the
 * default charger current should be used, and calling this function to
 * change it is not recommended.
*
* When USB charger current has been changed, we need to notify the power users.
*/
void usb_phy_set_charger_current(struct usb_phy *usb_phy, unsigned int mA)
{
switch (usb_phy->chg_type) {
case SDP_TYPE:
if (usb_phy->chg_cur.sdp_max == mA)
return;
usb_phy->chg_cur.sdp_max = (mA > DEFAULT_SDP_CUR_MAX_SS) ?
DEFAULT_SDP_CUR_MAX_SS : mA;
break;
case DCP_TYPE:
if (usb_phy->chg_cur.dcp_max == mA)
return;
usb_phy->chg_cur.dcp_max = (mA > DEFAULT_DCP_CUR_MAX) ?
DEFAULT_DCP_CUR_MAX : mA;
break;
case CDP_TYPE:
if (usb_phy->chg_cur.cdp_max == mA)
return;
usb_phy->chg_cur.cdp_max = (mA > DEFAULT_CDP_CUR_MAX) ?
DEFAULT_CDP_CUR_MAX : mA;
break;
case ACA_TYPE:
if (usb_phy->chg_cur.aca_max == mA)
return;
usb_phy->chg_cur.aca_max = (mA > DEFAULT_ACA_CUR_MAX) ?
DEFAULT_ACA_CUR_MAX : mA;
break;
default:
return;
}
schedule_work(&usb_phy->chg_work);
}
EXPORT_SYMBOL_GPL(usb_phy_set_charger_current);
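/*
 * Illustrative sketch (editor's addition): a UDC core path that has just
 * seen SET_CONFIGURATION on an SDP charger might propagate the negotiated
 * budget like this (the "phy" reference is assumed to have been obtained
 * earlier, e.g. via devm_usb_get_phy()):
 */
#if 0
	usb_phy_set_charger_current(phy, 500);	/* bMaxPower -> 500 mA */
#endif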
/**
* usb_phy_get_charger_current - get the USB charger current
* @usb_phy: the USB phy to be used
* @min: the minimum current
* @max: the maximum current
*
 * Usually only the maximum current is reported to power users, but in some
 * special cases a power user also needs the minimum current value. It can
 * then call this function to get the suitable range.
*/
void usb_phy_get_charger_current(struct usb_phy *usb_phy,
unsigned int *min, unsigned int *max)
{
switch (usb_phy->chg_type) {
case SDP_TYPE:
*min = usb_phy->chg_cur.sdp_min;
*max = usb_phy->chg_cur.sdp_max;
break;
case DCP_TYPE:
*min = usb_phy->chg_cur.dcp_min;
*max = usb_phy->chg_cur.dcp_max;
break;
case CDP_TYPE:
*min = usb_phy->chg_cur.cdp_min;
*max = usb_phy->chg_cur.cdp_max;
break;
case ACA_TYPE:
*min = usb_phy->chg_cur.aca_min;
*max = usb_phy->chg_cur.aca_max;
break;
default:
*min = 0;
*max = 0;
break;
}
}
EXPORT_SYMBOL_GPL(usb_phy_get_charger_current);
/**
* usb_phy_set_charger_state - set the USB charger state
* @usb_phy: the USB phy to be used
* @state: the new state need to be set for charger
*
 * The usb phy driver can call this function when it detects that the
 * charger state has changed; in this case the charger type should be
 * obtained from ->charger_detect().
*/
void usb_phy_set_charger_state(struct usb_phy *usb_phy,
enum usb_charger_state state)
{
if (usb_phy->chg_state == state || !usb_phy->charger_detect)
return;
usb_phy->chg_state = state;
if (usb_phy->chg_state == USB_CHARGER_PRESENT)
usb_phy->chg_type = usb_phy->charger_detect(usb_phy);
else
usb_phy->chg_type = UNKNOWN_TYPE;
schedule_work(&usb_phy->chg_work);
}
EXPORT_SYMBOL_GPL(usb_phy_set_charger_state);
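/*
 * Illustrative sketch (editor's addition): a phy driver that implements
 * ->charger_detect() would report plug/unplug events like this, e.g. from
 * its VBUS interrupt thread ("vbus_present" is hypothetical):
 */
#if 0
	if (vbus_present)
		usb_phy_set_charger_state(phy, USB_CHARGER_PRESENT);
	else
		usb_phy_set_charger_state(phy, USB_CHARGER_ABSENT);
#endif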
static void devm_usb_phy_release(struct device *dev, void *res)
{
struct usb_phy *phy = *(struct usb_phy **)res;
usb_put_phy(phy);
}
static void devm_usb_phy_release2(struct device *dev, void *_res)
{
struct phy_devm *res = _res;
if (res->nb)
usb_unregister_notifier(res->phy, res->nb);
usb_put_phy(res->phy);
}
static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
{
struct usb_phy **phy = res;
return *phy == match_data;
}
static void usb_charger_init(struct usb_phy *usb_phy)
{
usb_phy->chg_type = UNKNOWN_TYPE;
usb_phy->chg_state = USB_CHARGER_DEFAULT;
usb_phy_set_default_current(usb_phy);
INIT_WORK(&usb_phy->chg_work, usb_phy_notify_charger_work);
}
static int usb_add_extcon(struct usb_phy *x)
{
int ret;
if (of_property_read_bool(x->dev->of_node, "extcon")) {
x->edev = extcon_get_edev_by_phandle(x->dev, 0);
if (IS_ERR(x->edev))
return PTR_ERR(x->edev);
x->id_edev = extcon_get_edev_by_phandle(x->dev, 1);
if (IS_ERR(x->id_edev)) {
x->id_edev = NULL;
dev_info(x->dev, "No separate ID extcon device\n");
}
if (x->vbus_nb.notifier_call) {
ret = devm_extcon_register_notifier(x->dev, x->edev,
EXTCON_USB,
&x->vbus_nb);
if (ret < 0) {
dev_err(x->dev,
"register VBUS notifier failed\n");
return ret;
}
} else {
x->type_nb.notifier_call = usb_phy_get_charger_type;
ret = devm_extcon_register_notifier(x->dev, x->edev,
EXTCON_CHG_USB_SDP,
&x->type_nb);
if (ret) {
dev_err(x->dev,
"register extcon USB SDP failed.\n");
return ret;
}
ret = devm_extcon_register_notifier(x->dev, x->edev,
EXTCON_CHG_USB_CDP,
&x->type_nb);
if (ret) {
dev_err(x->dev,
"register extcon USB CDP failed.\n");
return ret;
}
ret = devm_extcon_register_notifier(x->dev, x->edev,
EXTCON_CHG_USB_DCP,
&x->type_nb);
if (ret) {
dev_err(x->dev,
"register extcon USB DCP failed.\n");
return ret;
}
ret = devm_extcon_register_notifier(x->dev, x->edev,
EXTCON_CHG_USB_ACA,
&x->type_nb);
if (ret) {
dev_err(x->dev,
"register extcon USB ACA failed.\n");
return ret;
}
}
if (x->id_nb.notifier_call) {
struct extcon_dev *id_ext;
if (x->id_edev)
id_ext = x->id_edev;
else
id_ext = x->edev;
ret = devm_extcon_register_notifier(x->dev, id_ext,
EXTCON_USB_HOST,
&x->id_nb);
if (ret < 0) {
dev_err(x->dev,
"register ID notifier failed\n");
return ret;
}
}
}
if (x->type_nb.notifier_call)
__usb_phy_get_charger_type(x);
return 0;
}
/**
* devm_usb_get_phy - find the USB PHY
* @dev: device that requests this phy
* @type: the type of the phy the controller requires
*
* Gets the phy using usb_get_phy(), and associates a device with it using
* devres. On driver detach, release function is invoked on the devres data,
* then, devres data is freed.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type)
{
struct usb_phy **ptr, *phy;
ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
phy = usb_get_phy(type);
if (!IS_ERR(phy)) {
*ptr = phy;
devres_add(dev, ptr);
} else
devres_free(ptr);
return phy;
}
EXPORT_SYMBOL_GPL(devm_usb_get_phy);
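/*
 * Illustrative sketch (editor's addition): typical use from a host or
 * peripheral controller probe; the reference is released automatically
 * through devres on driver detach:
 */
#if 0
	struct usb_phy *phy;

	phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (IS_ERR(phy))
		return PTR_ERR(phy);
	usb_phy_init(phy);
#endif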
/**
* usb_get_phy - find the USB PHY
* @type: the type of the phy the controller requires
*
* Returns the phy driver, after getting a refcount to it; or
* -ENODEV if there is no such phy. The caller is responsible for
* calling usb_put_phy() to release that count.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
struct usb_phy *phy = NULL;
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
if (!IS_ERR(phy))
phy = ERR_PTR(-ENODEV);
goto err0;
}
get_device(phy->dev);
err0:
spin_unlock_irqrestore(&phy_lock, flags);
return phy;
}
EXPORT_SYMBOL_GPL(usb_get_phy);
/**
* devm_usb_get_phy_by_node - find the USB PHY by device_node
* @dev: device that requests this phy
* @node: the device_node for the phy device.
* @nb: a notifier_block to register with the phy.
*
* Returns the phy driver associated with the given device_node,
* after getting a refcount to it, -ENODEV if there is no such phy or
* -EPROBE_DEFER if the device is not yet loaded. While at that, it
* also associates the device with
* the phy using devres. On driver detach, release function is invoked
* on the devres data, then, devres data is freed.
*
* For use by peripheral drivers for devices related to a phy,
* such as a charger.
*/
struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
struct device_node *node,
struct notifier_block *nb)
{
struct usb_phy *phy = ERR_PTR(-ENOMEM);
struct phy_devm *ptr;
unsigned long flags;
ptr = devres_alloc(devm_usb_phy_release2, sizeof(*ptr), GFP_KERNEL);
if (!ptr) {
dev_dbg(dev, "failed to allocate memory for devres\n");
goto err0;
}
spin_lock_irqsave(&phy_lock, flags);
phy = __of_usb_find_phy(node);
if (IS_ERR(phy)) {
devres_free(ptr);
goto err1;
}
if (!try_module_get(phy->dev->driver->owner)) {
phy = ERR_PTR(-ENODEV);
devres_free(ptr);
goto err1;
}
if (nb)
usb_register_notifier(phy, nb);
ptr->phy = phy;
ptr->nb = nb;
devres_add(dev, ptr);
get_device(phy->dev);
err1:
spin_unlock_irqrestore(&phy_lock, flags);
err0:
return phy;
}
EXPORT_SYMBOL_GPL(devm_usb_get_phy_by_node);
/**
* devm_usb_get_phy_by_phandle - find the USB PHY by phandle
* @dev: device that requests this phy
* @phandle: name of the property holding the phy phandle value
* @index: the index of the phy
*
* Returns the phy driver associated with the given phandle value,
* after getting a refcount to it, -ENODEV if there is no such phy or
* -EPROBE_DEFER if there is a phandle to the phy, but the device is
* not yet loaded. While at that, it also associates the device with
* the phy using devres. On driver detach, release function is invoked
* on the devres data, then, devres data is freed.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index)
{
struct device_node *node;
struct usb_phy *phy;
if (!dev->of_node) {
dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
node = of_parse_phandle(dev->of_node, phandle, index);
if (!node) {
dev_dbg(dev, "failed to get %s phandle in %pOF node\n", phandle,
dev->of_node);
return ERR_PTR(-ENODEV);
}
phy = devm_usb_get_phy_by_node(dev, node, NULL);
of_node_put(node);
return phy;
}
EXPORT_SYMBOL_GPL(devm_usb_get_phy_by_phandle);
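/*
 * Illustrative sketch (editor's addition): given a controller node with a
 * phy phandle property such as
 *
 *	usb@0 {
 *		usb-phy = <&usbphy0>;
 *	};
 *
 * the controller driver would resolve it as follows:
 */
#if 0
	phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy);
#endif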
/**
* devm_usb_put_phy - release the USB PHY
* @dev: device that wants to release this phy
* @phy: the phy returned by devm_usb_get_phy()
*
* destroys the devres associated with this phy and invokes usb_put_phy
* to release the phy.
*
* For use by USB host and peripheral drivers.
*/
void devm_usb_put_phy(struct device *dev, struct usb_phy *phy)
{
int r;
r = devres_destroy(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_usb_put_phy);
/**
* usb_put_phy - release the USB PHY
* @x: the phy returned by usb_get_phy()
*
* Releases a refcount the caller received from usb_get_phy().
*
* For use by USB host and peripheral drivers.
*/
void usb_put_phy(struct usb_phy *x)
{
if (x) {
struct module *owner = x->dev->driver->owner;
put_device(x->dev);
module_put(owner);
}
}
EXPORT_SYMBOL_GPL(usb_put_phy);
/**
 * usb_add_phy - declare the USB PHY
* @x: the USB phy to be used; or NULL
* @type: the type of this PHY
*
* This call is exclusively for use by phy drivers, which
* coordinate the activities of drivers for host and peripheral
* controllers, and in some cases for VBUS current regulation.
*/
int usb_add_phy(struct usb_phy *x, enum usb_phy_type type)
{
int ret = 0;
unsigned long flags;
struct usb_phy *phy;
if (x->type != USB_PHY_TYPE_UNDEFINED) {
dev_err(x->dev, "not accepting initialized PHY %s\n", x->label);
return -EINVAL;
}
usb_charger_init(x);
ret = usb_add_extcon(x);
if (ret)
return ret;
ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
spin_lock_irqsave(&phy_lock, flags);
list_for_each_entry(phy, &phy_list, head) {
if (phy->type == type) {
ret = -EBUSY;
dev_err(x->dev, "transceiver type %s already exists\n",
usb_phy_type_string(type));
goto out;
}
}
x->type = type;
list_add_tail(&x->head, &phy_list);
out:
spin_unlock_irqrestore(&phy_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(usb_add_phy);
static struct device_type usb_phy_dev_type = {
.name = "usb_phy",
.uevent = usb_phy_uevent,
};
/**
* usb_add_phy_dev - declare the USB PHY
* @x: the USB phy to be used; or NULL
*
* This call is exclusively for use by phy drivers, which
* coordinate the activities of drivers for host and peripheral
* controllers, and in some cases for VBUS current regulation.
*/
int usb_add_phy_dev(struct usb_phy *x)
{
unsigned long flags;
int ret;
if (!x->dev) {
		pr_err("no device provided for PHY\n");
return -EINVAL;
}
usb_charger_init(x);
ret = usb_add_extcon(x);
if (ret)
return ret;
x->dev->type = &usb_phy_dev_type;
ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
spin_lock_irqsave(&phy_lock, flags);
list_add_tail(&x->head, &phy_list);
spin_unlock_irqrestore(&phy_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(usb_add_phy_dev);
/**
* usb_remove_phy - remove the OTG PHY
* @x: the USB OTG PHY to be removed;
*
* This reverts the effects of usb_add_phy
*/
void usb_remove_phy(struct usb_phy *x)
{
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
if (x)
list_del(&x->head);
spin_unlock_irqrestore(&phy_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_remove_phy);
/**
* usb_phy_set_event - set event to phy event
* @x: the phy returned by usb_get_phy();
* @event: event to set
*
* This sets event to phy event
*/
void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
x->last_event = event;
}
EXPORT_SYMBOL_GPL(usb_phy_set_event);
| linux-master | drivers/usb/phy/phy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007,2008 Freescale semiconductor, Inc.
*
* Author: Li Yang <[email protected]>
* Jerry Huang <[email protected]>
*
* Initialization based on code from Shlomi Gridish.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include "phy-fsl-usb.h"
#ifdef VERBOSE
#define VDBG(fmt, args...) pr_debug("[%s] " fmt, \
__func__, ## args)
#else
#define VDBG(stuff...) do {} while (0)
#endif
#define DRIVER_VERSION "Rev. 1.55"
#define DRIVER_AUTHOR "Jerry Huang/Li Yang"
#define DRIVER_DESC "Freescale USB OTG Transceiver Driver"
#define DRIVER_INFO DRIVER_DESC " " DRIVER_VERSION
static const char driver_name[] = "fsl-usb2-otg";
const pm_message_t otg_suspend_state = {
.event = 1,
};
#define HA_DATA_PULSE
static struct usb_dr_mmap *usb_dr_regs;
static struct fsl_otg *fsl_otg_dev;
static int srp_wait_done;
/* FSM timers */
struct fsl_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr, *a_aidl_bdis_tmr,
*b_ase0_brst_tmr, *b_se0_srp_tmr;
/* Driver specific timers */
struct fsl_otg_timer *b_data_pulse_tmr, *b_vbus_pulse_tmr, *b_srp_fail_tmr,
*b_srp_wait_tmr, *a_wait_enum_tmr;
static struct list_head active_timers;
static const struct fsl_otg_config fsl_otg_initdata = {
.otg_port = 1,
};
#ifdef CONFIG_PPC32
static u32 _fsl_readl_be(const unsigned __iomem *p)
{
return in_be32(p);
}
static u32 _fsl_readl_le(const unsigned __iomem *p)
{
return in_le32(p);
}
static void _fsl_writel_be(u32 v, unsigned __iomem *p)
{
out_be32(p, v);
}
static void _fsl_writel_le(u32 v, unsigned __iomem *p)
{
out_le32(p, v);
}
static u32 (*_fsl_readl)(const unsigned __iomem *p);
static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_readl(p) (*_fsl_readl)((p))
#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
#else
#define fsl_readl(addr) readl(addr)
#define fsl_writel(val, addr) writel(val, addr)
#endif /* CONFIG_PPC32 */
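/*
 * Editor's note: on PPC32 the controller registers may be wired big- or
 * little-endian, so the accessors are indirected through function
 * pointers; _fsl_readl/_fsl_writel are presumably pointed at the _be or
 * _le variants during probe, based on the platform's endianness setup.
 */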
int write_ulpi(u8 addr, u8 data)
{
u32 temp;
temp = 0x60000000 | (addr << 16) | data;
fsl_writel(temp, &usb_dr_regs->ulpiview);
return 0;
}
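/*
 * Editor's note (assumption, based on the standard ULPI viewport layout):
 * 0x60000000 sets the viewport RUN (bit 30) and write (bit 29) control
 * bits, so the access above writes @data to ULPI register @addr.
 */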
/* -------------------------------------------------------------*/
/* Operations that will be called from OTG Finite State Machine */
/* Charge vbus for vbus pulsing in SRP */
void fsl_otg_chrg_vbus(struct otg_fsm *fsm, int on)
{
u32 tmp;
tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK;
if (on)
/* stop discharging, start charging */
tmp = (tmp & ~OTGSC_CTRL_VBUS_DISCHARGE) |
OTGSC_CTRL_VBUS_CHARGE;
else
/* stop charging */
tmp &= ~OTGSC_CTRL_VBUS_CHARGE;
fsl_writel(tmp, &usb_dr_regs->otgsc);
}
/* Discharge vbus through a resistor to ground */
void fsl_otg_dischrg_vbus(int on)
{
u32 tmp;
tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK;
if (on)
/* stop charging, start discharging */
tmp = (tmp & ~OTGSC_CTRL_VBUS_CHARGE) |
OTGSC_CTRL_VBUS_DISCHARGE;
else
/* stop discharging */
tmp &= ~OTGSC_CTRL_VBUS_DISCHARGE;
fsl_writel(tmp, &usb_dr_regs->otgsc);
}
/* A-device driver vbus, controlled through PP bit in PORTSC */
void fsl_otg_drv_vbus(struct otg_fsm *fsm, int on)
{
u32 tmp;
if (on) {
tmp = fsl_readl(&usb_dr_regs->portsc) & ~PORTSC_W1C_BITS;
fsl_writel(tmp | PORTSC_PORT_POWER, &usb_dr_regs->portsc);
} else {
tmp = fsl_readl(&usb_dr_regs->portsc) &
~PORTSC_W1C_BITS & ~PORTSC_PORT_POWER;
fsl_writel(tmp, &usb_dr_regs->portsc);
}
}
/*
 * Pull-up D+, signalling connect by peripheral. Also used in
* data-line pulsing in SRP
*/
void fsl_otg_loc_conn(struct otg_fsm *fsm, int on)
{
u32 tmp;
tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK;
if (on)
tmp |= OTGSC_CTRL_DATA_PULSING;
else
tmp &= ~OTGSC_CTRL_DATA_PULSING;
fsl_writel(tmp, &usb_dr_regs->otgsc);
}
/*
* Generate SOF by host. This is controlled through suspend/resume the
* port. In host mode, controller will automatically send SOF.
* Suspend will block the data on the port.
*/
void fsl_otg_loc_sof(struct otg_fsm *fsm, int on)
{
u32 tmp;
tmp = fsl_readl(&fsl_otg_dev->dr_mem_map->portsc) & ~PORTSC_W1C_BITS;
if (on)
tmp |= PORTSC_PORT_FORCE_RESUME;
else
tmp |= PORTSC_PORT_SUSPEND;
fsl_writel(tmp, &fsl_otg_dev->dr_mem_map->portsc);
}
/* Start SRP pulsing by data-line pulsing, followed with v-bus pulsing. */
void fsl_otg_start_pulse(struct otg_fsm *fsm)
{
u32 tmp;
srp_wait_done = 0;
#ifdef HA_DATA_PULSE
tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK;
tmp |= OTGSC_HA_DATA_PULSE;
fsl_writel(tmp, &usb_dr_regs->otgsc);
#else
	fsl_otg_loc_conn(fsm, 1);
#endif
fsl_otg_add_timer(fsm, b_data_pulse_tmr);
}
void b_data_pulse_end(unsigned long foo)
{
#ifndef HA_DATA_PULSE
	fsl_otg_loc_conn(&fsl_otg_dev->fsm, 0);
#endif
/* Do VBUS pulse after data pulse */
fsl_otg_pulse_vbus();
}
void fsl_otg_pulse_vbus(void)
{
srp_wait_done = 0;
fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 1);
/* start the timer to end vbus charge */
fsl_otg_add_timer(&fsl_otg_dev->fsm, b_vbus_pulse_tmr);
}
void b_vbus_pulse_end(unsigned long foo)
{
fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 0);
/*
* As USB3300 using the same a_sess_vld and b_sess_vld voltage
* we need to discharge the bus for a while to distinguish
* residual voltage of vbus pulsing and A device pull up
*/
fsl_otg_dischrg_vbus(1);
fsl_otg_add_timer(&fsl_otg_dev->fsm, b_srp_wait_tmr);
}
void b_srp_end(unsigned long foo)
{
fsl_otg_dischrg_vbus(0);
srp_wait_done = 1;
if ((fsl_otg_dev->phy.otg->state == OTG_STATE_B_SRP_INIT) &&
fsl_otg_dev->fsm.b_sess_vld)
fsl_otg_dev->fsm.b_srp_done = 1;
}
/*
* Workaround for a_host suspending too fast. When a_bus_req=0,
* a_host will start by SRP. It needs to set b_hnp_enable before
* actually suspending to start HNP
*/
void a_wait_enum(unsigned long foo)
{
VDBG("a_wait_enum timeout\n");
if (!fsl_otg_dev->phy.otg->host->b_hnp_enable)
fsl_otg_add_timer(&fsl_otg_dev->fsm, a_wait_enum_tmr);
else
otg_statemachine(&fsl_otg_dev->fsm);
}
/* The timeout callback function to set time out bit */
void set_tmout(unsigned long indicator)
{
*(int *)indicator = 1;
}
/* Initialize timers */
int fsl_otg_init_timers(struct otg_fsm *fsm)
{
/* FSM used timers */
a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
(unsigned long)&fsm->a_wait_vrise_tmout);
if (!a_wait_vrise_tmr)
return -ENOMEM;
a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
(unsigned long)&fsm->a_wait_bcon_tmout);
if (!a_wait_bcon_tmr)
return -ENOMEM;
a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
(unsigned long)&fsm->a_aidl_bdis_tmout);
if (!a_aidl_bdis_tmr)
return -ENOMEM;
b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
(unsigned long)&fsm->b_ase0_brst_tmout);
if (!b_ase0_brst_tmr)
return -ENOMEM;
b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
(unsigned long)&fsm->b_se0_srp);
if (!b_se0_srp_tmr)
return -ENOMEM;
b_srp_fail_tmr = otg_timer_initializer(&set_tmout, TB_SRP_FAIL,
(unsigned long)&fsm->b_srp_done);
if (!b_srp_fail_tmr)
return -ENOMEM;
a_wait_enum_tmr = otg_timer_initializer(&a_wait_enum, 10,
(unsigned long)&fsm);
if (!a_wait_enum_tmr)
return -ENOMEM;
/* device driver used timers */
b_srp_wait_tmr = otg_timer_initializer(&b_srp_end, TB_SRP_WAIT, 0);
if (!b_srp_wait_tmr)
return -ENOMEM;
b_data_pulse_tmr = otg_timer_initializer(&b_data_pulse_end,
TB_DATA_PLS, 0);
if (!b_data_pulse_tmr)
return -ENOMEM;
b_vbus_pulse_tmr = otg_timer_initializer(&b_vbus_pulse_end,
TB_VBUS_PLS, 0);
if (!b_vbus_pulse_tmr)
return -ENOMEM;
return 0;
}
/* Uninitialize timers */
void fsl_otg_uninit_timers(void)
{
/* FSM used timers */
kfree(a_wait_vrise_tmr);
kfree(a_wait_bcon_tmr);
kfree(a_aidl_bdis_tmr);
kfree(b_ase0_brst_tmr);
kfree(b_se0_srp_tmr);
kfree(b_srp_fail_tmr);
kfree(a_wait_enum_tmr);
/* device driver used timers */
kfree(b_srp_wait_tmr);
kfree(b_data_pulse_tmr);
kfree(b_vbus_pulse_tmr);
}
static struct fsl_otg_timer *fsl_otg_get_timer(enum otg_fsm_timer t)
{
struct fsl_otg_timer *timer;
/* REVISIT: use array of pointers to timers instead */
switch (t) {
case A_WAIT_VRISE:
timer = a_wait_vrise_tmr;
break;
	case A_WAIT_BCON:
		timer = a_wait_bcon_tmr;
		break;
	case A_AIDL_BDIS:
		timer = a_aidl_bdis_tmr;
		break;
	case B_ASE0_BRST:
		timer = b_ase0_brst_tmr;
		break;
	case B_SE0_SRP:
		timer = b_se0_srp_tmr;
		break;
	case B_SRP_FAIL:
		timer = b_srp_fail_tmr;
		break;
	case A_WAIT_ENUM:
		timer = a_wait_enum_tmr;
		break;
default:
timer = NULL;
}
return timer;
}
/* Add timer to timer list */
void fsl_otg_add_timer(struct otg_fsm *fsm, void *gtimer)
{
struct fsl_otg_timer *timer = gtimer;
struct fsl_otg_timer *tmp_timer;
/*
* Check if the timer is already in the active list,
* if so update timer count
*/
list_for_each_entry(tmp_timer, &active_timers, list)
if (tmp_timer == timer) {
timer->count = timer->expires;
return;
}
timer->count = timer->expires;
list_add_tail(&timer->list, &active_timers);
}
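/*
 * Editor's note: these are software timers; "count" is (re)loaded from
 * "expires" here and is presumably decremented by a periodic tick
 * elsewhere in this driver until the timer's callback fires.
 */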
static void fsl_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
{
struct fsl_otg_timer *timer;
timer = fsl_otg_get_timer(t);
if (!timer)
return;
fsl_otg_add_timer(fsm, timer);
}
/* Remove timer from the timer list; clear timeout status */
void fsl_otg_del_timer(struct otg_fsm *fsm, void *gtimer)
{
struct fsl_otg_timer *timer = gtimer;
struct fsl_otg_timer *tmp_timer, *del_tmp;
list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
if (tmp_timer == timer)
list_del(&timer->list);
}
static void fsl_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
{
struct fsl_otg_timer *timer;
timer = fsl_otg_get_timer(t);
if (!timer)
return;
fsl_otg_del_timer(fsm, timer);
}
/* Reset controller, not reset the bus */
void otg_reset_controller(void)
{
u32 command;
command = fsl_readl(&usb_dr_regs->usbcmd);
command |= (1 << 1);
fsl_writel(command, &usb_dr_regs->usbcmd);
while (fsl_readl(&usb_dr_regs->usbcmd) & (1 << 1))
;
}
/* Call suspend/resume routines in host driver */
int fsl_otg_start_host(struct otg_fsm *fsm, int on)
{
struct usb_otg *otg = fsm->otg;
struct device *dev;
struct fsl_otg *otg_dev =
container_of(otg->usb_phy, struct fsl_otg, phy);
u32 retval = 0;
if (!otg->host)
return -ENODEV;
dev = otg->host->controller;
/*
* Update a_vbus_vld state as a_vbus_vld int is disabled
* in device mode
*/
fsm->a_vbus_vld =
!!(fsl_readl(&usb_dr_regs->otgsc) & OTGSC_STS_A_VBUS_VALID);
if (on) {
/* start fsl usb host controller */
if (otg_dev->host_working)
goto end;
else {
otg_reset_controller();
VDBG("host on......\n");
if (dev->driver->pm && dev->driver->pm->resume) {
retval = dev->driver->pm->resume(dev);
if (fsm->id) {
/* default-b */
fsl_otg_drv_vbus(fsm, 1);
/*
				 * Workaround: b_host can't drive
* vbus, but PP in PORTSC needs to
* be 1 for host to work.
* So we set drv_vbus bit in
* transceiver to 0 thru ULPI.
*/
write_ulpi(0x0c, 0x20);
}
}
otg_dev->host_working = 1;
}
} else {
/* stop fsl usb host controller */
if (!otg_dev->host_working)
goto end;
else {
VDBG("host off......\n");
if (dev && dev->driver) {
if (dev->driver->pm && dev->driver->pm->suspend)
retval = dev->driver->pm->suspend(dev);
if (fsm->id)
/* default-b */
fsl_otg_drv_vbus(fsm, 0);
}
otg_dev->host_working = 0;
}
}
end:
return retval;
}
/*
* Call suspend and resume function in udc driver
* to stop and start udc driver.
*/
int fsl_otg_start_gadget(struct otg_fsm *fsm, int on)
{
struct usb_otg *otg = fsm->otg;
struct device *dev;
if (!otg->gadget || !otg->gadget->dev.parent)
return -ENODEV;
VDBG("gadget %s\n", on ? "on" : "off");
dev = otg->gadget->dev.parent;
if (on) {
if (dev->driver->resume)
dev->driver->resume(dev);
} else {
if (dev->driver->suspend)
dev->driver->suspend(dev, otg_suspend_state);
}
return 0;
}
/*
* Called by initialization code of host driver. Register host controller
* to the OTG. Suspend host for OTG role detection.
*/
static int fsl_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct fsl_otg *otg_dev;
if (!otg)
return -ENODEV;
otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
otg->host = host;
otg_dev->fsm.a_bus_drop = 0;
otg_dev->fsm.a_bus_req = 1;
if (host) {
VDBG("host off......\n");
otg->host->otg_port = fsl_otg_initdata.otg_port;
otg->host->is_b_host = otg_dev->fsm.id;
/*
* must leave time for hub_wq to finish its thing
* before yanking the host driver out from under it,
* so suspend the host after a short delay.
*/
otg_dev->host_working = 1;
schedule_delayed_work(&otg_dev->otg_event, 100);
return 0;
} else {
/* host driver going away */
if (!(fsl_readl(&otg_dev->dr_mem_map->otgsc) &
OTGSC_STS_USB_ID)) {
/* Mini-A cable connected */
struct otg_fsm *fsm = &otg_dev->fsm;
otg->state = OTG_STATE_UNDEFINED;
fsm->protocol = PROTO_UNDEF;
}
}
otg_dev->host_working = 0;
otg_statemachine(&otg_dev->fsm);
return 0;
}
/* Called by initialization code of udc. Register udc to OTG. */
static int fsl_otg_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct fsl_otg *otg_dev;
if (!otg)
return -ENODEV;
otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
VDBG("otg_dev 0x%x\n", (int)otg_dev);
VDBG("fsl_otg_dev 0x%x\n", (int)fsl_otg_dev);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
if (!gadget) {
if (!otg->default_a)
otg->gadget->ops->vbus_draw(otg->gadget, 0);
usb_gadget_vbus_disconnect(otg->gadget);
		otg->gadget = NULL;
otg_dev->fsm.b_bus_req = 0;
otg_statemachine(&otg_dev->fsm);
return 0;
}
otg->gadget = gadget;
otg->gadget->is_a_peripheral = !otg_dev->fsm.id;
otg_dev->fsm.b_bus_req = 1;
/* start the gadget right away if the ID pin says Mini-B */
pr_debug("ID pin=%d\n", otg_dev->fsm.id);
if (otg_dev->fsm.id == 1) {
fsl_otg_start_host(&otg_dev->fsm, 0);
otg_drv_vbus(&otg_dev->fsm, 0);
fsl_otg_start_gadget(&otg_dev->fsm, 1);
}
return 0;
}
/*
* Delayed pin detect interrupt processing.
*
* When the Mini-A cable is disconnected from the board,
* the pin-detect interrupt happens before the disconnect
* interrupts for the connected device(s). In order to
* process the disconnect interrupt(s) prior to switching
* roles, the pin-detect interrupts are delayed, and handled
* by this routine.
*/
static void fsl_otg_event(struct work_struct *work)
{
struct fsl_otg *og = container_of(work, struct fsl_otg, otg_event.work);
struct otg_fsm *fsm = &og->fsm;
if (fsm->id) { /* switch to gadget */
fsl_otg_start_host(fsm, 0);
otg_drv_vbus(fsm, 0);
fsl_otg_start_gadget(fsm, 1);
}
}
/* B-device start SRP */
static int fsl_otg_start_srp(struct usb_otg *otg)
{
struct fsl_otg *otg_dev;
if (!otg || otg->state != OTG_STATE_B_IDLE)
return -ENODEV;
otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
otg_dev->fsm.b_bus_req = 1;
otg_statemachine(&otg_dev->fsm);
return 0;
}
/* A_host suspend will call this function to start hnp */
static int fsl_otg_start_hnp(struct usb_otg *otg)
{
struct fsl_otg *otg_dev;
if (!otg)
return -ENODEV;
otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
pr_debug("start_hnp...\n");
/* clear a_bus_req to enter a_suspend state */
otg_dev->fsm.a_bus_req = 0;
otg_statemachine(&otg_dev->fsm);
return 0;
}
/*
* Interrupt handler. OTG/host/peripheral share the same int line.
* OTG driver clears OTGSC interrupts and leaves USB interrupts
* intact. It needs to have knowledge of some USB interrupts
* such as port change.
*/
irqreturn_t fsl_otg_isr(int irq, void *dev_id)
{
struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm;
struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg;
u32 otg_int_src, otg_sc;
otg_sc = fsl_readl(&usb_dr_regs->otgsc);
otg_int_src = otg_sc & OTGSC_INTSTS_MASK & (otg_sc >> 8);
/* Only clear otg interrupts */
fsl_writel(otg_sc, &usb_dr_regs->otgsc);
	/* FIXME: no ID change interrupt is generated when the ID is 0 at init */
fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 1 : 0;
otg->default_a = (fsm->id == 0);
/* process OTG interrupts */
if (otg_int_src) {
if (otg_int_src & OTGSC_INTSTS_USB_ID) {
fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 1 : 0;
otg->default_a = (fsm->id == 0);
/* clear conn information */
if (fsm->id)
fsm->b_conn = 0;
else
fsm->a_conn = 0;
if (otg->host)
otg->host->is_b_host = fsm->id;
if (otg->gadget)
otg->gadget->is_a_peripheral = !fsm->id;
VDBG("ID int (ID is %d)\n", fsm->id);
if (fsm->id) { /* switch to gadget */
schedule_delayed_work(
&((struct fsl_otg *)dev_id)->otg_event,
100);
} else { /* switch to host */
cancel_delayed_work(&
((struct fsl_otg *)dev_id)->
otg_event);
fsl_otg_start_gadget(fsm, 0);
otg_drv_vbus(fsm, 1);
fsl_otg_start_host(fsm, 1);
}
return IRQ_HANDLED;
}
}
return IRQ_NONE;
}
static struct otg_fsm_ops fsl_otg_ops = {
.chrg_vbus = fsl_otg_chrg_vbus,
.drv_vbus = fsl_otg_drv_vbus,
.loc_conn = fsl_otg_loc_conn,
.loc_sof = fsl_otg_loc_sof,
.start_pulse = fsl_otg_start_pulse,
.add_timer = fsl_otg_fsm_add_timer,
.del_timer = fsl_otg_fsm_del_timer,
.start_host = fsl_otg_start_host,
.start_gadget = fsl_otg_start_gadget,
};
/* Initialize the global variable fsl_otg_dev and request IRQ for OTG */
static int fsl_otg_conf(struct platform_device *pdev)
{
struct fsl_otg *fsl_otg_tc;
int status;
if (fsl_otg_dev)
return 0;
	/* allocate space for the fsl otg device */
fsl_otg_tc = kzalloc(sizeof(struct fsl_otg), GFP_KERNEL);
if (!fsl_otg_tc)
return -ENOMEM;
fsl_otg_tc->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
if (!fsl_otg_tc->phy.otg) {
kfree(fsl_otg_tc);
return -ENOMEM;
}
INIT_DELAYED_WORK(&fsl_otg_tc->otg_event, fsl_otg_event);
INIT_LIST_HEAD(&active_timers);
status = fsl_otg_init_timers(&fsl_otg_tc->fsm);
if (status) {
pr_info("Couldn't init OTG timers\n");
goto err;
}
mutex_init(&fsl_otg_tc->fsm.lock);
/* Set OTG state machine operations */
fsl_otg_tc->fsm.ops = &fsl_otg_ops;
/* initialize the otg structure */
fsl_otg_tc->phy.label = DRIVER_DESC;
fsl_otg_tc->phy.dev = &pdev->dev;
fsl_otg_tc->phy.otg->usb_phy = &fsl_otg_tc->phy;
fsl_otg_tc->phy.otg->set_host = fsl_otg_set_host;
fsl_otg_tc->phy.otg->set_peripheral = fsl_otg_set_peripheral;
fsl_otg_tc->phy.otg->start_hnp = fsl_otg_start_hnp;
fsl_otg_tc->phy.otg->start_srp = fsl_otg_start_srp;
fsl_otg_dev = fsl_otg_tc;
/* Store the otg transceiver */
status = usb_add_phy(&fsl_otg_tc->phy, USB_PHY_TYPE_USB2);
if (status) {
pr_warn(FSL_OTG_NAME ": unable to register OTG transceiver.\n");
goto err;
}
return 0;
err:
fsl_otg_uninit_timers();
kfree(fsl_otg_tc->phy.otg);
kfree(fsl_otg_tc);
return status;
}
/* OTG Initialization */
int usb_otg_start(struct platform_device *pdev)
{
struct fsl_otg *p_otg;
struct usb_phy *otg_trans = usb_get_phy(USB_PHY_TYPE_USB2);
struct otg_fsm *fsm;
int status;
struct resource *res;
u32 temp;
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
p_otg = container_of(otg_trans, struct fsl_otg, phy);
fsm = &p_otg->fsm;
/* Initialize the state machine structure with default values */
SET_OTG_STATE(otg_trans, OTG_STATE_UNDEFINED);
fsm->otg = p_otg->phy.otg;
/* We don't require predefined MEM/IRQ resource index */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
/* We don't request_mem_region here to enable resource sharing
* with host/device */
usb_dr_regs = ioremap(res->start, sizeof(struct usb_dr_mmap));
p_otg->dr_mem_map = (struct usb_dr_mmap *)usb_dr_regs;
pdata->regs = (void *)usb_dr_regs;
if (pdata->init && pdata->init(pdev) != 0)
return -EINVAL;
#ifdef CONFIG_PPC32
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
} else {
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
#endif
/* request irq */
p_otg->irq = platform_get_irq(pdev, 0);
if (p_otg->irq < 0)
return p_otg->irq;
status = request_irq(p_otg->irq, fsl_otg_isr,
IRQF_SHARED, driver_name, p_otg);
if (status) {
dev_dbg(p_otg->phy.dev, "can't get IRQ %d, error %d\n",
p_otg->irq, status);
iounmap(p_otg->dr_mem_map);
kfree(p_otg->phy.otg);
kfree(p_otg);
return status;
}
/* stop the controller */
temp = fsl_readl(&p_otg->dr_mem_map->usbcmd);
temp &= ~USB_CMD_RUN_STOP;
fsl_writel(temp, &p_otg->dr_mem_map->usbcmd);
/* reset the controller */
temp = fsl_readl(&p_otg->dr_mem_map->usbcmd);
temp |= USB_CMD_CTRL_RESET;
fsl_writel(temp, &p_otg->dr_mem_map->usbcmd);
	/* wait for the reset to complete */
while (fsl_readl(&p_otg->dr_mem_map->usbcmd) & USB_CMD_CTRL_RESET)
;
	/* configure the VBUSHS as IDLE (both host and device) */
temp = USB_MODE_STREAM_DISABLE | (pdata->es ? USB_MODE_ES : 0);
fsl_writel(temp, &p_otg->dr_mem_map->usbmode);
/* configure PHY interface */
temp = fsl_readl(&p_otg->dr_mem_map->portsc);
temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW);
switch (pdata->phy_mode) {
case FSL_USB2_PHY_ULPI:
temp |= PORTSC_PTS_ULPI;
break;
case FSL_USB2_PHY_UTMI_WIDE:
temp |= PORTSC_PTW_16BIT;
fallthrough;
case FSL_USB2_PHY_UTMI:
temp |= PORTSC_PTS_UTMI;
fallthrough;
default:
break;
}
fsl_writel(temp, &p_otg->dr_mem_map->portsc);
if (pdata->have_sysif_regs) {
/* configure control enable IO output, big endian register */
temp = __raw_readl(&p_otg->dr_mem_map->control);
temp |= USB_CTRL_IOENB;
__raw_writel(temp, &p_otg->dr_mem_map->control);
}
	/* disable all interrupts and clear all OTGSC status bits */
temp = fsl_readl(&p_otg->dr_mem_map->otgsc);
temp &= ~OTGSC_INTERRUPT_ENABLE_BITS_MASK;
temp |= OTGSC_INTERRUPT_STATUS_BITS_MASK | OTGSC_CTRL_VBUS_DISCHARGE;
fsl_writel(temp, &p_otg->dr_mem_map->otgsc);
	/*
	 * The identification (id) input is FALSE when a Mini-A plug is inserted
	 * into the device's Mini-AB receptacle. Otherwise, this input is TRUE.
	 * Also: record the initial state of the ID pin.
	 */
if (fsl_readl(&p_otg->dr_mem_map->otgsc) & OTGSC_STS_USB_ID) {
p_otg->phy.otg->state = OTG_STATE_UNDEFINED;
p_otg->fsm.id = 1;
} else {
p_otg->phy.otg->state = OTG_STATE_A_IDLE;
p_otg->fsm.id = 0;
}
pr_debug("initial ID pin=%d\n", p_otg->fsm.id);
/* enable OTG ID pin interrupt */
temp = fsl_readl(&p_otg->dr_mem_map->otgsc);
temp |= OTGSC_INTR_USB_ID_EN;
temp &= ~(OTGSC_CTRL_VBUS_DISCHARGE | OTGSC_INTR_1MS_TIMER_EN);
fsl_writel(temp, &p_otg->dr_mem_map->otgsc);
return 0;
}
static int fsl_otg_probe(struct platform_device *pdev)
{
int ret;
if (!dev_get_platdata(&pdev->dev))
return -ENODEV;
/* configure the OTG */
ret = fsl_otg_conf(pdev);
if (ret) {
dev_err(&pdev->dev, "Couldn't configure OTG module\n");
return ret;
}
/* start OTG */
ret = usb_otg_start(pdev);
if (ret) {
dev_err(&pdev->dev, "Can't init FSL OTG device\n");
return ret;
}
return ret;
}
static void fsl_otg_remove(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
usb_remove_phy(&fsl_otg_dev->phy);
free_irq(fsl_otg_dev->irq, fsl_otg_dev);
iounmap((void *)usb_dr_regs);
fsl_otg_uninit_timers();
kfree(fsl_otg_dev->phy.otg);
kfree(fsl_otg_dev);
if (pdata->exit)
pdata->exit(pdev);
}
struct platform_driver fsl_otg_driver = {
.probe = fsl_otg_probe,
.remove_new = fsl_otg_remove,
.driver = {
.name = driver_name,
.owner = THIS_MODULE,
},
};
module_platform_driver(fsl_otg_driver);
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-fsl-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Google, Inc.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#define ULPI_VIEW_WAKEUP (1 << 31)
#define ULPI_VIEW_RUN (1 << 30)
#define ULPI_VIEW_WRITE (1 << 29)
#define ULPI_VIEW_READ (0 << 29)
#define ULPI_VIEW_ADDR(x) (((x) & 0xff) << 16)
#define ULPI_VIEW_DATA_READ(x) (((x) >> 8) & 0xff)
#define ULPI_VIEW_DATA_WRITE(x) ((x) & 0xff)
static int ulpi_viewport_wait(void __iomem *view, u32 mask)
{
u32 val;
return readl_poll_timeout_atomic(view, val, !(val & mask), 1, 2000);
}
static int ulpi_viewport_read(struct usb_phy *otg, u32 reg)
{
int ret;
void __iomem *view = otg->io_priv;
writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
if (ret)
return ret;
writel(ULPI_VIEW_RUN | ULPI_VIEW_READ | ULPI_VIEW_ADDR(reg), view);
ret = ulpi_viewport_wait(view, ULPI_VIEW_RUN);
if (ret)
return ret;
return ULPI_VIEW_DATA_READ(readl(view));
}
static int ulpi_viewport_write(struct usb_phy *otg, u32 val, u32 reg)
{
int ret;
void __iomem *view = otg->io_priv;
writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
if (ret)
return ret;
writel(ULPI_VIEW_RUN | ULPI_VIEW_WRITE | ULPI_VIEW_DATA_WRITE(val) |
ULPI_VIEW_ADDR(reg), view);
return ulpi_viewport_wait(view, ULPI_VIEW_RUN);
}
struct usb_phy_io_ops ulpi_viewport_access_ops = {
.read = ulpi_viewport_read,
.write = ulpi_viewport_write,
};
EXPORT_SYMBOL_GPL(ulpi_viewport_access_ops);
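/*
 * Usage sketch (illustrative, not part of the original file): a host
 * controller driver with a memory-mapped ULPI viewport can wire these
 * ops into its struct usb_phy and then use the generic accessors from
 * <linux/usb/phy.h>:
 *
 *	phy->io_priv = viewport_base;	(hypothetical ioremap()ed base)
 *	phy->io_ops = &ulpi_viewport_access_ops;
 *	vid_low = usb_phy_io_read(phy, ULPI_VENDOR_ID_LOW);
 *
 * "viewport_base" and "vid_low" are made-up names for illustration;
 * ULPI_VENDOR_ID_LOW comes from <linux/usb/ulpi.h>.
 */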
| linux-master | drivers/usb/phy/phy-ulpi-viewport.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/usb/of.h>
#include "phy-am335x-control.h"
#include "phy-generic.h"
struct am335x_phy {
struct usb_phy_generic usb_phy_gen;
struct phy_control *phy_ctrl;
int id;
enum usb_dr_mode dr_mode;
};
static int am335x_init(struct usb_phy *phy)
{
struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true);
return 0;
}
static void am335x_shutdown(struct usb_phy *phy)
{
struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
}
static int am335x_phy_probe(struct platform_device *pdev)
{
struct am335x_phy *am_phy;
struct device *dev = &pdev->dev;
int ret;
am_phy = devm_kzalloc(dev, sizeof(*am_phy), GFP_KERNEL);
if (!am_phy)
return -ENOMEM;
am_phy->phy_ctrl = am335x_get_phy_control(dev);
if (!am_phy->phy_ctrl)
return -EPROBE_DEFER;
am_phy->id = of_alias_get_id(pdev->dev.of_node, "phy");
if (am_phy->id < 0) {
dev_err(&pdev->dev, "Missing PHY id: %d\n", am_phy->id);
return am_phy->id;
}
am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
if (ret)
return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
platform_set_drvdata(pdev, am_phy);
device_init_wakeup(dev, true);
	/*
	 * If we leave PHY wakeup enabled, the AM33XX wakes up
	 * immediately from DS0. To avoid this we set dev->power.can_wakeup
	 * to false. The same flag is checked in the suspend routine to decide
	 * whether to enable PHY wakeup or not.
	 * PHY wakeup works fine in standby mode, thereby allowing us to
	 * handle remote wakeup, and wakeup on disconnect and connect.
	 */
device_set_wakeup_enable(dev, false);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
}
static void am335x_phy_remove(struct platform_device *pdev)
{
struct am335x_phy *am_phy = platform_get_drvdata(pdev);
usb_remove_phy(&am_phy->usb_phy_gen.phy);
}
#ifdef CONFIG_PM_SLEEP
static int am335x_phy_suspend(struct device *dev)
{
struct am335x_phy *am_phy = dev_get_drvdata(dev);
	/*
	 * Enable PHY wakeup only if dev->power.can_wakeup is true.
	 * Make sure to enable wakeup to support remote wakeup in
	 * standby mode (the same is not supported in OFF(DS0) mode).
	 * Enable it by doing
	 * echo enabled > /sys/bus/platform/devices/<usb-phy-id>/power/wakeup
	 */
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, true);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
return 0;
}
static int am335x_phy_resume(struct device *dev)
{
struct am335x_phy *am_phy = dev_get_drvdata(dev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, true);
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, false);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(am335x_pm_ops, am335x_phy_suspend, am335x_phy_resume);
static const struct of_device_id am335x_phy_ids[] = {
{ .compatible = "ti,am335x-usb-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, am335x_phy_ids);
static struct platform_driver am335x_phy_driver = {
.probe = am335x_phy_probe,
.remove_new = am335x_phy_remove,
.driver = {
.name = "am335x-phy-driver",
.pm = &am335x_pm_ops,
.of_match_table = am335x_phy_ids,
},
};
module_platform_driver(am335x_phy_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/phy/phy-am335x.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* phy-keystone - USB PHY, talking to dwc3 controller in Keystone.
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
*
* Author: WingMan Kwok <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/io.h>
#include <linux/of.h>
#include "phy-generic.h"
/* USB PHY control register offsets */
#define USB_PHY_CTL_UTMI 0x0000
#define USB_PHY_CTL_PIPE 0x0004
#define USB_PHY_CTL_PARAM_1 0x0008
#define USB_PHY_CTL_PARAM_2 0x000c
#define USB_PHY_CTL_CLOCK 0x0010
#define USB_PHY_CTL_PLL 0x0014
#define PHY_REF_SSP_EN BIT(29)
struct keystone_usbphy {
struct usb_phy_generic usb_phy_gen;
void __iomem *phy_ctrl;
};
static inline u32 keystone_usbphy_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
static inline void keystone_usbphy_writel(void __iomem *base,
u32 offset, u32 value)
{
writel(value, base + offset);
}
static int keystone_usbphy_init(struct usb_phy *phy)
{
struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
u32 val;
val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
val | PHY_REF_SSP_EN);
return 0;
}
static void keystone_usbphy_shutdown(struct usb_phy *phy)
{
struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
u32 val;
val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
val & ~PHY_REF_SSP_EN);
}
static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
if (ret)
return ret;
k_phy->usb_phy_gen.phy.init = keystone_usbphy_init;
k_phy->usb_phy_gen.phy.shutdown = keystone_usbphy_shutdown;
platform_set_drvdata(pdev, k_phy);
return usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
}
static void keystone_usbphy_remove(struct platform_device *pdev)
{
struct keystone_usbphy *k_phy = platform_get_drvdata(pdev);
usb_remove_phy(&k_phy->usb_phy_gen.phy);
}
static const struct of_device_id keystone_usbphy_ids[] = {
{ .compatible = "ti,keystone-usbphy" },
{ }
};
MODULE_DEVICE_TABLE(of, keystone_usbphy_ids);
static struct platform_driver keystone_usbphy_driver = {
.probe = keystone_usbphy_probe,
.remove_new = keystone_usbphy_remove,
.driver = {
.name = "keystone-usbphy",
.of_match_table = keystone_usbphy_ids,
},
};
module_platform_driver(keystone_usbphy_driver);
MODULE_ALIAS("platform:keystone-usbphy");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("Keystone USB phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/phy/phy-keystone.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB of helper code
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
static const char *const usbphy_modes[] = {
[USBPHY_INTERFACE_MODE_UNKNOWN] = "",
[USBPHY_INTERFACE_MODE_UTMI] = "utmi",
[USBPHY_INTERFACE_MODE_UTMIW] = "utmi_wide",
[USBPHY_INTERFACE_MODE_ULPI] = "ulpi",
[USBPHY_INTERFACE_MODE_SERIAL] = "serial",
[USBPHY_INTERFACE_MODE_HSIC] = "hsic",
};
/**
* of_usb_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
* The function gets phy interface string from property 'phy_type',
* and returns the corresponding enum usb_phy_interface
*/
enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)
{
const char *phy_type;
int err, i;
err = of_property_read_string(np, "phy_type", &phy_type);
if (err < 0)
return USBPHY_INTERFACE_MODE_UNKNOWN;
for (i = 0; i < ARRAY_SIZE(usbphy_modes); i++)
if (!strcmp(phy_type, usbphy_modes[i]))
return i;
return USBPHY_INTERFACE_MODE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(of_usb_get_phy_mode);
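/*
 * Example (illustrative): a device tree node carrying the "phy_type"
 * property that this helper parses might look like:
 *
 *	usb@2080000 {
 *		...
 *		phy_type = "ulpi";
 *	};
 *
 * of_usb_get_phy_mode() on that node returns
 * USBPHY_INTERFACE_MODE_ULPI; the node name and address above are
 * made up for illustration.
 */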
| linux-master | drivers/usb/phy/of.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <[email protected]>
* Neil Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/platform_data/mv_usb.h>
#include "phy-mv-usb.h"
#define DRIVER_DESC "Marvell USB OTG transceiver driver"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static const char driver_name[] = "mv-otg";
static char *state_string[] = {
"undefined",
"b_idle",
"b_srp_init",
"b_peripheral",
"b_wait_acon",
"b_host",
"a_idle",
"a_wait_vrise",
"a_wait_bcon",
"a_host",
"a_suspend",
"a_peripheral",
"a_wait_vfall",
"a_vbus_err"
};
static int mv_otg_set_vbus(struct usb_otg *otg, bool on)
{
struct mv_otg *mvotg = container_of(otg->usb_phy, struct mv_otg, phy);
if (mvotg->pdata->set_vbus == NULL)
return -ENODEV;
return mvotg->pdata->set_vbus(on);
}
static int mv_otg_set_host(struct usb_otg *otg,
struct usb_bus *host)
{
otg->host = host;
return 0;
}
static int mv_otg_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
otg->gadget = gadget;
return 0;
}
static void mv_otg_run_state_machine(struct mv_otg *mvotg,
unsigned long delay)
{
dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
if (!mvotg->qwork)
return;
queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
}
static void mv_otg_timer_await_bcon(struct timer_list *t)
{
struct mv_otg *mvotg = from_timer(mvotg, t,
otg_ctrl.timer[A_WAIT_BCON_TIMER]);
mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 0);
spin_unlock(&mvotg->wq_lock);
}
}
static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
{
struct timer_list *timer;
if (id >= OTG_TIMER_NUM)
return -EINVAL;
timer = &mvotg->otg_ctrl.timer[id];
if (timer_pending(timer))
del_timer(timer);
return 0;
}
static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
unsigned long interval)
{
struct timer_list *timer;
if (id >= OTG_TIMER_NUM)
return -EINVAL;
timer = &mvotg->otg_ctrl.timer[id];
if (timer_pending(timer)) {
dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
return -EBUSY;
}
timer->expires = jiffies + interval;
add_timer(timer);
return 0;
}
static int mv_otg_reset(struct mv_otg *mvotg)
{
u32 tmp;
int ret;
/* Stop the controller */
tmp = readl(&mvotg->op_regs->usbcmd);
tmp &= ~USBCMD_RUN_STOP;
writel(tmp, &mvotg->op_regs->usbcmd);
/* Reset the controller to get default values */
writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
ret = readl_poll_timeout_atomic(&mvotg->op_regs->usbcmd, tmp,
(tmp & USBCMD_CTRL_RESET), 10, 10000);
if (ret < 0) {
dev_err(&mvotg->pdev->dev,
"Wait for RESET completed TIMEOUT\n");
return ret;
}
writel(0x0, &mvotg->op_regs->usbintr);
tmp = readl(&mvotg->op_regs->usbsts);
writel(tmp, &mvotg->op_regs->usbsts);
return 0;
}
static void mv_otg_init_irq(struct mv_otg *mvotg)
{
u32 otgsc;
mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
| OTGSC_INTR_A_VBUS_VALID;
mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
| OTGSC_INTSTS_A_VBUS_VALID;
if (mvotg->pdata->vbus == NULL) {
mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
| OTGSC_INTR_B_SESSION_END;
mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
| OTGSC_INTSTS_B_SESSION_END;
}
if (mvotg->pdata->id == NULL) {
mvotg->irq_en |= OTGSC_INTR_USB_ID;
mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
}
otgsc = readl(&mvotg->op_regs->otgsc);
otgsc |= mvotg->irq_en;
writel(otgsc, &mvotg->op_regs->otgsc);
}
static void mv_otg_start_host(struct mv_otg *mvotg, int on)
{
#ifdef CONFIG_USB
struct usb_otg *otg = mvotg->phy.otg;
struct usb_hcd *hcd;
if (!otg->host)
return;
dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
hcd = bus_to_hcd(otg->host);
if (on) {
usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
device_wakeup_enable(hcd->self.controller);
} else {
usb_remove_hcd(hcd);
}
#endif /* CONFIG_USB */
}
static void mv_otg_start_peripherals(struct mv_otg *mvotg, int on)
{
struct usb_otg *otg = mvotg->phy.otg;
if (!otg->gadget)
return;
dev_info(mvotg->phy.dev, "gadget %s\n", on ? "on" : "off");
if (on)
usb_gadget_vbus_connect(otg->gadget);
else
usb_gadget_vbus_disconnect(otg->gadget);
}
static void otg_clock_enable(struct mv_otg *mvotg)
{
clk_prepare_enable(mvotg->clk);
}
static void otg_clock_disable(struct mv_otg *mvotg)
{
clk_disable_unprepare(mvotg->clk);
}
static int mv_otg_enable_internal(struct mv_otg *mvotg)
{
int retval = 0;
if (mvotg->active)
return 0;
dev_dbg(&mvotg->pdev->dev, "otg enabled\n");
otg_clock_enable(mvotg);
if (mvotg->pdata->phy_init) {
retval = mvotg->pdata->phy_init(mvotg->phy_regs);
if (retval) {
dev_err(&mvotg->pdev->dev,
"init phy error %d\n", retval);
otg_clock_disable(mvotg);
return retval;
}
}
mvotg->active = 1;
return 0;
}
static int mv_otg_enable(struct mv_otg *mvotg)
{
if (mvotg->clock_gating)
return mv_otg_enable_internal(mvotg);
return 0;
}
static void mv_otg_disable_internal(struct mv_otg *mvotg)
{
if (mvotg->active) {
dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
if (mvotg->pdata->phy_deinit)
mvotg->pdata->phy_deinit(mvotg->phy_regs);
otg_clock_disable(mvotg);
mvotg->active = 0;
}
}
static void mv_otg_disable(struct mv_otg *mvotg)
{
if (mvotg->clock_gating)
mv_otg_disable_internal(mvotg);
}
static void mv_otg_update_inputs(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
u32 otgsc;
otgsc = readl(&mvotg->op_regs->otgsc);
if (mvotg->pdata->vbus) {
if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
otg_ctrl->b_sess_vld = 1;
otg_ctrl->b_sess_end = 0;
} else {
otg_ctrl->b_sess_vld = 0;
otg_ctrl->b_sess_end = 1;
}
} else {
otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
}
if (mvotg->pdata->id)
otg_ctrl->id = !!mvotg->pdata->id->poll();
else
otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);
if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
otg_ctrl->a_bus_req = 1;
otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);
dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
}
static void mv_otg_update_state(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
int old_state = mvotg->phy.otg->state;
switch (old_state) {
case OTG_STATE_UNDEFINED:
mvotg->phy.otg->state = OTG_STATE_B_IDLE;
fallthrough;
case OTG_STATE_B_IDLE:
if (otg_ctrl->id == 0)
mvotg->phy.otg->state = OTG_STATE_A_IDLE;
else if (otg_ctrl->b_sess_vld)
mvotg->phy.otg->state = OTG_STATE_B_PERIPHERAL;
break;
case OTG_STATE_B_PERIPHERAL:
if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
mvotg->phy.otg->state = OTG_STATE_B_IDLE;
break;
case OTG_STATE_A_IDLE:
if (otg_ctrl->id)
mvotg->phy.otg->state = OTG_STATE_B_IDLE;
else if (!(otg_ctrl->a_bus_drop) &&
(otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
mvotg->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
break;
case OTG_STATE_A_WAIT_VRISE:
if (otg_ctrl->a_vbus_vld)
mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
break;
case OTG_STATE_A_WAIT_BCON:
if (otg_ctrl->id || otg_ctrl->a_bus_drop
|| otg_ctrl->a_wait_bcon_timeout) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
otg_ctrl->a_bus_req = 0;
} else if (!otg_ctrl->a_vbus_vld) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
} else if (otg_ctrl->b_conn) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
mvotg->phy.otg->state = OTG_STATE_A_HOST;
}
break;
case OTG_STATE_A_HOST:
if (otg_ctrl->id || !otg_ctrl->b_conn
|| otg_ctrl->a_bus_drop)
mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
else if (!otg_ctrl->a_vbus_vld)
mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
break;
case OTG_STATE_A_WAIT_VFALL:
if (otg_ctrl->id
|| (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
|| otg_ctrl->a_bus_req)
mvotg->phy.otg->state = OTG_STATE_A_IDLE;
break;
case OTG_STATE_A_VBUS_ERR:
if (otg_ctrl->id || otg_ctrl->a_clr_err
|| otg_ctrl->a_bus_drop) {
otg_ctrl->a_clr_err = 0;
mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
}
break;
default:
break;
}
}
static void mv_otg_work(struct work_struct *work)
{
struct mv_otg *mvotg;
struct usb_otg *otg;
int old_state;
mvotg = container_of(to_delayed_work(work), struct mv_otg, work);
run:
	/* the workqueue is single-threaded; otherwise we would need a spinlock */
otg = mvotg->phy.otg;
old_state = otg->state;
if (!mvotg->active)
return;
mv_otg_update_inputs(mvotg);
mv_otg_update_state(mvotg);
if (old_state != mvotg->phy.otg->state) {
dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
state_string[old_state],
state_string[mvotg->phy.otg->state]);
switch (mvotg->phy.otg->state) {
case OTG_STATE_B_IDLE:
otg->default_a = 0;
if (old_state == OTG_STATE_B_PERIPHERAL)
				mv_otg_start_peripherals(mvotg, 0);
mv_otg_reset(mvotg);
mv_otg_disable(mvotg);
usb_phy_set_event(&mvotg->phy, USB_EVENT_NONE);
break;
case OTG_STATE_B_PERIPHERAL:
mv_otg_enable(mvotg);
			mv_otg_start_peripherals(mvotg, 1);
usb_phy_set_event(&mvotg->phy, USB_EVENT_ENUMERATED);
break;
case OTG_STATE_A_IDLE:
otg->default_a = 1;
mv_otg_enable(mvotg);
if (old_state == OTG_STATE_A_WAIT_VFALL)
mv_otg_start_host(mvotg, 0);
mv_otg_reset(mvotg);
break;
case OTG_STATE_A_WAIT_VRISE:
mv_otg_set_vbus(otg, 1);
break;
case OTG_STATE_A_WAIT_BCON:
if (old_state != OTG_STATE_A_HOST)
mv_otg_start_host(mvotg, 1);
mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
T_A_WAIT_BCON);
			/*
			 * Now we directly enter A_HOST, so set b_conn = 1
			 * here. Strictly, the host driver should notify us.
			 */
mvotg->otg_ctrl.b_conn = 1;
break;
case OTG_STATE_A_HOST:
break;
case OTG_STATE_A_WAIT_VFALL:
			/*
			 * Now we have exited A_HOST, so set b_conn = 0
			 * here. Strictly, the host driver should notify us.
			 */
mvotg->otg_ctrl.b_conn = 0;
mv_otg_set_vbus(otg, 0);
break;
case OTG_STATE_A_VBUS_ERR:
break;
default:
break;
}
goto run;
}
}
static irqreturn_t mv_otg_irq(int irq, void *dev)
{
struct mv_otg *mvotg = dev;
u32 otgsc;
otgsc = readl(&mvotg->op_regs->otgsc);
writel(otgsc, &mvotg->op_regs->otgsc);
/*
* if we have vbus, then the vbus detection for B-device
* will be done by mv_otg_inputs_irq().
*/
if (mvotg->pdata->vbus)
if ((otgsc & OTGSC_STS_USB_ID) &&
!(otgsc & OTGSC_INTSTS_USB_ID))
return IRQ_NONE;
if ((otgsc & mvotg->irq_status) == 0)
return IRQ_NONE;
mv_otg_run_state_machine(mvotg, 0);
return IRQ_HANDLED;
}
static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
{
struct mv_otg *mvotg = dev;
	/* The clock may be disabled at this time */
if (!mvotg->active) {
mv_otg_enable(mvotg);
mv_otg_init_irq(mvotg);
}
mv_otg_run_state_machine(mvotg, 0);
return IRQ_HANDLED;
}
static ssize_t
a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
mvotg->otg_ctrl.a_bus_req);
}
static ssize_t
a_bus_req_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
	if (count > 2)
		return -EINVAL;
/* We will use this interface to change to A device */
	if (mvotg->phy.otg->state != OTG_STATE_B_IDLE
	    && mvotg->phy.otg->state != OTG_STATE_A_IDLE)
		return -EINVAL;
	/* The clock may be disabled, and we need the IRQ set up for ID detection */
mv_otg_enable(mvotg);
mv_otg_init_irq(mvotg);
if (buf[0] == '1') {
mvotg->otg_ctrl.a_bus_req = 1;
mvotg->otg_ctrl.a_bus_drop = 0;
dev_dbg(&mvotg->pdev->dev,
"User request: a_bus_req = 1\n");
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 0);
spin_unlock(&mvotg->wq_lock);
}
}
return count;
}
static DEVICE_ATTR_RW(a_bus_req);
static ssize_t
a_clr_err_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
	if (!mvotg->phy.otg->default_a)
		return -EINVAL;
	if (count > 2)
		return -EINVAL;
if (buf[0] == '1') {
mvotg->otg_ctrl.a_clr_err = 1;
dev_dbg(&mvotg->pdev->dev,
"User request: a_clr_err = 1\n");
}
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 0);
spin_unlock(&mvotg->wq_lock);
}
return count;
}
static DEVICE_ATTR_WO(a_clr_err);
static ssize_t
a_bus_drop_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
mvotg->otg_ctrl.a_bus_drop);
}
static ssize_t
a_bus_drop_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
	if (!mvotg->phy.otg->default_a)
		return -EINVAL;
	if (count > 2)
		return -EINVAL;
if (buf[0] == '0') {
mvotg->otg_ctrl.a_bus_drop = 0;
dev_dbg(&mvotg->pdev->dev,
"User request: a_bus_drop = 0\n");
} else if (buf[0] == '1') {
mvotg->otg_ctrl.a_bus_drop = 1;
mvotg->otg_ctrl.a_bus_req = 0;
dev_dbg(&mvotg->pdev->dev,
"User request: a_bus_drop = 1\n");
dev_dbg(&mvotg->pdev->dev,
"User request: and a_bus_req = 0\n");
}
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 0);
spin_unlock(&mvotg->wq_lock);
}
return count;
}
static DEVICE_ATTR_RW(a_bus_drop);
static struct attribute *inputs_attrs[] = {
&dev_attr_a_bus_req.attr,
&dev_attr_a_clr_err.attr,
&dev_attr_a_bus_drop.attr,
NULL,
};
static const struct attribute_group inputs_attr_group = {
.name = "inputs",
.attrs = inputs_attrs,
};
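/*
 * These attributes are registered below via dev_groups and show up
 * under the platform device in sysfs. Illustrative usage (the exact
 * path is board dependent):
 *
 *	echo 1 > /sys/devices/platform/<dev>/inputs/a_bus_req
 *
 * requests an A-device bus session, and
 *
 *	echo 1 > /sys/devices/platform/<dev>/inputs/a_clr_err
 *
 * clears the a_vbus_err condition.
 */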
static const struct attribute_group *mv_otg_groups[] = {
&inputs_attr_group,
NULL,
};
static void mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
if (mvotg->qwork)
destroy_workqueue(mvotg->qwork);
mv_otg_disable(mvotg);
usb_remove_phy(&mvotg->phy);
}
static int mv_otg_probe(struct platform_device *pdev)
{
struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mv_otg *mvotg;
struct usb_otg *otg;
struct resource *r;
int retval = 0, i;
if (pdata == NULL) {
dev_err(&pdev->dev, "failed to get platform data\n");
return -ENODEV;
}
mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL);
if (!mvotg)
return -ENOMEM;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
platform_set_drvdata(pdev, mvotg);
mvotg->pdev = pdev;
mvotg->pdata = pdata;
mvotg->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mvotg->clk))
return PTR_ERR(mvotg->clk);
mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
if (!mvotg->qwork) {
dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
return -ENOMEM;
}
INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
/* OTG common part */
mvotg->pdev = pdev;
mvotg->phy.dev = &pdev->dev;
mvotg->phy.otg = otg;
mvotg->phy.label = driver_name;
otg->state = OTG_STATE_UNDEFINED;
otg->usb_phy = &mvotg->phy;
otg->set_host = mv_otg_set_host;
otg->set_peripheral = mv_otg_set_peripheral;
otg->set_vbus = mv_otg_set_vbus;
for (i = 0; i < OTG_TIMER_NUM; i++)
timer_setup(&mvotg->otg_ctrl.timer[i],
mv_otg_timer_await_bcon, 0);
r = platform_get_resource_byname(mvotg->pdev,
IORESOURCE_MEM, "phyregs");
if (r == NULL) {
dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
retval = -ENODEV;
goto err_destroy_workqueue;
}
mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (mvotg->phy_regs == NULL) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
retval = -EFAULT;
goto err_destroy_workqueue;
}
r = platform_get_resource_byname(mvotg->pdev,
IORESOURCE_MEM, "capregs");
if (r == NULL) {
dev_err(&pdev->dev, "no I/O memory resource defined\n");
retval = -ENODEV;
goto err_destroy_workqueue;
}
mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (mvotg->cap_regs == NULL) {
dev_err(&pdev->dev, "failed to map I/O memory\n");
retval = -EFAULT;
goto err_destroy_workqueue;
}
	/* we will access controller registers, so enable the UDC controller */
retval = mv_otg_enable_internal(mvotg);
if (retval) {
dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
goto err_destroy_workqueue;
}
mvotg->op_regs =
(struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
+ (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
if (pdata->id) {
retval = devm_request_threaded_irq(&pdev->dev, pdata->id->irq,
NULL, mv_otg_inputs_irq,
IRQF_ONESHOT, "id", mvotg);
if (retval) {
dev_info(&pdev->dev,
"Failed to request irq for ID\n");
pdata->id = NULL;
}
}
if (pdata->vbus) {
mvotg->clock_gating = 1;
retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq,
NULL, mv_otg_inputs_irq,
IRQF_ONESHOT, "vbus", mvotg);
if (retval) {
dev_info(&pdev->dev,
"Failed to request irq for VBUS, "
"disable clock gating\n");
mvotg->clock_gating = 0;
pdata->vbus = NULL;
}
}
if (pdata->disable_otg_clock_gating)
mvotg->clock_gating = 0;
mv_otg_reset(mvotg);
mv_otg_init_irq(mvotg);
r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
if (r == NULL) {
dev_err(&pdev->dev, "no IRQ resource defined\n");
retval = -ENODEV;
goto err_disable_clk;
}
mvotg->irq = r->start;
if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED,
driver_name, mvotg)) {
dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
mvotg->irq);
mvotg->irq = 0;
retval = -ENODEV;
goto err_disable_clk;
}
retval = usb_add_phy(&mvotg->phy, USB_PHY_TYPE_USB2);
if (retval < 0) {
dev_err(&pdev->dev, "can't register transceiver, %d\n",
retval);
goto err_disable_clk;
}
spin_lock_init(&mvotg->wq_lock);
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 2 * HZ);
spin_unlock(&mvotg->wq_lock);
}
dev_info(&pdev->dev,
"successful probe OTG device %s clock gating.\n",
mvotg->clock_gating ? "with" : "without");
return 0;
err_disable_clk:
mv_otg_disable_internal(mvotg);
err_destroy_workqueue:
destroy_workqueue(mvotg->qwork);
return retval;
}
#ifdef CONFIG_PM
static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
dev_info(&pdev->dev,
"OTG state is not B_IDLE, it is %d!\n",
mvotg->phy.otg->state);
return -EAGAIN;
}
if (!mvotg->clock_gating)
mv_otg_disable_internal(mvotg);
return 0;
}
static int mv_otg_resume(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
u32 otgsc;
if (!mvotg->clock_gating) {
mv_otg_enable_internal(mvotg);
otgsc = readl(&mvotg->op_regs->otgsc);
otgsc |= mvotg->irq_en;
writel(otgsc, &mvotg->op_regs->otgsc);
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 0);
spin_unlock(&mvotg->wq_lock);
}
}
return 0;
}
#endif
static struct platform_driver mv_otg_driver = {
.probe = mv_otg_probe,
.remove_new = mv_otg_remove,
.driver = {
.name = driver_name,
.dev_groups = mv_otg_groups,
},
#ifdef CONFIG_PM
.suspend = mv_otg_suspend,
.resume = mv_otg_resume,
#endif
};
module_platform_driver(mv_otg_driver);
| linux-master | drivers/usb/phy/phy-mv-usb.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include "phy-am335x-control.h"
struct am335x_control_usb {
struct device *dev;
void __iomem *phy_reg;
void __iomem *wkup;
spinlock_t lock;
struct phy_control phy_ctrl;
};
#define AM335X_USB0_CTRL 0x0
#define AM335X_USB1_CTRL 0x8
#define AM335x_USB_WKUP 0x0
#define USBPHY_CM_PWRDN (1 << 0)
#define USBPHY_OTG_PWRDN (1 << 1)
#define USBPHY_OTGVDET_EN (1 << 19)
#define USBPHY_OTGSESSEND_EN (1 << 20)
#define AM335X_PHY0_WK_EN (1 << 0)
#define AM335X_PHY1_WK_EN (1 << 8)
static void am335x_phy_wkup(struct phy_control *phy_ctrl, u32 id, bool on)
{
struct am335x_control_usb *usb_ctrl;
u32 val;
u32 reg;
usb_ctrl = container_of(phy_ctrl, struct am335x_control_usb, phy_ctrl);
switch (id) {
case 0:
reg = AM335X_PHY0_WK_EN;
break;
case 1:
reg = AM335X_PHY1_WK_EN;
break;
default:
WARN_ON(1);
return;
}
spin_lock(&usb_ctrl->lock);
val = readl(usb_ctrl->wkup);
if (on)
val |= reg;
else
val &= ~reg;
writel(val, usb_ctrl->wkup);
spin_unlock(&usb_ctrl->lock);
}
static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id,
enum usb_dr_mode dr_mode, bool on)
{
struct am335x_control_usb *usb_ctrl;
u32 val;
u32 reg;
usb_ctrl = container_of(phy_ctrl, struct am335x_control_usb, phy_ctrl);
switch (id) {
case 0:
reg = AM335X_USB0_CTRL;
break;
case 1:
reg = AM335X_USB1_CTRL;
break;
default:
WARN_ON(1);
return;
}
val = readl(usb_ctrl->phy_reg + reg);
if (on) {
if (dr_mode == USB_DR_MODE_HOST) {
val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN |
USBPHY_OTGVDET_EN);
val |= USBPHY_OTGSESSEND_EN;
} else {
val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN);
val |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN;
}
} else {
val |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
}
writel(val, usb_ctrl->phy_reg + reg);
	/*
	 * Give the PHY ~1ms to complete the power-up operation.
	 * Tests have shown unstable behaviour if other USB PHY related
	 * registers are written too soon after such a transition.
	 */
if (on)
mdelay(1);
}
static const struct phy_control ctrl_am335x = {
.phy_power = am335x_phy_power,
.phy_wkup = am335x_phy_wkup,
};
static const struct of_device_id omap_control_usb_id_table[] = {
{ .compatible = "ti,am335x-usb-ctrl-module", .data = &ctrl_am335x },
{}
};
MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
static struct platform_driver am335x_control_driver;
static int match(struct device *dev, const void *data)
{
const struct device_node *node = (const struct device_node *)data;
return dev->of_node == node &&
dev->driver == &am335x_control_driver.driver;
}
struct phy_control *am335x_get_phy_control(struct device *dev)
{
struct device_node *node;
struct am335x_control_usb *ctrl_usb;
node = of_parse_phandle(dev->of_node, "ti,ctrl_mod", 0);
if (!node)
return NULL;
dev = bus_find_device(&platform_bus_type, NULL, node, match);
of_node_put(node);
if (!dev)
return NULL;
ctrl_usb = dev_get_drvdata(dev);
put_device(dev);
if (!ctrl_usb)
return NULL;
return &ctrl_usb->phy_ctrl;
}
EXPORT_SYMBOL_GPL(am335x_get_phy_control);
static int am335x_control_usb_probe(struct platform_device *pdev)
{
struct am335x_control_usb *ctrl_usb;
const struct of_device_id *of_id;
const struct phy_control *phy_ctrl;
of_id = of_match_node(omap_control_usb_id_table, pdev->dev.of_node);
if (!of_id)
return -EINVAL;
phy_ctrl = of_id->data;
ctrl_usb = devm_kzalloc(&pdev->dev, sizeof(*ctrl_usb), GFP_KERNEL);
if (!ctrl_usb)
return -ENOMEM;
ctrl_usb->dev = &pdev->dev;
ctrl_usb->phy_reg = devm_platform_ioremap_resource_byname(pdev, "phy_ctrl");
if (IS_ERR(ctrl_usb->phy_reg))
return PTR_ERR(ctrl_usb->phy_reg);
ctrl_usb->wkup = devm_platform_ioremap_resource_byname(pdev, "wakeup");
if (IS_ERR(ctrl_usb->wkup))
return PTR_ERR(ctrl_usb->wkup);
spin_lock_init(&ctrl_usb->lock);
ctrl_usb->phy_ctrl = *phy_ctrl;
dev_set_drvdata(ctrl_usb->dev, ctrl_usb);
return 0;
}
static struct platform_driver am335x_control_driver = {
.probe = am335x_control_usb_probe,
.driver = {
.name = "am335x-control-usb",
.of_match_table = omap_control_usb_id_table,
},
};
module_platform_driver(am335x_control_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/phy/phy-am335x-control.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* NOP USB transceiver for all USB transceiver which are either built-in
* into USB IP or which are mostly autonomous.
*
* Copyright (C) 2009 Texas Instruments Inc
* Author: Ajay Kumar Gupta <[email protected]>
*
* Current status:
* This provides a "nop" transceiver for PHYs which are
* autonomous such as isp1504, isp1707, etc.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "phy-generic.h"
#define VBUS_IRQ_FLAGS \
(IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | \
IRQF_ONESHOT)
struct platform_device *usb_phy_generic_register(void)
{
return platform_device_register_simple("usb_phy_generic",
PLATFORM_DEVID_AUTO, NULL, 0);
}
EXPORT_SYMBOL_GPL(usb_phy_generic_register);
void usb_phy_generic_unregister(struct platform_device *pdev)
{
platform_device_unregister(pdev);
}
EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
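/*
 * Usage sketch (illustrative): controller glue code that needs a NOP
 * transceiver can create one before registering its own device and
 * tear it down on removal:
 *
 *	struct platform_device *phy_dev = usb_phy_generic_register();
 *
 *	if (IS_ERR(phy_dev))
 *		return PTR_ERR(phy_dev);
 *	...
 *	usb_phy_generic_unregister(phy_dev);
 *
 * "phy_dev" is a made-up name; platform_device_register_simple()
 * returns an ERR_PTR() value on failure, hence the IS_ERR() check.
 */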
static int nop_set_suspend(struct usb_phy *x, int suspend)
{
struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
if (!IS_ERR(nop->clk)) {
if (suspend)
clk_disable_unprepare(nop->clk);
else
clk_prepare_enable(nop->clk);
}
return 0;
}
static void nop_reset(struct usb_phy_generic *nop)
{
if (!nop->gpiod_reset)
return;
gpiod_set_value_cansleep(nop->gpiod_reset, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(nop->gpiod_reset, 0);
}
/* interface to regulator framework */
static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
{
struct regulator *vbus_draw = nop->vbus_draw;
int enabled;
int ret;
if (!vbus_draw)
return;
enabled = nop->vbus_draw_enabled;
if (mA) {
regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
if (!enabled) {
ret = regulator_enable(vbus_draw);
if (ret < 0)
return;
nop->vbus_draw_enabled = 1;
}
} else {
if (enabled) {
ret = regulator_disable(vbus_draw);
if (ret < 0)
return;
nop->vbus_draw_enabled = 0;
}
}
nop->mA = mA;
}
static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
{
struct usb_phy_generic *nop = data;
struct usb_otg *otg = nop->phy.otg;
int vbus, status;
vbus = gpiod_get_value(nop->gpiod_vbus);
if ((vbus ^ nop->vbus) == 0)
return IRQ_HANDLED;
nop->vbus = vbus;
if (vbus) {
status = USB_EVENT_VBUS;
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
/* drawing a "unit load" is *always* OK, except for OTG */
nop_set_vbus_draw(nop, 100);
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
} else {
nop_set_vbus_draw(nop, 0);
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
}
return IRQ_HANDLED;
}
int usb_gen_phy_init(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
int ret;
if (!IS_ERR(nop->vcc)) {
if (regulator_enable(nop->vcc))
dev_err(phy->dev, "Failed to enable power\n");
}
if (!IS_ERR(nop->clk)) {
ret = clk_prepare_enable(nop->clk);
if (ret)
return ret;
}
nop_reset(nop);
return 0;
}
EXPORT_SYMBOL_GPL(usb_gen_phy_init);
void usb_gen_phy_shutdown(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
gpiod_set_value_cansleep(nop->gpiod_reset, 1);
if (!IS_ERR(nop->clk))
clk_disable_unprepare(nop->clk);
if (!IS_ERR(nop->vcc)) {
if (regulator_disable(nop->vcc))
dev_err(phy->dev, "Failed to disable power\n");
}
}
EXPORT_SYMBOL_GPL(usb_gen_phy_shutdown);
static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
if (!otg)
return -ENODEV;
if (!gadget) {
otg->gadget = NULL;
return -ENODEV;
}
otg->gadget = gadget;
if (otg->state == OTG_STATE_B_PERIPHERAL)
atomic_notifier_call_chain(&otg->usb_phy->notifier,
USB_EVENT_VBUS, otg->gadget);
else
otg->state = OTG_STATE_B_IDLE;
return 0;
}
static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
{
if (!otg)
return -ENODEV;
if (!host) {
otg->host = NULL;
return -ENODEV;
}
otg->host = host;
return 0;
}
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err = 0;
u32 clk_rate = 0;
bool needs_clk = false;
if (dev->of_node) {
struct device_node *node = dev->of_node;
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
needs_clk = of_property_read_bool(node, "clocks");
}
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
if (!err) {
nop->gpiod_vbus = devm_gpiod_get_optional(dev,
"vbus-detect",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
if (err)
return dev_err_probe(dev, err,
"Error requesting RESET or VBUS GPIO\n");
if (nop->gpiod_reset)
gpiod_direction_output(nop->gpiod_reset, 1);
nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
GFP_KERNEL);
if (!nop->phy.otg)
return -ENOMEM;
nop->clk = devm_clk_get(dev, "main_clk");
if (IS_ERR(nop->clk)) {
dev_dbg(dev, "Can't get phy clock: %ld\n",
PTR_ERR(nop->clk));
if (needs_clk)
return PTR_ERR(nop->clk);
}
if (!IS_ERR(nop->clk) && clk_rate) {
err = clk_set_rate(nop->clk, clk_rate);
if (err) {
dev_err(dev, "Error setting clock rate\n");
return err;
}
}
nop->vcc = devm_regulator_get_optional(dev, "vcc");
if (IS_ERR(nop->vcc) && PTR_ERR(nop->vcc) != -ENODEV)
return dev_err_probe(dev, PTR_ERR(nop->vcc),
"could not get vcc regulator\n");
nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
if (PTR_ERR(nop->vbus_draw) == -ENODEV)
nop->vbus_draw = NULL;
if (IS_ERR(nop->vbus_draw))
return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
"could not get vbus regulator\n");
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
nop->phy.set_suspend = nop_set_suspend;
nop->phy.type = type;
nop->phy.otg->state = OTG_STATE_UNDEFINED;
nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
static int usb_phy_generic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct usb_phy_generic *nop;
int err;
nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
if (!nop)
return -ENOMEM;
err = usb_phy_gen_create_phy(dev, nop);
if (err)
return err;
if (nop->gpiod_vbus) {
err = devm_request_threaded_irq(&pdev->dev,
gpiod_to_irq(nop->gpiod_vbus),
NULL, nop_gpio_vbus_thread,
VBUS_IRQ_FLAGS, "vbus_detect",
nop);
if (err) {
dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
gpiod_to_irq(nop->gpiod_vbus), err);
return err;
}
nop->phy.otg->state = gpiod_get_value(nop->gpiod_vbus) ?
OTG_STATE_B_PERIPHERAL : OTG_STATE_B_IDLE;
}
nop->phy.init = usb_gen_phy_init;
nop->phy.shutdown = usb_gen_phy_shutdown;
err = usb_add_phy_dev(&nop->phy);
if (err) {
dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
err);
return err;
}
platform_set_drvdata(pdev, nop);
device_set_wakeup_capable(&pdev->dev,
of_property_read_bool(dn, "wakeup-source"));
return 0;
}
static void usb_phy_generic_remove(struct platform_device *pdev)
{
struct usb_phy_generic *nop = platform_get_drvdata(pdev);
usb_remove_phy(&nop->phy);
}
static const struct of_device_id nop_xceiv_dt_ids[] = {
{ .compatible = "usb-nop-xceiv" },
{ }
};
MODULE_DEVICE_TABLE(of, nop_xceiv_dt_ids);
static struct platform_driver usb_phy_generic_driver = {
.probe = usb_phy_generic_probe,
.remove_new = usb_phy_generic_remove,
.driver = {
.name = "usb_phy_generic",
.of_match_table = nop_xceiv_dt_ids,
},
};
static int __init usb_phy_generic_init(void)
{
return platform_driver_register(&usb_phy_generic_driver);
}
subsys_initcall(usb_phy_generic_init);
static void __exit usb_phy_generic_exit(void)
{
platform_driver_unregister(&usb_phy_generic_driver);
}
module_exit(usb_phy_generic_exit);
MODULE_ALIAS("platform:usb_phy_generic");
MODULE_AUTHOR("Texas Instruments Inc");
MODULE_DESCRIPTION("NOP USB Transceiver driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/phy/phy-generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tahvo USB transceiver driver
*
* Copyright (C) 2005-2006 Nokia Corporation
*
* Parts copied from isp1301_omap.c.
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
*
* Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
* Modified for Retu/Tahvo MFD by Aaro Koskinen.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
#include <linux/mfd/retu.h>
#include <linux/usb/gadget.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "tahvo-usb"
#define TAHVO_REG_IDSR 0x02
#define TAHVO_REG_USBR 0x06
#define USBR_SLAVE_CONTROL (1 << 8)
#define USBR_VPPVIO_SW (1 << 7)
#define USBR_SPEED (1 << 6)
#define USBR_REGOUT (1 << 5)
#define USBR_MASTER_SW2 (1 << 4)
#define USBR_MASTER_SW1 (1 << 3)
#define USBR_SLAVE_SW (1 << 2)
#define USBR_NSUSPEND (1 << 1)
#define USBR_SEMODE (1 << 0)
#define TAHVO_MODE_HOST 0
#define TAHVO_MODE_PERIPHERAL 1
struct tahvo_usb {
struct platform_device *pt_dev;
struct usb_phy phy;
int vbus_state;
struct mutex serialize;
struct clk *ick;
int irq;
int tahvo_mode;
struct extcon_dev *extcon;
};
static const unsigned int tahvo_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static ssize_t vbus_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
}
static DEVICE_ATTR_RO(vbus);
static void check_vbus_state(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
int reg, prev_state;
reg = retu_read(rdev, TAHVO_REG_IDSR);
if (reg & TAHVO_STAT_VBUS) {
switch (tu->phy.otg->state) {
case OTG_STATE_B_IDLE:
/* Enable the gadget driver */
if (tu->phy.otg->gadget)
usb_gadget_vbus_connect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_PERIPHERAL;
usb_phy_set_event(&tu->phy, USB_EVENT_ENUMERATED);
break;
case OTG_STATE_A_IDLE:
/*
* Session is now valid assuming the USB hub is driving
* Vbus.
*/
tu->phy.otg->state = OTG_STATE_A_HOST;
break;
default:
break;
}
dev_info(&tu->pt_dev->dev, "USB cable connected\n");
} else {
switch (tu->phy.otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_IDLE;
usb_phy_set_event(&tu->phy, USB_EVENT_NONE);
break;
case OTG_STATE_A_HOST:
tu->phy.otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
}
dev_info(&tu->pt_dev->dev, "USB cable disconnected\n");
}
prev_state = tu->vbus_state;
tu->vbus_state = reg & TAHVO_STAT_VBUS;
if (prev_state != tu->vbus_state) {
extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state);
		sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus");
}
}
static void tahvo_usb_become_host(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, true);
/* Power up the transceiver in USB host mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
USBR_MASTER_SW2 | USBR_MASTER_SW1);
tu->phy.otg->state = OTG_STATE_A_IDLE;
check_vbus_state(tu);
}
static void tahvo_usb_stop_host(struct tahvo_usb *tu)
{
tu->phy.otg->state = OTG_STATE_A_IDLE;
}
static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, false);
/* Power up transceiver and set it in USB peripheral mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
USBR_NSUSPEND | USBR_SLAVE_SW);
tu->phy.otg->state = OTG_STATE_B_IDLE;
check_vbus_state(tu);
}
static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
{
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_IDLE;
}
static void tahvo_usb_power_off(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
/* Disable gadget controller if any */
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
/* Power off transceiver */
retu_write(rdev, TAHVO_REG_USBR, 0);
tu->phy.otg->state = OTG_STATE_UNDEFINED;
}
static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
{
struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, phy);
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
u16 w;
dev_dbg(&tu->pt_dev->dev, "%s\n", __func__);
w = retu_read(rdev, TAHVO_REG_USBR);
if (suspend)
w &= ~USBR_NSUSPEND;
else
w |= USBR_NSUSPEND;
retu_write(rdev, TAHVO_REG_USBR, w);
return 0;
}
static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
mutex_lock(&tu->serialize);
if (host == NULL) {
if (tu->tahvo_mode == TAHVO_MODE_HOST)
tahvo_usb_power_off(tu);
otg->host = NULL;
mutex_unlock(&tu->serialize);
return 0;
}
if (tu->tahvo_mode == TAHVO_MODE_HOST) {
otg->host = NULL;
tahvo_usb_become_host(tu);
}
otg->host = host;
mutex_unlock(&tu->serialize);
return 0;
}
static int tahvo_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
mutex_lock(&tu->serialize);
if (!gadget) {
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_power_off(tu);
tu->phy.otg->gadget = NULL;
mutex_unlock(&tu->serialize);
return 0;
}
tu->phy.otg->gadget = gadget;
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_become_peripheral(tu);
mutex_unlock(&tu->serialize);
return 0;
}
static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu)
{
struct tahvo_usb *tu = _tu;
mutex_lock(&tu->serialize);
check_vbus_state(tu);
mutex_unlock(&tu->serialize);
return IRQ_HANDLED;
}
static ssize_t otg_mode_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
switch (tu->tahvo_mode) {
case TAHVO_MODE_HOST:
return sprintf(buf, "host\n");
case TAHVO_MODE_PERIPHERAL:
return sprintf(buf, "peripheral\n");
}
return -EINVAL;
}
static ssize_t otg_mode_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
int r;
mutex_lock(&tu->serialize);
if (count >= 4 && strncmp(buf, "host", 4) == 0) {
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_stop_peripheral(tu);
tu->tahvo_mode = TAHVO_MODE_HOST;
if (tu->phy.otg->host) {
dev_info(device, "HOST mode: host controller present\n");
tahvo_usb_become_host(tu);
} else {
dev_info(device, "HOST mode: no host controller, powering off\n");
tahvo_usb_power_off(tu);
}
r = strlen(buf);
} else if (count >= 10 && strncmp(buf, "peripheral", 10) == 0) {
if (tu->tahvo_mode == TAHVO_MODE_HOST)
tahvo_usb_stop_host(tu);
tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
if (tu->phy.otg->gadget) {
dev_info(device, "PERIPHERAL mode: gadget driver present\n");
tahvo_usb_become_peripheral(tu);
} else {
dev_info(device, "PERIPHERAL mode: no gadget driver, powering off\n");
tahvo_usb_power_off(tu);
}
r = strlen(buf);
} else {
r = -EINVAL;
}
mutex_unlock(&tu->serialize);
return r;
}
static DEVICE_ATTR_RW(otg_mode);
static struct attribute *tahvo_attrs[] = {
&dev_attr_vbus.attr,
&dev_attr_otg_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(tahvo);
static int tahvo_usb_probe(struct platform_device *pdev)
{
struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
struct tahvo_usb *tu;
int ret;
tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL);
if (!tu)
return -ENOMEM;
tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg),
GFP_KERNEL);
if (!tu->phy.otg)
return -ENOMEM;
tu->pt_dev = pdev;
/* Default mode */
#ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT
tu->tahvo_mode = TAHVO_MODE_HOST;
#else
tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
#endif
mutex_init(&tu->serialize);
tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick");
if (!IS_ERR(tu->ick))
clk_enable(tu->ick);
/*
* Set initial state, so that we generate kevents only on state changes.
*/
tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = PTR_ERR(tu->extcon);
goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
if (ret) {
dev_err(&pdev->dev, "could not register extcon device: %d\n",
ret);
goto err_disable_clk;
}
/* Set the initial cable state. */
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST,
tu->tahvo_mode == TAHVO_MODE_HOST);
extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state);
/* Create OTG interface */
tahvo_usb_power_off(tu);
tu->phy.dev = &pdev->dev;
tu->phy.otg->state = OTG_STATE_UNDEFINED;
tu->phy.label = DRIVER_NAME;
tu->phy.set_suspend = tahvo_usb_set_suspend;
tu->phy.otg->usb_phy = &tu->phy;
tu->phy.otg->set_host = tahvo_usb_set_host;
tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;
ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2);
if (ret < 0) {
dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
ret);
goto err_disable_clk;
}
dev_set_drvdata(&pdev->dev, tu);
tu->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_remove_phy;
ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
IRQF_ONESHOT,
"tahvo-vbus", tu);
if (ret) {
dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
ret);
goto err_remove_phy;
}
return 0;
err_remove_phy:
usb_remove_phy(&tu->phy);
err_disable_clk:
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
return ret;
}
static void tahvo_usb_remove(struct platform_device *pdev)
{
struct tahvo_usb *tu = platform_get_drvdata(pdev);
free_irq(tu->irq, tu);
usb_remove_phy(&tu->phy);
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
}
static struct platform_driver tahvo_usb_driver = {
.probe = tahvo_usb_probe,
.remove_new = tahvo_usb_remove,
.driver = {
.name = "tahvo-usb",
.dev_groups = tahvo_groups,
},
};
module_platform_driver(tahvo_usb_driver);
MODULE_DESCRIPTION("Tahvo USB transceiver driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | drivers/usb/phy/phy-tahvo.c |
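A minimal user-space sketch (not part of the kernel tree) of driving the otg_mode attribute that phy-tahvo.c registers via DEVICE_ATTR_RW(otg_mode); the sysfs path below is an assumption and depends on how the tahvo-usb platform device is named on a given board:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed path; adjust to the actual tahvo-usb device node. */
	const char *attr = "/sys/bus/platform/devices/tahvo-usb/otg_mode";
	const char *mode = "peripheral";	/* the store handler also accepts "host" */
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fwrite(mode, 1, strlen(mode), f) != strlen(mode))
		perror("fwrite");
	fclose(f);
	return 0;
}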
// SPDX-License-Identifier: GPL-2.0
/*
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
* Copyright Hans de Goede <[email protected]> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "uas-detect.h"
#include "scsiglue.h"
#define MAX_CMNDS 256
struct uas_dev_info {
struct usb_interface *intf;
struct usb_device *udev;
struct usb_anchor cmd_urbs;
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
unsigned long flags;
int qdepth, resetting;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
unsigned shutdown:1;
struct scsi_cmnd *cmnd[MAX_CMNDS];
spinlock_t lock;
struct work_struct work;
struct work_struct scan_work; /* for async scanning */
};
enum {
SUBMIT_STATUS_URB = BIT(1),
ALLOC_DATA_IN_URB = BIT(2),
SUBMIT_DATA_IN_URB = BIT(3),
ALLOC_DATA_OUT_URB = BIT(4),
SUBMIT_DATA_OUT_URB = BIT(5),
ALLOC_CMD_URB = BIT(6),
SUBMIT_CMD_URB = BIT(7),
COMMAND_INFLIGHT = BIT(8),
DATA_IN_URB_INFLIGHT = BIT(9),
DATA_OUT_URB_INFLIGHT = BIT(10),
COMMAND_ABORTED = BIT(11),
IS_IN_WORK_LIST = BIT(12),
};
/* Overrides scsi_pointer */
struct uas_cmd_info {
unsigned int state;
unsigned int uas_tag;
struct urb *cmd_urb;
struct urb *data_in_urb;
struct urb *data_out_urb;
};
/* I hate forward declarations, but I actually have a loop */
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
/*
* This driver needs its own workqueue, as we need to control memory allocation.
*
* In the course of error handling and power management uas_wait_for_pending_cmnds()
* needs to flush pending work items. In these contexts we cannot allocate memory
* by doing block IO as we would deadlock. For the same reason we cannot wait
* for anything allocating memory not heeding these constraints.
*
* So we have to control all work items that can be on the workqueue we flush.
* Hence we cannot share a queue and need our own.
*/
static struct workqueue_struct *workqueue;
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
container_of(work, struct uas_dev_info, work);
struct uas_cmd_info *cmdinfo;
struct scsi_cmnd *cmnd;
unsigned long flags;
int i, err;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting)
goto out;
for (i = 0; i < devinfo->qdepth; i++) {
if (!devinfo->cmnd[i])
continue;
cmnd = devinfo->cmnd[i];
cmdinfo = scsi_cmd_priv(cmnd);
if (!(cmdinfo->state & IS_IN_WORK_LIST))
continue;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata);
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
else
queue_work(workqueue, &devinfo->work);
}
out:
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_scan_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
container_of(work, struct uas_dev_info, scan_work);
struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
dev_dbg(&devinfo->intf->dev, "starting scan\n");
scsi_scan_host(shost);
dev_dbg(&devinfo->intf->dev, "scan complete\n");
}
static void uas_add_work(struct scsi_cmnd *cmnd)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = cmnd->device->hostdata;
lockdep_assert_held(&devinfo->lock);
cmdinfo->state |= IS_IN_WORK_LIST;
queue_work(workqueue, &devinfo->work);
}
static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
{
struct uas_cmd_info *cmdinfo;
struct scsi_cmnd *cmnd;
unsigned long flags;
int i, err;
spin_lock_irqsave(&devinfo->lock, flags);
for (i = 0; i < devinfo->qdepth; i++) {
if (!devinfo->cmnd[i])
continue;
cmnd = devinfo->cmnd[i];
cmdinfo = scsi_cmd_priv(cmnd);
uas_log_cmd_state(cmnd, __func__, 0);
/* Sense urbs were killed, clear COMMAND_INFLIGHT manually */
cmdinfo->state &= ~COMMAND_INFLIGHT;
cmnd->result = result << 16;
err = uas_try_complete(cmnd, __func__);
WARN_ON(err != 0);
}
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
{
struct sense_iu *sense_iu = urb->transfer_buffer;
struct scsi_device *sdev = cmnd->device;
if (urb->actual_length > 16) {
unsigned len = be16_to_cpup(&sense_iu->len);
if (len + 16 != urb->actual_length) {
int newlen = min(len + 16, urb->actual_length) - 16;
if (newlen < 0)
newlen = 0;
sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
"disagrees with IU sense data length %d, "
"using %d bytes of sense data\n", __func__,
urb->actual_length, len, newlen);
len = newlen;
}
memcpy(cmnd->sense_buffer, sense_iu->sense, len);
}
cmnd->result = sense_iu->status;
}
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status)
{
struct uas_cmd_info *ci = scsi_cmd_priv(cmnd);
if (status == -ENODEV) /* too late */
return;
scmd_printk(KERN_INFO, cmnd,
"%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
prefix, status, ci->uas_tag,
(ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
(ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
(ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "",
(ci->state & ALLOC_DATA_OUT_URB) ? " a-out" : "",
(ci->state & SUBMIT_DATA_OUT_URB) ? " s-out" : "",
(ci->state & ALLOC_CMD_URB) ? " a-cmd" : "",
(ci->state & SUBMIT_CMD_URB) ? " s-cmd" : "",
(ci->state & COMMAND_INFLIGHT) ? " CMD" : "",
(ci->state & DATA_IN_URB_INFLIGHT) ? " IN" : "",
(ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT" : "",
(ci->state & COMMAND_ABORTED) ? " abort" : "",
(ci->state & IS_IN_WORK_LIST) ? " work" : "");
scsi_print_command(cmnd);
}
static void uas_free_unsubmitted_urbs(struct scsi_cmnd *cmnd)
{
struct uas_cmd_info *cmdinfo;
if (!cmnd)
return;
cmdinfo = scsi_cmd_priv(cmnd);
if (cmdinfo->state & SUBMIT_CMD_URB)
usb_free_urb(cmdinfo->cmd_urb);
/* data urbs may have never gotten their submit flag set */
if (!(cmdinfo->state & DATA_IN_URB_INFLIGHT))
usb_free_urb(cmdinfo->data_in_urb);
if (!(cmdinfo->state & DATA_OUT_URB_INFLIGHT))
usb_free_urb(cmdinfo->data_out_urb);
}
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
lockdep_assert_held(&devinfo->lock);
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT |
COMMAND_ABORTED))
return -EBUSY;
devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
uas_free_unsubmitted_urbs(cmnd);
scsi_done(cmnd);
return 0;
}
static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
unsigned direction)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
int err;
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata);
if (err) {
uas_add_work(cmnd);
}
}
static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *cmnd)
{
u8 response_code = riu->response_code;
switch (response_code) {
case RC_INCORRECT_LUN:
set_host_byte(cmnd, DID_BAD_TARGET);
break;
case RC_TMF_SUCCEEDED:
set_host_byte(cmnd, DID_OK);
break;
case RC_TMF_NOT_SUPPORTED:
set_host_byte(cmnd, DID_BAD_TARGET);
break;
default:
uas_log_cmd_state(cmnd, "response iu", response_code);
set_host_byte(cmnd, DID_ERROR);
break;
}
return response_code == RC_TMF_SUCCEEDED;
}
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
struct Scsi_Host *shost = urb->context;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
int status = urb->status;
bool success;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting)
goto out;
if (status) {
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
}
idx = be16_to_cpup(&iu->tag) - 1;
if (idx >= MAX_CMNDS || !devinfo->cmnd[idx]) {
dev_err(&urb->dev->dev,
"stat urb: no pending cmd for uas-tag %d\n", idx + 1);
goto out;
}
cmnd = devinfo->cmnd[idx];
cmdinfo = scsi_cmd_priv(cmnd);
if (!(cmdinfo->state & COMMAND_INFLIGHT)) {
uas_log_cmd_state(cmnd, "unexpected status cmplt", 0);
goto out;
}
switch (iu->iu_id) {
case IU_ID_STATUS:
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
break;
case IU_ID_READ_READY:
if (!cmdinfo->data_in_urb ||
(cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
uas_log_cmd_state(cmnd, "unexpected read rdy", 0);
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
break;
case IU_ID_WRITE_READY:
if (!cmdinfo->data_out_urb ||
(cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
uas_log_cmd_state(cmnd, "unexpected write rdy", 0);
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
case IU_ID_RESPONSE:
cmdinfo->state &= ~COMMAND_INFLIGHT;
success = uas_evaluate_response_iu((struct response_iu *)iu, cmnd);
if (!success) {
/* Error, cancel data transfers */
data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
}
uas_try_complete(cmnd, __func__);
break;
default:
uas_log_cmd_state(cmnd, "bogus IU", iu->iu_id);
}
out:
usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
/* Unlinking of data urbs must be done without holding the lock */
if (data_in_urb) {
usb_unlink_urb(data_in_urb);
usb_put_urb(data_in_urb);
}
if (data_out_urb) {
usb_unlink_urb(data_out_urb);
usb_put_urb(data_out_urb);
}
}
static void uas_data_cmplt(struct urb *urb)
{
struct scsi_cmnd *cmnd = urb->context;
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
if (cmdinfo->data_in_urb == urb) {
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
cmdinfo->data_in_urb = NULL;
} else if (cmdinfo->data_out_urb == urb) {
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
cmdinfo->data_out_urb = NULL;
}
if (devinfo->resetting)
goto out;
/* Data urbs should not complete before the cmd urb is submitted */
if (cmdinfo->state & SUBMIT_CMD_URB) {
uas_log_cmd_state(cmnd, "unexpected data cmplt", 0);
goto out;
}
if (status) {
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transferred */
scsi_set_resid(cmnd, sdb->length);
} else {
scsi_set_resid(cmnd, sdb->length - urb->actual_length);
}
uas_try_complete(cmnd, __func__);
out:
usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_cmd_cmplt(struct urb *urb)
{
if (urb->status)
dev_err(&urb->dev->dev, "cmd cmplt err %d\n", urb->status);
usb_free_urb(urb);
}
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct scsi_cmnd *cmnd,
enum dma_data_direction dir)
{
struct usb_device *udev = devinfo->udev;
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned int pipe = (dir == DMA_FROM_DEVICE)
? devinfo->data_in_pipe : devinfo->data_out_pipe;
if (!urb)
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
if (devinfo->use_streams)
urb->stream_id = cmdinfo->uas_tag;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
return urb;
}
static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct sense_iu *iu;
if (!urb)
goto out;
iu = kzalloc(sizeof(*iu), gfp);
if (!iu)
goto free;
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
uas_stat_cmplt, cmnd->device->host);
if (devinfo->use_streams)
urb->stream_id = cmdinfo->uas_tag;
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
free:
usb_free_urb(urb);
return NULL;
}
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct command_iu *iu;
int len;
if (!urb)
goto out;
len = cmnd->cmd_len - 16;
if (len < 0)
len = 0;
len = ALIGN(len, 4);
iu = kzalloc(sizeof(*iu) + len, gfp);
if (!iu)
goto free;
iu->iu_id = IU_ID_COMMAND;
iu->tag = cpu_to_be16(cmdinfo->uas_tag);
iu->prio_attr = UAS_SIMPLE_TAG;
iu->len = len;
int_to_scsilun(sdev->lun, &iu->lun);
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
uas_cmd_cmplt, NULL);
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
free:
usb_free_urb(urb);
return NULL;
}
/*
* Why should I request the Status IU before sending the Command IU? Spec
* says to, but also says the device may receive them in any order. Seems
* daft to me.
*/
static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
{
struct uas_dev_info *devinfo = cmnd->device->hostdata;
struct urb *urb;
int err;
urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
if (!urb)
return NULL;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, "sense submit err", err);
usb_free_urb(urb);
return NULL;
}
return urb;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb;
int err;
lockdep_assert_held(&devinfo->lock);
if (cmdinfo->state & SUBMIT_STATUS_URB) {
urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
if (!urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
if (cmdinfo->state & ALLOC_DATA_IN_URB) {
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_in_urb, GFP_ATOMIC);
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, "data in submit err", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
}
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_out_urb, GFP_ATOMIC);
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, "data out submit err", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
}
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
if (cmdinfo->state & SUBMIT_CMD_URB) {
usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
err = usb_submit_urb(cmdinfo->cmd_urb, GFP_ATOMIC);
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, "cmd submit err", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
}
return 0;
}
static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
unsigned long flags;
int idx, err;
/* Re-check scsi_block_requests now that we have the host lock */
if (cmnd->device->host->host_self_blocked)
return SCSI_MLQUEUE_DEVICE_BUSY;
if ((devinfo->flags & US_FL_NO_ATA_1X) &&
(cmnd->cmnd[0] == ATA_12 || cmnd->cmnd[0] == ATA_16)) {
memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB,
sizeof(usb_stor_sense_invalidCDB));
cmnd->result = SAM_STAT_CHECK_CONDITION;
scsi_done(cmnd);
return 0;
}
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
set_host_byte(cmnd, DID_ERROR);
scsi_done(cmnd);
goto zombie;
}
/* Find a free uas-tag */
for (idx = 0; idx < devinfo->qdepth; idx++) {
if (!devinfo->cmnd[idx])
break;
}
if (idx == devinfo->qdepth) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
memset(cmdinfo, 0, sizeof(*cmdinfo));
cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */
cmdinfo->state = SUBMIT_STATUS_URB | ALLOC_CMD_URB | SUBMIT_CMD_URB;
switch (cmnd->sc_data_direction) {
case DMA_FROM_DEVICE:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
fallthrough;
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
break;
case DMA_NONE:
break;
}
if (!devinfo->use_streams)
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
err = uas_submit_urbs(cmnd, devinfo);
/*
 * In case of fatal errors the SCSI layer is peculiar: a command
 * that has finished counts as a success for the purposes of
 * queueing, no matter how fatal the error.
 */
if (err == -ENODEV) {
set_host_byte(cmnd, DID_ERROR);
scsi_done(cmnd);
goto zombie;
}
if (err) {
/* If we did nothing, give up now */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
uas_add_work(cmnd);
}
devinfo->cmnd[idx] = cmnd;
zombie:
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
static DEF_SCSI_QCMD(uas_queuecommand)
/*
* For now we do not support actually sending an abort to the device, so
* this eh always fails. Still we must define it to make sure that we've
* dropped all references to the cmnd in question once this function exits.
*/
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
uas_log_cmd_state(cmnd, __func__, 0);
/* Ensure that try_complete does not call scsi_done */
cmdinfo->state |= COMMAND_ABORTED;
/* Drop all refs to this cmnd, kill data urbs to break their ref */
devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
if (cmdinfo->state & DATA_IN_URB_INFLIGHT)
data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
if (cmdinfo->state & DATA_OUT_URB_INFLIGHT)
data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
uas_free_unsubmitted_urbs(cmnd);
spin_unlock_irqrestore(&devinfo->lock, flags);
if (data_in_urb) {
usb_kill_urb(data_in_urb);
usb_put_urb(data_in_urb);
}
if (data_out_urb) {
usb_kill_urb(data_out_urb);
usb_put_urb(data_out_urb);
}
return FAILED;
}
static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
struct usb_device *udev = devinfo->udev;
unsigned long flags;
int err;
err = usb_lock_device_for_reset(udev, devinfo->intf);
if (err) {
shost_printk(KERN_ERR, sdev->host,
"%s FAILED to get lock err %d\n", __func__, err);
return FAILED;
}
shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);
spin_lock_irqsave(&devinfo->lock, flags);
devinfo->resetting = 1;
spin_unlock_irqrestore(&devinfo->lock, flags);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_pending(devinfo, DID_RESET);
err = usb_reset_device(udev);
spin_lock_irqsave(&devinfo->lock, flags);
devinfo->resetting = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
usb_unlock_device(udev);
if (err) {
shost_printk(KERN_INFO, sdev->host, "%s FAILED err %d\n",
__func__, err);
return FAILED;
}
shost_printk(KERN_INFO, sdev->host, "%s success\n", __func__);
return SUCCESS;
}
static int uas_target_alloc(struct scsi_target *starget)
{
struct uas_dev_info *devinfo = (struct uas_dev_info *)
dev_to_shost(starget->dev.parent)->hostdata;
if (devinfo->flags & US_FL_NO_REPORT_LUNS)
starget->no_report_luns = 1;
return 0;
}
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
sdev->hostdata = devinfo;
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.
* As this is not exported, we use an extremely conservative guess.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
if (devinfo->flags & US_FL_MAX_SECTORS_64)
blk_queue_max_hw_sectors(sdev->request_queue, 64);
else if (devinfo->flags & US_FL_MAX_SECTORS_240)
blk_queue_max_hw_sectors(sdev->request_queue, 240);
return 0;
}
static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;
/* A few buggy USB-ATA bridges don't understand FUA */
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
/* UAS also needs to support FL_ALWAYS_SYNC */
if (devinfo->flags & US_FL_ALWAYS_SYNC) {
sdev->skip_ms_page_3f = 1;
sdev->skip_ms_page_8 = 1;
sdev->wce_default_on = 1;
}
/* Some disks cannot handle READ_CAPACITY_16 */
if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
/* Some disks cannot handle WRITE_SAME */
if (devinfo->flags & US_FL_NO_SAME)
sdev->no_write_same = 1;
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
* If this device makes that mistake, tell the sd driver.
*/
if (devinfo->flags & US_FL_FIX_CAPACITY)
sdev->fix_capacity = 1;
/*
* in some cases we have to guess
*/
if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
sdev->guess_capacity = 1;
/*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
* to do a 192-byte transfer with this command the
* majority of devices work fine, but a few still can't
* handle it. The sd driver will simply assume those
* devices are write-enabled.
*/
if (devinfo->flags & US_FL_NO_WP_DETECT)
sdev->skip_ms_page_3f = 1;
scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
static const struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = sizeof(struct uas_cmd_info),
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id uas_usb_ids[] = {
# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
{ }
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
#undef UNUSUAL_DEV
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
struct usb_host_interface *alt;
alt = uas_find_uas_alt_setting(intf);
if (!alt)
return -ENODEV;
return usb_set_interface(udev, alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = devinfo->udev;
int r;
r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
if (r)
return r;
devinfo->cmd_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[0]->desc));
devinfo->status_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[1]->desc));
devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[2]->desc));
devinfo->data_out_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[3]->desc));
if (udev->speed < USB_SPEED_SUPER) {
devinfo->qdepth = 32;
devinfo->use_streams = 0;
} else {
devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
3, MAX_CMNDS, GFP_NOIO);
if (devinfo->qdepth < 0)
return devinfo->qdepth;
devinfo->use_streams = 1;
}
return 0;
}
static void uas_free_streams(struct uas_dev_info *devinfo)
{
struct usb_device *udev = devinfo->udev;
struct usb_host_endpoint *eps[3];
eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe);
eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
usb_free_streams(devinfo->intf, eps, 3, GFP_NOIO);
}
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int result = -ENOMEM;
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
unsigned long dev_flags;
if (!uas_use_uas_driver(intf, id, &dev_flags))
return -ENODEV;
if (uas_switch_interface(udev, intf))
return -ENODEV;
shost = scsi_host_alloc(&uas_host_template,
sizeof(struct uas_dev_info));
if (!shost)
goto set_alt0;
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
shost->max_lun = 256;
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
devinfo->shutdown = 0;
devinfo->flags = dev_flags;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
INIT_WORK(&devinfo->work, uas_do_work);
INIT_WORK(&devinfo->scan_work, uas_scan_work);
result = uas_configure_endpoints(devinfo);
if (result)
goto set_alt0;
/*
* 1 tag is reserved for untagged commands +
* 1 tag to avoid off by one errors in some bridge firmwares
*/
shost->can_queue = devinfo->qdepth - 2;
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
goto free_streams;
/* Submit scan_work for async SCSI-device scanning */
schedule_work(&devinfo->scan_work);
return result;
free_streams:
uas_free_streams(devinfo);
usb_set_intfdata(intf, NULL);
set_alt0:
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
if (shost)
scsi_host_put(shost);
return result;
}
static int uas_cmnd_list_empty(struct uas_dev_info *devinfo)
{
unsigned long flags;
int i, r = 1;
spin_lock_irqsave(&devinfo->lock, flags);
for (i = 0; i < devinfo->qdepth; i++) {
if (devinfo->cmnd[i]) {
r = 0; /* Not empty */
break;
}
}
spin_unlock_irqrestore(&devinfo->lock, flags);
return r;
}
/*
 * Wait for any pending cmnds to complete. On usb-2, sense_urbs may
 * temporarily become empty while there is still more work to do, because
 * sense urbs can complete with a READ/WRITE_READY iu code; so keep
 * waiting until the command list itself is empty.
 */
static int uas_wait_for_pending_cmnds(struct uas_dev_info *devinfo)
{
unsigned long start_time;
int r;
start_time = jiffies;
do {
flush_work(&devinfo->work);
r = usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000);
if (r == 0)
return -ETIME;
r = usb_wait_anchor_empty_timeout(&devinfo->data_urbs, 500);
if (r == 0)
return -ETIME;
if (time_after(jiffies, start_time + 5 * HZ))
return -ETIME;
} while (!uas_cmnd_list_empty(devinfo));
return 0;
}
static int uas_pre_reset(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (devinfo->shutdown)
return 0;
/* Block new requests */
spin_lock_irqsave(shost->host_lock, flags);
scsi_block_requests(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
if (uas_wait_for_pending_cmnds(devinfo) != 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
scsi_unblock_requests(shost);
return 1;
}
uas_free_streams(devinfo);
return 0;
}
static int uas_post_reset(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
int err;
if (devinfo->shutdown)
return 0;
err = uas_configure_endpoints(devinfo);
if (err && err != -ENODEV)
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
/* we must unblock the host in every case lest we deadlock */
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_unblock_requests(shost);
return err ? 1 : 0;
}
static int uas_suspend(struct usb_interface *intf, pm_message_t message)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
if (uas_wait_for_pending_cmnds(devinfo) != 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
return -ETIME;
}
return 0;
}
static int uas_resume(struct usb_interface *intf)
{
return 0;
}
static int uas_reset_resume(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
int err;
err = uas_configure_endpoints(devinfo);
if (err) {
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
return -EIO;
}
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
devinfo->resetting = 1;
spin_unlock_irqrestore(&devinfo->lock, flags);
cancel_work_sync(&devinfo->work);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_pending(devinfo, DID_NO_CONNECT);
/*
* Prevent SCSI scanning (if it hasn't started yet)
* or wait for the SCSI-scanning routine to stop.
*/
cancel_work_sync(&devinfo->scan_work);
scsi_remove_host(shost);
uas_free_streams(devinfo);
scsi_host_put(shost);
}
/*
* Put the device back in usb-storage mode on shutdown, as some BIOS-es
* hang on reboot when the device is still in uas mode. Note the reset is
* necessary as some devices won't revert to usb-storage mode without it.
*/
static void uas_shutdown(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
if (system_state != SYSTEM_RESTART)
return;
devinfo->shutdown = 1;
uas_free_streams(devinfo);
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
usb_reset_device(udev);
}
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
.suspend = uas_suspend,
.resume = uas_resume,
.reset_resume = uas_reset_resume,
.drvwrap.driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
static int __init uas_init(void)
{
int rv;
workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
if (!workqueue)
return -ENOMEM;
rv = usb_register(&uas_driver);
if (rv) {
destroy_workqueue(workqueue);
return rv;
}
return 0;
}
static void __exit uas_exit(void)
{
usb_deregister(&uas_driver);
destroy_workqueue(workqueue);
}
module_init(uas_init);
module_exit(uas_exit);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
MODULE_AUTHOR(
"Hans de Goede <[email protected]>, Matthew Wilcox and Sarah Sharp");
| linux-master | drivers/usb/storage/uas.c |
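A self-contained sketch (all names here are assumptions) of the 1-based uas-tag allocation that uas_queuecommand_lck() performs over devinfo->cmnd[]: the first NULL slot yields the tag, and tag == index + 1 so it can double as the USB stream id:

#include <stdio.h>

#define QDEPTH 8	/* stand-in for devinfo->qdepth */

/* Slots mirror devinfo->cmnd[]; a NULL slot means that uas-tag is free. */
static void *slots[QDEPTH];

/* Return a 1-based tag (tag == index + 1, as in the driver), 0 if full. */
static int alloc_tag(void *cmnd)
{
	int idx;

	for (idx = 0; idx < QDEPTH; idx++) {
		if (!slots[idx]) {
			slots[idx] = cmnd;
			return idx + 1;
		}
	}
	return 0;
}

int main(void)
{
	int dummy1, dummy2;
	int t1 = alloc_tag(&dummy1);
	int t2 = alloc_tag(&dummy2);

	printf("tag1=%d tag2=%d\n", t1, t2);	/* prints "tag1=1 tag2=2" */
	return 0;
}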
// SPDX-License-Identifier: GPL-2.0
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "scsiglue.h"
#include "sierra_ms.h"
#include "debug.h"
#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
#define SWIMS_USB_INDEX_SetMode 0x0000
#define SWIMS_SET_MODE_Modem 0x0001
#define TRU_NORMAL 0x01
#define TRU_FORCE_MS 0x02
#define TRU_FORCE_MODEM 0x03
static unsigned int swi_tru_install = 1;
module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
" 2=Force CD-Rom, 3=Force Modem)");
struct swoc_info {
__u8 rev;
__u8 reserved[8];
__u16 LinuxSKU;
__u16 LinuxVer;
__u8 reserved2[47];
} __attribute__((__packed__));
static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
{
if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
(swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
return true;
else
return false;
}
static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
{
int result;
dev_dbg(&udev->dev, "SWIMS: %s", "DEVICE MODE SWITCH\n");
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
eSWocMode, /* __u16 value */
0x0000, /* __u16 index */
NULL, /* void *data */
0, /* __u16 size */
USB_CTRL_SET_TIMEOUT); /* int timeout */
return result;
}
static int sierra_get_swoc_info(struct usb_device *udev,
struct swoc_info *swocInfo)
{
int result;
dev_dbg(&udev->dev, "SWIMS: Attempting to get TRU-Install info\n");
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
0, /* __u16 value */
0, /* __u16 index */
(void *) swocInfo, /* void *data */
sizeof(struct swoc_info), /* __u16 size */
USB_CTRL_SET_TIMEOUT); /* int timeout */
swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
return result;
}
static void debug_swoc(const struct device *dev, struct swoc_info *swocInfo)
{
dev_dbg(dev, "SWIMS: SWoC Rev: %02d\n", swocInfo->rev);
dev_dbg(dev, "SWIMS: Linux SKU: %04X\n", swocInfo->LinuxSKU);
dev_dbg(dev, "SWIMS: Linux Version: %04X\n", swocInfo->LinuxVer);
}
static ssize_t truinst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct swoc_info *swocInfo;
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev = interface_to_usbdev(intf);
int result;
if (swi_tru_install == TRU_FORCE_MS) {
result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
} else {
swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
if (!swocInfo) {
snprintf(buf, PAGE_SIZE, "Error\n");
return -ENOMEM;
}
result = sierra_get_swoc_info(udev, swocInfo);
if (result < 0) {
dev_dbg(dev, "SWIMS: failed SWoC query\n");
kfree(swocInfo);
snprintf(buf, PAGE_SIZE, "Error\n");
return -EIO;
}
debug_swoc(dev, swocInfo);
result = snprintf(buf, PAGE_SIZE,
"REV=%02d SKU=%04X VER=%04X\n",
swocInfo->rev,
swocInfo->LinuxSKU,
swocInfo->LinuxVer);
kfree(swocInfo);
}
return result;
}
static DEVICE_ATTR_RO(truinst);
int sierra_ms_init(struct us_data *us)
{
int result, retries;
struct swoc_info *swocInfo;
struct usb_device *udev;
udev = us->pusb_dev;
/* Force Modem mode */
if (swi_tru_install == TRU_FORCE_MODEM) {
usb_stor_dbg(us, "SWIMS: Forcing Modem Mode\n");
result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
if (result < 0)
usb_stor_dbg(us, "SWIMS: Failed to switch to modem mode\n");
return -EIO;
}
/* Force Mass Storage mode (keep CD-Rom) */
else if (swi_tru_install == TRU_FORCE_MS) {
usb_stor_dbg(us, "SWIMS: Forcing Mass Storage Mode\n");
goto complete;
}
/* Normal TRU-Install Logic */
else {
usb_stor_dbg(us, "SWIMS: Normal SWoC Logic\n");
swocInfo = kmalloc(sizeof(struct swoc_info),
GFP_KERNEL);
if (!swocInfo)
return -ENOMEM;
retries = 3;
do {
retries--;
result = sierra_get_swoc_info(udev, swocInfo);
if (result < 0) {
usb_stor_dbg(us, "SWIMS: Failed SWoC query\n");
schedule_timeout_uninterruptible(2*HZ);
}
} while (retries && result < 0);
if (result < 0) {
usb_stor_dbg(us, "SWIMS: Completely failed SWoC query\n");
kfree(swocInfo);
return -EIO;
}
debug_swoc(&us->pusb_dev->dev, swocInfo);
/*
* If there is no Linux software on the TRU-Install device
* then switch to modem mode
*/
if (!containsFullLinuxPackage(swocInfo)) {
usb_stor_dbg(us, "SWIMS: Switching to Modem Mode\n");
result = sierra_set_ms_mode(udev,
SWIMS_SET_MODE_Modem);
if (result < 0)
usb_stor_dbg(us, "SWIMS: Failed to switch modem\n");
kfree(swocInfo);
return -EIO;
}
kfree(swocInfo);
}
complete:
return device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
}
| linux-master | drivers/usb/storage/sierra_ms.c |
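A self-contained sketch of the SKU window test that containsFullLinuxPackage() applies to the TRU-Install SWoC info; the two ranges are taken directly from the function above:

#include <stdbool.h>
#include <stdio.h>

/* SKUs 0x2100-0x2FFF and 0x7100-0x7FFF carry a full Linux package. */
static bool sku_has_linux(unsigned int sku)
{
	return (sku >= 0x2100 && sku <= 0x2FFF) ||
	       (sku >= 0x7100 && sku <= 0x7FFF);
}

int main(void)
{
	printf("%d %d %d\n", sku_has_linux(0x2100), sku_has_linux(0x3000),
	       sku_has_linux(0x7FFF));	/* prints "1 0 1" */
	return 0;
}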
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Rio Karma
*
* (c) 2006 Bob Copeland <[email protected]>
* (c) 2006 Keith Bennett <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-karma"
MODULE_DESCRIPTION("Driver for Rio Karma");
MODULE_AUTHOR("Bob Copeland <[email protected]>, Keith Bennett <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
#define RIO_PREFIX "RIOP\x00"
#define RIO_PREFIX_LEN 5
#define RIO_SEND_LEN 40
#define RIO_RECV_LEN 0x200
#define RIO_ENTER_STORAGE 0x1
#define RIO_LEAVE_STORAGE 0x2
#define RIO_RESET 0xC
struct karma_data {
int in_storage;
char *recv;
};
static int rio_karma_init(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, karma_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev karma_unusual_dev_list[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
* Send commands to Rio Karma.
*
* For each command we send 40 bytes starting 'RIOP\0' followed by
* the command number and a sequence number, which the device will ack
* with a 512-byte packet with the high four bits set and everything
* else null. Then we send 'RIOP\x80' followed by a zero and the
* sequence number, until byte 5 in the response repeats the sequence
* number.
*/
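/*
 * For illustration, the 40-byte command packet laid out as a struct (the
 * driver builds it by hand in us->iobuf; this type and its field names
 * are assumptions, not part of the driver):
 *
 *	struct rio_karma_cmd {
 *		u8 prefix[RIO_PREFIX_LEN];	// "RIOP\0"; byte 4 becomes
 *						// 0x80 on poll resends
 *		u8 cmd;		// iobuf[5], e.g. RIO_ENTER_STORAGE
 *		u8 seq;		// iobuf[6], wraps 1..255, never 0
 *		u8 pad[33];	// zero-filled up to RIO_SEND_LEN (40)
 *	};
 */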
static int rio_karma_send_command(char cmd, struct us_data *us)
{
int result;
unsigned long timeout;
static unsigned char seq = 1;
struct karma_data *data = (struct karma_data *) us->extra;
usb_stor_dbg(us, "sending command %04x\n", cmd);
memset(us->iobuf, 0, RIO_SEND_LEN);
memcpy(us->iobuf, RIO_PREFIX, RIO_PREFIX_LEN);
us->iobuf[5] = cmd;
us->iobuf[6] = seq;
timeout = jiffies + msecs_to_jiffies(6000);
for (;;) {
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
us->iobuf, RIO_SEND_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
goto err;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
data->recv, RIO_RECV_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
goto err;
if (data->recv[5] == seq)
break;
if (time_after(jiffies, timeout))
goto err;
us->iobuf[4] = 0x80;
us->iobuf[5] = 0;
msleep(50);
}
seq++;
if (seq == 0)
seq = 1;
usb_stor_dbg(us, "sent command %04x\n", cmd);
return 0;
err:
usb_stor_dbg(us, "command %04x failed\n", cmd);
return USB_STOR_TRANSPORT_FAILED;
}
/*
* Trap START_STOP and READ_10 to leave/re-enter storage mode.
* Everything else is propagated to the normal bulk layer.
*/
static int rio_karma_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int ret;
struct karma_data *data = (struct karma_data *) us->extra;
if (srb->cmnd[0] == READ_10 && !data->in_storage) {
ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
if (ret)
return ret;
data->in_storage = 1;
return usb_stor_Bulk_transport(srb, us);
} else if (srb->cmnd[0] == START_STOP) {
ret = rio_karma_send_command(RIO_LEAVE_STORAGE, us);
if (ret)
return ret;
data->in_storage = 0;
return rio_karma_send_command(RIO_RESET, us);
}
return usb_stor_Bulk_transport(srb, us);
}
static void rio_karma_destructor(void *extra)
{
struct karma_data *data = (struct karma_data *) extra;
kfree(data->recv);
}
static int rio_karma_init(struct us_data *us)
{
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
if (!data)
return -ENOMEM;
data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
if (!data->recv) {
kfree(data);
return -ENOMEM;
}
us->extra = data;
us->extra_destructor = rio_karma_destructor;
if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
return -EIO;
data->in_storage = 1;
return 0;
}
static struct scsi_host_template karma_host_template;
static int karma_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct us_data *us;
int result;
result = usb_stor_probe1(&us, intf, id,
(id - karma_usb_ids) + karma_unusual_dev_list,
&karma_host_template);
if (result)
return result;
us->transport_name = "Rio Karma/Bulk";
us->transport = rio_karma_transport;
us->transport_reset = usb_stor_Bulk_reset;
result = usb_stor_probe2(us);
return result;
}
static struct usb_driver karma_driver = {
.name = DRV_NAME,
.probe = karma_probe,
.disconnect = usb_stor_disconnect,
.suspend = usb_stor_suspend,
.resume = usb_stor_resume,
.reset_resume = usb_stor_reset_resume,
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = karma_usb_ids,
.soft_unbind = 1,
.no_dynamic_id = 1,
};
module_usb_stor_driver(karma_driver, karma_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/karma.c |
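A tiny standalone demonstration of the sequence-counter wrap rule used by rio_karma_send_command(); the driver never uses 0 as a sequence number:

#include <stdio.h>

/* Mirrors the wrap rule in rio_karma_send_command(): 255 -> 1, never 0. */
static unsigned char next_seq(unsigned char seq)
{
	seq++;
	if (seq == 0)
		seq = 1;
	return seq;
}

int main(void)
{
	printf("%u %u %u\n", next_seq(1), next_seq(254), next_seq(255));
	/* prints "2 255 1" */
	return 0;
}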
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm ([email protected])
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. ([email protected])
* (c) 2000 Stephen J. Gowdy ([email protected])
* (c) 2002 Alan Stern <[email protected]>
*
* Initial work by:
* (c) 1999 Michael Gee ([email protected])
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/usb/quirks.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "scsiglue.h"
#include "debug.h"
#include <linux/blkdev.h>
#include "../../scsi/sd.h"
/***********************************************************************
* Data transfer routines
***********************************************************************/
/*
* This is subtle, so pay attention:
* ---------------------------------
* We're very concerned about races with a command abort. Hanging this code
* is a sure fire way to hang the kernel. (Note that this discussion applies
* only to transactions resulting from a scsi queued-command, since only
* these transactions are subject to a scsi abort. Other transactions, such
* as those occurring during device-specific initialization, must be handled
* by a separate code path.)
*
* The abort function (usb_storage_command_abort() in scsiglue.c) first
* sets the machine state and the ABORTING bit in us->dflags to prevent
* new URBs from being submitted. It then calls usb_stor_stop_transport()
* below, which atomically tests-and-clears the URB_ACTIVE bit in us->dflags
* to see if the current_urb needs to be stopped. Likewise, the SG_ACTIVE
* bit is tested to see if the current_sg scatter-gather request needs to be
* stopped. The timeout callback routine does much the same thing.
*
* When a disconnect occurs, the DISCONNECTING bit in us->dflags is set to
* prevent new URBs from being submitted, and usb_stor_stop_transport() is
* called to stop any ongoing requests.
*
* The submit function first verifies that the submitting is allowed
* (neither ABORTING nor DISCONNECTING bits are set) and that the submit
* completes without errors, and only then sets the URB_ACTIVE bit. This
* prevents the stop_transport() function from trying to cancel the URB
* while the submit call is underway. Next, the submit function must test
* the flags to see if an abort or disconnect occurred during the submission
* or before the URB_ACTIVE bit was set. If so, it's essential to cancel
* the URB if it hasn't been cancelled already (i.e., if the URB_ACTIVE bit
* is still set). Either way, the function must then wait for the URB to
* finish. Note that the URB can still be in progress even after a call to
* usb_unlink_urb() returns.
*
* The idea is that (1) once the ABORTING or DISCONNECTING bit is set,
* either the stop_transport() function or the submitting function
* is guaranteed to call usb_unlink_urb() for an active URB,
* and (2) test_and_clear_bit() prevents usb_unlink_urb() from being
* called more than once or from being called during usb_submit_urb().
*/
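/*
 * Condensed sketch of that interlock (illustration only, not driver code):
 *
 *	submitting path:		abort path:
 *	  usb_submit_urb(urb);		  set_bit(ABORTING);
 *	  set_bit(URB_ACTIVE);		  if (test_and_clear_bit(URB_ACTIVE))
 *	  if (test_bit(ABORTING) &&		  usb_unlink_urb(urb);
 *	      test_and_clear_bit(URB_ACTIVE))
 *		  usb_unlink_urb(urb);
 *	  wait_for_completion(&urb_done);
 *
 * Whichever side clears URB_ACTIVE first performs the unlink; the other
 * side finds the bit already clear and does nothing, so the URB is
 * unlinked at most once and never before its submission has succeeded.
 */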
/*
* This is the completion handler which will wake us up when an URB
* completes.
*/
static void usb_stor_blocking_completion(struct urb *urb)
{
struct completion *urb_done_ptr = urb->context;
complete(urb_done_ptr);
}
/*
* This is the common part of the URB message submission code
*
* All URBs from the usb-storage driver involved in handling a queued scsi
* command _must_ pass through this function (or something like it) for the
* abort mechanisms to work properly.
*/
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
struct completion urb_done;
long timeleft;
int status;
/* don't submit URBs during abort processing */
if (test_bit(US_FLIDX_ABORTING, &us->dflags))
return -EIO;
/* set up data structures for the wakeup system */
init_completion(&urb_done);
/* fill the common fields in the URB */
us->current_urb->context = &urb_done;
us->current_urb->transfer_flags = 0;
/*
* we assume that if transfer_buffer isn't us->iobuf then it
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
* transfer buffer has already been mapped.
*/
if (us->current_urb->transfer_buffer == us->iobuf)
us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
us->current_urb->transfer_dma = us->iobuf_dma;
/* submit the URB */
status = usb_submit_urb(us->current_urb, GFP_NOIO);
if (status) {
/* something went wrong */
return status;
}
/*
* since the URB has been submitted successfully, it's now okay
* to cancel it
*/
set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {
/* cancel the URB, if it hasn't been cancelled already */
if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
usb_stor_dbg(us, "-- cancelling URB\n");
usb_unlink_urb(us->current_urb);
}
}
/* wait for the completion of the URB */
timeleft = wait_for_completion_interruptible_timeout(
&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);
clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);
if (timeleft <= 0) {
usb_stor_dbg(us, "%s -- cancelling URB\n",
timeleft == 0 ? "Timeout" : "Signal");
usb_kill_urb(us->current_urb);
}
/* return the URB status */
return us->current_urb->status;
}
/*
* Transfer one control message, with timeouts, and allowing early
* termination. Return codes are usual -Exxx, *not* USB_STOR_XFER_xxx.
*/
int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
int status;
usb_stor_dbg(us, "rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
request, requesttype, value, index, size);
/* fill in the devrequest structure */
us->cr->bRequestType = requesttype;
us->cr->bRequest = request;
us->cr->wValue = cpu_to_le16(value);
us->cr->wIndex = cpu_to_le16(index);
us->cr->wLength = cpu_to_le16(size);
/* fill and submit the URB */
usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
(unsigned char*) us->cr, data, size,
usb_stor_blocking_completion, NULL);
status = usb_stor_msg_common(us, timeout);
/* return the actual length of the data transferred if no error */
if (status == 0)
status = us->current_urb->actual_length;
return status;
}
EXPORT_SYMBOL_GPL(usb_stor_control_msg);
/*
* This is a version of usb_clear_halt() that allows early termination and
* doesn't read the status from the device -- this is because some devices
* crash their internal firmware when the status is requested after a halt.
*
* A definitive list of these 'bad' devices is too difficult to maintain or
* make complete enough to be useful. This problem was first observed on the
* Hagiwara FlashGate DUAL unit. However, bus traces reveal that neither
* MacOS nor Windows checks the status after clearing a halt.
*
* Since many vendors in this space limit their testing to interoperability
* with these two OSes, specification violations like this one are common.
*/
int usb_stor_clear_halt(struct us_data *us, unsigned int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein(pipe))
endp |= USB_DIR_IN;
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp,
NULL, 0, 3*HZ);
if (result >= 0)
usb_reset_endpoint(us->pusb_dev, endp);
usb_stor_dbg(us, "result = %d\n", result);
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_clear_halt);
/*
* Interpret the results of a URB transfer
*
* This function prints appropriate debugging messages, clears halts on
* non-control endpoints, and translates the status to the corresponding
* USB_STOR_XFER_xxx return code.
*/
static int interpret_urb_result(struct us_data *us, unsigned int pipe,
unsigned int length, int result, unsigned int partial)
{
usb_stor_dbg(us, "Status code %d; transferred %u/%u\n",
result, partial, length);
switch (result) {
/* no error code; did we send all the data? */
case 0:
if (partial != length) {
usb_stor_dbg(us, "-- short transfer\n");
return USB_STOR_XFER_SHORT;
}
usb_stor_dbg(us, "-- transfer complete\n");
return USB_STOR_XFER_GOOD;
/* stalled */
case -EPIPE:
/*
* for control endpoints, (used by CB[I]) a stall indicates
* a failed command
*/
if (usb_pipecontrol(pipe)) {
usb_stor_dbg(us, "-- stall on control pipe\n");
return USB_STOR_XFER_STALLED;
}
/* for other sorts of endpoint, clear the stall */
usb_stor_dbg(us, "clearing endpoint halt for pipe 0x%x\n",
pipe);
if (usb_stor_clear_halt(us, pipe) < 0)
return USB_STOR_XFER_ERROR;
return USB_STOR_XFER_STALLED;
/* babble - the device tried to send more than we wanted to read */
case -EOVERFLOW:
usb_stor_dbg(us, "-- babble\n");
return USB_STOR_XFER_LONG;
/* the transfer was cancelled by abort, disconnect, or timeout */
case -ECONNRESET:
usb_stor_dbg(us, "-- transfer cancelled\n");
return USB_STOR_XFER_ERROR;
/* short scatter-gather read transfer */
case -EREMOTEIO:
usb_stor_dbg(us, "-- short read transfer\n");
return USB_STOR_XFER_SHORT;
/* abort or disconnect in progress */
case -EIO:
usb_stor_dbg(us, "-- abort or disconnect in progress\n");
return USB_STOR_XFER_ERROR;
/* the catch-all error case */
default:
usb_stor_dbg(us, "-- unknown error\n");
return USB_STOR_XFER_ERROR;
}
}
/*
* Transfer one control message, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx.
*/
int usb_stor_ctrl_transfer(struct us_data *us, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size)
{
int result;
usb_stor_dbg(us, "rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
request, requesttype, value, index, size);
/* fill in the devrequest structure */
us->cr->bRequestType = requesttype;
us->cr->bRequest = request;
us->cr->wValue = cpu_to_le16(value);
us->cr->wIndex = cpu_to_le16(index);
us->cr->wLength = cpu_to_le16(size);
/* fill and submit the URB */
usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
(unsigned char *) us->cr, data, size,
usb_stor_blocking_completion, NULL);
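/* a timeout of 0 means no timeout; abort processing can still cancel the URB early */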
result = usb_stor_msg_common(us, 0);
return interpret_urb_result(us, pipe, size, result,
us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_ctrl_transfer);
/*
* Receive one interrupt buffer, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx.
*
* This routine always uses us->recv_intr_pipe as the pipe and
* us->ep_bInterval as the interrupt interval.
*/
static int usb_stor_intr_transfer(struct us_data *us, void *buf,
unsigned int length)
{
int result;
unsigned int pipe = us->recv_intr_pipe;
unsigned int maxp;
usb_stor_dbg(us, "xfer %u bytes\n", length);
/* calculate the max packet size */
maxp = usb_maxpacket(us->pusb_dev, pipe);
if (maxp > length)
maxp = length;
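/* limit the transfer to a single packet; the lone caller reads the 2-byte CBI status, which always fits */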
/* fill and submit the URB */
usb_fill_int_urb(us->current_urb, us->pusb_dev, pipe, buf,
maxp, usb_stor_blocking_completion, NULL,
us->ep_bInterval);
result = usb_stor_msg_common(us, 0);
return interpret_urb_result(us, pipe, length, result,
us->current_urb->actual_length);
}
/*
* Transfer one buffer via bulk pipe, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx. If the bulk pipe
* stalls during the transfer, the halt is automatically cleared.
*/
int usb_stor_bulk_transfer_buf(struct us_data *us, unsigned int pipe,
void *buf, unsigned int length, unsigned int *act_len)
{
int result;
usb_stor_dbg(us, "xfer %u bytes\n", length);
/* fill and submit the URB */
usb_fill_bulk_urb(us->current_urb, us->pusb_dev, pipe, buf, length,
usb_stor_blocking_completion, NULL);
result = usb_stor_msg_common(us, 0);
/* store the actual length of the data transferred */
if (act_len)
*act_len = us->current_urb->actual_length;
return interpret_urb_result(us, pipe, length, result,
us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_buf);
/*
* Transfer a scatter-gather list via bulk transfer
*
* This function does basically the same thing as usb_stor_bulk_transfer_buf()
* above, but it uses the usbcore scatter-gather library.
*/
static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
struct scatterlist *sg, int num_sg, unsigned int length,
unsigned int *act_len)
{
int result;
/* don't submit s-g requests during abort processing */
if (test_bit(US_FLIDX_ABORTING, &us->dflags))
goto usb_stor_xfer_error;
/* initialize the scatter-gather request block */
usb_stor_dbg(us, "xfer %u bytes, %d entries\n", length, num_sg);
result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
sg, num_sg, length, GFP_NOIO);
if (result) {
usb_stor_dbg(us, "usb_sg_init returned %d\n", result);
goto usb_stor_xfer_error;
}
/*
* since the block has been initialized successfully, it's now
* okay to cancel it
*/
set_bit(US_FLIDX_SG_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {
/* cancel the request, if it hasn't been cancelled already */
if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
usb_stor_dbg(us, "-- cancelling sg request\n");
usb_sg_cancel(&us->current_sg);
}
}
/* wait for the completion of the transfer */
usb_sg_wait(&us->current_sg);
clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags);
result = us->current_sg.status;
if (act_len)
*act_len = us->current_sg.bytes;
return interpret_urb_result(us, pipe, length, result,
us->current_sg.bytes);
usb_stor_xfer_error:
if (act_len)
*act_len = 0;
return USB_STOR_XFER_ERROR;
}
/*
* Commonly used function. Transfer a complete command
* via usb_stor_bulk_transfer_sglist() above and set the command residue.
*/
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
struct scsi_cmnd* srb)
{
unsigned int partial;
int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
scsi_sg_count(srb), scsi_bufflen(srb),
&partial);
scsi_set_resid(srb, scsi_bufflen(srb) - partial);
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_srb);
/*
* Transfer an entire SCSI command's worth of data payload over the bulk
* pipe.
*
* Note that this uses usb_stor_bulk_transfer_buf() and
* usb_stor_bulk_transfer_sglist() to achieve its goals --
* this function simply determines whether we're going to use
* scatter-gather or not, and acts appropriately.
*/
int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
void *buf, unsigned int length_left, int use_sg, int *residual)
{
int result;
unsigned int partial;
/* are we scatter-gathering? */
if (use_sg) {
/* use the usb core scatter-gather primitives */
result = usb_stor_bulk_transfer_sglist(us, pipe,
(struct scatterlist *) buf, use_sg,
length_left, &partial);
length_left -= partial;
} else {
/* no scatter-gather, just make the request */
result = usb_stor_bulk_transfer_buf(us, pipe, buf,
length_left, &partial);
length_left -= partial;
}
/* store the residual and return the error code */
if (residual)
*residual = length_left;
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_sg);
/***********************************************************************
* Transport routines
***********************************************************************/
/*
* There are so many devices that report the capacity incorrectly,
* this routine was written to counteract some of the resulting
* problems.
*/
static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
{
struct gendisk *disk;
struct scsi_disk *sdkp;
u32 sector;
/* To Report "Medium Error: Record Not Found */
static unsigned char record_not_found[18] = {
[0] = 0x70, /* current error */
[2] = MEDIUM_ERROR, /* = 0x03 */
[7] = 0x0a, /* additional length */
[12] = 0x14 /* Record Not Found */
};
/*
* If last-sector problems can't occur, whether because the
* capacity was already decremented or because the device is
* known to report the correct capacity, then we don't need
* to do anything.
*/
if (!us->use_last_sector_hacks)
return;
/* Was this command a READ(10) or a WRITE(10)? */
if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
goto done;
/* Did this command access the last sector? */
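/* the LBA lives in bytes 2-5 of a READ(10)/WRITE(10) CDB, big-endian */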
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
disk = scsi_cmd_to_rq(srb)->q->disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
if (!sdkp)
goto done;
if (sector + 1 != sdkp->capacity)
goto done;
if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
/*
* The command succeeded. We know this device doesn't
* have the last-sector bug, so stop checking it.
*/
us->use_last_sector_hacks = 0;
} else {
/*
* The command failed. Allow up to 3 retries in case this
* is some normal sort of failure. After that, assume the
* capacity is wrong and we're trying to access the sector
* beyond the end. Replace the result code and sense data
* with values that will cause the SCSI core to fail the
* command immediately, instead of going into an infinite
* (or even just a very long) retry loop.
*/
if (++us->last_sector_retries < 3)
return;
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer, record_not_found,
sizeof(record_not_found));
}
done:
/*
* Don't reset the retry counter for TEST UNIT READY commands,
* because they get issued after device resets which might be
* caused by a failed last-sector access.
*/
if (srb->cmnd[0] != TEST_UNIT_READY)
us->last_sector_retries = 0;
}
/*
* Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
* the device and receive the response.
*/
void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int need_auto_sense;
int result;
/* send the command to the transport layer */
scsi_set_resid(srb, 0);
result = us->transport(srb, us);
/*
* if the command gets aborted by the higher layers, we need to
* short-circuit all other processing
*/
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
usb_stor_dbg(us, "-- command was aborted\n");
srb->result = DID_ABORT << 16;
goto Handle_Errors;
}
/* if there is a transport error, reset and don't auto-sense */
if (result == USB_STOR_TRANSPORT_ERROR) {
usb_stor_dbg(us, "-- transport indicates error, resetting\n");
srb->result = DID_ERROR << 16;
goto Handle_Errors;
}
/* if the transport provided its own sense data, don't auto-sense */
if (result == USB_STOR_TRANSPORT_NO_SENSE) {
srb->result = SAM_STAT_CHECK_CONDITION;
last_sector_hacks(us, srb);
return;
}
srb->result = SAM_STAT_GOOD;
/*
* Determine if we need to auto-sense
*
* I normally don't use a flag like this, but it's almost impossible
* to understand what's going on here if I don't.
*/
need_auto_sense = 0;
/*
* If we're running the CB transport, which is incapable
* of determining status on its own, we will auto-sense
* unless the operation involved a data-in transfer. Devices
* can signal most data-in errors by stalling the bulk-in pipe.
*/
if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_DPCM_USB) &&
srb->sc_data_direction != DMA_FROM_DEVICE) {
usb_stor_dbg(us, "-- CB transport device requiring auto-sense\n");
need_auto_sense = 1;
}
/* Some devices (Kindle) require another command after SYNC CACHE */
if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
srb->cmnd[0] == SYNCHRONIZE_CACHE) {
usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
need_auto_sense = 1;
}
/*
* If we have a failure, we're going to do a REQUEST_SENSE
* automatically. Note that we differentiate between a command
* "failure" and an "error" in the transport mechanism.
*/
if (result == USB_STOR_TRANSPORT_FAILED) {
usb_stor_dbg(us, "-- transport indicates command failure\n");
need_auto_sense = 1;
}
/*
* Determine if this device is SAT by seeing if the
* command executed successfully. Otherwise we'll have
* to wait for at least one CHECK_CONDITION to determine
* SANE_SENSE support
*/
if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
result == USB_STOR_TRANSPORT_GOOD &&
!(us->fflags & US_FL_SANE_SENSE) &&
!(us->fflags & US_FL_BAD_SENSE) &&
!(srb->cmnd[2] & 0x20))) {
usb_stor_dbg(us, "-- SAT supported, increasing auto-sense\n");
us->fflags |= US_FL_SANE_SENSE;
}
/*
* A short transfer on a command where we don't expect it
* is unusual, but it doesn't mean we need to auto-sense.
*/
if ((scsi_get_resid(srb) > 0) &&
!((srb->cmnd[0] == REQUEST_SENSE) ||
(srb->cmnd[0] == INQUIRY) ||
(srb->cmnd[0] == MODE_SENSE) ||
(srb->cmnd[0] == LOG_SENSE) ||
(srb->cmnd[0] == MODE_SENSE_10))) {
usb_stor_dbg(us, "-- unexpectedly short transfer\n");
}
/* Now, if we need to do the auto-sense, let's do it */
if (need_auto_sense) {
int temp_result;
struct scsi_eh_save ses;
int sense_size = US_SENSE_SIZE;
struct scsi_sense_hdr sshdr;
const u8 *scdd;
u8 fm_ili;
/* device supports and needs bigger sense buffer */
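/* (a sense_size of ~0 makes scsi_eh_prep_cmnd request the full SCSI_SENSE_BUFFERSIZE) */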
if (us->fflags & US_FL_SANE_SENSE)
sense_size = ~0;
Retry_Sense:
usb_stor_dbg(us, "Issuing auto-REQUEST_SENSE\n");
scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);
/* FIXME: we must do the protocol translation here */
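/* SCSI-transparent subclasses use the 6-byte REQUEST SENSE; UFI/ATAPI-style subclasses pad every command to 12 bytes */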
if (us->subclass == USB_SC_RBC || us->subclass == USB_SC_SCSI ||
us->subclass == USB_SC_CYP_ATACB)
srb->cmd_len = 6;
else
srb->cmd_len = 12;
/* issue the auto-sense command */
scsi_set_resid(srb, 0);
temp_result = us->transport(us->srb, us);
/* let's clean up right away */
scsi_eh_restore_cmnd(srb, &ses);
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
usb_stor_dbg(us, "-- auto-sense aborted\n");
srb->result = DID_ABORT << 16;
/* If SANE_SENSE caused this problem, disable it */
if (sense_size != US_SENSE_SIZE) {
us->fflags &= ~US_FL_SANE_SENSE;
us->fflags |= US_FL_BAD_SENSE;
}
goto Handle_Errors;
}
/*
* Some devices claim to support larger sense but fail when
* trying to request it. When a transport failure happens
* using US_FL_SANE_SENSE, we always retry with a standard
* (small) sense request. This fixes some USB GSM modems.
*/
if (temp_result == USB_STOR_TRANSPORT_FAILED &&
sense_size != US_SENSE_SIZE) {
usb_stor_dbg(us, "-- auto-sense failure, retry small sense\n");
sense_size = US_SENSE_SIZE;
us->fflags &= ~US_FL_SANE_SENSE;
us->fflags |= US_FL_BAD_SENSE;
goto Retry_Sense;
}
/* Other failures */
if (temp_result != USB_STOR_TRANSPORT_GOOD) {
usb_stor_dbg(us, "-- auto-sense failure\n");
/*
* we skip the reset if this happens to be a
* multi-target device, since failure of an
* auto-sense is perfectly valid
*/
srb->result = DID_ERROR << 16;
if (!(us->fflags & US_FL_SCM_MULT_TARG))
goto Handle_Errors;
return;
}
/*
* If the sense data returned is larger than 18 bytes then we
* assume this device supports requesting more in the future.
* The response code must be 70h through 73h inclusive.
*/
if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
!(us->fflags & US_FL_SANE_SENSE) &&
!(us->fflags & US_FL_BAD_SENSE) &&
(srb->sense_buffer[0] & 0x7C) == 0x70) {
usb_stor_dbg(us, "-- SANE_SENSE support enabled\n");
us->fflags |= US_FL_SANE_SENSE;
/*
* Indicate to the user that we truncated their sense
* because we didn't know it supported larger sense.
*/
usb_stor_dbg(us, "-- Sense data truncated to %i from %i\n",
US_SENSE_SIZE,
srb->sense_buffer[7] + 8);
srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
}
scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
usb_stor_dbg(us, "-- Result from auto-sense is %d\n",
temp_result);
usb_stor_dbg(us, "-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
sshdr.response_code, sshdr.sense_key,
sshdr.asc, sshdr.ascq);
#ifdef CONFIG_USB_STORAGE_DEBUG
usb_stor_show_sense(us, sshdr.sense_key, sshdr.asc, sshdr.ascq);
#endif
/* set the result so the higher layers expect this data */
srb->result = SAM_STAT_CHECK_CONDITION;
scdd = scsi_sense_desc_find(srb->sense_buffer,
SCSI_SENSE_BUFFERSIZE, 4);
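/* pick up the FILEMARK (0x80) and ILI (0x20) bits from descriptor- or fixed-format sense */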
fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
/*
* We often get empty sense data. This could indicate that
* everything worked or that there was an unspecified
* problem. We have to decide which.
*/
if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
fm_ili == 0) {
/*
* If things are really okay, then let's show that.
* Zero out the sense buffer so the higher layers
* won't realize we did an unsolicited auto-sense.
*/
if (result == USB_STOR_TRANSPORT_GOOD) {
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
}
/*
* ATA-passthru commands use sense data to report
* the command completion status, and often devices
* return Check Condition status when nothing is
* wrong.
*/
else if (srb->cmnd[0] == ATA_16 ||
srb->cmnd[0] == ATA_12) {
/* leave the data alone */
}
/*
* If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
else {
srb->result = DID_ERROR << 16;
if ((sshdr.response_code & 0x72) == 0x72)
srb->sense_buffer[1] = HARDWARE_ERROR;
else
srb->sense_buffer[2] = HARDWARE_ERROR;
}
}
}
/*
* Some devices don't work or return incorrect data the first
* time they get a READ(10) command, or for the first READ(10)
* after a media change. If the INITIAL_READ10 flag is set,
* keep track of whether READ(10) commands succeed. If the
* previous one succeeded and this one failed, set the REDO_READ10
* flag to force a retry.
*/
if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
srb->cmnd[0] == READ_10)) {
if (srb->result == SAM_STAT_GOOD) {
set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
} else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
set_bit(US_FLIDX_REDO_READ10, &us->dflags);
}
/*
* Next, if the REDO_READ10 flag is set, return a result
* code that will cause the SCSI core to retry the READ(10)
* command immediately.
*/
if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
srb->result = DID_IMM_RETRY << 16;
srb->sense_buffer[0] = 0;
}
}
/* Did we transfer less than the minimum amount required? */
if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
srb->result = DID_ERROR << 16;
last_sector_hacks(us, srb);
return;
/*
* Error and abort processing: try to resynchronize with the device
* by issuing a port reset. If that fails, try a class-specific
* device reset.
*/
Handle_Errors:
/*
* Set the RESETTING bit, and clear the ABORTING bit so that
* the reset may proceed.
*/
scsi_lock(us_to_host(us));
set_bit(US_FLIDX_RESETTING, &us->dflags);
clear_bit(US_FLIDX_ABORTING, &us->dflags);
scsi_unlock(us_to_host(us));
/*
* We must release the device lock because the pre_reset routine
* will want to acquire it.
*/
mutex_unlock(&us->dev_mutex);
result = usb_stor_port_reset(us);
mutex_lock(&us->dev_mutex);
if (result < 0) {
scsi_lock(us_to_host(us));
usb_stor_report_device_reset(us);
scsi_unlock(us_to_host(us));
us->transport_reset(us);
}
clear_bit(US_FLIDX_RESETTING, &us->dflags);
last_sector_hacks(us, srb);
}
/* Stop the current URB transfer */
void usb_stor_stop_transport(struct us_data *us)
{
/*
* If the state machine is blocked waiting for a URB,
* let's wake it up. The test_and_clear_bit() call
* guarantees that if a URB has just been submitted,
* it won't be cancelled more than once.
*/
if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
usb_stor_dbg(us, "-- cancelling URB\n");
usb_unlink_urb(us->current_urb);
}
/* If we are waiting for a scatter-gather operation, cancel it. */
if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
usb_stor_dbg(us, "-- cancelling sg request\n");
usb_sg_cancel(&us->current_sg);
}
}
/*
* Control/Bulk and Control/Bulk/Interrupt transport
*/
int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
{
unsigned int transfer_length = scsi_bufflen(srb);
unsigned int pipe = 0;
int result;
/* COMMAND STAGE */
/* let's send the command via the control pipe */
/*
* The command sometimes lives on the stack (e.g. after
* scsi_eh_prep_cmnd), and the stack may be vmalloc'ed, so it
* can't be used for DMA. Make a copy.
*/
memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
us->ifnum, us->iobuf, srb->cmd_len);
/* check the return code for the command */
usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
result);
/* if we stalled the command, it means command failed */
if (result == USB_STOR_XFER_STALLED) {
return USB_STOR_TRANSPORT_FAILED;
}
/* Uh oh... serious problem here */
if (result != USB_STOR_XFER_GOOD) {
return USB_STOR_TRANSPORT_ERROR;
}
/* DATA STAGE */
/* transfer the data payload for this command, if one exists */
if (transfer_length) {
pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
result = usb_stor_bulk_srb(us, pipe, srb);
usb_stor_dbg(us, "CBI data stage result is 0x%x\n", result);
/* if we stalled the data transfer it means command failed */
if (result == USB_STOR_XFER_STALLED)
return USB_STOR_TRANSPORT_FAILED;
if (result > USB_STOR_XFER_STALLED)
return USB_STOR_TRANSPORT_ERROR;
}
/* STATUS STAGE */
/*
* NOTE: CB does not have a status stage. Silly, I know. So
* we have to catch this at a higher level.
*/
if (us->protocol != USB_PR_CBI)
return USB_STOR_TRANSPORT_GOOD;
result = usb_stor_intr_transfer(us, us->iobuf, 2);
usb_stor_dbg(us, "Got interrupt data (0x%x, 0x%x)\n",
us->iobuf[0], us->iobuf[1]);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/*
* UFI gives us ASC and ASCQ, like a request sense
*
* REQUEST_SENSE and INQUIRY don't affect the sense data on UFI
* devices, so we ignore the information for those commands. Note
* that this means we could be ignoring a real error on these
* commands, but that can't be helped.
*/
if (us->subclass == USB_SC_UFI) {
if (srb->cmnd[0] == REQUEST_SENSE ||
srb->cmnd[0] == INQUIRY)
return USB_STOR_TRANSPORT_GOOD;
if (us->iobuf[0])
goto Failed;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* If not UFI, we interpret the data as a result code
* The first byte should always be a 0x0.
*
* Some bogus devices don't follow that rule. They stuff the ASC
* into the first byte -- so if it's non-zero, call it a failure.
*/
if (us->iobuf[0]) {
usb_stor_dbg(us, "CBI IRQ data showed reserved bType 0x%x\n",
us->iobuf[0]);
goto Failed;
}
/* The second byte & 0x0F should be 0x0 for good, otherwise error */
switch (us->iobuf[1] & 0x0F) {
case 0x00:
return USB_STOR_TRANSPORT_GOOD;
case 0x01:
goto Failed;
}
return USB_STOR_TRANSPORT_ERROR;
/*
* the CBI spec requires that the bulk pipe must be cleared
* following any data-in/out command failure (section 2.4.3.1.3)
*/
Failed:
if (pipe)
usb_stor_clear_halt(us, pipe);
return USB_STOR_TRANSPORT_FAILED;
}
EXPORT_SYMBOL_GPL(usb_stor_CB_transport);
/*
* Bulk only transport
*/
/* Determine what the maximum LUN supported is */
int usb_stor_Bulk_max_lun(struct us_data *us)
{
int result;
/* issue the command */
us->iobuf[0] = 0;
result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
US_BULK_GET_MAX_LUN,
USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE,
0, us->ifnum, us->iobuf, 1, 10*HZ);
usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
/*
* If we have a successful request, return the result if valid. The
* CBW LUN field is 4 bits wide, so the value reported by the device
* should fit into that.
*/
if (result > 0) {
if (us->iobuf[0] < 16) {
return us->iobuf[0];
} else {
dev_info(&us->pusb_intf->dev,
"Max LUN %d is not valid, using 0 instead",
us->iobuf[0]);
}
}
/*
* Some devices don't like GetMaxLUN. They may STALL the control
* pipe, they may return a zero-length result, they may do nothing at
* all and timeout, or they may fail in even more bizarrely creative
* ways. In these cases the best approach is to use the default
* value: only one LUN.
*/
return 0;
}
int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
unsigned int transfer_length = scsi_bufflen(srb);
unsigned int residue;
int result;
int fake_sense = 0;
unsigned int cswlen;
unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
/* Take care of BULK32 devices; set extra byte to 0 */
if (unlikely(us->fflags & US_FL_BULK32)) {
cbwlen = 32;
us->iobuf[31] = 0;
}
/* set up the command wrapper */
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = cpu_to_le32(transfer_length);
bcb->Flags = srb->sc_data_direction == DMA_FROM_DEVICE ?
US_BULK_FLAG_IN : 0;
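/* the device must echo this tag in the CSW; we verify it below */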
bcb->Tag = ++us->tag;
bcb->Lun = srb->device->lun;
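/* SCM multi-target adapters encode the target ID in the high nibble of the LUN byte */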
if (us->fflags & US_FL_SCM_MULT_TARG)
bcb->Lun |= srb->device->id << 4;
bcb->Length = srb->cmd_len;
/* copy the command payload */
memset(bcb->CDB, 0, sizeof(bcb->CDB));
memcpy(bcb->CDB, srb->cmnd, bcb->Length);
/* send it to out endpoint */
usb_stor_dbg(us, "Bulk Command S 0x%x T 0x%x L %d F %d Trg %d LUN %d CL %d\n",
le32_to_cpu(bcb->Signature), bcb->Tag,
le32_to_cpu(bcb->DataTransferLength), bcb->Flags,
(bcb->Lun >> 4), (bcb->Lun & 0x0F),
bcb->Length);
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
bcb, cbwlen, NULL);
usb_stor_dbg(us, "Bulk command transfer result=%d\n", result);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* DATA STAGE */
/* send/receive data payload, if there is any */
/*
* Some USB-IDE converter chips need a 100us delay between the
* command phase and the data phase. Some devices need a little
* more than that, probably because of clock rate inaccuracies.
*/
if (unlikely(us->fflags & US_FL_GO_SLOW))
usleep_range(125, 150);
if (transfer_length) {
unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
result = usb_stor_bulk_srb(us, pipe, srb);
usb_stor_dbg(us, "Bulk data transfer result 0x%x\n", result);
if (result == USB_STOR_XFER_ERROR)
return USB_STOR_TRANSPORT_ERROR;
/*
* If the device tried to send back more data than the
* amount requested, the spec requires us to transfer
* the CSW anyway. Since there's no point retrying
* the command, we'll return fake sense data indicating
* Illegal Request, Invalid Field in CDB.
*/
if (result == USB_STOR_XFER_LONG)
fake_sense = 1;
/*
* Sometimes a device will mistakenly skip the data phase
* and go directly to the status phase without sending a
* zero-length packet. If we get a 13-byte response here,
* check whether it really is a CSW.
*/
if (result == USB_STOR_XFER_SHORT &&
srb->sc_data_direction == DMA_FROM_DEVICE &&
transfer_length - scsi_get_resid(srb) ==
US_BULK_CS_WRAP_LEN) {
struct scatterlist *sg = NULL;
unsigned int offset = 0;
if (usb_stor_access_xfer_buf((unsigned char *) bcs,
US_BULK_CS_WRAP_LEN, srb, &sg,
&offset, FROM_XFER_BUF) ==
US_BULK_CS_WRAP_LEN &&
bcs->Signature ==
cpu_to_le32(US_BULK_CS_SIGN)) {
usb_stor_dbg(us, "Device skipped data phase\n");
scsi_set_resid(srb, transfer_length);
goto skipped_data_phase;
}
}
}
/*
* See flow chart on pg 15 of the Bulk Only Transport spec for
* an explanation of how this code works.
*/
/* get CSW for device status */
usb_stor_dbg(us, "Attempting to get CSW...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
/*
* Some broken devices add unnecessary zero-length packets to the
* end of their data transfers. Such packets show up as 0-length
* CSWs. If we encounter such a thing, try to read the CSW again.
*/
if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
usb_stor_dbg(us, "Received 0-length CSW; retrying...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
}
/* did the attempt to read the CSW fail? */
if (result == USB_STOR_XFER_STALLED) {
/* get the status again */
usb_stor_dbg(us, "Attempting to get CSW (2nd try)...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, NULL);
}
/* if we still have a failure at this point, we're in trouble */
usb_stor_dbg(us, "Bulk status result = %d\n", result);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
skipped_data_phase:
/* check bulk status */
residue = le32_to_cpu(bcs->Residue);
usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
le32_to_cpu(bcs->Signature), bcs->Tag,
residue, bcs->Status);
if (!(bcs->Tag == us->tag || (us->fflags & US_FL_BULK_IGNORE_TAG)) ||
bcs->Status > US_BULK_STAT_PHASE) {
usb_stor_dbg(us, "Bulk logical error\n");
return USB_STOR_TRANSPORT_ERROR;
}
/*
* Some broken devices report odd signatures, so we do not check them
* for validity against the spec. We store the first one we see,
* and check subsequent transfers for validity against this signature.
*/
if (!us->bcs_signature) {
us->bcs_signature = bcs->Signature;
if (us->bcs_signature != cpu_to_le32(US_BULK_CS_SIGN))
usb_stor_dbg(us, "Learnt BCS signature 0x%08X\n",
le32_to_cpu(us->bcs_signature));
} else if (bcs->Signature != us->bcs_signature) {
usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n",
le32_to_cpu(bcs->Signature),
le32_to_cpu(us->bcs_signature));
return USB_STOR_TRANSPORT_ERROR;
}
/*
* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us
*/
if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
/*
* Heuristically detect devices that generate bogus residues
* by seeing what happens with INQUIRY and READ CAPACITY
* commands.
*/
if (bcs->Status == US_BULK_STAT_OK &&
scsi_get_resid(srb) == 0 &&
((srb->cmnd[0] == INQUIRY &&
transfer_length == 36) ||
(srb->cmnd[0] == READ_CAPACITY &&
transfer_length == 8))) {
us->fflags |= US_FL_IGNORE_RESIDUE;
} else {
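/* don't trust a residue larger than the request; keep the larger of the device's count and our own */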
residue = min(residue, transfer_length);
scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
}
}
/* based on the status code, we report good or bad */
switch (bcs->Status) {
case US_BULK_STAT_OK:
/* device babbled -- return fake sense data */
if (fake_sense) {
memcpy(srb->sense_buffer,
usb_stor_sense_invalidCDB,
sizeof(usb_stor_sense_invalidCDB));
return USB_STOR_TRANSPORT_NO_SENSE;
}
/* command good -- note that data could be short */
return USB_STOR_TRANSPORT_GOOD;
case US_BULK_STAT_FAIL:
/* command failed */
return USB_STOR_TRANSPORT_FAILED;
case US_BULK_STAT_PHASE:
/*
* phase error -- note that a transport reset will be
* invoked by the invoke_transport() function
*/
return USB_STOR_TRANSPORT_ERROR;
}
/* we should never get here, but if we do, we're in trouble */
return USB_STOR_TRANSPORT_ERROR;
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_transport);
/***********************************************************************
* Reset routines
***********************************************************************/
/*
* This is the common part of the device reset code.
*
* It's handy that every transport mechanism uses the control endpoint for
* resets.
*
* Basically, we send a reset with a 5-second timeout, so we don't get
* jammed attempting to do the reset.
*/
static int usb_stor_reset_common(struct us_data *us,
u8 request, u8 requesttype,
u16 value, u16 index, void *data, u16 size)
{
int result;
int result2;
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
usb_stor_dbg(us, "No reset during disconnect\n");
return -EIO;
}
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
request, requesttype, value, index, data, size,
5*HZ);
if (result < 0) {
usb_stor_dbg(us, "Soft reset failed: %d\n", result);
return result;
}
/*
* Give the device some time to recover from the reset,
* but don't delay disconnect processing.
*/
wait_event_interruptible_timeout(us->delay_wait,
test_bit(US_FLIDX_DISCONNECTING, &us->dflags),
HZ*6);
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
usb_stor_dbg(us, "Reset interrupted by disconnect\n");
return -EIO;
}
usb_stor_dbg(us, "Soft reset: clearing bulk-in endpoint halt\n");
result = usb_stor_clear_halt(us, us->recv_bulk_pipe);
usb_stor_dbg(us, "Soft reset: clearing bulk-out endpoint halt\n");
result2 = usb_stor_clear_halt(us, us->send_bulk_pipe);
/* return a result code based on the result of the clear-halts */
if (result >= 0)
result = result2;
if (result < 0)
usb_stor_dbg(us, "Soft reset failed\n");
else
usb_stor_dbg(us, "Soft reset done\n");
return result;
}
/* This issues a CB[I] Reset to the device in question */
#define CB_RESET_CMD_SIZE 12
int usb_stor_CB_reset(struct us_data *us)
{
memset(us->iobuf, 0xFF, CB_RESET_CMD_SIZE);
us->iobuf[0] = SEND_DIAGNOSTIC;
us->iobuf[1] = 4;
return usb_stor_reset_common(us, US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, us->ifnum, us->iobuf, CB_RESET_CMD_SIZE);
}
EXPORT_SYMBOL_GPL(usb_stor_CB_reset);
/*
* This issues a Bulk-only Reset to the device in question, including
* clearing the subsequent endpoint halts that may occur.
*/
int usb_stor_Bulk_reset(struct us_data *us)
{
return usb_stor_reset_common(us, US_BULK_RESET_REQUEST,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, us->ifnum, NULL, 0);
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_reset);
/*
* Issue a USB port reset to the device. The caller must not hold
* us->dev_mutex.
*/
int usb_stor_port_reset(struct us_data *us)
{
int result;
/* For these devices we must use the class-specific method */
if (us->pusb_dev->quirks & USB_QUIRK_RESET)
return -EPERM;
result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
if (result < 0)
usb_stor_dbg(us, "unable to lock device for reset: %d\n",
result);
else {
/* Were we disconnected while waiting for the lock? */
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
result = -EIO;
usb_stor_dbg(us, "No reset during disconnect\n");
} else {
result = usb_reset_device(us->pusb_dev);
usb_stor_dbg(us, "usb_reset_device returns %d\n",
result);
}
usb_unlock_device(us->pusb_dev);
}
return result;
}
| linux-master | drivers/usb/storage/transport.c |