python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
* Bus Glue for Xilinx EHCI core on the of_platform bus
*
* Copyright (c) 2009 Xilinx, Inc.
*
* Based on "ehci-ppc-of.c" by Valentine Barshak <[email protected]>
* and "ehci-ppc-soc.c" by Stefan Roese <[email protected]>
* and "ohci-ppc-of.c" by Sylvain Munaut <[email protected]>
*/
#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
/**
* ehci_xilinx_port_handed_over - hand the port over if it could not be enabled
* @hcd: Pointer to the usb_hcd device to which the host controller is bound
* @portnum: Port number to which the device is attached.
*
* This function is used as a place to tell the user that the Xilinx USB host
* controller does not support LS devices. And in an HS only configuration, it
* does not support FS devices either. It is hoped that this can help a
* confused user.
*
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
*
* Return: Always returns 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
if (hcd->has_tt) {
dev_warn(hcd->self.controller,
"Maybe you have connected a low speed device?\n");
dev_warn(hcd->self.controller,
"We do not support low speed devices\n");
} else {
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
"The USB host controller does not support full speed nor low speed devices\n");
dev_warn(hcd->self.controller,
"You can reconfigure the host controller to have full speed support\n");
}
return 0;
}
static const struct hc_driver ehci_xilinx_of_hc_driver = {
.description = hcd_name,
.product_desc = "OF EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = NULL,
.port_handed_over = ehci_xilinx_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/**
* ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
* @op: pointer to the platform_device bound to the host controller
*
* This function requests resources and sets up appropriate properties for the
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
*
* Return: zero on success, negative error code otherwise
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource res;
int irq;
int rv;
int *value;
if (usb_disabled())
return -ENODEV;
dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
rv = of_address_to_resource(dn, 0, &res);
if (rv)
return rv;
hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
"XILINX-OF USB");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
__FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = devm_ioremap_resource(&op->dev, &res);
if (IS_ERR(hcd->regs)) {
rv = PTR_ERR(hcd->regs);
goto err_irq;
}
ehci = hcd_to_ehci(hcd);
/* This core always has a big-endian register interface and uses
* big-endian memory descriptors.
*/
ehci->big_endian_mmio = 1;
ehci->big_endian_desc = 1;
/* Check whether the FS support option is selected in the hardware.
*/
value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
if (value && (*value == 1)) {
ehci_dbg(ehci, "USB host controller supports FS devices\n");
hcd->has_tt = 1;
} else {
ehci_dbg(ehci,
"USB host controller is HS only\n");
hcd->has_tt = 0;
}
/* The core's debug registers occupy the first 0x100 bytes, so the
* EHCI capability registers start at offset 0x100
*/
ehci->caps = hcd->regs + 0x100;
rv = usb_add_hcd(hcd, irq, 0);
if (rv == 0) {
device_wakeup_enable(hcd->self.controller);
return 0;
}
err_irq:
usb_put_hcd(hcd);
return rv;
}
/**
* ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
* @op: pointer to platform_device structure that is to be removed
*
* Remove the hcd structure, and release resources that have been requested
* during probe.
*/
static void ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
};
MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
.remove_new = ehci_hcd_xilinx_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
.of_match_table = ehci_hcd_xilinx_of_match,
},
};
| linux-master | drivers/usb/host/ehci-xilinx-of.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell
* Author: Gregory CLEMENT <[email protected]>
*/
#include <linux/io.h>
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "xhci-mvebu.h"
#include "xhci.h"
#define USB3_MAX_WINDOWS 4
#define USB3_WIN_CTRL(w) (0x0 + ((w) * 8))
#define USB3_WIN_BASE(w) (0x4 + ((w) * 8))
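/*
* each decoding window is a register pair: a control word at w * 8
* and a base word at w * 8 + 4. As programmed below, the control
* word packs the window size mask in bits 31:16, the target
* attribute in bits 15:8, the target ID in bits 7:4 and the enable
* flag in bit 0.
*/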
static void xhci_mvebu_mbus_config(void __iomem *base,
const struct mbus_dram_target_info *dram)
{
int win;
/* Clear all existing windows */
for (win = 0; win < USB3_MAX_WINDOWS; win++) {
writel(0, base + USB3_WIN_CTRL(win));
writel(0, base + USB3_WIN_BASE(win));
}
/* Program each DRAM CS in a separate window */
for (win = 0; win < dram->num_cs; win++) {
const struct mbus_dram_window *cs = &dram->cs[win];
writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
base + USB3_WIN_CTRL(win));
writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win));
}
}
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
void __iomem *base;
const struct mbus_dram_target_info *dram;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
/*
* We don't use devm_ioremap() because this mapping should
* only exist for the duration of this probe function.
*/
base = ioremap(res->start, resource_size(res));
if (!base)
return -ENODEV;
dram = mv_mbus_dram_info();
xhci_mvebu_mbus_config(base, dram);
/*
* This memory area was only needed to configure the MBus
* windows, and is therefore no longer useful.
*/
iounmap(base);
return 0;
}
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* Without reset on resume, the HC won't work at all */
xhci->quirks |= XHCI_RESET_ON_RESUME;
return 0;
}
| linux-master | drivers/usb/host/xhci-mvebu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* Copyright (C) 2010 ST Microelectronics.
* Deepak Sikri <[email protected]>
*
* Based on various ohci-*.c drivers
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#define DRIVER_DESC "OHCI SPEAr driver"
struct spear_ohci {
struct clk *clk;
};
#define to_spear_ohci(hcd) (struct spear_ohci *)(hcd_to_ohci(hcd)->priv)
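/* the private area is reserved through the extra_priv_size field of
* spear_overrides, passed to ohci_init_driver() at the bottom of
* this file
*/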
static struct hc_driver __read_mostly ohci_spear_hc_driver;
static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
{
const struct hc_driver *driver = &ohci_spear_hc_driver;
struct usb_hcd *hcd = NULL;
struct clk *usbh_clk;
struct spear_ohci *sohci_p;
struct resource *res;
int retval, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = PTR_ERR(usbh_clk);
goto fail;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
sohci_p = to_spear_ohci(hcd);
sohci_p->clk = usbh_clk;
clk_prepare_enable(sohci_p->clk);
retval = usb_add_hcd(hcd, irq, 0);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
return retval;
}
clk_disable_unprepare(sohci_p->clk);
err_put_hcd:
usb_put_hcd(hcd);
fail:
dev_err(&pdev->dev, "init fail, %d\n", retval);
return retval;
}
static void spear_ohci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct spear_ohci *sohci_p = to_spear_ohci(hcd);
usb_remove_hcd(hcd);
if (sohci_p->clk)
clk_disable_unprepare(sohci_p->clk);
usb_put_hcd(hcd);
}
#if defined(CONFIG_PM)
static int spear_ohci_hcd_drv_suspend(struct platform_device *pdev,
pm_message_t message)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct spear_ohci *sohci_p = to_spear_ohci(hcd);
bool do_wakeup = device_may_wakeup(&pdev->dev);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
clk_disable_unprepare(sohci_p->clk);
return ret;
}
static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct spear_ohci *sohci_p = to_spear_ohci(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
clk_prepare_enable(sohci_p->clk);
ohci_resume(hcd, false);
return 0;
}
#endif
static const struct of_device_id spear_ohci_id_table[] = {
{ .compatible = "st,spear600-ohci", },
{ },
};
MODULE_DEVICE_TABLE(of, spear_ohci_id_table);
/* Driver definition to register with the platform bus */
static struct platform_driver spear_ohci_hcd_driver = {
.probe = spear_ohci_hcd_drv_probe,
.remove_new = spear_ohci_hcd_drv_remove,
#ifdef CONFIG_PM
.suspend = spear_ohci_hcd_drv_suspend,
.resume = spear_ohci_hcd_drv_resume,
#endif
.driver = {
.name = "spear-ohci",
.of_match_table = spear_ohci_id_table,
},
};
static const struct ohci_driver_overrides spear_overrides __initconst = {
.extra_priv_size = sizeof(struct spear_ohci),
};
static int __init ohci_spear_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ohci_hcd_driver);
}
module_init(ohci_spear_init);
static void __exit ohci_spear_cleanup(void)
{
platform_driver_unregister(&spear_ohci_hcd_driver);
}
module_exit(ohci_spear_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Deepak Sikri");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spear-ohci");
| linux-master | drivers/usb/host/ohci-spear.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
* Copyright (c) Freescale Semiconductor, Inc. 2006, 2011.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <soc/fsl/qe/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"
static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
{
pkt->data = NULL;
pkt->len = 0;
pkt->status = USB_TD_OK;
pkt->info = 0;
pkt->priv_data = NULL;
cq_put(&usb->ep0->empty_frame_Q, pkt);
}
/* confirm submitted packet */
void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
{
struct td *td;
struct packet *td_pkt;
struct ed *ed;
u32 trans_len;
bool td_done = false;
td = fhci_remove_td_from_frame(usb->actual_frame);
td_pkt = td->pkt;
trans_len = pkt->len;
td->status = pkt->status;
if (td->type == FHCI_TA_IN && td_pkt->info & PKT_DUMMY_PACKET) {
if ((td->data + td->actual_len) && trans_len)
memcpy(td->data + td->actual_len, pkt->data,
trans_len);
cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
}
recycle_frame(usb, pkt);
ed = td->ed;
if (ed->mode == FHCI_TF_ISO) {
if (ed->td_list.next->next != &ed->td_list) {
struct td *td_next =
list_entry(ed->td_list.next->next, struct td,
node);
td_next->start_frame = usb->actual_frame->frame_num;
}
td->actual_len = trans_len;
td_done = true;
} else if ((td->status & USB_TD_ERROR) &&
!(td->status & USB_TD_TX_ER_NAK)) {
/*
* There was an error on the transaction (but not NAK).
* If it is a fatal error (data underrun, stall, bad pid or 3
* errors exceeded), mark this TD as done.
*/
if ((td->status & USB_TD_RX_DATA_UNDERUN) ||
(td->status & USB_TD_TX_ER_STALL) ||
(td->status & USB_TD_RX_ER_PID) ||
(++td->error_cnt >= 3)) {
ed->state = FHCI_ED_HALTED;
td_done = true;
if (td->status & USB_TD_RX_DATA_UNDERUN) {
fhci_dbg(usb->fhci, "td err fu\n");
td->toggle = !td->toggle;
td->actual_len += trans_len;
} else {
fhci_dbg(usb->fhci, "td err f!u\n");
}
} else {
fhci_dbg(usb->fhci, "td err !f\n");
/* it is not a fatal error - retry this transaction */
td->nak_cnt = 0;
td->error_cnt++;
td->status = USB_TD_OK;
}
} else if (td->status & USB_TD_TX_ER_NAK) {
/* there was a NAK response */
fhci_vdbg(usb->fhci, "td nack\n");
td->nak_cnt++;
td->error_cnt = 0;
td->status = USB_TD_OK;
} else {
/* there was no error on transaction */
td->error_cnt = 0;
td->nak_cnt = 0;
td->toggle = !td->toggle;
td->actual_len += trans_len;
if (td->len == td->actual_len)
td_done = true;
}
if (td_done)
fhci_move_td_from_ed_to_done_list(usb, ed);
}
/*
* Flush all transmitted packets from BDs
* This routine is called when disabling the USB port to flush all
* transmissions that are already scheduled in the BDs
*/
void fhci_flush_all_transmissions(struct fhci_usb *usb)
{
u8 mode;
struct td *td;
mode = in_8(&usb->fhci->regs->usb_usmod);
clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
fhci_flush_bds(usb);
while ((td = fhci_peek_td_from_frame(usb->actual_frame)) != NULL) {
struct packet *pkt = td->pkt;
pkt->status = USB_TD_TX_ER_TIMEOUT;
fhci_transaction_confirm(usb, pkt);
}
usb->actual_frame->frame_status = FRAME_END_TRANSMISSION;
/* reset the event register */
out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/*
* This function forms the packet and transmits it. This function
* will handle all endpoint types: ISO, interrupt, control and bulk
*/
static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
{
u32 fw_transaction_time, len = 0;
struct packet *pkt;
u8 *data = NULL;
/* calculate data address, len and toggle and then add the transaction */
if (td->toggle == USB_TD_TOGGLE_CARRY)
td->toggle = ed->toggle_carry;
switch (ed->mode) {
case FHCI_TF_ISO:
len = td->len;
if (td->type != FHCI_TA_IN)
data = td->data;
break;
case FHCI_TF_CTRL:
case FHCI_TF_BULK:
len = min(td->len - td->actual_len, ed->max_pkt_size);
if (!((td->type == FHCI_TA_IN) &&
((len + td->actual_len) == td->len)))
data = td->data + td->actual_len;
break;
case FHCI_TF_INTR:
len = min(td->len, ed->max_pkt_size);
if (!((td->type == FHCI_TA_IN) &&
((td->len + CRC_SIZE) >= ed->max_pkt_size)))
data = td->data;
break;
default:
break;
}
if (usb->port_status == FHCI_PORT_FULL)
fw_transaction_time = (((len + PROTOCOL_OVERHEAD) * 11) >> 4);
else
fw_transaction_time = ((len + PROTOCOL_OVERHEAD) * 6);
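/*
* Sanity of the estimate above: at full speed (12 Mbit/s) one byte
* takes 8/12 ~= 0.67 us on the wire, and (len + overhead) * 11 / 16
* is ~0.69 us per byte, i.e. byte time plus a small margin; at low
* speed (1.5 Mbit/s) a byte takes ~5.3 us, and the factor of 6 again
* leaves headroom. The units appear to be microseconds, matching the
* 1000 us (1 ms) frame budget checked below.
*/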
/* check if there's enough space in this frame to submit this TD */
if (usb->actual_frame->total_bytes + len + PROTOCOL_OVERHEAD >=
usb->max_bytes_per_frame) {
fhci_vdbg(usb->fhci, "not enough space in this frame: "
"%d %d %d\n", usb->actual_frame->total_bytes, len,
usb->max_bytes_per_frame);
return -1;
}
/* check if there's enough time in this frame to submit this TD */
if (usb->actual_frame->frame_status != FRAME_IS_PREPARED &&
(usb->actual_frame->frame_status & FRAME_END_TRANSMISSION ||
(fw_transaction_time + usb->sw_transaction_time >=
1000 - fhci_get_sof_timer_count(usb)))) {
fhci_dbg(usb->fhci, "not enough time in this frame\n");
return -1;
}
/* update frame object fields before transmitting */
pkt = cq_get(&usb->ep0->empty_frame_Q);
if (!pkt) {
fhci_dbg(usb->fhci, "there is no empty frame\n");
return -1;
}
td->pkt = pkt;
pkt->info = 0;
if (data == NULL) {
data = cq_get(&usb->ep0->dummy_packets_Q);
BUG_ON(!data);
pkt->info = PKT_DUMMY_PACKET;
}
pkt->data = data;
pkt->len = len;
pkt->status = USB_TD_OK;
/* update TD status field before transmitting */
td->status = USB_TD_INPROGRESS;
/* update actual frame time object with the actual transmission */
usb->actual_frame->total_bytes += (len + PROTOCOL_OVERHEAD);
fhci_add_td_to_frame(usb->actual_frame, td);
if (usb->port_status != FHCI_PORT_FULL &&
usb->port_status != FHCI_PORT_LOW) {
pkt->status = USB_TD_TX_ER_TIMEOUT;
pkt->len = 0;
fhci_transaction_confirm(usb, pkt);
} else if (fhci_host_transaction(usb, pkt, td->type, ed->dev_addr,
ed->ep_addr, ed->mode, ed->speed, td->toggle)) {
/* remove TD from actual frame */
list_del_init(&td->frame_lh);
td->status = USB_TD_OK;
if (pkt->info & PKT_DUMMY_PACKET)
cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
recycle_frame(usb, pkt);
usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
fhci_err(usb->fhci, "host transaction failed\n");
return -1;
}
return len;
}
static void move_head_to_tail(struct list_head *list)
{
struct list_head *node = list->next;
if (!list_empty(list)) {
list_move_tail(node, list);
}
}
/*
* This function goes through the endpoint list and schedules the
* transactions within this list
*/
static int scan_ed_list(struct fhci_usb *usb,
struct list_head *list, enum fhci_tf_mode list_type)
{
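/* byte budget per list within one frame: control and bulk may fill
* the whole frame, while periodic (ISO and interrupt) traffic is
* capped at MAX_PERIODIC_FRAME_USAGE percent of it
*/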
static const int frame_part[4] = {
[FHCI_TF_CTRL] = MAX_BYTES_PER_FRAME,
[FHCI_TF_ISO] = (MAX_BYTES_PER_FRAME *
MAX_PERIODIC_FRAME_USAGE) / 100,
[FHCI_TF_BULK] = MAX_BYTES_PER_FRAME,
[FHCI_TF_INTR] = (MAX_BYTES_PER_FRAME *
MAX_PERIODIC_FRAME_USAGE) / 100
};
struct ed *ed;
struct td *td;
int ans = 1;
u32 save_transaction_time = usb->sw_transaction_time;
list_for_each_entry(ed, list, node) {
td = ed->td_head;
if (!td || td->status == USB_TD_INPROGRESS)
continue;
if (ed->state != FHCI_ED_OPER) {
if (ed->state == FHCI_ED_URB_DEL) {
td->status = USB_TD_OK;
fhci_move_td_from_ed_to_done_list(usb, ed);
ed->state = FHCI_ED_SKIP;
}
continue;
}
/*
* for interrupt and isochronous pipes, skip the TD until its
* polling interval has elapsed
*/
if ((list_type == FHCI_TF_INTR || list_type == FHCI_TF_ISO) &&
(((usb->actual_frame->frame_num -
td->start_frame) & 0x7ff) < td->interval))
continue;
if (add_packet(usb, ed, td) < 0)
continue;
/* update time stamps in the TD */
td->start_frame = usb->actual_frame->frame_num;
usb->sw_transaction_time += save_transaction_time;
if (usb->actual_frame->total_bytes >=
usb->max_bytes_per_frame) {
usb->actual_frame->frame_status =
FRAME_DATA_END_TRANSMISSION;
fhci_push_dummy_bd(usb->ep0);
ans = 0;
break;
}
if (usb->actual_frame->total_bytes >= frame_part[list_type])
break;
}
/* be fair to each ED (move the list head around) */
move_head_to_tail(list);
usb->sw_transaction_time = save_transaction_time;
return ans;
}
static u32 rotate_frames(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
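/* frame numbers are 11 bits wide, so distances are compared modulo
* 2048; a frame more than 5 frames behind the hardware counter is
* considered stale and gets flushed, otherwise the rotation is
* refused (-EINVAL)
*/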
if (!list_empty(&usb->actual_frame->tds_list)) {
if ((((in_be16(&fhci->pram->frame_num) & 0x07ff) -
usb->actual_frame->frame_num) & 0x7ff) > 5)
fhci_flush_actual_frame(usb);
else
return -EINVAL;
}
usb->actual_frame->frame_status = FRAME_IS_PREPARED;
usb->actual_frame->frame_num = in_be16(&fhci->pram->frame_num) & 0x7ff;
usb->actual_frame->total_bytes = 0;
return 0;
}
/*
* This function schedules the USB transactions and will process the
* endpoint in the following order: iso, interrupt, control and bulk.
*/
void fhci_schedule_transactions(struct fhci_usb *usb)
{
int left = 1;
if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
if (rotate_frames(usb) != 0)
return;
if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
return;
if (usb->actual_frame->total_bytes == 0) {
/*
* schedule the next available ISO transfer
* or the next stage of the ISO transfer
*/
scan_ed_list(usb, &usb->hc_list->iso_list, FHCI_TF_ISO);
/*
* schedule the next available interrupt transfer or
* the next stage of the interrupt transfer
*/
scan_ed_list(usb, &usb->hc_list->intr_list, FHCI_TF_INTR);
/*
* schedule the next available control transfer
* or the next stage of the control transfer
*/
left = scan_ed_list(usb, &usb->hc_list->ctrl_list,
FHCI_TF_CTRL);
}
/*
* schedule the next available bulk transfer or the next stage of the
* bulk transfer
*/
if (left > 0)
scan_ed_list(usb, &usb->hc_list->bulk_list, FHCI_TF_BULK);
}
/* Handles SOF interrupt */
static void sof_interrupt(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
if ((usb->port_status == FHCI_PORT_DISABLED) &&
(usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_CONNECTION) &&
!(usb->vroot_hub->port.wPortChange & USB_PORT_STAT_C_CONNECTION)) {
if (usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_LOW_SPEED)
usb->port_status = FHCI_PORT_LOW;
else
usb->port_status = FHCI_PORT_FULL;
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
}
gtm_set_exact_timer16(fhci->timer, usb->max_frame_usage, false);
fhci_host_transmit_actual_frame(usb);
usb->actual_frame->frame_status = FRAME_IS_TRANSMITTED;
fhci_schedule_transactions(usb);
}
/* Handles device disconnected interrupt on port */
void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_usb_disable_interrupt(usb);
clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->port_status = FHCI_PORT_DISABLED;
fhci_stop_sof_timer(fhci);
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_CONNECTION;
usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_CONNECTION;
usb->max_bytes_per_frame = 0;
fhci_usb_enable_interrupt(usb);
fhci_dbg(fhci, "<- %s\n", __func__);
}
/* detect a new device connected on the USB port */
void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
int state;
int ret;
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_usb_disable_interrupt(usb);
state = fhci_ioports_check_bus_state(fhci);
/* low-speed device was connected to the USB port */
if (state == 1) {
ret = qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
if (ret) {
fhci_warn(fhci, "Low-Speed device is not supported, "
"try use BRGx\n");
goto out;
}
usb->port_status = FHCI_PORT_LOW;
setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus |=
(USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_CONNECTION);
usb->vroot_hub->port.wPortChange |=
USB_PORT_STAT_C_CONNECTION;
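/* low speed runs at 1.5 Mbit/s, an eighth of full speed's
* 12 Mbit/s, hence the per-frame byte budget below is
* MAX_BYTES_PER_FRAME >> 3 (the extra -7 looks like a small
* guard band)
*/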
usb->max_bytes_per_frame =
(MAX_BYTES_PER_FRAME >> 3) - 7;
fhci_port_enable(usb);
} else if (state == 2) {
ret = qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
if (ret) {
fhci_warn(fhci, "Full-Speed device is not supported, "
"try use CLKx\n");
goto out;
}
usb->port_status = FHCI_PORT_FULL;
clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_LOW_SPEED;
usb->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_CONNECTION;
usb->vroot_hub->port.wPortChange |=
USB_PORT_STAT_C_CONNECTION;
usb->max_bytes_per_frame = (MAX_BYTES_PER_FRAME - 15);
fhci_port_enable(usb);
}
out:
fhci_usb_enable_interrupt(usb);
fhci_dbg(fhci, "<- %s\n", __func__);
}
irqreturn_t fhci_frame_limit_timer_irq(int irq, void *_hcd)
{
struct usb_hcd *hcd = _hcd;
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
struct fhci_usb *usb = fhci->usb_lld;
spin_lock(&fhci->lock);
gtm_set_exact_timer16(fhci->timer, 1000, false);
if (usb->actual_frame->frame_status == FRAME_IS_TRANSMITTED) {
usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
fhci_push_dummy_bd(usb->ep0);
}
fhci_schedule_transactions(usb);
spin_unlock(&fhci->lock);
return IRQ_HANDLED;
}
/* Cancel transmission on the USB endpoint */
static void abort_transmission(struct fhci_usb *usb)
{
fhci_dbg(usb->fhci, "-> %s\n", __func__);
/* issue stop Tx command */
qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
/* flush Tx FIFOs */
out_8(&usb->fhci->regs->usb_uscom, USB_CMD_FLUSH_FIFO | EP_ZERO);
udelay(1000);
/* reset Tx BDs */
fhci_flush_bds(usb);
/* issue restart Tx command */
qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
fhci_dbg(usb->fhci, "<- %s\n", __func__);
}
irqreturn_t fhci_irq(struct usb_hcd *hcd)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
struct fhci_usb *usb;
u16 usb_er = 0;
unsigned long flags;
spin_lock_irqsave(&fhci->lock, flags);
usb = fhci->usb_lld;
usb_er |= in_be16(&usb->fhci->regs->usb_usber) &
in_be16(&usb->fhci->regs->usb_usbmr);
/* clear event bits for next time */
out_be16(&usb->fhci->regs->usb_usber, usb_er);
fhci_dbg_isr(fhci, usb_er);
if (usb_er & USB_E_RESET_MASK) {
if ((usb->port_status == FHCI_PORT_FULL) ||
(usb->port_status == FHCI_PORT_LOW)) {
fhci_device_disconnected_interrupt(fhci);
usb_er &= ~USB_E_IDLE_MASK;
} else if (usb->port_status == FHCI_PORT_WAITING) {
usb->port_status = FHCI_PORT_DISCONNECTING;
/* Turn on IDLE since we want to disconnect */
usb->saved_msk |= USB_E_IDLE_MASK;
out_be16(&usb->fhci->regs->usb_usber,
usb->saved_msk);
} else if (usb->port_status == FHCI_PORT_DISABLED) {
if (fhci_ioports_check_bus_state(fhci) == 1)
fhci_device_connected_interrupt(fhci);
}
usb_er &= ~USB_E_RESET_MASK;
}
if (usb_er & USB_E_MSF_MASK) {
abort_transmission(fhci->usb_lld);
usb_er &= ~USB_E_MSF_MASK;
}
if (usb_er & (USB_E_SOF_MASK | USB_E_SFT_MASK)) {
sof_interrupt(fhci);
usb_er &= ~(USB_E_SOF_MASK | USB_E_SFT_MASK);
}
if (usb_er & USB_E_TXB_MASK) {
fhci_tx_conf_interrupt(fhci->usb_lld);
usb_er &= ~USB_E_TXB_MASK;
}
if (usb_er & USB_E_TXE1_MASK) {
fhci_tx_conf_interrupt(fhci->usb_lld);
usb_er &= ~USB_E_TXE1_MASK;
}
if (usb_er & USB_E_IDLE_MASK) {
if (usb->port_status == FHCI_PORT_DISABLED) {
usb_er &= ~USB_E_RESET_MASK;
fhci_device_connected_interrupt(fhci);
} else if (usb->port_status ==
FHCI_PORT_DISCONNECTING) {
/* XXX usb->port_status = FHCI_PORT_WAITING; */
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
out_be16(&usb->fhci->regs->usb_usbmr,
usb->saved_msk);
} else {
fhci_dbg_isr(fhci, -1);
}
usb_er &= ~USB_E_IDLE_MASK;
}
spin_unlock_irqrestore(&fhci->lock, flags);
return IRQ_HANDLED;
}
/*
* Process normal completions (error or success) and clean the schedule.
*
* This is the main path for handing URBs back to drivers. The only other path
* is process_del_list(), which unlinks URBs by scanning EDs, instead of
* scanning the (re-reversed) done list as this does.
*/
static void process_done_list(unsigned long data)
{
struct urb *urb;
struct ed *ed;
struct td *td;
struct urb_priv *urb_priv;
struct fhci_hcd *fhci = (struct fhci_hcd *)data;
disable_irq(fhci->timer->irq);
disable_irq(fhci_to_hcd(fhci)->irq);
spin_lock(&fhci->lock);
td = fhci_remove_td_from_done_list(fhci->hc_list);
while (td != NULL) {
urb = td->urb;
urb_priv = urb->hcpriv;
ed = td->ed;
/* update URB's length and status from TD */
fhci_done_td(urb, td);
urb_priv->tds_cnt++;
/*
* if all this urb's TDs are done, call complete()
* Interrupt transfers are the only special case:
* they are reissued, until "deleted" by usb_unlink_urb
* (real work done in a SOF intr, by process_del_list)
*/
if (urb_priv->tds_cnt == urb_priv->num_of_tds) {
fhci_urb_complete_free(fhci, urb);
} else if (urb_priv->state == URB_DEL &&
ed->state == FHCI_ED_SKIP) {
fhci_del_ed_list(fhci, ed);
ed->state = FHCI_ED_OPER;
} else if (ed->state == FHCI_ED_HALTED) {
urb_priv->state = URB_DEL;
ed->state = FHCI_ED_URB_DEL;
fhci_del_ed_list(fhci, ed);
ed->state = FHCI_ED_OPER;
}
td = fhci_remove_td_from_done_list(fhci->hc_list);
}
spin_unlock(&fhci->lock);
enable_irq(fhci->timer->irq);
enable_irq(fhci_to_hcd(fhci)->irq);
}
DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list);
/* transfer completed callback */
u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
{
if (!fhci->process_done_task->state)
tasklet_schedule(fhci->process_done_task);
return 0;
}
/*
* adds urb to the endpoint descriptor list
* arguments:
* fhci data structure for the low level host controller
* urb USB request block data structure
*/
void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
{
struct ed *ed = urb->ep->hcpriv;
struct urb_priv *urb_priv = urb->hcpriv;
u32 data_len = urb->transfer_buffer_length;
int urb_state = 0;
int toggle = 0;
u8 *data;
u16 cnt = 0;
if (ed == NULL) {
ed = fhci_get_empty_ed(fhci);
ed->dev_addr = usb_pipedevice(urb->pipe);
ed->ep_addr = usb_pipeendpoint(urb->pipe);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ed->mode = FHCI_TF_CTRL;
break;
case PIPE_BULK:
ed->mode = FHCI_TF_BULK;
break;
case PIPE_INTERRUPT:
ed->mode = FHCI_TF_INTR;
break;
case PIPE_ISOCHRONOUS:
ed->mode = FHCI_TF_ISO;
break;
default:
break;
}
ed->speed = (urb->dev->speed == USB_SPEED_LOW) ?
FHCI_LOW_SPEED : FHCI_FULL_SPEED;
ed->max_pkt_size = usb_endpoint_maxp(&urb->ep->desc);
urb->ep->hcpriv = ed;
fhci_dbg(fhci, "new ep speed=%d max_pkt_size=%d\n",
ed->speed, ed->max_pkt_size);
}
/* for ISO transfer calculate start frame index */
if (ed->mode == FHCI_TF_ISO) {
/* Ignore the possibility of underruns */
urb->start_frame = ed->td_head ? ed->next_iso :
get_frame_num(fhci);
ed->next_iso = (urb->start_frame + urb->interval *
urb->number_of_packets) & 0x07ff;
}
/*
* OHCI handles the DATA toggle itself, we just use the USB
* toggle bits
*/
if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)))
toggle = USB_TD_TOGGLE_CARRY;
else {
toggle = USB_TD_TOGGLE_DATA0;
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 1);
}
urb_priv->tds_cnt = 0;
urb_priv->ed = ed;
if (data_len > 0)
data = urb->transfer_buffer;
else
data = NULL;
switch (ed->mode) {
case FHCI_TF_BULK:
if (urb->transfer_flags & URB_ZERO_PACKET &&
urb->transfer_buffer_length > 0 &&
((urb->transfer_buffer_length %
usb_endpoint_maxp(&urb->ep->desc)) == 0))
urb_state = US_BULK0;
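/* an FHCI TD carries at most 4096 bytes, so larger bulk buffers
* are split across several TDs, all but the first continuing the
* data toggle
*/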
while (data_len > 4096) {
fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
FHCI_TA_IN,
cnt ? USB_TD_TOGGLE_CARRY :
toggle,
data, 4096, 0, 0, true);
data += 4096;
data_len -= 4096;
cnt++;
}
fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
cnt ? USB_TD_TOGGLE_CARRY : toggle,
data, data_len, 0, 0, true);
cnt++;
if (urb->transfer_flags & URB_ZERO_PACKET &&
cnt < urb_priv->num_of_tds) {
fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
FHCI_TA_IN,
USB_TD_TOGGLE_CARRY, NULL, 0, 0, 0, true);
cnt++;
}
break;
case FHCI_TF_INTR:
urb->start_frame = get_frame_num(fhci) + 1;
fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
USB_TD_TOGGLE_DATA0, data, data_len,
urb->interval, urb->start_frame, true);
break;
case FHCI_TF_CTRL:
ed->dev_addr = usb_pipedevice(urb->pipe);
ed->max_pkt_size = usb_endpoint_maxp(&urb->ep->desc);
/* setup stage */
fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
/* data stage */
if (data_len > 0) {
fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
FHCI_TA_IN,
USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
true);
}
/* status stage */
if (data_len > 0)
fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
(usb_pipeout(urb->pipe) ? FHCI_TA_IN :
FHCI_TA_OUT),
USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
else
fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
FHCI_TA_IN,
USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
urb_state = US_CTRL_SETUP;
break;
case FHCI_TF_ISO:
for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
u16 frame = urb->start_frame;
/*
* FIXME scheduling should handle frame counter
* roll-around ... exotic case (and OHCI has
* a 2^16 iso range, vs other HCs max of 2^10)
*/
frame += cnt * urb->interval;
frame &= 0x07ff;
fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
FHCI_TA_IN,
USB_TD_TOGGLE_DATA0,
data + urb->iso_frame_desc[cnt].offset,
urb->iso_frame_desc[cnt].length,
urb->interval, frame, true);
}
break;
default:
break;
}
/*
* set the state of URB
* control pipe: 3 states -- setup, data, status
* interrupt and bulk pipe: 1 state -- data
*/
urb->pipe &= ~0x1f;
urb->pipe |= urb_state & 0x1f;
urb_priv->state = URB_INPROGRESS;
if (!ed->td_head) {
ed->state = FHCI_ED_OPER;
switch (ed->mode) {
case FHCI_TF_CTRL:
list_add(&ed->node, &fhci->hc_list->ctrl_list);
break;
case FHCI_TF_BULK:
list_add(&ed->node, &fhci->hc_list->bulk_list);
break;
case FHCI_TF_INTR:
list_add(&ed->node, &fhci->hc_list->intr_list);
break;
case FHCI_TF_ISO:
list_add(&ed->node, &fhci->hc_list->iso_list);
break;
default:
break;
}
}
fhci_add_tds_to_ed(ed, urb_priv->tds, urb_priv->num_of_tds);
fhci->active_urbs++;
}
| linux-master | drivers/usb/host/fhci-sched.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
*
* Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
* entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
* buffers needed for the larger number). We use one QH per endpoint, queue
* multiple urbs (all three types) per endpoint. URBs may need several qtds.
*
* ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
* interrupts) needs careful scheduling. Performance improvements can be
* an ongoing challenge. That's in "ehci-sched.c".
*
* USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
* or otherwise through transaction translators (TTs) in USB 2.0 hubs using
* (b) special fields in qh entries or (c) split iso entries. TTs will
* buffer low/full speed data so the host collects it at high speed.
*/
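/*
* A rough sketch of one async endpoint's queue as handled here:
*
*   ehci_qh (one per endpoint) -> qtd -> qtd -> ... -> dummy qtd
*
* qtds are chained through hw_next; the trailing dummy (qh->dummy)
* marks the end of the queue for the hardware and lets new qtds be
* appended without stopping the queue.
*/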
/*-------------------------------------------------------------------------*/
/* PID Codes that are used here, from EHCI specification, Table 3-16. */
#define PID_CODE_IN 1
#define PID_CODE_SETUP 2
/* fill a qtd, returning how much of the buffer we were able to queue up */
static unsigned int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
size_t len, int token, int maxpacket)
{
unsigned int count;
u64 addr = buf;
int i;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
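/* e.g. for buf ending in 0x678, count = 0x1000 - 0x678 = 0x988
* (2440) bytes fit before the first page boundary
*/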
if (likely (len < count)) /* ... iff needed */
count = len;
else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
(u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
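/* bits 30:16 of the qTD token carry the byte count; the remaining
* bits (status, PID, error counter, toggle) come from the token
* passed in
*/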
qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
qtd->length = count;
return count;
}
/*-------------------------------------------------------------------------*/
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
struct ehci_qh_hw *hw = qh->hw;
/* writes to an active overlay are unsafe */
WARN_ON(qh->qh_state != QH_STATE_IDLE);
hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
hw->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
unsigned is_out, epnum;
is_out = qh->is_out;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle(qh->ps.udev, epnum, is_out, 1);
}
}
hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
/*
* first qtd may already be partially processed.
* If we come here during unlink, the QH overlay region
* might have reference to the just unlinked qtd. The
* qtd is updated in qh_completions(). Update the QH
* overlay here.
*/
if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
qh->hw->hw_qtd_next = qtd->hw_next;
if (qh->should_be_inactive)
ehci_warn(ehci, "qh %p should be inactive!\n", qh);
} else {
qh_update(ehci, qh, qtd);
}
qh->should_be_inactive = 0;
}
/*-------------------------------------------------------------------------*/
static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_qh *qh = ep->hcpriv;
unsigned long flags;
spin_lock_irqsave(&ehci->lock, flags);
qh->clearing_tt = 0;
if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
&& ehci->rh_state == EHCI_RH_RUNNING)
qh_link_async(ehci, qh);
spin_unlock_irqrestore(&ehci->lock, flags);
}
static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
struct urb *urb, u32 token)
{
/* If an async split transaction gets an error or is unlinked,
* the TT buffer may be left in an indeterminate state. We
* have to clear the TT buffer.
*
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
if (!ehci_is_TDI(ehci)
|| urb->dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub) {
if (usb_hub_clear_tt_buffer(urb) == 0)
qh->clearing_tt = 1;
} else {
/* REVISIT ARC-derived cores don't clear the root
* hub TT buffer in this way...
*/
}
}
}
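/* Map a halted qTD's status token to an URB completion code. In
* short: babble -> -EOVERFLOW; missed microframe on IN -> -EPROTO;
* CERR still nonzero -> -EPIPE (stall); data buffer error -> -ENOSR
* or -ECOMM; transaction error or anything else -> -EPROTO. Short
* reads are reported as -EREMOTEIO for the caller to sort out.
*/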
static int qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
size_t length,
u32 token
)
{
int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely(QTD_PID(token) != PID_CODE_SETUP))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
if (unlikely(urb->unlinked))
return status;
/* force cleanup after short read; not always an error */
if (unlikely (IS_SHORT_READ (token)))
status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
/*
* When MMF is active and PID Code is IN, queue is halted.
* EHCI Specification, Table 4-13.
*/
} else if ((token & QTD_STS_MMF) &&
(QTD_PID(token) == PID_CODE_IN)) {
status = -EPROTO;
/* CERR nonzero + halt --> stall */
} else if (QTD_CERR(token)) {
status = -EPIPE;
/* In theory, more than one of the following bits can be set
* since they are sticky and the transaction is retried.
* Which to test first is rather arbitrary.
*/
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
status = -EPROTO;
} else if (token & QTD_STS_DBE) {
status = (QTD_PID(token) == PID_CODE_IN)
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad CRC, wrong PID, etc */
ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out");
status = -EPROTO;
} else { /* unknown */
status = -EPROTO;
}
}
return status;
}
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
/* ... update hc-wide periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
if (unlikely(urb->unlinked)) {
INCR(ehci->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
INCR(ehci->stats.complete);
}
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
"%s %s urb %p ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
status,
urb->actual_length, urb->transfer_buffer_length);
#endif
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns nonzero if the caller should
* unlink qh.
*/
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *last, *end = qh->dummy;
struct list_head *entry, *tmp;
int last_status;
int stopped;
u8 state;
struct ehci_qh_hw *hw = qh->hw;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*
* It's a bug for qh->qh_state to be anything other than
* QH_STATE_IDLE, unless our caller is scan_async() or
* scan_intr().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
rescan:
last = NULL;
last_status = -EINPROGRESS;
qh->dequeue_during_giveback = 0;
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
list_for_each_safe (entry, tmp, &qh->qtd_list) {
struct ehci_qtd *qtd;
struct urb *urb;
u32 token = 0;
qtd = list_entry (entry, struct ehci_qtd, qtd_list);
urb = qtd->urb;
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
ehci_urb_done(ehci, last->urb, last_status);
last_status = -EINPROGRESS;
}
ehci_qtd_free (ehci, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */
if (qtd == end)
break;
/* hardware copies qtd out of qh overlay */
rmb ();
token = hc32_to_cpu(ehci, qtd->hw_token);
/* always clean up qtds the hc de-activated */
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
/* Report Data Buffer Error: non-fatal but useful */
if (token & QTD_STS_DBE)
ehci_dbg(ehci,
"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
urb,
usb_endpoint_num(&urb->ep->desc),
usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
urb->transfer_buffer_length,
qtd,
qh);
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
if ((token & QTD_STS_HALT) != 0) {
/* retry transaction errors until we
* reach the software xacterr limit
*/
if ((token & QTD_STS_XACT) &&
QTD_CERR(token) == 0 &&
++qh->xacterrs < QH_XACTERR_MAX &&
!urb->unlinked) {
ehci_dbg(ehci,
"detected XactErr len %zu/%zu retry %d\n",
qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
/* reset the token in the qtd and the
* qh overlay (which still contains
* the qtd) so that we pick up from
* where we left off
*/
token &= ~QTD_STS_HALT;
token |= QTD_STS_ACTIVE |
(EHCI_TUNE_CERR << 10);
qtd->hw_token = cpu_to_hc32(ehci,
token);
wmb();
hw->hw_token = cpu_to_hc32(ehci,
token);
goto retry_xacterr;
}
stopped = 1;
qh->unlink_reason |= QH_UNLINK_HALTED;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
*
* other short reads won't stop the queue, including
* control transfers (status stage handles that) or
* most other single-qtd reads ... the queue stops if
* URB_SHORT_NOT_OK was set so the driver submitting
* the urbs could clean it up.
*/
} else if (IS_SHORT_READ (token)
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
qh->unlink_reason |= QH_UNLINK_SHORT_READ;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
&& ehci->rh_state >= EHCI_RH_RUNNING)) {
break;
/* scan the whole queue for unlinks whenever it stops */
} else {
stopped = 1;
/* cancel everything if we halt, suspend, etc */
if (ehci->rh_state < EHCI_RH_RUNNING) {
last_status = -ESHUTDOWN;
qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
}
/* this qtd is active; skip it unless a previous qtd
* for its urb faulted, or its urb was canceled.
*/
else if (last_status == -EINPROGRESS && !urb->unlinked)
continue;
/*
* If this was the active qtd when the qh was unlinked
* and the overlay's token is active, then the overlay
* hasn't been written back to the qtd yet so use its
* token instead of the qtd's. After the qtd is
* processed and removed, the overlay won't be valid
* any more.
*/
if (state == QH_STATE_IDLE &&
qh->qtd_list.next == &qtd->qtd_list &&
(hw->hw_token & ACTIVE_BIT(ehci))) {
token = hc32_to_cpu(ehci, hw->hw_token);
hw->hw_token &= ~ACTIVE_BIT(ehci);
qh->should_be_inactive = 1;
/* An unlink may leave an incomplete
* async transaction in the TT buffer.
* We have to clear it.
*/
ehci_clear_tt_buffer(ehci, qh, urb, token);
}
}
/* unless we already know the urb's status, collect qtd status
* and update count of bytes transferred. in common short read
* cases with only one data qtd (including control transfers),
* queue processing won't halt. but with two or more qtds (for
* example, with a 32 KB transfer), when the first qtd gets a
* short read the second must be removed by hand.
*/
if (last_status == -EINPROGRESS) {
last_status = qtd_copy_status(ehci, urb,
qtd->length, token);
if (last_status == -EREMOTEIO
&& (qtd->hw_alt_next
& EHCI_LIST_END(ehci)))
last_status = -EINPROGRESS;
/* As part of low/full-speed endpoint-halt processing
* we must clear the TT buffer (11.17.5).
*/
if (unlikely(last_status != -EINPROGRESS &&
last_status != -EREMOTEIO)) {
/* The TT's in some hubs malfunction when they
* receive this request following a STALL (they
* stop sending isochronous packets). Since a
* STALL can't leave the TT buffer in a busy
* state (if you believe Figures 11-48 - 11-51
* in the USB 2.0 spec), we won't clear the TT
* buffer in this case. Strictly speaking this
* is a violation of the spec.
*/
if (last_status != -EPIPE)
ehci_clear_tt_buffer(ehci, qh, urb,
token);
}
}
/* if we're removing something not at the queue head,
* patch the hardware queue pointer.
*/
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry (qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
/* remove qtd; it's recycled after possible urb completion */
list_del (&qtd->qtd_list);
last = qtd;
/* reinit the xacterr counter for the next qtd */
qh->xacterrs = 0;
}
/* last urb's completion might still need calling */
if (likely (last != NULL)) {
ehci_urb_done(ehci, last->urb, last_status);
ehci_qtd_free (ehci, last);
}
/* Do we need to rescan for URBs dequeued during a giveback? */
if (unlikely(qh->dequeue_during_giveback)) {
/* If the QH is already unlinked, do the rescan now. */
if (state == QH_STATE_IDLE)
goto rescan;
/* Otherwise the caller must unlink the QH. */
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*
* We won't refresh a QH that's linked (after the HC
* stopped the queue). That avoids a race:
* - HC reads first part of QH;
* - CPU updates that first part and the token;
* - HC reads rest of that QH, including token
* Result: HC gets an inconsistent image, and then
* DMAs to/from the wrong memory (corrupting it).
*
* That should be rare for interrupt transfers,
* except maybe high bandwidth ...
*/
if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;
/* Let the caller know if the QH needs to be unlinked. */
return qh->unlink_reason;
}
/*-------------------------------------------------------------------------*/
/*
* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list
) {
struct list_head *entry, *temp;
list_for_each_safe (entry, temp, qtd_list) {
struct ehci_qtd *qtd;
qtd = list_entry (entry, struct ehci_qtd, qtd_list);
list_del (&qtd->qtd_list);
ehci_qtd_free (ehci, qtd);
}
}
/*
* create a list of filled qtds for this URB; won't link into qh.
*/
static struct list_head *
qh_urb_transaction (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *head,
gfp_t flags
) {
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, this_sg_len, maxpacket;
int is_input;
u32 token;
int i;
struct scatterlist *sg;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
return NULL;
list_add_tail (&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein (urb->pipe);
if (usb_pipecontrol (urb->pipe)) {
/* SETUP pid */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof (struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
token |= (1 /* "in" */ << 8);
}
/*
* data transfer stage: buffer setup
*/
i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
buf = sg_dma_address(sg);
/* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
this_sg_len = min_t(int, sg_dma_len(sg), len);
} else {
sg = NULL;
buf = urb->transfer_dma;
this_sg_len = len;
}
if (is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_endpoint_maxp(&urb->ep->desc);
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
for (;;) {
unsigned int this_qtd_len;
this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
maxpacket);
this_sg_len -= this_qtd_len;
len -= this_qtd_len;
buf += this_qtd_len;
/*
* short reads advance to a "magic" dummy instead of the next
* qtd ... that forces the queue to stop, for manual cleanup.
* (this will usually be overridden later.)
*/
if (is_input)
qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely(this_sg_len <= 0)) {
if (--i <= 0 || len <= 0)
break;
sg = sg_next(sg);
buf = sg_dma_address(sg);
this_sg_len = min_t(int, sg_dma_len(sg), len);
}
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
}
/*
* unless the caller requires manual cleanup after short reads,
* have the alt_next mechanism keep the queue running after the
* last data qtd (the only one, for control and most other cases).
*/
if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol (urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END(ehci);
/*
* control requests may need a terminating data "status" ack;
* other OUT ones may need a terminating short packet
* (zero length).
*/
if (likely (urb->transfer_buffer_length != 0)) {
int one_more = 0;
if (usb_pipecontrol (urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipeout(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill(ehci, qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
return head;
cleanup:
qtd_list_free (ehci, urb, head);
return NULL;
}
/*-------------------------------------------------------------------------*/
// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established. Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *
qh_make (
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
struct usb_host_endpoint *ep;
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
int mult;
struct usb_tt *tt = urb->dev->tt;
struct ehci_qh_hw *hw;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
ep = usb_pipe_endpoint (urb->dev, urb->pipe);
maxp = usb_endpoint_maxp (&ep->desc);
mult = usb_endpoint_maxp_mult (&ep->desc);
/* 1024 byte maxpacket is a hardware ceiling. High bandwidth
* acts like up to 3KB, but is built from smaller packets.
*/
if (maxp > 1024) {
ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
goto done;
}
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
unsigned tmp;
qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0, mult * maxp));
qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->ps.c_usecs = 0;
qh->gap_uf = 0;
if (urb->interval > 1 && urb->interval < 8) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
urb->interval = 1;
} else if (urb->interval > ehci->periodic_size << 3) {
urb->interval = ehci->periodic_size << 3;
}
qh->ps.period = urb->interval >> 3;
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
} else {
int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
qh->ps.usecs = HS_USECS(1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->ps.usecs += HS_USECS(1);
qh->ps.c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp));
if (urb->interval > ehci->periodic_size)
urb->interval = ehci->periodic_size;
qh->ps.period = urb->interval;
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
urb->ep->desc.bInterval);
tmp = rounddown_pow_of_two(tmp);
/* Allow urb->interval to override */
qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
qh->ps.bw_uperiod = qh->ps.bw_period << 3;
}
}
/* support for tt scheduling, and access to toggles */
qh->ps.udev = urb->dev;
qh->ps.ep = urb->ep;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= QH_LOW_SPEED;
fallthrough;
case USB_SPEED_FULL:
/* EPS 0 means "full" */
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= QH_CONTROL_EP; /* for TT */
info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
}
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
/* Some Freescale processors have an erratum in which the
* port number in the queue head was 0..N-1 instead of 1..N.
*/
if (ehci_has_fsl_portno_bug(ehci))
info2 |= (urb->dev->ttport-1) << 23;
else
info2 |= urb->dev->ttport << 23;
/* set the address of the TT; for TDI's integrated
* root hub tt, leave it zeroed.
*/
if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
info2 |= tt->hub->devnum << 16;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= QH_HIGH_SPEED;
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
/* The USB spec says that high speed bulk endpoints
* always use 512 byte maxpacket. But some device
* vendors decided to ignore that, and MSFT is happy
* to help them do so. So now people expect to use
* such nonconformant devices with Linux too; sigh.
*/
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= maxp << 16;
info2 |= mult << 30;
}
break;
default:
ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
urb->dev->speed);
done:
qh_destroy(ehci, qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
/* init as live, toggle clear */
qh->qh_state = QH_STATE_IDLE;
hw = qh->hw;
hw->hw_info1 = cpu_to_hc32(ehci, info1);
hw->hw_info2 = cpu_to_hc32(ehci, info2);
qh->is_out = !is_input;
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
return qh;
}
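/*
 * Worked sketch of the high-speed interrupt math above (hypothetical
 * helper, not used by the driver): for bInterval 4 the endpoint asks
 * for a 2^(4-1) = 8-uframe period, EHCI_BANDWIDTH_SIZE caps it, and a
 * smaller urb->interval may override it downward.
 *
 *	static unsigned int hs_bw_uperiod(unsigned int bInterval,
 *			unsigned int interval)
 *	{
 *		unsigned int tmp = min_t(unsigned int, EHCI_BANDWIDTH_SIZE,
 *				1U << (bInterval - 1));
 *		return min_t(unsigned int, tmp, interval);
 *	}
 */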
/*-------------------------------------------------------------------------*/
static void enable_async(struct ehci_hcd *ehci)
{
if (ehci->async_count++)
return;
/* Stop waiting to turn off the async schedule */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
/* Don't start the schedule until ASS is 0 */
ehci_poll_ASS(ehci);
turn_on_io_watchdog(ehci);
}
static void disable_async(struct ehci_hcd *ehci)
{
if (--ehci->async_count)
return;
/* The async schedule and unlink lists are supposed to be empty */
WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
!list_empty(&ehci->async_idle));
/* Don't turn off the schedule until ASS is 1 */
ehci_poll_ASS(ehci);
}
/* move qh (and its qtds) onto async queue; maybe enable queue. */
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
__hc32 dma = QH_NEXT(ehci, qh->qh_dma);
struct ehci_qh *head;
/* Don't link a QH if there's a Clear-TT-Buffer pending */
if (unlikely(qh->clearing_tt))
return;
WARN_ON(qh->qh_state != QH_STATE_IDLE);
/* clear halt and/or toggle; and maybe recover from silicon quirk */
qh_refresh(ehci, qh);
/* splice right after start */
head = ehci->async;
qh->qh_next = head->qh_next;
qh->hw->hw_next = head->hw->hw_next;
wmb ();
head->qh_next.qh = qh;
head->hw->hw_next = dma;
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
qh->unlink_reason = 0;
/* qtd completions reported later by interrupt */
enable_async(ehci);
}
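/*
 * The splice above follows the usual lock-free publish order; a generic
 * sketch with hypothetical names:
 *
 *	new->next = head->next;		// initialize the new node first
 *	new->hw_next = head->hw_next;
 *	wmb();				// order initialization before publish
 *	head->next = new;		// only now can the HC discover it
 *	head->hw_next = dma;
 *
 * so the controller never follows a half-initialized horizontal link.
 */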
/*-------------------------------------------------------------------------*/
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns NULL if it can't allocate a QH that it needs.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int epnum,
void **ptr
)
{
struct ehci_qh *qh = NULL;
__hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
qh = (struct ehci_qh *) *ptr;
if (unlikely (qh == NULL)) {
/* can't sleep here, we have ehci->lock... */
qh = qh_make (ehci, urb, GFP_ATOMIC);
*ptr = qh;
}
if (likely (qh != NULL)) {
struct ehci_qtd *qtd;
if (unlikely (list_empty (qtd_list)))
qtd = NULL;
else
qtd = list_entry (qtd_list->next, struct ehci_qtd,
qtd_list);
/* control qh may need patching ... */
if (unlikely (epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
qh->hw->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely (qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
__hc32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we're done, when the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT(ehci);
dummy = qh->dummy;
dma = dummy->qtd_dma;
*dummy = *qtd;
dummy->qtd_dma = dma;
list_del (&qtd->qtd_list);
list_add (&dummy->qtd_list, qtd_list);
list_splice_tail(qtd_list, &qh->qtd_list);
ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(ehci, dma);
/* let the hc process these next qtds */
wmb ();
dummy->hw_token = token;
urb->hcpriv = qh;
}
}
return qh;
}
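/*
 * Sketch of the dummy-qtd swap above in isolation (hypothetical,
 * simplified).  Both qtds stay halted during the splice; writing the
 * saved token last is what hands the new chain to the controller
 * (EHCI spec 4.10.2):
 *
 *	token = first->hw_token;
 *	first->hw_token = HALT_BIT(ehci);	// keep the HC off the list
 *	*dummy = *first;			// old dummy takes over...
 *	dummy->qtd_dma = dma;			// ...at its own DMA address
 *	// re-initialize the first qtd as the new dummy at the list end
 *	wmb();
 *	dummy->hw_token = token;		// release the chain to the HC
 */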
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
) {
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc;
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
{
struct ehci_qtd *qtd;
qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
ehci_dbg(ehci,
"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, urb->ep->hcpriv);
}
#endif
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
rc = -ESHUTDOWN;
goto done;
}
rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(rc))
goto done;
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
rc = -ENOMEM;
goto done;
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async(ehci, qh);
done:
spin_unlock_irqrestore (&ehci->lock, flags);
if (unlikely (qh == NULL))
qtd_list_free (ehci, urb, qtd_list);
return rc;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
/*
* This function creates the qtds and submits them for the
* SINGLE_STEP_SET_FEATURE Test.
* This is done in two parts: first the SETUP request for GetDescriptor is
* sent; then, 15 seconds later, the IN stage starts requesting the data
* from the device.
*
* is_setup : input argument that selects which of the two stages is
* performed; TRUE - SETUP, FALSE - IN+STATUS
* Returns 0 on success
*/
static int ehci_submit_single_step_set_feature(
struct usb_hcd *hcd,
struct urb *urb,
int is_setup
) {
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct list_head qtd_list;
struct list_head *head;
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
u32 token;
INIT_LIST_HEAD(&qtd_list);
head = &qtd_list;
/* URBs map to sequences of QTDs: one logical transaction */
qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
if (unlikely(!qtd))
return -1;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
len = urb->transfer_buffer_length;
/*
* Check if the request is to perform just the SETUP stage (GetDescriptor),
* as in the SINGLE_STEP_SET_FEATURE test; the DATA stage (IN) happens
* 15 seconds after the SETUP
*/
if (is_setup) {
/* SETUP pid, and interrupt after SETUP completion */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof(struct usb_ctrlrequest),
QTD_IOC | token | (2 /* "setup" */ << 8), 8);
submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
return 0; /* Return now; we shall come back after 15 seconds */
}
/*
* IN data transfer stage: set up the buffer and start the IN
* transaction phase for the GetDescriptor SETUP sent 15 seconds earlier
*/
token ^= QTD_TOGGLE; /* the IN stage must start with a DATA1 PID sequence */
buf = urb->transfer_dma;
token |= (1 /* "in" */ << 8); /* this is the IN stage */
maxpacket = usb_endpoint_maxp(&urb->ep->desc);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
/*
* Our IN phase will always be a short read, so keep the queue running
* and let it advance to the next qtd, the zero-length OUT status
*/
qtd->hw_alt_next = EHCI_LIST_END(ehci);
/* STATUS stage for GetDesc control request */
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
qtd_prev = qtd;
qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* Interrupt after STATUS completion */
qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
return 0;
cleanup:
qtd_list_free(ehci, urb, head);
return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */
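/*
 * Usage sketch (assumed EHSET flow, not taken from this file): the test
 * harness invokes the routine twice around the mandated 15 second pause,
 * roughly:
 *
 *	ehci_submit_single_step_set_feature(hcd, urb, 1);	// SETUP
 *	msleep(15 * 1000);
 *	ehci_submit_single_step_set_feature(hcd, urb, 0);	// IN + STATUS
 */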
/*-------------------------------------------------------------------------*/
static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh *prev;
/* Add to the end of the list of QHs waiting for the next IAAD */
qh->qh_state = QH_STATE_UNLINK_WAIT;
list_add_tail(&qh->unlink_node, &ehci->async_unlink);
/* Unlink it from the schedule */
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
if (ehci->qh_scan_next == qh)
ehci->qh_scan_next = qh->qh_next.qh;
}
static void start_iaa_cycle(struct ehci_hcd *ehci)
{
/* If the controller isn't running, we don't have to wait for it */
if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
end_unlink_async(ehci);
/* Otherwise start a new IAA cycle if one isn't already running */
} else if (ehci->rh_state == EHCI_RH_RUNNING &&
!ehci->iaa_in_progress) {
/* Make sure the unlinks are all visible to the hardware */
wmb();
ehci_writel(ehci, ehci->command | CMD_IAAD,
&ehci->regs->command);
ehci_readl(ehci, &ehci->regs->command);
ehci->iaa_in_progress = true;
ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
}
}
static void end_iaa_cycle(struct ehci_hcd *ehci)
{
if (ehci->has_synopsys_hc_bug)
ehci_writel(ehci, (u32) ehci->async->qh_dma,
&ehci->regs->async_next);
/* The current IAA cycle has ended */
ehci->iaa_in_progress = false;
end_unlink_async(ehci);
}
/* See if the async qh for the qtds being unlinked is now gone from the HC */
static void end_unlink_async(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
bool early_exit;
if (list_empty(&ehci->async_unlink))
return;
qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
unlink_node); /* QH whose IAA cycle just ended */
/*
* If async_unlinking is set then this routine is already running,
* either on the stack or on another CPU.
*/
early_exit = ehci->async_unlinking;
/* If the controller isn't running, process all the waiting QHs */
if (ehci->rh_state < EHCI_RH_RUNNING)
list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
/*
* Intel (?) bug: The HC can write back the overlay region even
* after the IAA interrupt occurs. In self-defense, always go
* through two IAA cycles for each QH.
*/
else if (qh->qh_state == QH_STATE_UNLINK) {
/*
* Second IAA cycle has finished. Process only the first
* waiting QH (NVIDIA (?) bug).
*/
list_move_tail(&qh->unlink_node, &ehci->async_idle);
}
/*
* AMD/ATI (?) bug: The HC can continue to use an active QH long
* after the IAA interrupt occurs. To prevent problems, QHs that
* may still be active will wait until 2 ms have passed with no
* change to the hw_current and hw_token fields (this delay occurs
* between the two IAA cycles).
*
* The EHCI spec (4.8.2) says that active QHs must not be removed
* from the async schedule and recommends waiting until the QH
* goes inactive. This is ridiculous because the QH will _never_
* become inactive if the endpoint NAKs indefinitely.
*/
/* Some reasons for unlinking guarantee the QH can't be active */
else if (qh->unlink_reason & (QH_UNLINK_HALTED |
QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
goto DelayDone;
/* The QH can't be active if the queue was and still is empty... */
else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
list_empty(&qh->qtd_list))
goto DelayDone;
/* ... or if the QH has halted */
else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
goto DelayDone;
/* Otherwise we have to wait until the QH stops changing */
else {
__hc32 qh_current, qh_token;
qh_current = qh->hw->hw_current;
qh_token = qh->hw->hw_token;
if (qh_current != ehci->old_current ||
qh_token != ehci->old_token) {
ehci->old_current = qh_current;
ehci->old_token = qh_token;
ehci_enable_event(ehci,
EHCI_HRTIMER_ACTIVE_UNLINK, true);
return;
}
DelayDone:
qh->qh_state = QH_STATE_UNLINK;
early_exit = true;
}
ehci->old_current = ~0; /* Prepare for next QH */
/* Start a new IAA cycle if any QHs are waiting for it */
if (!list_empty(&ehci->async_unlink))
start_iaa_cycle(ehci);
/*
* Don't allow nesting or concurrent calls,
* or wait for the second IAA cycle for the next QH.
*/
if (early_exit)
return;
/* Process the idle QHs */
ehci->async_unlinking = true;
while (!list_empty(&ehci->async_idle)) {
qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
unlink_node);
list_del(&qh->unlink_node);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
if (!list_empty(&qh->qtd_list))
qh_completions(ehci, qh);
if (!list_empty(&qh->qtd_list) &&
ehci->rh_state == EHCI_RH_RUNNING)
qh_link_async(ehci, qh);
disable_async(ehci);
}
ehci->async_unlinking = false;
}
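/*
 * The "wait until the QH stops changing" test above amounts to comparing
 * two samples of the overlay taken one ACTIVE_UNLINK timer period apart;
 * a minimal sketch as a hypothetical helper:
 *
 *	static bool qh_went_quiet(struct ehci_hcd *ehci, struct ehci_qh *qh)
 *	{
 *		bool same = qh->hw->hw_current == ehci->old_current &&
 *			    qh->hw->hw_token == ehci->old_token;
 *		ehci->old_current = qh->hw->hw_current;
 *		ehci->old_token = qh->hw->hw_token;
 *		return same;
 *	}
 */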
static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_empty_async(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
struct ehci_qh *qh_to_unlink = NULL;
int count = 0;
/* Find the last async QH which has been empty for a timer cycle */
for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
if (list_empty(&qh->qtd_list) &&
qh->qh_state == QH_STATE_LINKED) {
++count;
if (qh->unlink_cycle != ehci->async_unlink_cycle)
qh_to_unlink = qh;
}
}
/* If nothing else is being unlinked, unlink the last empty QH */
if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
start_unlink_async(ehci, qh_to_unlink);
--count;
}
/* Other QHs will be handled later */
if (count > 0) {
ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
++ehci->async_unlink_cycle;
}
}
#ifdef CONFIG_PM
/* The root hub is suspended; unlink all the async QHs */
static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
while (ehci->async->qh_next.qh) {
qh = ehci->async->qh_next.qh;
WARN_ON(!list_empty(&qh->qtd_list));
single_unlink_async(ehci, qh);
}
}
#endif
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* If the QH isn't linked then there's nothing we can do. */
if (qh->qh_state != QH_STATE_LINKED)
return;
single_unlink_async(ehci, qh);
start_iaa_cycle(ehci);
}
/*-------------------------------------------------------------------------*/
static void scan_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
bool check_unlinks_later = false;
ehci->qh_scan_next = ehci->async->qh_next.qh;
while (ehci->qh_scan_next) {
qh = ehci->qh_scan_next;
ehci->qh_scan_next = qh->qh_next.qh;
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)) {
int temp;
/*
* Unlinks could happen here; completion reporting
* drops the lock. That's why ehci->qh_scan_next
* always holds the next qh to scan; if the next qh
* gets unlinked then ehci->qh_scan_next is adjusted
* in single_unlink_async().
*/
temp = qh_completions(ehci, qh);
if (unlikely(temp)) {
start_unlink_async(ehci, qh);
} else if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
qh->unlink_cycle = ehci->async_unlink_cycle;
check_unlinks_later = true;
}
}
}
/*
* Unlink empty entries, reducing DMA usage as well
* as HCD schedule-scanning costs.  Delay for any qh
* we just scanned; it's not unusual for such a qh to
* become busy again soon.
*/
if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
!(ehci->enabled_hrtimer_events &
BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
++ehci->async_unlink_cycle;
}
}
| linux-master | drivers/usb/host/ehci-q.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI UHP on Atmel chips
*
* Copyright (C) 2009 Atmel Corporation,
* Nicolas Ferre <[email protected]>
*
* Based on various ehci-*.c drivers
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>
#include <linux/usb/of.h>
#include "ehci.h"
#define DRIVER_DESC "EHCI Atmel driver"
#define EHCI_INSNREG(index) ((index) * 4 + 0x90)
#define EHCI_INSNREG08_HSIC_EN BIT(2)
/* interface and function clocks */
#define hcd_to_atmel_ehci_priv(h) \
((struct atmel_ehci_priv *)hcd_to_ehci(h)->priv)
struct atmel_ehci_priv {
struct clk *iclk;
struct clk *uclk;
bool clocked;
};
static struct hc_driver __read_mostly ehci_atmel_hc_driver;
static const struct ehci_driver_overrides ehci_atmel_drv_overrides __initconst = {
.extra_priv_size = sizeof(struct atmel_ehci_priv),
};
/*-------------------------------------------------------------------------*/
static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
{
if (atmel_ehci->clocked)
return;
clk_prepare_enable(atmel_ehci->uclk);
clk_prepare_enable(atmel_ehci->iclk);
atmel_ehci->clocked = true;
}
static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
{
if (!atmel_ehci->clocked)
return;
clk_disable_unprepare(atmel_ehci->iclk);
clk_disable_unprepare(atmel_ehci->uclk);
atmel_ehci->clocked = false;
}
static void atmel_start_ehci(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
dev_dbg(&pdev->dev, "start\n");
atmel_start_clock(atmel_ehci);
}
static void atmel_stop_ehci(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
atmel_stop_clock(atmel_ehci);
}
/*-------------------------------------------------------------------------*/
static int ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
const struct hc_driver *driver = &ehci_atmel_hc_driver;
struct resource *res;
struct ehci_hcd *ehci;
struct atmel_ehci_priv *atmel_ehci;
int irq;
int retval;
if (usb_disabled())
return -ENODEV;
pr_debug("Initializing Atmel-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail_create_hcd;
}
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
goto fail_create_hcd;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail_create_hcd;
}
atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
atmel_ehci->iclk = devm_clk_get(&pdev->dev, "ehci_clk");
if (IS_ERR(atmel_ehci->iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = -ENOENT;
goto fail_request_resource;
}
atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
if (IS_ERR(atmel_ehci->uclk)) {
dev_err(&pdev->dev, "failed to get uclk\n");
retval = PTR_ERR(atmel_ehci->uclk);
goto fail_request_resource;
}
ehci = hcd_to_ehci(hcd);
/* registers start at offset 0x0 */
ehci->caps = hcd->regs;
atmel_start_ehci(pdev);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto fail_add_hcd;
device_wakeup_enable(hcd->self.controller);
if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC)
writel(EHCI_INSNREG08_HSIC_EN, hcd->regs + EHCI_INSNREG(8));
return retval;
fail_add_hcd:
atmel_stop_ehci(pdev);
fail_request_resource:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(&pdev->dev, "init %s failed, error %d\n",
dev_name(&pdev->dev), retval);
return retval;
}
static void ehci_atmel_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
atmel_stop_ehci(pdev);
}
static int __maybe_unused ehci_atmel_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
int ret;
ret = ehci_suspend(hcd, false);
if (ret)
return ret;
atmel_stop_clock(atmel_ehci);
return 0;
}
static int __maybe_unused ehci_atmel_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
atmel_start_clock(atmel_ehci);
ehci_resume(hcd, false);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id atmel_ehci_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-ehci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
#endif
static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend,
ehci_atmel_drv_resume);
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
.remove_new = ehci_atmel_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "atmel-ehci",
.pm = &ehci_atmel_pm_ops,
.of_match_table = of_match_ptr(atmel_ehci_dt_ids),
},
};
static int __init ehci_atmel_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_atmel_hc_driver, &ehci_atmel_drv_overrides);
return platform_driver_register(&ehci_atmel_driver);
}
module_init(ehci_atmel_init);
static void __exit ehci_atmel_cleanup(void)
{
platform_driver_unregister(&ehci_atmel_driver);
}
module_exit(ehci_atmel_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:atmel-ehci");
MODULE_AUTHOR("Nicolas Ferre");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-atmel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ISP1362 HCD (Host Controller Driver) for USB.
*
* Copyright (C) 2005 Lothar Wassmann <[email protected]>
*
* Derived from the SL811 HCD, rewritten for ISP116x.
* Copyright (C) 2005 Olav Kongas <[email protected]>
*
* Portions:
* Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
* Copyright (C) 2004 David Brownell
*/
/*
* The ISP1362 chip requires a large delay (300ns and 462ns) between
* accesses to the address and data register.
* The following timing options exist:
*
* 1. Configure your memory controller to add such delays if it can (the best)
* 2. Implement platform-specific delay function possibly
* combined with configuring the memory controller; see
* include/linux/usb_isp1362.h for more info.
* 3. Use ndelay (easiest, poorest).
*
* Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
* platform specific section of isp1362.h to select the appropriate variant.
*
* Also note that according to the Philips "ISP1362 Errata" document
* Rev 1.00 from 27 May, data corruption may occur when the #WR signal
* is reasserted (even with #CS deasserted) within 132ns after a
* write cycle to any controller register. If the hardware doesn't
* implement the recommended fix (gating the #WR with #CS) software
* must ensure that no further write cycle (not necessarily to the chip!)
* is issued by the CPU within this interval.
* For PXA25x this can be ensured by using VLIO with the maximum
* recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
*/
#undef ISP1362_DEBUG
/*
* The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
* GET_INTERFACE requests correctly when the SETUP and DATA stages of the
* requests are carried out in separate frames. This will delay any SETUP
* packets until the start of the next frame so that this situation is
* unlikely to occur (and makes usbtest happy running with a PXA255 target
* device).
*/
#undef BUGGY_PXA2XX_UDC_USBTEST
#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS
/* This enables a memory test on the ISP1362 chip memory to make sure the
* chip access timing is correct.
*/
#undef CHIP_BUFFER_TEST
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#endif
#include "../core/usb.h"
#include "isp1362.h"
#define DRIVER_VERSION "2005-04-04"
#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static const char hcd_name[] = "isp1362-hcd";
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
/*-------------------------------------------------------------------------*/
/*
* When called from the interrupthandler only isp1362_hcd->irqenb is modified,
* since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
* completion.
* We don't need a 'disable' counterpart, since interrupts will be disabled
* only by the interrupt handler.
*/
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
return;
if (mask & ~isp1362_hcd->irqenb)
isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
isp1362_hcd->irqenb |= mask;
if (isp1362_hcd->irq_active)
return;
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
/*-------------------------------------------------------------------------*/
static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
u16 offset)
{
struct isp1362_ep_queue *epq = NULL;
if (offset < isp1362_hcd->istl_queue[1].buf_start)
epq = &isp1362_hcd->istl_queue[0];
else if (offset < isp1362_hcd->intl_queue.buf_start)
epq = &isp1362_hcd->istl_queue[1];
else if (offset < isp1362_hcd->atl_queue.buf_start)
epq = &isp1362_hcd->intl_queue;
else if (offset < isp1362_hcd->atl_queue.buf_start +
isp1362_hcd->atl_queue.buf_size)
epq = &isp1362_hcd->atl_queue;
if (epq)
DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
else
pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
return epq;
}
static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
{
int offset;
if (index * epq->blk_size > epq->buf_size) {
pr_warn("%s: Bad %s index %d(%d)\n",
__func__, epq->name, index,
epq->buf_size / epq->blk_size);
return -EINVAL;
}
offset = epq->buf_start + index * epq->blk_size;
DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
return offset;
}
/*-------------------------------------------------------------------------*/
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
int mps)
{
u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
if (xfer_size < size && xfer_size % mps)
xfer_size -= xfer_size % mps;
return xfer_size;
}
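/*
 * Worked example of the rounding above: if the space term comes to 992
 * bytes after the PTD header and the request is 4096 bytes with mps 64,
 * xfer_size is first clipped to 992 and then cut back to 960 (15 full
 * packets), so no unintended short packet appears mid-transfer.
 */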
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
struct isp1362_ep *ep, u16 len)
{
int ptd_offset = -EINVAL;
int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
int found;
BUG_ON(len > epq->buf_size);
if (!epq->buf_avail)
return -ENOMEM;
if (ep->num_ptds)
pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
BUG_ON(ep->num_ptds != 0);
found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
num_ptds, 0);
if (found >= epq->buf_count)
return -EOVERFLOW;
DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
ptd_offset = get_ptd_offset(epq, found);
WARN_ON(ptd_offset < 0);
ep->ptd_offset = ptd_offset;
ep->num_ptds += num_ptds;
epq->buf_avail -= num_ptds;
BUG_ON(epq->buf_avail > epq->buf_count);
ep->ptd_index = found;
bitmap_set(&epq->buf_map, found, num_ptds);
DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
__func__, epq->name, ep->ptd_index, ep->ptd_offset,
epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
return found;
}
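/*
 * Worked example of the block math above, assuming the 8-byte PTD
 * header and blk_size 64: a 40-byte transfer needs (40 + 8 - 1) / 64
 * + 1 = 1 block, so a single bit is claimed in buf_map; a 120-byte
 * transfer needs (120 + 8 - 1) / 64 + 1 = 2 blocks.
 */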
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
int last = ep->ptd_index + ep->num_ptds;
if (last > epq->buf_count)
pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
__func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
epq->buf_map, epq->skip_map);
BUG_ON(last > epq->buf_count);
bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
epq->buf_avail += ep->num_ptds;
epq->ptd_count--;
BUG_ON(epq->buf_avail > epq->buf_count);
BUG_ON(epq->ptd_count > epq->buf_count);
DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
__func__, epq->name,
ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
epq->buf_map, epq->skip_map);
ep->num_ptds = 0;
ep->ptd_offset = -EINVAL;
ep->ptd_index = -EINVAL;
}
/*-------------------------------------------------------------------------*/
/*
 * Set up PTDs.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
u16 fno)
{
struct ptd *ptd;
int toggle;
int dir;
u16 len;
size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
ptd = &ep->ptd;
ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
switch (ep->nextpid) {
case USB_PID_IN:
toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
dir = PTD_DIR_IN;
if (usb_pipecontrol(urb->pipe)) {
len = min_t(size_t, ep->maxpacket, buf_len);
} else if (usb_pipeisoc(urb->pipe)) {
len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
} else
len = max_transfer_size(epq, buf_len, ep->maxpacket);
DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
(int)buf_len);
break;
case USB_PID_OUT:
toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
dir = PTD_DIR_OUT;
if (usb_pipecontrol(urb->pipe))
len = min_t(size_t, ep->maxpacket, buf_len);
else if (usb_pipeisoc(urb->pipe))
len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
else
len = max_transfer_size(epq, buf_len, ep->maxpacket);
if (len == 0)
pr_info("%s: Sending ZERO packet: %d\n", __func__,
urb->transfer_flags & URB_ZERO_PACKET);
DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
(int)buf_len);
break;
case USB_PID_SETUP:
toggle = 0;
dir = PTD_DIR_SETUP;
len = sizeof(struct usb_ctrlrequest);
DBG(1, "%s: SETUP len %d\n", __func__, len);
ep->data = urb->setup_packet;
break;
case USB_PID_ACK:
toggle = 1;
len = 0;
dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
PTD_DIR_OUT : PTD_DIR_IN;
DBG(1, "%s: ACK len %d\n", __func__, len);
break;
default:
toggle = dir = len = 0;
pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
BUG_ON(1);
}
ep->length = len;
if (!len)
ep->data = NULL;
ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
PTD_EP(ep->epnum);
ptd->len = PTD_LEN(len) | PTD_DIR(dir);
ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
if (usb_pipeint(urb->pipe)) {
ptd->faddr |= PTD_SF_INT(ep->branch);
ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
}
if (usb_pipeisoc(urb->pipe))
ptd->faddr |= PTD_SF_ISO(fno);
DBG(1, "%s: Finished\n", __func__);
}
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
struct isp1362_ep_queue *epq)
{
struct ptd *ptd = &ep->ptd;
int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
prefetch(ptd);
isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
if (len)
isp1362_write_buffer(isp1362_hcd, ep->data,
ep->ptd_offset + PTD_HEADER_SIZE, len);
dump_ptd(ptd);
dump_ptd_out_data(ptd, ep->data);
}
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
struct isp1362_ep_queue *epq)
{
struct ptd *ptd = &ep->ptd;
int act_len;
WARN_ON(list_empty(&ep->active));
BUG_ON(ep->ptd_offset < 0);
list_del_init(&ep->active);
DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
prefetchw(ptd);
isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
dump_ptd(ptd);
act_len = PTD_GET_COUNT(ptd);
if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
return;
if (act_len > ep->length)
pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
ep->ptd_offset, act_len, ep->length);
BUG_ON(act_len > ep->length);
/* Only transfer the amount of data that has actually been overwritten
* in the chip buffer. We don't want any data that doesn't belong to the
* transfer to leak out of the chip to the caller's transfer buffer!
*/
prefetchw(ep->data);
isp1362_read_buffer(isp1362_hcd, ep->data,
ep->ptd_offset + PTD_HEADER_SIZE, act_len);
dump_ptd_in_data(ptd, ep->data);
}
/*
* INT PTDs will stay in the chip until data is available.
* This function will remove a PTD from the chip when the URB is dequeued.
* Must be called with the spinlock held and IRQs disabled
*/
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
int index;
struct isp1362_ep_queue *epq;
DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
BUG_ON(ep->ptd_offset < 0);
epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
BUG_ON(!epq);
/* put ep in remove_list for cleanup */
WARN_ON(!list_empty(&ep->remove_list));
list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
/* let SOF interrupt handle the cleanup */
isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
index = ep->ptd_index;
if (index < 0)
/* ISO queues don't have SKIP registers */
return;
DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
index, ep->ptd_offset, epq->skip_map, 1 << index);
/* prevent further processing of PTD (will be effective after next SOF) */
epq->skip_map |= 1 << index;
if (epq == &isp1362_hcd->atl_queue) {
DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
if (~epq->skip_map == 0)
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
} else if (epq == &isp1362_hcd->intl_queue) {
DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
if (~epq->skip_map == 0)
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
}
}
/*
 * Take done or failed requests out of the schedule. Give back
 * processed urbs.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
urb->hcpriv = NULL;
ep->error_count = 0;
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_SETUP;
URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
ep->num_req, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
!usb_pipein(urb->pipe) ? "out" : "in",
usb_pipecontrol(urb->pipe) ? "ctrl" :
usb_pipeint(urb->pipe) ? "int" :
usb_pipebulk(urb->pipe) ? "bulk" :
"iso",
urb->actual_length, urb->transfer_buffer_length,
!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
"short_ok" : "", urb->status);
usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
spin_unlock(&isp1362_hcd->lock);
usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
spin_lock(&isp1362_hcd->lock);
/* take idle endpoints out of the schedule right away */
if (!list_empty(&ep->hep->urb_list))
return;
/* async deschedule */
if (!list_empty(&ep->schedule)) {
list_del_init(&ep->schedule);
return;
}
if (ep->interval) {
/* periodic deschedule */
DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
ep, ep->branch, ep->load,
isp1362_hcd->load[ep->branch],
isp1362_hcd->load[ep->branch] - ep->load);
isp1362_hcd->load[ep->branch] -= ep->load;
ep->branch = PERIODIC_SIZE;
}
}
/*
* Analyze transfer results, handle partial transfers and errors
*/
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
struct urb *urb = get_urb(ep);
struct usb_device *udev;
struct ptd *ptd;
int short_ok;
u16 len;
int urbstat = -EINPROGRESS;
u8 cc;
DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
udev = urb->dev;
ptd = &ep->ptd;
cc = PTD_GET_CC(ptd);
if (cc == PTD_NOTACCESSED) {
pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
ep->num_req, ptd);
cc = PTD_DEVNOTRESP;
}
short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
len = urb->transfer_buffer_length - urb->actual_length;
/* Data underrun is special. For an allowed underrun we
 * clear the error and continue as normal. For a forbidden
 * underrun we finish the DATA stage immediately, except for
 * control transfers, where we proceed to the STATUS stage.
 */
if (cc == PTD_DATAUNDERRUN) {
if (short_ok) {
DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
__func__, ep->num_req, short_ok ? "" : "not_",
PTD_GET_COUNT(ptd), ep->maxpacket, len);
cc = PTD_CC_NOERROR;
urbstat = 0;
} else {
DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
__func__, ep->num_req,
usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
short_ok ? "" : "not_",
PTD_GET_COUNT(ptd), ep->maxpacket, len);
/* save the data underrun error code for later and
* proceed with the status stage
*/
urb->actual_length += PTD_GET_COUNT(ptd);
if (usb_pipecontrol(urb->pipe)) {
ep->nextpid = USB_PID_ACK;
BUG_ON(urb->actual_length > urb->transfer_buffer_length);
if (urb->status == -EINPROGRESS)
urb->status = cc_to_error[PTD_DATAUNDERRUN];
} else {
usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
PTD_GET_TOGGLE(ptd));
urbstat = cc_to_error[PTD_DATAUNDERRUN];
}
goto out;
}
}
if (cc != PTD_CC_NOERROR) {
if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
urbstat = cc_to_error[cc];
DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
__func__, ep->num_req, ep->nextpid, urbstat, cc,
ep->error_count);
}
goto out;
}
switch (ep->nextpid) {
case USB_PID_OUT:
if (PTD_GET_COUNT(ptd) != ep->length)
pr_err("%s: count=%d len=%d\n", __func__,
PTD_GET_COUNT(ptd), ep->length);
BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
urb->actual_length += ep->length;
BUG_ON(urb->actual_length > urb->transfer_buffer_length);
usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
if (urb->actual_length == urb->transfer_buffer_length) {
DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
ep->num_req, len, ep->maxpacket, urbstat);
if (usb_pipecontrol(urb->pipe)) {
DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
ep->num_req,
usb_pipein(urb->pipe) ? "IN" : "OUT");
ep->nextpid = USB_PID_ACK;
} else {
if (len % ep->maxpacket ||
!(urb->transfer_flags & URB_ZERO_PACKET)) {
urbstat = 0;
DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
__func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
urbstat, len, ep->maxpacket, urb->actual_length);
}
}
}
break;
case USB_PID_IN:
len = PTD_GET_COUNT(ptd);
BUG_ON(len > ep->length);
urb->actual_length += len;
BUG_ON(urb->actual_length > urb->transfer_buffer_length);
usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
/* if transfer completed or (allowed) data underrun */
if ((urb->transfer_buffer_length == urb->actual_length) ||
len % ep->maxpacket) {
DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
ep->num_req, len, ep->maxpacket, urbstat);
if (usb_pipecontrol(urb->pipe)) {
DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
ep->num_req,
usb_pipein(urb->pipe) ? "IN" : "OUT");
ep->nextpid = USB_PID_ACK;
} else {
urbstat = 0;
DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
__func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
urbstat, len, ep->maxpacket, urb->actual_length);
}
}
break;
case USB_PID_SETUP:
if (urb->transfer_buffer_length == urb->actual_length) {
ep->nextpid = USB_PID_ACK;
} else if (usb_pipeout(urb->pipe)) {
usb_settoggle(udev, 0, 1, 1);
ep->nextpid = USB_PID_OUT;
} else {
usb_settoggle(udev, 0, 0, 1);
ep->nextpid = USB_PID_IN;
}
break;
case USB_PID_ACK:
DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
urbstat);
WARN_ON(urbstat != -EINPROGRESS);
urbstat = 0;
ep->nextpid = 0;
break;
default:
BUG_ON(1);
}
out:
if (urbstat != -EINPROGRESS) {
DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
ep, ep->num_req, urb, urbstat);
finish_request(isp1362_hcd, ep, urb, urbstat);
}
}
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
struct isp1362_ep *ep;
struct isp1362_ep *tmp;
list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
struct isp1362_ep_queue *epq =
get_ptd_queue(isp1362_hcd, ep->ptd_offset);
int index = ep->ptd_index;
BUG_ON(epq == NULL);
if (index >= 0) {
DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
BUG_ON(ep->num_ptds == 0);
release_ptd_buffers(epq, ep);
}
if (!list_empty(&ep->hep->urb_list)) {
struct urb *urb = get_urb(ep);
DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
ep->num_req, ep);
finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
}
WARN_ON(list_empty(&ep->active));
if (!list_empty(&ep->active)) {
list_del_init(&ep->active);
DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
}
list_del_init(&ep->remove_list);
DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
}
DBG(1, "%s: Done\n", __func__);
}
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
if (count > 0) {
if (count < isp1362_hcd->atl_queue.ptd_count)
isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
} else
isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
}
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
int index;
prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
index = claim_ptd_buffers(epq, ep, ep->length);
if (index == -ENOMEM) {
DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
return index;
} else if (index == -EOVERFLOW) {
DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
__func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
epq->buf_map, epq->skip_map);
return index;
} else
BUG_ON(index < 0);
list_add_tail(&ep->active, &epq->active);
DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
ep, ep->num_req, ep->length, &epq->active);
DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
ep->ptd_offset, ep, ep->num_req);
isp1362_write_ptd(isp1362_hcd, ep, epq);
__clear_bit(ep->ptd_index, &epq->skip_map);
return 0;
}
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
int ptd_count = 0;
struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
struct isp1362_ep *ep;
int defer = 0;
if (atomic_read(&epq->finishing)) {
DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
return;
}
list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
struct urb *urb = get_urb(ep);
int ret;
if (!list_empty(&ep->active)) {
DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
continue;
}
DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
ep, ep->num_req);
ret = submit_req(isp1362_hcd, urb, ep, epq);
if (ret == -ENOMEM) {
defer = 1;
break;
} else if (ret == -EOVERFLOW) {
defer = 1;
continue;
}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
defer = ep->nextpid == USB_PID_SETUP;
#endif
ptd_count++;
}
/* Avoid starvation of endpoints */
if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
}
if (ptd_count || defer)
enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
epq->ptd_count += ptd_count;
if (epq->ptd_count > epq->stat_maxptds) {
epq->stat_maxptds = epq->ptd_count;
DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
}
}
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
int ptd_count = 0;
struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
struct isp1362_ep *ep;
if (atomic_read(&epq->finishing)) {
DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
return;
}
list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
struct urb *urb = get_urb(ep);
int ret;
if (!list_empty(&ep->active)) {
DBG(1, "%s: Skipping active %s ep %p\n", __func__,
epq->name, ep);
continue;
}
DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
epq->name, ep, ep->num_req);
ret = submit_req(isp1362_hcd, urb, ep, epq);
if (ret == -ENOMEM)
break;
else if (ret == -EOVERFLOW)
continue;
ptd_count++;
}
if (ptd_count) {
static int last_count;
if (ptd_count != last_count) {
DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
last_count = ptd_count;
}
enable_intl_transfers(isp1362_hcd);
}
epq->ptd_count += ptd_count;
if (epq->ptd_count > epq->stat_maxptds)
epq->stat_maxptds = epq->ptd_count;
}
static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
u16 ptd_offset = ep->ptd_offset;
int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
ptd_offset += num_ptds * epq->blk_size;
if (ptd_offset < epq->buf_start + epq->buf_size)
return ptd_offset;
else
return -ENOMEM;
}
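/*
 * Example of the arithmetic above, assuming the 8-byte PTD header:
 * blk_size 64 and length 100 give (100 + 8 + 63) / 64 = 2 blocks, so
 * the next PTD would start 128 bytes further into the queue buffer.
 */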
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
int ptd_count = 0;
int flip = isp1362_hcd->istl_flip;
struct isp1362_ep_queue *epq;
int ptd_offset;
struct isp1362_ep *ep;
struct isp1362_ep *tmp;
u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
fill2:
epq = &isp1362_hcd->istl_queue[flip];
if (atomic_read(&epq->finishing)) {
DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
return;
}
if (!list_empty(&epq->active))
return;
ptd_offset = epq->buf_start;
list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
struct urb *urb = get_urb(ep);
s16 diff = fno - (u16)urb->start_frame;
DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
if (diff > urb->number_of_packets) {
/* time frame for this URB has elapsed */
finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
continue;
} else if (diff < -1) {
/* URB is not due in this frame or the next one.
* Comparing with '-1' instead of '0' accounts for double
* buffering in the ISP1362 which enables us to queue the PTD
* one frame ahead of time
*/
} else if (diff == -1) {
/* submit PTD's that are due in the next frame */
prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
if (ptd_offset + PTD_HEADER_SIZE + ep->length >
epq->buf_start + epq->buf_size) {
pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
__func__, ep->length);
continue;
}
ep->ptd_offset = ptd_offset;
list_add_tail(&ep->active, &epq->active);
ptd_offset = next_ptd(epq, ep);
if (ptd_offset < 0) {
pr_warn("%s: req %d No more %s PTD buffers available\n",
__func__, ep->num_req, epq->name);
break;
}
}
}
list_for_each_entry(ep, &epq->active, active) {
if (epq->active.next == &ep->active)
ep->ptd.mps |= PTD_LAST_MSK;
isp1362_write_ptd(isp1362_hcd, ep, epq);
ptd_count++;
}
if (ptd_count)
enable_istl_transfers(isp1362_hcd, flip);
epq->ptd_count += ptd_count;
if (epq->ptd_count > epq->stat_maxptds)
epq->stat_maxptds = epq->ptd_count;
/* check whether the second ISTL buffer may also be filled */
if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
(flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
fno++;
ptd_count = 0;
flip = 1 - flip;
goto fill2;
}
}
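/*
 * Worked example of the frame-diff test above: with fno 0x3ff0 and
 * urb->start_frame 0x3ff1, diff is -1, so the PTD is queued now for
 * the next frame; the ISTL double buffering is what makes that
 * one-frame head start safe.
 */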
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
struct isp1362_ep_queue *epq)
{
struct isp1362_ep *ep;
struct isp1362_ep *tmp;
if (list_empty(&epq->active)) {
DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
return;
}
DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
atomic_inc(&epq->finishing);
list_for_each_entry_safe(ep, tmp, &epq->active, active) {
int index = ep->ptd_index;
DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
index, ep->ptd_offset);
BUG_ON(index < 0);
if (__test_and_clear_bit(index, &done_map)) {
isp1362_read_ptd(isp1362_hcd, ep, epq);
epq->free_ptd = index;
BUG_ON(ep->num_ptds == 0);
release_ptd_buffers(epq, ep);
DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
ep, ep->num_req);
if (!list_empty(&ep->remove_list)) {
list_del_init(&ep->remove_list);
DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
}
DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
ep, ep->num_req);
postproc_ep(isp1362_hcd, ep);
}
if (!done_map)
break;
}
if (done_map)
pr_warn("%s: done_map not clear: %08lx:%08lx\n",
__func__, done_map, epq->skip_map);
atomic_dec(&epq->finishing);
}
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
struct isp1362_ep *ep;
struct isp1362_ep *tmp;
if (list_empty(&epq->active)) {
DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
return;
}
DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
atomic_inc(&epq->finishing);
list_for_each_entry_safe(ep, tmp, &epq->active, active) {
DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
isp1362_read_ptd(isp1362_hcd, ep, epq);
DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
postproc_ep(isp1362_hcd, ep);
}
WARN_ON(epq->blk_size != 0);
atomic_dec(&epq->finishing);
}
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
int handled = 0;
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
u16 irqstat;
u16 svc_mask;
spin_lock(&isp1362_hcd->lock);
BUG_ON(isp1362_hcd->irq_active++);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
/* only handle interrupts that are currently enabled */
irqstat &= isp1362_hcd->irqenb;
isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
svc_mask = irqstat;
if (irqstat & HCuPINT_SOF) {
isp1362_hcd->irqenb &= ~HCuPINT_SOF;
isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
handled = 1;
svc_mask &= ~HCuPINT_SOF;
DBG(3, "%s: SOF\n", __func__);
isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
if (!list_empty(&isp1362_hcd->remove_list))
finish_unlinks(isp1362_hcd);
if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
if (list_empty(&isp1362_hcd->atl_queue.active)) {
start_atl_transfers(isp1362_hcd);
} else {
isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
isp1362_hcd->atl_queue.skip_map);
isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
}
}
}
if (irqstat & HCuPINT_ISTL0) {
isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
handled = 1;
svc_mask &= ~HCuPINT_ISTL0;
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
DBG(1, "%s: ISTL0\n", __func__);
WARN_ON((int)!!isp1362_hcd->istl_flip);
WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
HCBUFSTAT_ISTL0_ACTIVE);
WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
HCBUFSTAT_ISTL0_DONE));
isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
}
if (irqstat & HCuPINT_ISTL1) {
isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
handled = 1;
svc_mask &= ~HCuPINT_ISTL1;
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
DBG(1, "%s: ISTL1\n", __func__);
WARN_ON(!(int)isp1362_hcd->istl_flip);
WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
HCBUFSTAT_ISTL1_ACTIVE);
WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
HCBUFSTAT_ISTL1_DONE));
isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
}
if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
(HCuPINT_ISTL0 | HCuPINT_ISTL1));
finish_iso_transfers(isp1362_hcd,
&isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
start_iso_transfers(isp1362_hcd);
isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
}
if (irqstat & HCuPINT_INTL) {
u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
DBG(2, "%s: INTL\n", __func__);
svc_mask &= ~HCuPINT_INTL;
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
if (~(done_map | skip_map) == 0)
/* All PTDs are finished, disable INTL processing entirely */
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
handled = 1;
WARN_ON(!done_map);
if (done_map) {
DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
start_intl_transfers(isp1362_hcd);
}
}
if (irqstat & HCuPINT_ATL) {
u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
DBG(2, "%s: ATL\n", __func__);
svc_mask &= ~HCuPINT_ATL;
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
if (~(done_map | skip_map) == 0)
isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
if (done_map) {
DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
start_atl_transfers(isp1362_hcd);
}
handled = 1;
}
if (irqstat & HCuPINT_OPR) {
u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
svc_mask &= ~HCuPINT_OPR;
DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
intstat &= isp1362_hcd->intenb;
if (intstat & OHCI_INTR_UE) {
pr_err("Unrecoverable error\n");
/* FIXME: reset or clean up here, as appropriate */
}
if (intstat & OHCI_INTR_RHSC) {
isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
}
if (intstat & OHCI_INTR_RD) {
pr_info("%s: RESUME DETECTED\n", __func__);
isp1362_show_reg(isp1362_hcd, HCCONTROL);
usb_hcd_resume_root_hub(hcd);
}
isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
irqstat &= ~HCuPINT_OPR;
handled = 1;
}
if (irqstat & HCuPINT_SUSP) {
isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
handled = 1;
svc_mask &= ~HCuPINT_SUSP;
pr_info("%s: SUSPEND IRQ\n", __func__);
}
if (irqstat & HCuPINT_CLKRDY) {
isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
handled = 1;
isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
svc_mask &= ~HCuPINT_CLKRDY;
pr_info("%s: CLKRDY IRQ\n", __func__);
}
if (svc_mask)
pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
isp1362_hcd->irq_active--;
spin_unlock(&isp1362_hcd->lock);
return IRQ_RETVAL(handled);
}
/*-------------------------------------------------------------------------*/
#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
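/*
 * For example, a candidate branch i for an interval-8 endpoint would
 * reserve every 8th slot (i, i + 8, i + 16, ...) across the
 * PERIODIC_SIZE schedule; balance() picks the least loaded i whose
 * slots would all stay at or below MAX_PERIODIC_LOAD.
 */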
static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
{
int i, branch = -ENOSPC;
/* search for the least loaded schedule branch of that interval
* which has enough bandwidth left unreserved.
*/
for (i = 0; i < interval; i++) {
if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
int j;
for (j = i; j < PERIODIC_SIZE; j += interval) {
if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
break;
}
}
if (j < PERIODIC_SIZE)
continue;
branch = i;
}
}
return branch;
}
/*
 * NB! ALL the code above this point runs with isp1362_hcd->lock
 * held, irqs off.
 */
/*-------------------------------------------------------------------------*/
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
struct usb_device *udev = urb->dev;
unsigned int pipe = urb->pipe;
int is_out = !usb_pipein(pipe);
int type = usb_pipetype(pipe);
int epnum = usb_pipeendpoint(pipe);
struct usb_host_endpoint *hep = urb->ep;
struct isp1362_ep *ep = NULL;
unsigned long flags;
int retval = 0;
DBG(3, "%s: urb %p\n", __func__, urb);
if (type == PIPE_ISOCHRONOUS) {
pr_err("Isochronous transfers not supported\n");
return -ENOSPC;
}
URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
usb_pipedevice(pipe), epnum,
is_out ? "out" : "in",
usb_pipecontrol(pipe) ? "ctrl" :
usb_pipeint(pipe) ? "int" :
usb_pipebulk(pipe) ? "bulk" :
"iso",
urb->transfer_buffer_length,
(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
"short_ok" : "");
/* avoid all allocations within spinlocks: request or endpoint */
if (!hep->hcpriv) {
ep = kzalloc(sizeof(*ep), mem_flags);
if (!ep)
return -ENOMEM;
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
/* don't submit to a dead or disabled port */
if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
USB_PORT_STAT_ENABLE) ||
!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
retval = -ENODEV;
goto fail_not_linked;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval) {
kfree(ep);
goto fail_not_linked;
}
if (hep->hcpriv) {
ep = hep->hcpriv;
} else {
INIT_LIST_HEAD(&ep->schedule);
INIT_LIST_HEAD(&ep->active);
INIT_LIST_HEAD(&ep->remove_list);
ep->udev = usb_get_dev(udev);
ep->hep = hep;
ep->epnum = epnum;
ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->ptd_offset = -EINVAL;
ep->ptd_index = -EINVAL;
usb_settoggle(udev, epnum, is_out, 0);
if (type == PIPE_CONTROL)
ep->nextpid = USB_PID_SETUP;
else if (is_out)
ep->nextpid = USB_PID_OUT;
else
ep->nextpid = USB_PID_IN;
switch (type) {
case PIPE_ISOCHRONOUS:
case PIPE_INTERRUPT:
if (urb->interval > PERIODIC_SIZE)
urb->interval = PERIODIC_SIZE;
ep->interval = urb->interval;
ep->branch = PERIODIC_SIZE;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
type == PIPE_ISOCHRONOUS,
usb_maxpacket(udev, pipe)) / 1000;
break;
}
hep->hcpriv = ep;
}
ep->num_req = isp1362_hcd->req_serial++;
/* maybe put endpoint into schedule */
switch (type) {
case PIPE_CONTROL:
case PIPE_BULK:
if (list_empty(&ep->schedule)) {
DBG(1, "%s: Adding ep %p req %d to async schedule\n",
__func__, ep, ep->num_req);
list_add_tail(&ep->schedule, &isp1362_hcd->async);
}
break;
case PIPE_ISOCHRONOUS:
case PIPE_INTERRUPT:
urb->interval = ep->interval;
/* urb submitted for already existing EP */
if (ep->branch < PERIODIC_SIZE)
break;
retval = balance(isp1362_hcd, ep->interval, ep->load);
if (retval < 0) {
pr_err("%s: balance returned %d\n", __func__, retval);
goto fail;
}
ep->branch = retval;
retval = 0;
isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
__func__, isp1362_hcd->fmindex, ep->branch,
((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
~(PERIODIC_SIZE - 1)) + ep->branch,
(isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
if (list_empty(&ep->schedule)) {
if (type == PIPE_ISOCHRONOUS) {
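/*
 * Start at least 8 frames (or one interval) in the future,
 * aligned down to the endpoint's interval, with the branch
 * offset in the low bits; bump by one interval if that frame
 * is already in the past.
 */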
u16 frame = isp1362_hcd->fmindex;
frame += max_t(u16, 8, ep->interval);
frame &= ~(ep->interval - 1);
frame |= ep->branch;
if (frame_before(frame, isp1362_hcd->fmindex))
frame += ep->interval;
urb->start_frame = frame;
DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
} else {
DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
}
} else
DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
ep->load / ep->interval, isp1362_hcd->load[ep->branch],
isp1362_hcd->load[ep->branch] + ep->load);
isp1362_hcd->load[ep->branch] += ep->load;
}
urb->hcpriv = hep;
ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
switch (type) {
case PIPE_CONTROL:
case PIPE_BULK:
start_atl_transfers(isp1362_hcd);
break;
case PIPE_INTERRUPT:
start_intl_transfers(isp1362_hcd);
break;
case PIPE_ISOCHRONOUS:
start_iso_transfers(isp1362_hcd);
break;
default:
BUG();
}
fail:
if (retval)
usb_hcd_unlink_urb_from_ep(hcd, urb);
fail_not_linked:
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (retval)
DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
return retval;
}
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
struct usb_host_endpoint *hep;
unsigned long flags;
struct isp1362_ep *ep;
int retval = 0;
DBG(3, "%s: urb %p\n", __func__, urb);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (retval)
goto done;
hep = urb->hcpriv;
if (!hep) {
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return -EIDRM;
}
ep = hep->hcpriv;
if (ep) {
/* In front of queue? */
if (ep->hep->urb_list.next == &urb->urb_list) {
if (!list_empty(&ep->active)) {
DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
/* disable processing and queue PTD for removal */
remove_ptd(isp1362_hcd, ep);
urb = NULL;
}
}
if (urb) {
DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
ep->num_req);
finish_request(isp1362_hcd, ep, urb, status);
} else
DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
} else {
pr_warn("%s: No EP in URB %p\n", __func__, urb);
retval = -EINVAL;
}
done:
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
DBG(3, "%s: exit\n", __func__);
return retval;
}
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
struct isp1362_ep *ep = hep->hcpriv;
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
DBG(1, "%s: ep %p\n", __func__, ep);
if (!ep)
return;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
if (!list_empty(&hep->urb_list)) {
if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
remove_ptd(isp1362_hcd, ep);
pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
}
}
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
/* Wait for interrupt to clear out active list */
while (!list_empty(&ep->active))
msleep(1);
DBG(1, "%s: Freeing EP %p\n", __func__, ep);
usb_put_dev(ep->udev);
kfree(ep);
hep->hcpriv = NULL;
}
static int isp1362_get_frame(struct usb_hcd *hcd)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
u32 fmnum;
unsigned long flags;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return (int)fmnum;
}
/*-------------------------------------------------------------------------*/
/* Adapted from ohci-hub.c */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
int ports, i, changed = 0;
unsigned long flags;
if (!HC_IS_RUNNING(hcd->state))
return -ESHUTDOWN;
/*
 * Report no status change now if we are scheduled to be
 * called later.
 */
if (timer_pending(&hcd->rh_timer))
return 0;
ports = isp1362_hcd->rhdesca & RH_A_NDP;
BUG_ON(ports > 2);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
/* init status */
if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
buf[0] = changed = 1;
else
buf[0] = 0;
for (i = 0; i < ports; i++) {
u32 status = isp1362_hcd->rhport[i];
if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
RH_PS_OCIC | RH_PS_PRSC)) {
changed = 1;
buf[0] |= 1 << (i + 1);
continue;
}
if (!(status & RH_PS_CCS))
continue;
}
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return changed;
}
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
struct usb_hub_descriptor *desc)
{
u32 reg = isp1362_hcd->rhdesca;
DBG(3, "%s: enter\n", __func__);
desc->bDescriptorType = USB_DT_HUB;
desc->bDescLength = 9;
desc->bHubContrCurrent = 0;
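/* the low bits of HCRHDESCA (RH_A_NDP) hold the port count (at most 2) */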
desc->bNbrPorts = reg & 0x3;
/* Power switching, device type, overcurrent. */
desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
(HUB_CHAR_LPSM |
HUB_CHAR_COMPOUND |
HUB_CHAR_OCPM));
DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
desc->wHubCharacteristics);
desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
/* ports removable, and legacy PortPwrCtrlMask */
desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
DBG(3, "%s: exit\n", __func__);
}
/* Adapted from ohci-hub.c */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
int retval = 0;
unsigned long flags;
unsigned long t1;
int ports = isp1362_hcd->rhdesca & RH_A_NDP;
u32 tmp = 0;
switch (typeReq) {
case ClearHubFeature:
DBG(0, "ClearHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
DBG(0, "C_HUB_OVER_CURRENT\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case C_HUB_LOCAL_POWER:
DBG(0, "C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
}
break;
case SetHubFeature:
DBG(0, "SetHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
}
break;
case GetHubDescriptor:
DBG(0, "GetHubDescriptor\n");
isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
DBG(0, "GetHubStatus\n");
put_unaligned(cpu_to_le32(0), (__le32 *) buf);
break;
case GetPortStatus:
#ifndef VERBOSE
DBG(0, "GetPortStatus\n");
#endif
if (!wIndex || wIndex > ports)
goto error;
tmp = isp1362_hcd->rhport[--wIndex];
put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
break;
case ClearPortFeature:
DBG(0, "ClearPortFeature: ");
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
DBG(0, "USB_PORT_FEAT_ENABLE\n");
tmp = RH_PS_CCS;
break;
case USB_PORT_FEAT_C_ENABLE:
DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
tmp = RH_PS_PESC;
break;
case USB_PORT_FEAT_SUSPEND:
DBG(0, "USB_PORT_FEAT_SUSPEND\n");
tmp = RH_PS_POCI;
break;
case USB_PORT_FEAT_C_SUSPEND:
DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
tmp = RH_PS_PSSC;
break;
case USB_PORT_FEAT_POWER:
DBG(0, "USB_PORT_FEAT_POWER\n");
tmp = RH_PS_LSDA;
break;
case USB_PORT_FEAT_C_CONNECTION:
DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
tmp = RH_PS_CSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
tmp = RH_PS_OCIC;
break;
case USB_PORT_FEAT_C_RESET:
DBG(0, "USB_PORT_FEAT_C_RESET\n");
tmp = RH_PS_PRSC;
break;
default:
goto error;
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
isp1362_hcd->rhport[wIndex] =
isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case SetPortFeature:
DBG(0, "SetPortFeature: ");
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
DBG(0, "USB_PORT_FEAT_SUSPEND\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
isp1362_hcd->rhport[wIndex] =
isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_POWER:
DBG(0, "USB_PORT_FEAT_POWER\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
isp1362_hcd->rhport[wIndex] =
isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_RESET:
DBG(0, "USB_PORT_FEAT_RESET\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
while (time_before(jiffies, t1)) {
/* spin until any current reset finishes */
for (;;) {
tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
if (!(tmp & RH_PS_PRS))
break;
udelay(500);
}
if (!(tmp & RH_PS_CCS))
break;
/* The datasheet claims the reset lasts 10 ms */
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
msleep(10);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
}
isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
HCRHPORT1 + wIndex);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
default:
goto error;
}
break;
default:
error:
/* "protocol stall" on error */
DBG(0, "PROTOCOL STALL\n");
retval = -EPIPE;
}
return retval;
}
#ifdef CONFIG_PM
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
int status = 0;
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
if (time_before(jiffies, isp1362_hcd->next_statechange))
msleep(5);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_RESUME:
DBG(0, "%s: resume/suspend?\n", __func__);
isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
isp1362_hcd->hc_control |= OHCI_USB_RESET;
isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
fallthrough;
case OHCI_USB_RESET:
status = -EBUSY;
pr_warn("%s: needs reinit!\n", __func__);
goto done;
case OHCI_USB_SUSPEND:
pr_warn("%s: already suspended?\n", __func__);
goto done;
}
DBG(0, "%s: suspend root hub\n", __func__);
/* First stop any processing */
hcd->state = HC_STATE_QUIESCING;
if (!list_empty(&isp1362_hcd->atl_queue.active) ||
!list_empty(&isp1362_hcd->intl_queue.active) ||
!list_empty(&isp1362_hcd->istl_queue[0].active) ||
!list_empty(&isp1362_hcd->istl_queue[1].active)) {
int limit;
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
DBG(0, "%s: stopping schedules ...\n", __func__);
limit = 2000;
while (limit > 0) {
udelay(250);
limit -= 250;
if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
break;
}
mdelay(7);
if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
}
if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
}
if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
}
DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
/* Suspend hub */
isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
isp1362_show_reg(isp1362_hcd, HCCONTROL);
isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
isp1362_show_reg(isp1362_hcd, HCCONTROL);
#if 1
isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
pr_err("%s: controller won't suspend %08x\n", __func__,
isp1362_hcd->hc_control);
status = -EBUSY;
} else
#endif
{
/* no resumes until devices finish suspending */
isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
}
done:
if (status == 0) {
hcd->state = HC_STATE_SUSPENDED;
DBG(0, "%s: HCD suspended: %08x\n", __func__,
isp1362_read_reg32(isp1362_hcd, HCCONTROL));
}
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return status;
}
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
u32 port;
unsigned long flags;
int status = -EINPROGRESS;
if (time_before(jiffies, isp1362_hcd->next_statechange))
msleep(5);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
if (hcd->state == HC_STATE_RESUMING) {
pr_warn("%s: duplicate resume\n", __func__);
status = 0;
} else
switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_SUSPEND:
DBG(0, "%s: resume root hub\n", __func__);
isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
isp1362_hcd->hc_control |= OHCI_USB_RESUME;
isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
break;
case OHCI_USB_RESUME:
/* HCFS changes sometime after INTR_RD */
DBG(0, "%s: remote wakeup\n", __func__);
break;
case OHCI_USB_OPER:
DBG(0, "%s: odd resume\n", __func__);
status = 0;
hcd->self.root_hub->dev.power.power_state = PMSG_ON;
break;
default: /* RESET, we lost power */
DBG(0, "%s: root hub hardware reset\n", __func__);
status = -EBUSY;
}
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (status == -EBUSY) {
DBG(0, "%s: Restarting HC\n", __func__);
isp1362_hc_stop(hcd);
return isp1362_hc_start(hcd);
}
if (status != -EINPROGRESS)
return status;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
while (port--) {
u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
/* force global, not selective, resume */
if (!(stat & RH_PS_PSS)) {
DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
continue;
}
DBG(0, "%s: Resuming RH port %d\n", __func__, port);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
}
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
/* Some controllers (lucent) need extra-long delays */
hcd->state = HC_STATE_RESUMING;
mdelay(20 /* usb 11.5.1.10 */ + 15);
isp1362_hcd->hc_control = OHCI_USB_OPER;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_show_reg(isp1362_hcd, HCCONTROL);
isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
/* TRSMRCY */
msleep(10);
/* keep it alive for ~5x suspend + resume costs */
isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
hcd->self.root_hub->dev.power.power_state = PMSG_ON;
hcd->state = HC_STATE_RUNNING;
return 0;
}
#else
#define isp1362_bus_suspend NULL
#define isp1362_bus_resume NULL
#endif
/*-------------------------------------------------------------------------*/
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
mask & HCuPINT_CLKRDY ? " clkrdy" : "",
mask & HCuPINT_SUSP ? " susp" : "",
mask & HCuPINT_OPR ? " opr" : "",
mask & HCuPINT_EOT ? " eot" : "",
mask & HCuPINT_ATL ? " atl" : "",
mask & HCuPINT_SOF ? " sof" : "");
}
static void dump_int(struct seq_file *s, char *label, u32 mask)
{
seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
mask & OHCI_INTR_MIE ? " MIE" : "",
mask & OHCI_INTR_RHSC ? " rhsc" : "",
mask & OHCI_INTR_FNO ? " fno" : "",
mask & OHCI_INTR_UE ? " ue" : "",
mask & OHCI_INTR_RD ? " rd" : "",
mask & OHCI_INTR_SF ? " sof" : "",
mask & OHCI_INTR_SO ? " so" : "");
}
static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
{
seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
mask & OHCI_CTRL_RWC ? " rwc" : "",
mask & OHCI_CTRL_RWE ? " rwe" : "",
({
char *hcfs;
switch (mask & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
hcfs = " oper";
break;
case OHCI_USB_RESET:
hcfs = " reset";
break;
case OHCI_USB_RESUME:
hcfs = " resume";
break;
case OHCI_USB_SUSPEND:
hcfs = " suspend";
break;
default:
hcfs = " ?";
}
hcfs;
}));
}
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
isp1362_read_reg32(isp1362_hcd, HCREVISION));
seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
isp1362_read_reg32(isp1362_hcd, HCCONTROL));
seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
isp1362_read_reg32(isp1362_hcd, HCINTENB));
seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
isp1362_read_reg32(isp1362_hcd, HCFMREM));
seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
isp1362_read_reg32(isp1362_hcd, HCFMNUM));
seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
seq_printf(s, "\n");
seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
isp1362_read_reg16(isp1362_hcd, HCHWCFG));
seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
isp1362_read_reg16(isp1362_hcd, HCDMACFG));
seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
isp1362_read_reg16(isp1362_hcd, HCuPINT));
seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
isp1362_read_reg16(isp1362_hcd, HCCHIPID));
seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
seq_printf(s, "\n");
seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
seq_printf(s, "\n");
seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
isp1362_read_reg32(isp1362_hcd, HCATLLAST));
seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
isp1362_read_reg16(isp1362_hcd, HCATLCURR));
seq_printf(s, "\n");
seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
isp1362_read_reg16(isp1362_hcd, HCATLDTC));
seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
static int isp1362_show(struct seq_file *s, void *unused)
{
struct isp1362_hcd *isp1362_hcd = s->private;
struct isp1362_ep *ep;
int i;
seq_printf(s, "%s\n%s version %s\n",
isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
/* collect statistics to help estimate potential win for
* DMA engines that care about alignment (PXA)
*/
seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
isp1362_hcd->stat2, isp1362_hcd->stat1);
seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
seq_printf(s, "max # ptds in ISTL fifo: %d\n",
max(isp1362_hcd->istl_queue[0].stat_maxptds,
isp1362_hcd->istl_queue[1].stat_maxptds));
/* FIXME: don't show the following in suspended state */
spin_lock_irq(&isp1362_hcd->lock);
dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
for (i = 0; i < NUM_ISP1362_IRQS; i++)
if (isp1362_hcd->irq_stat[i])
seq_printf(s, "%-15s: %d\n",
ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
dump_regs(s, isp1362_hcd);
list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
struct urb *urb;
seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
({
char *s;
switch (ep->nextpid) {
case USB_PID_IN:
s = "in";
break;
case USB_PID_OUT:
s = "out";
break;
case USB_PID_SETUP:
s = "setup";
break;
case USB_PID_ACK:
s = "status";
break;
default:
s = "?";
break;
}
s; }), ep->maxpacket);
list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
seq_printf(s, " urb%p, %d/%d\n", urb,
urb->actual_length,
urb->transfer_buffer_length);
}
}
if (!list_empty(&isp1362_hcd->async))
seq_printf(s, "\n");
dump_ptd_queue(&isp1362_hcd->atl_queue);
seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
ep->interval, ep,
(ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
ep->udev->devnum, ep->epnum,
(ep->epnum == 0) ? "" :
((ep->nextpid == USB_PID_IN) ?
"in" : "out"), ep->maxpacket);
}
dump_ptd_queue(&isp1362_hcd->intl_queue);
seq_printf(s, "ISO:\n");
list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
ep->interval, ep,
(ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
ep->udev->devnum, ep->epnum,
(ep->epnum == 0) ? "" :
((ep->nextpid == USB_PID_IN) ?
"in" : "out"), ep->maxpacket);
}
spin_unlock_irq(&isp1362_hcd->lock);
seq_printf(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(isp1362);
/* expect just one isp1362_hcd per system */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
debugfs_create_file("isp1362", S_IRUGO, usb_debug_root, isp1362_hcd,
&isp1362_fops);
}
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
debugfs_lookup_and_remove("isp1362", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
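/*
 * Write the magic value to HCSWRES, issue an OHCI host controller
 * reset via HCCMDSTAT, then poll for up to ~20 ms until the HCR bit
 * clears.
 */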
static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
int tmp = 20;
isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
while (--tmp) {
mdelay(1);
if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
break;
}
if (!tmp)
pr_err("Software reset timeout\n");
}
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
unsigned long flags;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
__isp1362_sw_reset(isp1362_hcd);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
static int isp1362_mem_config(struct usb_hcd *hcd)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
u32 total;
u16 istl_size = ISP1362_ISTL_BUFSIZE;
u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
u16 atl_size;
int i;
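/*
 * Carve up the chip's internal buffer memory (ISP1362_BUF_SIZE bytes):
 * two ISTL ping-pong buffers at offset 0, then ISP1362_INTL_BUFFERS
 * INTL blocks, and whatever remains as ATL blocks, capped at 32 to
 * match the width of the done/skip bitmaps.
 */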
WARN_ON(istl_size & 3);
WARN_ON(atl_blksize & 3);
WARN_ON(intl_blksize & 3);
WARN_ON(atl_blksize < PTD_HEADER_SIZE);
WARN_ON(intl_blksize < PTD_HEADER_SIZE);
BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
if (atl_buffers > 32)
atl_buffers = 32;
atl_size = atl_buffers * atl_blksize;
total = atl_size + intl_size + istl_size;
dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
istl_size / 2, istl_size, 0, istl_size / 2);
dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
intl_size, istl_size);
dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
atl_buffers, atl_blksize - PTD_HEADER_SIZE,
atl_size, istl_size + intl_size);
dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
ISP1362_BUF_SIZE - total);
if (total > ISP1362_BUF_SIZE) {
dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
__func__, total, ISP1362_BUF_SIZE);
return -ENOMEM;
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
for (i = 0; i < 2; i++) {
isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
isp1362_hcd->istl_queue[i].blk_size = 4;
INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
snprintf(isp1362_hcd->istl_queue[i].name,
sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
DBG(3, "%s: %5s buf $%04x %d\n", __func__,
isp1362_hcd->istl_queue[i].name,
isp1362_hcd->istl_queue[i].buf_start,
isp1362_hcd->istl_queue[i].buf_size);
}
isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
isp1362_hcd->intl_queue.buf_start = istl_size;
isp1362_hcd->intl_queue.buf_size = intl_size;
isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
isp1362_hcd->intl_queue.blk_size = intl_blksize;
isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
isp1362_hcd->intl_queue.skip_map = ~0;
INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
isp1362_hcd->intl_queue.buf_size);
isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
1 << (ISP1362_INTL_BUFFERS - 1));
isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
isp1362_hcd->atl_queue.buf_size = atl_size;
isp1362_hcd->atl_queue.buf_count = atl_buffers;
isp1362_hcd->atl_queue.blk_size = atl_blksize;
isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
isp1362_hcd->atl_queue.skip_map = ~0;
INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
isp1362_hcd->atl_queue.buf_size);
isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
isp1362_write_reg32(isp1362_hcd, HCATLLAST,
1 << (atl_buffers - 1));
snprintf(isp1362_hcd->atl_queue.name,
sizeof(isp1362_hcd->atl_queue.name), "ATL");
snprintf(isp1362_hcd->intl_queue.name,
sizeof(isp1362_hcd->intl_queue.name), "INTL");
DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
isp1362_hcd->intl_queue.name,
isp1362_hcd->intl_queue.buf_start,
ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
isp1362_hcd->intl_queue.buf_size);
DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
isp1362_hcd->atl_queue.name,
isp1362_hcd->atl_queue.buf_start,
atl_buffers, isp1362_hcd->atl_queue.blk_size,
isp1362_hcd->atl_queue.buf_size);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return 0;
}
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
int ret = 0;
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long t;
unsigned long timeout = 100;
unsigned long flags;
int clkrdy = 0;
pr_debug("%s:\n", __func__);
if (isp1362_hcd->board && isp1362_hcd->board->reset) {
isp1362_hcd->board->reset(hcd->self.controller, 1);
msleep(20);
if (isp1362_hcd->board->clock)
isp1362_hcd->board->clock(hcd->self.controller, 1);
isp1362_hcd->board->reset(hcd->self.controller, 0);
} else
isp1362_sw_reset(isp1362_hcd);
/* chip has been reset. First we need to see a clock */
t = jiffies + msecs_to_jiffies(timeout);
while (!clkrdy && time_before_eq(jiffies, t)) {
spin_lock_irqsave(&isp1362_hcd->lock, flags);
clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (!clkrdy)
msleep(4);
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (!clkrdy) {
pr_err("Clock not ready after %lums\n", timeout);
ret = -ENODEV;
}
return ret;
}
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
u32 tmp;
pr_debug("%s:\n", __func__);
del_timer_sync(&hcd->rh_timer);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
/* Switch off power for all ports */
tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
tmp &= ~(RH_A_NPS | RH_A_PSM);
isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
/* Reset the chip */
if (isp1362_hcd->board && isp1362_hcd->board->reset)
isp1362_hcd->board->reset(hcd->self.controller, 1);
else
__isp1362_sw_reset(isp1362_hcd);
if (isp1362_hcd->board && isp1362_hcd->board->clock)
isp1362_hcd->board->clock(hcd->self.controller, 0);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
#ifdef CHIP_BUFFER_TEST
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
int ret = 0;
u16 *ref;
unsigned long flags;
ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
if (ref) {
int offset;
u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
ref[offset] = ~offset;
tst[offset] = offset;
}
for (offset = 0; offset < 4; offset++) {
int j;
for (j = 0; j < 8; j++) {
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (memcmp(ref, tst, j)) {
ret = -ENODEV;
pr_err("%s: memory check with %d byte offset %d failed\n",
__func__, j, offset);
dump_data((u8 *)ref + offset, j);
dump_data((u8 *)tst + offset, j);
}
}
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
ret = -ENODEV;
pr_err("%s: memory check failed\n", __func__);
dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
}
for (offset = 0; offset < 256; offset++) {
int test_size = 0;
yield();
memset(tst, 0, ISP1362_BUF_SIZE);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
ISP1362_BUF_SIZE / 2)) {
pr_err("%s: Failed to clear buffer\n", __func__);
dump_data((u8 *)tst, ISP1362_BUF_SIZE);
break;
}
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
offset * 2 + PTD_HEADER_SIZE, test_size);
isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
PTD_HEADER_SIZE + test_size);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
PTD_HEADER_SIZE + test_size);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
ret = -ENODEV;
pr_err("%s: memory check with offset %02x failed\n",
__func__, offset);
break;
}
pr_warn("%s: memory check with offset %02x ok after second read\n",
__func__, offset);
}
}
kfree(ref);
}
return ret;
}
#endif
static int isp1362_hc_start(struct usb_hcd *hcd)
{
int ret;
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
struct isp1362_platform_data *board = isp1362_hcd->board;
u16 hwcfg;
u16 chipid;
unsigned long flags;
pr_debug("%s:\n", __func__);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
return -ENODEV;
}
#ifdef CHIP_BUFFER_TEST
ret = isp1362_chip_test(isp1362_hcd);
if (ret)
return -ENODEV;
#endif
spin_lock_irqsave(&isp1362_hcd->lock, flags);
/* clear interrupt status and disable all interrupt sources */
isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
/* HW conf */
hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
if (board->sel15Kres)
hwcfg |= HCHWCFG_PULLDOWN_DS2 |
((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
if (board->clknotstop)
hwcfg |= HCHWCFG_CLKNOTSTOP;
if (board->oc_enable)
hwcfg |= HCHWCFG_ANALOG_OC;
if (board->int_act_high)
hwcfg |= HCHWCFG_INT_POL;
if (board->int_edge_triggered)
hwcfg |= HCHWCFG_INT_TRIGGER;
if (board->dreq_act_high)
hwcfg |= HCHWCFG_DREQ_POL;
if (board->dack_act_high)
hwcfg |= HCHWCFG_DACK_POL;
isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
isp1362_show_reg(isp1362_hcd, HCHWCFG);
isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
ret = isp1362_mem_config(hcd);
if (ret)
return ret;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
/* Root hub conf */
isp1362_hcd->rhdesca = 0;
if (board->no_power_switching)
isp1362_hcd->rhdesca |= RH_A_NPS;
if (board->power_switching_mode)
isp1362_hcd->rhdesca |= RH_A_PSM;
if (board->potpg)
isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
else
isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
isp1362_hcd->rhdescb = RH_B_PPCM;
isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
isp1362_hcd->hc_control = OHCI_USB_OPER;
hcd->state = HC_STATE_RUNNING;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
/* Set up interrupts */
isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
isp1362_hcd->intenb |= OHCI_INTR_RD;
isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
/* Go operational */
isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
/* enable global power */
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static const struct hc_driver isp1362_hc_driver = {
.description = hcd_name,
.product_desc = "ISP1362 Host Controller",
.hcd_priv_size = sizeof(struct isp1362_hcd),
.irq = isp1362_irq,
.flags = HCD_USB11 | HCD_MEMORY,
.reset = isp1362_hc_reset,
.start = isp1362_hc_start,
.stop = isp1362_hc_stop,
.urb_enqueue = isp1362_urb_enqueue,
.urb_dequeue = isp1362_urb_dequeue,
.endpoint_disable = isp1362_endpoint_disable,
.get_frame_number = isp1362_get_frame,
.hub_status_data = isp1362_hub_status_data,
.hub_control = isp1362_hub_control,
.bus_suspend = isp1362_bus_suspend,
.bus_resume = isp1362_bus_resume,
};
/*-------------------------------------------------------------------------*/
static void isp1362_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
remove_debug_file(isp1362_hcd);
DBG(0, "%s: Removing HCD\n", __func__);
usb_remove_hcd(hcd);
DBG(0, "%s: put_hcd\n", __func__);
usb_put_hcd(hcd);
DBG(0, "%s: Done\n", __func__);
}
static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
struct resource *data, *irq_res;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
int retval = 0;
unsigned int irq_flags = 0;
if (usb_disabled())
return -ENODEV;
/* Basic sanity checks first. Board-specific init logic should
 * have initialized these three resources and probably board-
 * specific platform_data. We don't probe for IRQs, and do only
 * minimal sanity checking.
 */
if (pdev->num_resources < 3)
return -ENODEV;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res)
return -ENODEV;
irq = irq_res->start;
addr_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
data_reg = devm_platform_get_and_ioremap_resource(pdev, 0, &data);
if (IS_ERR(data_reg))
return PTR_ERR(data_reg);
/* allocate and initialize hcd */
hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = data->start;
isp1362_hcd = hcd_to_isp1362_hcd(hcd);
isp1362_hcd->data_reg = data_reg;
isp1362_hcd->addr_reg = addr_reg;
isp1362_hcd->next_statechange = jiffies;
spin_lock_init(&isp1362_hcd->lock);
INIT_LIST_HEAD(&isp1362_hcd->async);
INIT_LIST_HEAD(&isp1362_hcd->periodic);
INIT_LIST_HEAD(&isp1362_hcd->isoc);
INIT_LIST_HEAD(&isp1362_hcd->remove_list);
isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
if (!isp1362_hcd->board->delay) {
dev_err(hcd->self.controller, "No platform delay function given\n");
retval = -ENODEV;
goto err;
}
#endif
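/* translate the IRQ resource's trigger flags into IRQF_* equivalents */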
if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_flags |= IRQF_TRIGGER_RISING;
if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
irq_flags |= IRQF_TRIGGER_FALLING;
if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
irq_flags |= IRQF_TRIGGER_HIGH;
if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
irq_flags |= IRQF_TRIGGER_LOW;
retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
if (retval != 0)
goto err;
device_wakeup_enable(hcd->self.controller);
dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
create_debug_file(isp1362_hcd);
return 0;
err:
usb_put_hcd(hcd);
return retval;
}
#ifdef CONFIG_PM
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
int retval = 0;
DBG(0, "%s: Suspending device\n", __func__);
if (state.event == PM_EVENT_FREEZE) {
DBG(0, "%s: Suspending root hub\n", __func__);
retval = isp1362_bus_suspend(hcd);
} else {
DBG(0, "%s: Suspending RH ports\n", __func__);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
if (retval == 0)
pdev->dev.power.power_state = state;
return retval;
}
static int isp1362_resume(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
DBG(0, "%s: Resuming\n", __func__);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
DBG(0, "%s: Resume RH ports\n", __func__);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return 0;
}
pdev->dev.power.power_state = PMSG_ON;
return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
#else
#define isp1362_suspend NULL
#define isp1362_resume NULL
#endif
static struct platform_driver isp1362_driver = {
.probe = isp1362_probe,
.remove_new = isp1362_remove,
.suspend = isp1362_suspend,
.resume = isp1362_resume,
.driver = {
.name = hcd_name,
},
};
module_platform_driver(isp1362_driver);
| linux-master | drivers/usb/host/isp1362-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC
*
* Copyright (c) 2011 Jan Andersson <[email protected]>
*
* This file is based on UHCI PCI HCD:
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, [email protected]
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, [email protected]
* (C) Copyright 1999 Deti Fliegl, [email protected]
* (C) Copyright 1999 Thomas Sailer, [email protected]
* (C) Copyright 1999 Roman Weissgaerber, [email protected]
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, [email protected]).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
* (C) Copyright 2004-2007 Alan Stern, [email protected]
*/
#include <linux/device.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
/*
* Probe to determine the endianness of the controller.
* We know that bit 7 of the PORTSC1 register is always set
* and bit 15 is always clear. If uhci_readw() yields a value
* with bit 7 (0x80) turned on then the current little-endian
* setting is correct. Otherwise we assume the value was
* byte-swapped; hence the register interface and presumably
* also the descriptors are big-endian.
*/
if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) {
uhci->big_endian_mmio = 1;
uhci->big_endian_desc = 1;
}
uhci->rh_numports = uhci_count_ports(hcd);
/* Set up pointers to generic functions */
uhci->reset_hc = uhci_generic_reset_hc;
uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
/* No special actions need to be taken for the functions below */
uhci->configure_hc = NULL;
uhci->resume_detect_interrupts_are_broken = NULL;
uhci->global_suspend_mode_is_broken = NULL;
/* Reset if the controller isn't already safely quiescent. */
check_and_reset_hc(uhci);
return 0;
}
static const struct hc_driver uhci_grlib_hc_driver = {
.description = hcd_name,
.product_desc = "GRLIB GRUSBHC UHCI Host Controller",
.hcd_priv_size = sizeof(struct uhci_hcd),
/* Generic hardware linkage */
.irq = uhci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_grlib_init,
.start = uhci_start,
#ifdef CONFIG_PM
.pci_suspend = NULL,
.pci_resume = NULL,
.bus_suspend = uhci_rh_suspend,
.bus_resume = uhci_rh_resume,
#endif
.stop = uhci_stop,
.urb_enqueue = uhci_urb_enqueue,
.urb_dequeue = uhci_urb_dequeue,
.endpoint_disable = uhci_hcd_endpoint_disable,
.get_frame_number = uhci_hcd_get_frame_number,
.hub_status_data = uhci_hub_status_data,
.hub_control = uhci_hub_control,
};
static int uhci_hcd_grlib_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct uhci_hcd *uhci = NULL;
struct resource res;
int irq;
int rv;
if (usb_disabled())
return -ENODEV;
dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n");
rv = of_address_to_resource(dn, 0, &res);
if (rv)
return rv;
/* usb_create_hcd requires dma_mask != NULL */
op->dev.dma_mask = &op->dev.coherent_dma_mask;
hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev,
"GRUSBHC UHCI USB");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_usb;
}
hcd->regs = devm_ioremap_resource(&op->dev, &res);
if (IS_ERR(hcd->regs)) {
rv = PTR_ERR(hcd->regs);
goto err_irq;
}
uhci = hcd_to_uhci(hcd);
uhci->regs = hcd->regs;
rv = usb_add_hcd(hcd, irq, 0);
if (rv)
goto err_irq;
device_wakeup_enable(hcd->self.controller);
return 0;
err_irq:
irq_dispose_mapping(irq);
err_usb:
usb_put_hcd(hcd);
return rv;
}
static void uhci_hcd_grlib_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");
usb_remove_hcd(hcd);
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
}
/* Make sure the controller is quiescent and that we're not using it
* any more. This is mainly for the benefit of programs which, like kexec,
* expect the hardware to be idle: not doing DMA or generating IRQs.
*
* This routine may be called in a damaged or failing kernel. Hence we
* do not acquire the spinlock before shutting down the controller.
*/
static void uhci_hcd_grlib_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
uhci_hc_died(hcd_to_uhci(hcd));
}
static const struct of_device_id uhci_hcd_grlib_of_match[] = {
{ .name = "GAISLER_UHCI", },
{ .name = "01_027", },
{},
};
MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);
static struct platform_driver uhci_grlib_driver = {
.probe = uhci_hcd_grlib_probe,
.remove_new = uhci_hcd_grlib_remove,
.shutdown = uhci_hcd_grlib_shutdown,
.driver = {
.name = "grlib-uhci",
.of_match_table = uhci_hcd_grlib_of_match,
},
};
| linux-master | drivers/usb/host/uhci-grlib.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2005-2009 MontaVista Software, Inc.
* Copyright 2008,2012,2015 Freescale Semiconductor, Inc.
*
* Ported to 834x by Randy Vinson <[email protected]> using code provided
* by Hunter Wu.
* Power Management support by Dave Liu <[email protected]>,
* Jerry Huang <[email protected]> and
* Anton Vorontsov <[email protected]>.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/usb.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/of.h>
#include <linux/io.h>
#include "ehci.h"
#include "ehci-fsl.h"
#define DRIVER_DESC "Freescale EHCI Host controller driver"
#define DRV_NAME "fsl-ehci"
static struct hc_driver __read_mostly fsl_ehci_hc_driver;
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/*
* fsl_ehci_drv_probe - initialize FSL-based HCDs
* @pdev: USB Host Controller being probed
*
* Context: task context, might sleep
*
* Allocates basic resources for this USB host controller.
*/
static int fsl_ehci_drv_probe(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata;
struct usb_hcd *hcd;
struct resource *res;
int irq;
int retval;
u32 tmp;
pr_debug("initializing FSL-SOC USB Controller\n");
/* Need platform data for setup */
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev,
"No platform data for %s.\n", dev_name(&pdev->dev));
return -ENODEV;
}
/*
* This is a host mode driver, verify that we're supposed to be
* in host mode.
*/
if (!((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_MPH_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))) {
dev_err(&pdev->dev,
"Non Host Mode configured for %s. Wrong driver linked.\n",
dev_name(&pdev->dev));
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
&pdev->dev, dev_name(&pdev->dev), NULL);
if (!hcd) {
retval = -ENOMEM;
goto err1;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
pdata->regs = hcd->regs;
if (pdata->power_budget)
hcd->power_budget = pdata->power_budget;
/*
* do platform specific init: check the clock, grab/config pins, etc.
*/
if (pdata->init && pdata->init(pdev)) {
retval = -ENODEV;
goto err2;
}
/* Enable USB controller, 83xx or 8536 */
if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6) {
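/* mask the W1C bits first so this read-modify-write does not
 * inadvertently acknowledge latched status bits
 */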
tmp = ioread32be(hcd->regs + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= 0x4;
iowrite32be(tmp, hcd->regs + FSL_SOC_USB_CTRL);
}
/* Set USB_EN bit to select ULPI phy for USB controller version 2.5 */
if (pdata->controller_ver == FSL_USB_VER_2_5 &&
pdata->phy_mode == FSL_USB2_PHY_ULPI)
iowrite32be(USB_CTRL_USB_EN, hcd->regs + FSL_SOC_USB_CTRL);
/*
* Enable UTMI phy and program PTS field in UTMI mode before asserting
* controller reset for USB Controller version 2.5
*/
if (pdata->has_fsl_erratum_a007792) {
tmp = ioread32be(hcd->regs + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= CTRL_UTMI_PHY_EN;
iowrite32be(tmp, hcd->regs + FSL_SOC_USB_CTRL);
writel(PORT_PTS_UTMI, hcd->regs + FSL_SOC_USB_PORTSC1);
}
/* Don't need to set host mode here. It will be done by tdi_reset() */
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err2;
device_wakeup_enable(hcd->self.controller);
#ifdef CONFIG_USB_OTG
if (pdata->operating_mode == FSL_USB2_DR_OTG) {
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
hcd, ehci, hcd->usb_phy);
if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
retval = otg_set_host(hcd->usb_phy->otg,
&ehci_to_hcd(ehci)->self);
if (retval) {
usb_put_phy(hcd->usb_phy);
goto err2;
}
} else {
dev_err(&pdev->dev, "can't find phy\n");
retval = -ENODEV;
goto err2;
}
hcd->skip_phy_initialization = 1;
}
#endif
return retval;
err2:
usb_put_hcd(hcd);
err1:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
if (pdata->exit)
pdata->exit(pdev);
return retval;
}
static bool usb_phy_clk_valid(struct usb_hcd *hcd)
{
	void __iomem *non_ehci = hcd->regs;
	return ioread32be(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID;
}
static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
enum fsl_usb2_phy_modes phy_mode,
unsigned int port_offset)
{
u32 portsc, tmp;
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
struct device *dev = hcd->self.controller;
struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
if (pdata->controller_ver < 0) {
dev_warn(hcd->self.controller, "Could not get controller version\n");
return -ENODEV;
}
portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
switch (phy_mode) {
case FSL_USB2_PHY_ULPI:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
/* turn off UTMI PHY first */
tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
tmp &= ~(CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
/* then turn on ULPI and enable USB controller */
tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN;
iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
}
portsc |= PORT_PTS_ULPI;
break;
case FSL_USB2_PHY_SERIAL:
portsc |= PORT_PTS_SERIAL;
break;
case FSL_USB2_PHY_UTMI_WIDE:
portsc |= PORT_PTS_PTW;
fallthrough;
case FSL_USB2_PHY_UTMI:
		/*
		 * The "has_fsl_erratum_a006918" flag, set from the device
		 * tree, marks parts whose UTMI PHY clock is unusable, so
		 * stop USB controller initialization here.
		 */
if (pdata->has_fsl_erratum_a006918) {
dev_warn(dev, "USB PHY clock invalid\n");
return -EINVAL;
}
fallthrough;
case FSL_USB2_PHY_UTMI_DUAL:
		/*
		 * The PHY_CLK_VALID bit is not implemented on controller
		 * versions below 2.4 and is meaningful only for the
		 * internal UTMI PHY.
		 */
if (pdata->controller_ver > FSL_USB_VER_2_4 &&
pdata->have_sysif_regs && !usb_phy_clk_valid(hcd)) {
dev_err(dev, "USB PHY clock invalid\n");
return -EINVAL;
}
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= UTMI_PHY_EN;
iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
			/* Wait for the UTMI PHY clock to become stable (10 ms) */
			mdelay(FSL_UTMI_PHY_DLY);
}
/* enable UTMI PHY */
if (pdata->have_sysif_regs) {
tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= CTRL_UTMI_PHY_EN;
iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
}
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
break;
}
if (pdata->have_sysif_regs &&
pdata->controller_ver > FSL_USB_VER_1_6 &&
!usb_phy_clk_valid(hcd)) {
dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
return -EINVAL;
}
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs) {
tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
tmp &= ~CONTROL_REGISTER_W1C_MASK;
tmp |= USB_CTRL_USB_EN;
iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
}
return 0;
}
static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
{
struct usb_hcd *hcd = ehci_to_hcd(ehci);
struct fsl_usb2_platform_data *pdata;
void __iomem *non_ehci = hcd->regs;
pdata = dev_get_platdata(hcd->self.controller);
if (pdata->have_sysif_regs) {
		/*
		 * Turn on cache snooping hardware, since some PowerPC
		 * platforms rely entirely on hardware for cache coherency.
		 */
/* Setup Snooping for all the 4GB space */
/* SNOOP1 starts from 0x0, size 2G */
iowrite32be(0x0 | SNOOP_SIZE_2GB,
non_ehci + FSL_SOC_USB_SNOOP1);
/* SNOOP2 starts from 0x80000000, size 2G */
iowrite32be(0x80000000 | SNOOP_SIZE_2GB,
non_ehci + FSL_SOC_USB_SNOOP2);
}
/* Deal with USB erratum A-005275 */
if (pdata->has_fsl_erratum_a005275 == 1)
ehci->has_fsl_hs_errata = 1;
if (pdata->has_fsl_erratum_a005697 == 1)
ehci->has_fsl_susp_errata = 1;
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
return -EINVAL;
if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
if (pdata->has_fsl_erratum_14 == 1)
ehci->has_fsl_port_bug = 1;
if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
return -EINVAL;
if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 1))
return -EINVAL;
}
if (pdata->have_sysif_regs) {
#ifdef CONFIG_FSL_SOC_BOOKE
iowrite32be(0x00000008, non_ehci + FSL_SOC_USB_PRICTRL);
iowrite32be(0x00000080, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
#else
iowrite32be(0x0000000c, non_ehci + FSL_SOC_USB_PRICTRL);
iowrite32be(0x00000040, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
#endif
iowrite32be(0x00000001, non_ehci + FSL_SOC_USB_SICTRL);
}
return 0;
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_fsl_reinit(struct ehci_hcd *ehci)
{
if (ehci_fsl_usb_setup(ehci))
return -EINVAL;
return 0;
}
/* called during probe() after chip reset completes */
static int ehci_fsl_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
struct fsl_usb2_platform_data *pdata;
struct device *dev;
dev = hcd->self.controller;
pdata = dev_get_platdata(hcd->self.controller);
ehci->big_endian_desc = pdata->big_endian_desc;
ehci->big_endian_mmio = pdata->big_endian_mmio;
/* EHCI registers start at offset 0x100 */
ehci->caps = hcd->regs + 0x100;
#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_85xx)
/*
* Deal with MPC834X/85XX that need port power to be cycled
* after the power fault condition is removed. Otherwise the
* state machine does not reflect PORTSC[CSC] correctly.
*/
ehci->need_oc_pp_cycle = 1;
#endif
hcd->has_tt = 1;
retval = ehci_setup(hcd);
if (retval)
return retval;
if (of_device_is_compatible(dev->parent->of_node,
"fsl,mpc5121-usb2-dr")) {
/*
* set SBUSCFG:AHBBRST so that control msgs don't
* fail when doing heavy PATA writes.
*/
ehci_writel(ehci, SBUSCFG_INCR8,
hcd->regs + FSL_SOC_USB_SBUSCFG);
}
retval = ehci_fsl_reinit(ehci);
return retval;
}
struct ehci_fsl {
struct ehci_hcd ehci;
#ifdef CONFIG_PM
/* Saved USB PHY settings, need to restore after deep sleep. */
u32 usb_ctrl;
#endif
};
#ifdef CONFIG_PM
#ifdef CONFIG_PPC_MPC512x
static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
u32 tmp;
#ifdef CONFIG_DYNAMIC_DEBUG
u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
mode &= USBMODE_CM_MASK;
tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */
	dev_dbg(dev, "suspend=%d already_suspended=%d mode=%d usbcmd %08x\n",
		pdata->suspended, pdata->already_suspended, mode, tmp);
#endif
/*
* If the controller is already suspended, then this must be a
* PM suspend. Remember this fact, so that we will leave the
* controller suspended at PM resume time.
*/
if (pdata->suspended) {
dev_dbg(dev, "already suspended, leaving early\n");
pdata->already_suspended = 1;
return 0;
}
dev_dbg(dev, "suspending...\n");
ehci->rh_state = EHCI_RH_SUSPENDED;
dev->power.power_state = PMSG_SUSPEND;
/* ignore non-host interrupts */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* stop the controller */
tmp = ehci_readl(ehci, &ehci->regs->command);
tmp &= ~CMD_RUN;
ehci_writel(ehci, tmp, &ehci->regs->command);
/* save EHCI registers */
pdata->pm_command = ehci_readl(ehci, &ehci->regs->command);
pdata->pm_command &= ~CMD_RUN;
pdata->pm_status = ehci_readl(ehci, &ehci->regs->status);
pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable);
pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index);
pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment);
pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list);
pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next);
pdata->pm_configured_flag =
ehci_readl(ehci, &ehci->regs->configured_flag);
pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);
pdata->pm_usbgenctrl = ehci_readl(ehci,
hcd->regs + FSL_SOC_USB_USBGENCTRL);
/* clear the W1C bits */
pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);
pdata->suspended = 1;
/* clear PP to cut power to the port */
tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
tmp &= ~PORT_POWER;
ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);
return 0;
}
static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
u32 tmp;
dev_dbg(dev, "suspend=%d already_suspended=%d\n",
pdata->suspended, pdata->already_suspended);
/*
* If the controller was already suspended at suspend time,
* then don't resume it now.
*/
if (pdata->already_suspended) {
dev_dbg(dev, "already suspended, leaving early\n");
pdata->already_suspended = 0;
return 0;
}
if (!pdata->suspended) {
dev_dbg(dev, "not suspended, leaving early\n");
return 0;
}
pdata->suspended = 0;
dev_dbg(dev, "resuming...\n");
/* set host mode */
tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0);
ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE);
ehci_writel(ehci, pdata->pm_usbgenctrl,
hcd->regs + FSL_SOC_USB_USBGENCTRL);
ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
hcd->regs + FSL_SOC_USB_ISIPHYCTRL);
ehci_writel(ehci, SBUSCFG_INCR8, hcd->regs + FSL_SOC_USB_SBUSCFG);
/* restore EHCI registers */
ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable);
ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index);
ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment);
ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list);
ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next);
ehci_writel(ehci, pdata->pm_configured_flag,
&ehci->regs->configured_flag);
ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
ehci->rh_state = EHCI_RH_RUNNING;
dev->power.power_state = PMSG_ON;
tmp = ehci_readl(ehci, &ehci->regs->command);
tmp |= CMD_RUN;
ehci_writel(ehci, tmp, &ehci->regs->command);
usb_hcd_resume_root_hub(hcd);
return 0;
}
#else
static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
{
return 0;
}
static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
{
return 0;
}
#endif /* CONFIG_PPC_MPC512x */
static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
return container_of(ehci, struct ehci_fsl, ehci);
}
static int ehci_fsl_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
if (of_device_is_compatible(dev->parent->of_node,
"fsl,mpc5121-usb2-dr")) {
return ehci_fsl_mpc512x_drv_suspend(dev);
}
ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
device_may_wakeup(dev));
if (!fsl_deep_sleep())
return 0;
ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
return 0;
}
static int ehci_fsl_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
if (of_device_is_compatible(dev->parent->of_node,
"fsl,mpc5121-usb2-dr")) {
return ehci_fsl_mpc512x_drv_resume(dev);
}
ehci_prepare_ports_for_controller_resume(ehci);
if (!fsl_deep_sleep())
return 0;
usb_root_hub_lost_power(hcd->self.root_hub);
/* Restore USB PHY settings and enable the controller. */
iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
ehci_reset(ehci);
ehci_fsl_reinit(ehci);
return 0;
}
static int ehci_fsl_drv_restore(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_root_hub_lost_power(hcd->self.root_hub);
return 0;
}
static const struct dev_pm_ops ehci_fsl_pm_ops = {
.suspend = ehci_fsl_drv_suspend,
.resume = ehci_fsl_drv_resume,
.restore = ehci_fsl_drv_restore,
};
#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
#else
#define EHCI_FSL_PM_OPS NULL
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 status;
if (!port)
return -EINVAL;
port--;
/* start port reset before HNP protocol time out */
status = readl(&ehci->regs->port_status[port]);
if (!(status & PORT_CONNECT))
return -ENODEV;
/* hub_wq will finish the reset later */
if (ehci_is_TDI(ehci)) {
writel(PORT_RESET |
(status & ~(PORT_CSC | PORT_PEC | PORT_OCC)),
&ehci->regs->port_status[port]);
} else {
writel(PORT_RESET, &ehci->regs->port_status[port]);
}
return 0;
}
#else
#define ehci_start_port_reset NULL
#endif /* CONFIG_USB_OTG */
static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = {
.extra_priv_size = sizeof(struct ehci_fsl),
.reset = ehci_fsl_setup,
};
/**
* fsl_ehci_drv_remove - shutdown processing for FSL-based HCDs
* @pdev: USB Host Controller being removed
*
* Context: task context, might sleep
*
 * Reverses the effect of fsl_ehci_drv_probe().
*/
static void fsl_ehci_drv_remove(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
otg_set_host(hcd->usb_phy->otg, NULL);
usb_put_phy(hcd->usb_phy);
}
usb_remove_hcd(hcd);
/*
* do platform specific un-initialization:
* release iomux pins, disable clock, etc.
*/
if (pdata->exit)
pdata->exit(pdev);
usb_put_hcd(hcd);
}
static struct platform_driver ehci_fsl_driver = {
.probe = fsl_ehci_drv_probe,
.remove_new = fsl_ehci_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = DRV_NAME,
.pm = EHCI_FSL_PM_OPS,
},
};
static int __init ehci_fsl_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&fsl_ehci_hc_driver, &ehci_fsl_overrides);
fsl_ehci_hc_driver.product_desc =
"Freescale On-Chip EHCI Host Controller";
fsl_ehci_hc_driver.start_port_reset = ehci_start_port_reset;
return platform_driver_register(&ehci_fsl_driver);
}
module_init(ehci_fsl_init);
static void __exit ehci_fsl_cleanup(void)
{
platform_driver_unregister(&ehci_fsl_driver);
}
module_exit(ehci_fsl_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/usb/host/ehci-fsl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Alan Stern <[email protected]>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, [email protected]
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, [email protected]
* (C) Copyright 1999 Deti Fliegl, [email protected]
* (C) Copyright 1999 Thomas Sailer, [email protected]
* (C) Copyright 1999 Roman Weissgaerber, [email protected]
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, [email protected]).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
* (C) Copyright 2004-2007 Alan Stern, [email protected]
*
* Intel documents this fairly well, and as far as I know there
* are no royalties or anything like that, but even so there are
* people who decided that they want to do the same thing in a
* completely different way.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/pm.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "uhci-hcd.h"
/*
* Version Information
*/
#define DRIVER_AUTHOR \
"Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
"Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, " \
"Roman Weissgaerber, Alan Stern"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"
/* for flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
/*
* debug = 0, no debugging messages
* debug = 1, dump failed URBs except for stalls
* debug = 2, dump all failed URBs (including stalls)
* show all queues in /sys/kernel/debug/uhci/[pci_addr]
* debug = 3, show all TDs in URBs when dumping
*/
#ifdef CONFIG_DYNAMIC_DEBUG
static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#else
#define debug 0
#define errbuf NULL
#endif
#define ERRBUF_LEN (32 * 1024)
static struct kmem_cache *uhci_up_cachep; /* urb_priv */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
/*
* Calculate the link pointer DMA value for the first Skeleton QH in a frame.
*/
static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
{
int skelnum;
/*
* The interrupt queues will be interleaved as evenly as possible.
* There's not much to be done about period-1 interrupts; they have
* to occur in every frame. But we can schedule period-2 interrupts
* in odd-numbered frames, period-4 interrupts in frames congruent
* to 2 (mod 4), and so on. This way each frame only has two
* interrupt QHs, which will help spread out bandwidth utilization.
*
	 * __ffs() (find first set bit, zero-based) does exactly what we need:
	 * 1,3,5,... => __ffs = 0 => use period-2 QH = skelqh[8],
	 * 2,6,10,... => __ffs = 1 => use period-4 QH = skelqh[7], etc.
	 * __ffs >= 7 => not on any high-period queue, so use
	 * period-1 QH = skelqh[9].
	 * Add in UHCI_NUMFRAMES to ensure at least one bit is set.
*/
skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
if (skelnum <= 1)
skelnum = 9;
return LINK_TO_QH(uhci, uhci->skelqh[skelnum]);
}
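/*
 * Worked example (illustrative, assuming UHCI_NUMFRAMES == 1024):
 *
 *	frame 0  -> __ffs(1024) = 10 -> 8 - 10 <= 1 -> skelqh[9] (period 1)
 *	frame 1  -> __ffs(1025) = 0  -> skelqh[8] (period 2)
 *	frame 2  -> __ffs(1026) = 1  -> skelqh[7] (period 4)
 *	frame 4  -> __ffs(1028) = 2  -> skelqh[6] (period 8)
 *	frame 64 -> __ffs(1088) = 6  -> skelqh[2] (period 128)
 *
 * so every frame links to exactly one high-period QH, which in turn
 * links down to the period-1 queue.
 */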
#include "uhci-debug.c"
#include "uhci-q.c"
#include "uhci-hub.c"
/*
* Finish up a host controller reset and update the recorded state.
*/
static void finish_reset(struct uhci_hcd *uhci)
{
int port;
/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
* bits in the port status and control registers.
* We have to clear them by hand.
*/
for (port = 0; port < uhci->rh_numports; ++port)
uhci_writew(uhci, 0, USBPORTSC1 + (port * 2));
uhci->port_c_suspend = uhci->resuming_ports = 0;
uhci->rh_state = UHCI_RH_RESET;
uhci->is_stopped = UHCI_IS_STOPPED;
clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}
/*
* Last rites for a defunct/nonfunctional controller
* or one we don't want to use any more.
*/
static void uhci_hc_died(struct uhci_hcd *uhci)
{
uhci_get_current_frame_number(uhci);
uhci->reset_hc(uhci);
finish_reset(uhci);
uhci->dead = 1;
/* The current frame may already be partway finished */
++uhci->frame_number;
}
/*
* Initialize a controller that was newly discovered or has lost power
* or otherwise been reset while it was suspended. In none of these cases
* can we be sure of its previous state.
*/
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
if (uhci->check_and_reset_hc(uhci))
finish_reset(uhci);
}
#if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC)
/*
* The two functions below are generic reset functions that are used on systems
* that do not have keyboard and mouse legacy support. We assume that we are
* running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined.
*/
/*
* Make sure the controller is completely inactive, unable to
* generate interrupts or do DMA.
*/
static void uhci_generic_reset_hc(struct uhci_hcd *uhci)
{
/* Reset the HC - this will force us to get a
* new notification of any already connected
* ports due to the virtual disconnect that it
* implies.
*/
uhci_writew(uhci, USBCMD_HCRESET, USBCMD);
mb();
udelay(5);
if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET)
dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n");
/* Just to be safe, disable interrupt requests and
* make sure the controller is stopped.
*/
uhci_writew(uhci, 0, USBINTR);
uhci_writew(uhci, 0, USBCMD);
}
/*
* Initialize a controller that was newly discovered or has just been
* resumed. In either case we can't be sure of its previous state.
*
* Returns: 1 if the controller was reset, 0 otherwise.
*/
static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci)
{
unsigned int cmd, intr;
/*
* When restarting a suspended controller, we expect all the
* settings to be the same as we left them:
*
* Controller is stopped and configured with EGSM set;
* No interrupts enabled except possibly Resume Detect.
*
* If any of these conditions are violated we do a complete reset.
*/
cmd = uhci_readw(uhci, USBCMD);
if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) {
dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n",
__func__, cmd);
goto reset_needed;
}
intr = uhci_readw(uhci, USBINTR);
if (intr & (~USBINTR_RESUME)) {
dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n",
__func__, intr);
goto reset_needed;
}
return 0;
reset_needed:
dev_dbg(uhci_dev(uhci), "Performing full reset\n");
uhci_generic_reset_hc(uhci);
return 1;
}
#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
/*
* Store the basic register settings needed by the controller.
*/
static void configure_hc(struct uhci_hcd *uhci)
{
/* Set the frame length to the default: 1 ms exactly */
uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF);
/* Store the frame list base address */
uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD);
/* Set the current frame number */
uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER,
USBFRNUM);
/* perform any arch/bus specific configuration */
if (uhci->configure_hc)
uhci->configure_hc(uhci);
}
static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
/*
* If we have to ignore overcurrent events then almost by definition
* we can't depend on resume-detect interrupts.
*
* Those interrupts also don't seem to work on ASpeed SoCs.
*/
if (ignore_oc || uhci_is_aspeed(uhci))
return 1;
return uhci->resume_detect_interrupts_are_broken ?
uhci->resume_detect_interrupts_are_broken(uhci) : 0;
}
static int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
{
return uhci->global_suspend_mode_is_broken ?
uhci->global_suspend_mode_is_broken(uhci) : 0;
}
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
int auto_stop;
int int_enable, egsm_enable, wakeup_enable;
struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub;
auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
dev_dbg(&rhdev->dev, "%s%s\n", __func__,
(auto_stop ? " (auto-stop)" : ""));
/* Start off by assuming Resume-Detect interrupts and EGSM work
* and that remote wakeups should be enabled.
*/
egsm_enable = USBCMD_EGSM;
int_enable = USBINTR_RESUME;
wakeup_enable = 1;
/*
* In auto-stop mode, we must be able to detect new connections.
* The user can force us to poll by disabling remote wakeup;
* otherwise we will use the EGSM/RD mechanism.
*/
if (auto_stop) {
if (!device_may_wakeup(&rhdev->dev))
egsm_enable = int_enable = 0;
}
#ifdef CONFIG_PM
/*
* In bus-suspend mode, we use the wakeup setting specified
* for the root hub.
*/
else {
if (!rhdev->do_remote_wakeup)
wakeup_enable = 0;
}
#endif
/*
* UHCI doesn't distinguish between wakeup requests from downstream
* devices and local connect/disconnect events. There's no way to
* enable one without the other; both are controlled by EGSM. Thus
* if wakeups are disallowed then EGSM must be turned off -- in which
* case remote wakeup requests from downstream during system sleep
* will be lost.
*
* In addition, if EGSM is broken then we can't use it. Likewise,
* if Resume-Detect interrupts are broken then we can't use them.
*
* Finally, neither EGSM nor RD is useful by itself. Without EGSM,
* the RD status bit will never get set. Without RD, the controller
* won't generate interrupts to tell the system about wakeup events.
*/
if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
resume_detect_interrupts_are_broken(uhci))
egsm_enable = int_enable = 0;
uhci->RD_enable = !!int_enable;
uhci_writew(uhci, int_enable, USBINTR);
uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD);
mb();
udelay(5);
/* If we're auto-stopping then no devices have been attached
* for a while, so there shouldn't be any active URBs and the
* controller should stop after a few microseconds. Otherwise
* we will give the controller one frame to stop.
*/
if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) {
uhci->rh_state = UHCI_RH_SUSPENDING;
spin_unlock_irq(&uhci->lock);
msleep(1);
spin_lock_irq(&uhci->lock);
if (uhci->dead)
return;
}
if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH))
dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");
uhci_get_current_frame_number(uhci);
uhci->rh_state = new_state;
uhci->is_stopped = UHCI_IS_STOPPED;
	/*
	 * If remote wakeup is enabled but either EGSM or RD interrupts
	 * don't work, then we won't get an interrupt when a wakeup event
	 * occurs.  Thus the suspended root hub needs to be polled.
	 */
if (wakeup_enable && (!int_enable || !egsm_enable))
set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
else
clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
uhci_scan_schedule(uhci);
uhci_fsbr_off(uhci);
}
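/*
 * Net effect of the logic above (illustrative summary):
 *
 *  - wakeups disallowed          -> EGSM and RD off, root hub not polled
 *  - wakeups allowed, both work  -> EGSM and RD on,  root hub not polled
 *  - wakeups allowed, either bad -> EGSM and RD off, root hub is polled
 *
 * In the auto-stop case with remote wakeup disabled, polling stays on so
 * that new connections are still noticed.
 */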
static void start_rh(struct uhci_hcd *uhci)
{
uhci->is_stopped = 0;
/*
* Clear stale status bits on Aspeed as we get a stale HCH
* which causes problems later on
*/
if (uhci_is_aspeed(uhci))
uhci_writew(uhci, uhci_readw(uhci, USBSTS), USBSTS);
/* Mark it configured and running with a 64-byte max packet.
* All interrupts are enabled, even though RESUME won't do anything.
*/
uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD);
uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME |
USBINTR_IOC | USBINTR_SP, USBINTR);
mb();
uhci->rh_state = UHCI_RH_RUNNING;
set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}
static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
dev_dbg(&uhci_to_hcd(uhci)->self.root_hub->dev,
"%s%s\n", __func__,
uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
" (auto-start)" : "");
/* If we are auto-stopped then no devices are attached so there's
* no need for wakeup signals. Otherwise we send Global Resume
* for 20 ms.
*/
if (uhci->rh_state == UHCI_RH_SUSPENDED) {
unsigned egsm;
/* Keep EGSM on if it was set before */
egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM;
uhci->rh_state = UHCI_RH_RESUMING;
uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD);
spin_unlock_irq(&uhci->lock);
msleep(20);
spin_lock_irq(&uhci->lock);
if (uhci->dead)
return;
/* End Global Resume and wait for EOP to be sent */
uhci_writew(uhci, USBCMD_CF, USBCMD);
mb();
udelay(4);
if (uhci_readw(uhci, USBCMD) & USBCMD_FGR)
dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
}
start_rh(uhci);
/* Restart root hub polling */
mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}
static irqreturn_t uhci_irq(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned short status;
/*
* Read the interrupt status, and write it back to clear the
* interrupt cause. Contrary to the UHCI specification, the
* "HC Halted" status bit is persistent: it is RO, not R/WC.
*/
status = uhci_readw(uhci, USBSTS);
if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
return IRQ_NONE;
uhci_writew(uhci, status, USBSTS); /* Clear it */
spin_lock(&uhci->lock);
if (unlikely(!uhci->is_initialized)) /* not yet configured */
goto done;
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
if (status & USBSTS_HSE)
dev_err(uhci_dev(uhci),
"host system error, PCI problems?\n");
if (status & USBSTS_HCPE)
dev_err(uhci_dev(uhci),
"host controller process error, something bad happened!\n");
if (status & USBSTS_HCH) {
if (uhci->rh_state >= UHCI_RH_RUNNING) {
dev_err(uhci_dev(uhci),
"host controller halted, very bad!\n");
if (debug > 1 && errbuf) {
/* Print the schedule for debugging */
uhci_sprint_schedule(uhci, errbuf,
ERRBUF_LEN - EXTRA_SPACE);
lprintk(errbuf);
}
uhci_hc_died(uhci);
usb_hc_died(hcd);
/* Force a callback in case there are
* pending unlinks */
mod_timer(&hcd->rh_timer, jiffies);
}
}
}
if (status & USBSTS_RD) {
spin_unlock(&uhci->lock);
usb_hcd_poll_rh_status(hcd);
} else {
uhci_scan_schedule(uhci);
done:
spin_unlock(&uhci->lock);
}
return IRQ_HANDLED;
}
/*
* Store the current frame number in uhci->frame_number if the controller
* is running. Expand from 11 bits (of which we use only 10) to a
* full-sized integer.
*
* Like many other parts of the driver, this code relies on being polled
* more than once per second as long as the controller is running.
*/
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
if (!uhci->is_stopped) {
unsigned delta;
delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) &
(UHCI_NUMFRAMES - 1);
uhci->frame_number += delta;
}
}
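/*
 * Worked example (illustrative, assuming UHCI_NUMFRAMES == 1024): if
 * uhci->frame_number == 0x12345 (low bits 0x345) and USBFRNUM now reads
 * 0x010, then
 *
 *	delta = (0x010 - 0x345) & 0x3ff = 0x0cb
 *
 * and frame_number advances to 0x12410.  This stays correct as long as
 * the counter wraps at most once between polls, i.e. we are polled more
 * than once per second while the controller is running.
 */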
/*
* De-allocate all resources
*/
static void release_uhci(struct uhci_hcd *uhci)
{
int i;
spin_lock_irq(&uhci->lock);
uhci->is_initialized = 0;
spin_unlock_irq(&uhci->lock);
debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name,
uhci_debugfs_root);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
uhci_free_qh(uhci, uhci->skelqh[i]);
uhci_free_td(uhci, uhci->term_td);
dma_pool_destroy(uhci->qh_pool);
dma_pool_destroy(uhci->td_pool);
kfree(uhci->frame_cpu);
dma_free_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
uhci->frame, uhci->frame_dma_handle);
}
/*
* Allocate a frame list, and then setup the skeleton
*
* The hardware doesn't really know any difference
* in the queues, but the order does matter for the
* protocols higher up. The order in which the queues
* are encountered by the hardware is:
*
* - All isochronous events are handled before any
* of the queues. We don't do that here, because
* we'll create the actual TD entries on demand.
* - The first queue is the high-period interrupt queue.
* - The second queue is the period-1 interrupt and async
* (low-speed control, full-speed control, then bulk) queue.
* - The third queue is the terminating bandwidth reclamation queue,
* which contains no members, loops back to itself, and is present
* only when FSBR is on and there are no full-speed control or bulk QHs.
*/
static int uhci_start(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int retval = -EBUSY;
int i;
hcd->uses_new_polling = 1;
/* Accept arbitrarily long scatter-gather lists */
if (!hcd->localmem_pool)
hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
#ifdef UHCI_DEBUG_OPS
debugfs_create_file(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR,
uhci_debugfs_root, uhci, &uhci_debug_operations);
#endif
uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
&uhci->frame_dma_handle, GFP_KERNEL);
if (!uhci->frame) {
dev_err(uhci_dev(uhci),
"unable to allocate consistent memory for frame list\n");
goto err_alloc_frame;
}
uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
GFP_KERNEL);
if (!uhci->frame_cpu)
goto err_alloc_frame_cpu;
uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
sizeof(struct uhci_td), 16, 0);
if (!uhci->td_pool) {
dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
goto err_create_td_pool;
}
uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
sizeof(struct uhci_qh), 16, 0);
if (!uhci->qh_pool) {
dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
goto err_create_qh_pool;
}
uhci->term_td = uhci_alloc_td(uhci);
if (!uhci->term_td) {
dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
goto err_alloc_term_td;
}
for (i = 0; i < UHCI_NUM_SKELQH; i++) {
uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
if (!uhci->skelqh[i]) {
dev_err(uhci_dev(uhci), "unable to allocate QH\n");
goto err_alloc_skelqh;
}
}
/*
* 8 Interrupt queues; link all higher int queues to int1 = async
*/
for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh);
uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci);
uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
/* This dummy TD is to work around a bug in Intel PIIX controllers */
uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) |
(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
uhci->term_td->link = UHCI_PTR_TERM(uhci);
uhci->skel_async_qh->element = uhci->skel_term_qh->element =
LINK_TO_TD(uhci, uhci->term_td);
/*
* Fill the frame list: make all entries point to the proper
* interrupt queue.
*/
for (i = 0; i < UHCI_NUMFRAMES; i++) {
/* Only place we don't use the frame list routines */
uhci->frame[i] = uhci_frame_skel_link(uhci, i);
}
/*
* Some architectures require a full mb() to enforce completion of
* the memory writes above before the I/O transfers in configure_hc().
*/
mb();
spin_lock_irq(&uhci->lock);
configure_hc(uhci);
uhci->is_initialized = 1;
start_rh(uhci);
spin_unlock_irq(&uhci->lock);
return 0;
/*
* error exits:
*/
err_alloc_skelqh:
for (i = 0; i < UHCI_NUM_SKELQH; i++) {
if (uhci->skelqh[i])
uhci_free_qh(uhci, uhci->skelqh[i]);
}
uhci_free_td(uhci, uhci->term_td);
err_alloc_term_td:
dma_pool_destroy(uhci->qh_pool);
err_create_qh_pool:
dma_pool_destroy(uhci->td_pool);
err_create_td_pool:
kfree(uhci->frame_cpu);
err_alloc_frame_cpu:
dma_free_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
uhci->frame, uhci->frame_dma_handle);
err_alloc_frame:
debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root);
return retval;
}
static void uhci_stop(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
spin_lock_irq(&uhci->lock);
if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead)
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
synchronize_irq(hcd->irq);
del_timer_sync(&uhci->fsbr_timer);
release_uhci(uhci);
}
#ifdef CONFIG_PM
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int rc = 0;
spin_lock_irq(&uhci->lock);
if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (uhci->dead)
; /* Dead controllers tell no tales */
/* Once the controller is stopped, port resumes that are already
* in progress won't complete. Hence if remote wakeup is enabled
* for the root hub and any ports are in the middle of a resume or
* remote wakeup, we must fail the suspend.
*/
else if (hcd->self.root_hub->do_remote_wakeup &&
uhci->resuming_ports) {
dev_dbg(uhci_dev(uhci),
"suspend failed because a port is resuming\n");
rc = -EBUSY;
} else
suspend_rh(uhci, UHCI_RH_SUSPENDED);
spin_unlock_irq(&uhci->lock);
return rc;
}
static int uhci_rh_resume(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int rc = 0;
spin_lock_irq(&uhci->lock);
if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (!uhci->dead)
wakeup_rh(uhci);
spin_unlock_irq(&uhci->lock);
return rc;
}
#endif
/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct uhci_qh *qh;
spin_lock_irq(&uhci->lock);
qh = (struct uhci_qh *) hep->hcpriv;
if (qh == NULL)
goto done;
while (qh->state != QH_STATE_IDLE) {
++uhci->num_waiting;
spin_unlock_irq(&uhci->lock);
wait_event_interruptible(uhci->waitqh,
qh->state == QH_STATE_IDLE);
spin_lock_irq(&uhci->lock);
--uhci->num_waiting;
}
uhci_free_qh(uhci, qh);
done:
spin_unlock_irq(&uhci->lock);
}
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned frame_number;
unsigned delta;
/* Minimize latency by avoiding the spinlock */
frame_number = uhci->frame_number;
barrier();
delta = (uhci_readw(uhci, USBFRNUM) - frame_number) &
(UHCI_NUMFRAMES - 1);
return frame_number + delta;
}
/* Determines number of ports on controller */
static int uhci_count_ports(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned io_size = (unsigned) hcd->rsrc_len;
int port;
/* The UHCI spec says devices must have 2 ports, and goes on to say
* they may have more but gives no way to determine how many there
* are. However according to the UHCI spec, Bit 7 of the port
* status and control register is always set to 1. So we try to
* use this to our advantage. Another common failure mode when
* a nonexistent register is addressed is to return all ones, so
* we test for that also.
*/
for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
unsigned int portstatus;
portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2));
if (!(portstatus & 0x0080) || portstatus == 0xffff)
break;
}
if (debug)
dev_info(uhci_dev(uhci), "detected %d ports\n", port);
/* Anything greater than 7 is weird so we'll ignore it. */
if (port > UHCI_RH_MAXCHILD) {
dev_info(uhci_dev(uhci),
"port count misdetected? forcing to 2 ports\n");
port = 2;
}
return port;
}
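/*
 * Example (illustrative): with a typical 0x20-byte I/O window there is
 * room for (0x20 - USBPORTSC1) / 2 = 8 port registers; a two-port
 * controller might then read e.g. 0x0080, 0x0080, 0xffff, ... so the
 * loop above stops with port == 2.
 */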
static const char hcd_name[] = "uhci_hcd";
#if defined(CONFIG_USB_PCI) && defined(CONFIG_HAS_IOPORT)
#include "uhci-pci.c"
#define PCI_DRIVER uhci_pci_driver
#endif
#ifdef CONFIG_SPARC_LEON
#include "uhci-grlib.c"
#define PLATFORM_DRIVER uhci_grlib_driver
#endif
#ifdef CONFIG_USB_UHCI_PLATFORM
#include "uhci-platform.c"
#define PLATFORM_DRIVER uhci_platform_driver
#endif
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
#error "missing bus glue for uhci-hcd"
#endif
static int __init uhci_hcd_init(void)
{
int retval = -ENOMEM;
if (usb_disabled())
return -ENODEV;
set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
#ifdef CONFIG_DYNAMIC_DEBUG
errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
if (!errbuf)
goto errbuf_failed;
uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL);
if (!uhci_up_cachep)
goto up_failed;
#ifdef PLATFORM_DRIVER
retval = platform_driver_register(&PLATFORM_DRIVER);
if (retval < 0)
goto clean0;
#endif
#ifdef PCI_DRIVER
retval = pci_register_driver(&PCI_DRIVER);
if (retval < 0)
goto clean1;
#endif
return 0;
#ifdef PCI_DRIVER
clean1:
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
kmem_cache_destroy(uhci_up_cachep);
up_failed:
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(uhci_debugfs_root);
kfree(errbuf);
errbuf_failed:
#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
return retval;
}
static void __exit uhci_hcd_cleanup(void)
{
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
pci_unregister_driver(&PCI_DRIVER);
#endif
kmem_cache_destroy(uhci_up_cachep);
debugfs_remove(uhci_debugfs_root);
#ifdef CONFIG_DYNAMIC_DEBUG
kfree(errbuf);
#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
}
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/uhci-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2013 Xenia Ragiadakou
*
* Author: Xenia Ragiadakou
* Email : [email protected]
*/
#define CREATE_TRACE_POINTS
#include "xhci-trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_dbg_quirks);
EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_dbg_init);
| linux-master | drivers/usb/host/xhci-trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-debugfs.c - xHCI debugfs interface
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "xhci.h"
#include "xhci-debugfs.h"
static const struct debugfs_reg32 xhci_cap_regs[] = {
dump_register(CAPLENGTH),
dump_register(HCSPARAMS1),
dump_register(HCSPARAMS2),
dump_register(HCSPARAMS3),
dump_register(HCCPARAMS1),
dump_register(DOORBELLOFF),
dump_register(RUNTIMEOFF),
dump_register(HCCPARAMS2),
};
static const struct debugfs_reg32 xhci_op_regs[] = {
dump_register(USBCMD),
dump_register(USBSTS),
dump_register(PAGESIZE),
dump_register(DNCTRL),
dump_register(CRCR),
dump_register(DCBAAP_LOW),
dump_register(DCBAAP_HIGH),
dump_register(CONFIG),
};
static const struct debugfs_reg32 xhci_runtime_regs[] = {
dump_register(MFINDEX),
dump_register(IR0_IMAN),
dump_register(IR0_IMOD),
dump_register(IR0_ERSTSZ),
dump_register(IR0_ERSTBA_LOW),
dump_register(IR0_ERSTBA_HIGH),
dump_register(IR0_ERDP_LOW),
dump_register(IR0_ERDP_HIGH),
};
static const struct debugfs_reg32 xhci_extcap_legsup[] = {
dump_register(EXTCAP_USBLEGSUP),
dump_register(EXTCAP_USBLEGCTLSTS),
};
static const struct debugfs_reg32 xhci_extcap_protocol[] = {
dump_register(EXTCAP_REVISION),
dump_register(EXTCAP_NAME),
dump_register(EXTCAP_PORTINFO),
dump_register(EXTCAP_PORTTYPE),
dump_register(EXTCAP_MANTISSA1),
dump_register(EXTCAP_MANTISSA2),
dump_register(EXTCAP_MANTISSA3),
dump_register(EXTCAP_MANTISSA4),
dump_register(EXTCAP_MANTISSA5),
dump_register(EXTCAP_MANTISSA6),
};
static const struct debugfs_reg32 xhci_extcap_dbc[] = {
dump_register(EXTCAP_DBC_CAPABILITY),
dump_register(EXTCAP_DBC_DOORBELL),
dump_register(EXTCAP_DBC_ERSTSIZE),
dump_register(EXTCAP_DBC_ERST_LOW),
dump_register(EXTCAP_DBC_ERST_HIGH),
dump_register(EXTCAP_DBC_ERDP_LOW),
dump_register(EXTCAP_DBC_ERDP_HIGH),
dump_register(EXTCAP_DBC_CONTROL),
dump_register(EXTCAP_DBC_STATUS),
dump_register(EXTCAP_DBC_PORTSC),
dump_register(EXTCAP_DBC_CONT_LOW),
dump_register(EXTCAP_DBC_CONT_HIGH),
dump_register(EXTCAP_DBC_DEVINFO1),
dump_register(EXTCAP_DBC_DEVINFO2),
};
static struct dentry *xhci_debugfs_root;
static struct xhci_regset *xhci_debugfs_alloc_regset(struct xhci_hcd *xhci)
{
struct xhci_regset *regset;
regset = kzalloc(sizeof(*regset), GFP_KERNEL);
if (!regset)
return NULL;
	/*
	 * Allocation and freeing of regsets happen strictly in order,
	 * so no locking is needed here.
	 */
INIT_LIST_HEAD(®set->list);
list_add_tail(®set->list, &xhci->regset_list);
return regset;
}
static void xhci_debugfs_free_regset(struct xhci_regset *regset)
{
if (!regset)
return;
list_del(®set->list);
kfree(regset);
}
__printf(6, 7)
static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
const struct debugfs_reg32 *regs,
size_t nregs, struct dentry *parent,
const char *fmt, ...)
{
struct xhci_regset *rgs;
va_list args;
struct debugfs_regset32 *regset;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
rgs = xhci_debugfs_alloc_regset(xhci);
if (!rgs)
return;
va_start(args, fmt);
vsnprintf(rgs->name, sizeof(rgs->name), fmt, args);
va_end(args);
regset = &rgs->regset;
regset->regs = regs;
regset->nregs = nregs;
regset->base = hcd->regs + base;
regset->dev = hcd->self.controller;
debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
}
static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
const struct debugfs_reg32 *regs,
size_t n, const char *cap_name)
{
u32 offset;
int index = 0;
size_t psic, nregs = n;
void __iomem *base = &xhci->cap_regs->hc_capbase;
offset = xhci_find_next_ext_cap(base, 0, cap_id);
while (offset) {
if (cap_id == XHCI_EXT_CAPS_PROTOCOL) {
psic = XHCI_EXT_PORT_PSIC(readl(base + offset + 8));
nregs = min(4 + psic, n);
}
xhci_debugfs_regset(xhci, offset, regs, nregs,
xhci->debugfs_root, "%s:%02d",
cap_name, index);
offset = xhci_find_next_ext_cap(base, offset, cap_id);
index++;
}
}
static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
seq_printf(s, "%pad\n", &dma);
return 0;
}
static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
seq_printf(s, "%pad\n", &dma);
return 0;
}
static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
{
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
seq_printf(s, "%d\n", ring->cycle_state);
return 0;
}
static void xhci_ring_dump_segment(struct seq_file *s,
struct xhci_segment *seg)
{
int i;
dma_addr_t dma;
union xhci_trb *trb;
char str[XHCI_MSG_MAX];
for (i = 0; i < TRBS_PER_SEGMENT; i++) {
trb = &seg->trbs[i];
dma = seg->dma + i * sizeof(*trb);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]),
le32_to_cpu(trb->generic.field[1]),
le32_to_cpu(trb->generic.field[2]),
le32_to_cpu(trb->generic.field[3])));
}
}
static int xhci_ring_trb_show(struct seq_file *s, void *unused)
{
int i;
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
struct xhci_segment *seg = ring->first_seg;
for (i = 0; i < ring->num_segs; i++) {
xhci_ring_dump_segment(s, seg);
seg = seg->next;
}
return 0;
}
static struct xhci_file_map ring_files[] = {
{"enqueue", xhci_ring_enqueue_show, },
{"dequeue", xhci_ring_dequeue_show, },
{"cycle", xhci_ring_cycle_show, },
{"trbs", xhci_ring_trb_show, },
};
static int xhci_ring_open(struct inode *inode, struct file *file)
{
int i;
struct xhci_file_map *f_map;
const char *file_name = file_dentry(file)->d_iname;
for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
f_map = &ring_files[i];
if (strcmp(f_map->name, file_name) == 0)
break;
}
return single_open(file, f_map->show, inode->i_private);
}
static const struct file_operations xhci_ring_fops = {
.open = xhci_ring_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int xhci_slot_context_show(struct seq_file *s, void *unused)
{
struct xhci_hcd *xhci;
struct xhci_slot_ctx *slot_ctx;
struct xhci_slot_priv *priv = s->private;
struct xhci_virt_device *dev = priv->dev;
char str[XHCI_MSG_MAX];
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
xhci_decode_slot_context(str,
le32_to_cpu(slot_ctx->dev_info),
le32_to_cpu(slot_ctx->dev_info2),
le32_to_cpu(slot_ctx->tt_info),
le32_to_cpu(slot_ctx->dev_state)));
return 0;
}
static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
{
int ep_index;
dma_addr_t dma;
struct xhci_hcd *xhci;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_priv *priv = s->private;
struct xhci_virt_device *dev = priv->dev;
char str[XHCI_MSG_MAX];
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
for (ep_index = 0; ep_index < 31; ep_index++) {
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(str,
le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
le64_to_cpu(ep_ctx->deq),
le32_to_cpu(ep_ctx->tx_info)));
}
return 0;
}
static int xhci_device_name_show(struct seq_file *s, void *unused)
{
struct xhci_slot_priv *priv = s->private;
struct xhci_virt_device *dev = priv->dev;
seq_printf(s, "%s\n", dev_name(&dev->udev->dev));
return 0;
}
static struct xhci_file_map context_files[] = {
{"name", xhci_device_name_show, },
{"slot-context", xhci_slot_context_show, },
{"ep-context", xhci_endpoint_context_show, },
};
static int xhci_context_open(struct inode *inode, struct file *file)
{
int i;
struct xhci_file_map *f_map;
const char *file_name = file_dentry(file)->d_iname;
for (i = 0; i < ARRAY_SIZE(context_files); i++) {
f_map = &context_files[i];
if (strcmp(f_map->name, file_name) == 0)
break;
}
return single_open(file, f_map->show, inode->i_private);
}
static const struct file_operations xhci_context_fops = {
.open = xhci_context_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int xhci_portsc_show(struct seq_file *s, void *unused)
{
struct xhci_port *port = s->private;
u32 portsc;
char str[XHCI_MSG_MAX];
portsc = readl(port->addr);
seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));
return 0;
}
static int xhci_port_open(struct inode *inode, struct file *file)
{
return single_open(file, xhci_portsc_show, inode->i_private);
}
static ssize_t xhci_port_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct xhci_port *port = s->private;
struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd);
	char buf[32] = "";
	u32 portsc;
	unsigned long flags;
	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;
if (!strncmp(buf, "compliance", 10)) {
/* If CTC is clear, compliance is enabled by default */
if (!HCC2_CTC(xhci->hcc_params2))
return count;
spin_lock_irqsave(&xhci->lock, flags);
/* compliance mode can only be enabled on ports in RxDetect */
portsc = readl(port->addr);
if ((portsc & PORT_PLS_MASK) != XDEV_RXDETECT) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -EPERM;
}
portsc = xhci_port_state_to_neutral(portsc);
portsc &= ~PORT_PLS_MASK;
portsc |= PORT_LINK_STROBE | XDEV_COMP_MODE;
writel(portsc, port->addr);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
return -EINVAL;
}
return count;
}
static const struct file_operations port_fops = {
.open = xhci_port_open,
.write = xhci_port_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
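/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * per-controller directory is named after its dev_name()):
 *
 *	# cat /sys/kernel/debug/usb/xhci/<dev>/ports/port01/portsc
 *	# echo compliance > /sys/kernel/debug/usb/xhci/<dev>/ports/port01/portsc
 *
 * The write forces the link into compliance mode; it requires the port
 * to be in RxDetect, and is a no-op when CTC is clear (compliance entry
 * is then automatic).
 */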
static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
struct xhci_file_map *files,
size_t nentries, void *data,
struct dentry *parent,
const struct file_operations *fops)
{
int i;
for (i = 0; i < nentries; i++)
debugfs_create_file(files[i].name, 0444, parent, data, fops);
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
struct xhci_ring **ring,
const char *name,
struct dentry *parent)
{
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
xhci_debugfs_create_files(xhci, ring_files, ARRAY_SIZE(ring_files),
ring, dir, &xhci_ring_fops);
return dir;
}
static void xhci_debugfs_create_context_files(struct xhci_hcd *xhci,
struct dentry *parent,
int slot_id)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
xhci_debugfs_create_files(xhci, context_files,
ARRAY_SIZE(context_files),
dev->debugfs_private,
parent, &xhci_context_fops);
}
void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
int ep_index)
{
struct xhci_ep_priv *epriv;
struct xhci_slot_priv *spriv = dev->debugfs_private;
if (!spriv)
return;
if (spriv->eps[ep_index])
return;
epriv = kzalloc(sizeof(*epriv), GFP_KERNEL);
if (!epriv)
return;
epriv->show_ring = dev->eps[ep_index].ring;
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
&epriv->show_ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
}
void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
int ep_index)
{
struct xhci_ep_priv *epriv;
struct xhci_slot_priv *spriv = dev->debugfs_private;
if (!spriv || !spriv->eps[ep_index])
return;
epriv = spriv->eps[ep_index];
debugfs_remove_recursive(epriv->root);
spriv->eps[ep_index] = NULL;
kfree(epriv);
}
static int xhci_stream_id_show(struct seq_file *s, void *unused)
{
struct xhci_ep_priv *epriv = s->private;
if (!epriv->stream_info)
return -EPERM;
seq_printf(s, "Show stream ID %d trb ring, supported [1 - %d]\n",
epriv->stream_id, epriv->stream_info->num_streams - 1);
return 0;
}
static int xhci_stream_id_open(struct inode *inode, struct file *file)
{
return single_open(file, xhci_stream_id_show, inode->i_private);
}
static ssize_t xhci_stream_id_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct xhci_ep_priv *epriv = s->private;
int ret;
u16 stream_id; /* MaxPStreams + 1 <= 16 */
if (!epriv->stream_info)
return -EPERM;
/* Decimal number */
ret = kstrtou16_from_user(ubuf, count, 10, &stream_id);
if (ret)
return ret;
if (stream_id == 0 || stream_id >= epriv->stream_info->num_streams)
return -EINVAL;
epriv->stream_id = stream_id;
epriv->show_ring = epriv->stream_info->stream_rings[stream_id];
return count;
}
static const struct file_operations stream_id_fops = {
.open = xhci_stream_id_open,
.write = xhci_stream_id_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int xhci_stream_context_array_show(struct seq_file *s, void *unused)
{
struct xhci_ep_priv *epriv = s->private;
struct xhci_stream_ctx *stream_ctx;
dma_addr_t dma;
int id;
if (!epriv->stream_info)
return -EPERM;
seq_printf(s, "Allocated %d streams and %d stream context array entries\n",
epriv->stream_info->num_streams,
epriv->stream_info->num_stream_ctxs);
for (id = 0; id < epriv->stream_info->num_stream_ctxs; id++) {
stream_ctx = epriv->stream_info->stream_ctx_array + id;
dma = epriv->stream_info->ctx_array_dma + id * 16;
if (id < epriv->stream_info->num_streams)
seq_printf(s, "%pad stream id %d deq %016llx\n", &dma,
id, le64_to_cpu(stream_ctx->stream_ring));
else
seq_printf(s, "%pad stream context entry not used deq %016llx\n",
&dma, le64_to_cpu(stream_ctx->stream_ring));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(xhci_stream_context_array);
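/*
 * DEFINE_SHOW_ATTRIBUTE() above generates xhci_stream_context_array_fops,
 * a read-only file_operations that wraps the _show() routine via
 * single_open(); see <linux/seq_file.h>.
 */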
void xhci_debugfs_create_stream_files(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
int ep_index)
{
struct xhci_slot_priv *spriv = dev->debugfs_private;
struct xhci_ep_priv *epriv;
if (!spriv || !spriv->eps[ep_index] ||
!dev->eps[ep_index].stream_info)
return;
epriv = spriv->eps[ep_index];
epriv->stream_info = dev->eps[ep_index].stream_info;
/* Show trb ring of stream ID 1 by default */
epriv->stream_id = 1;
epriv->show_ring = epriv->stream_info->stream_rings[1];
debugfs_create_file("stream_id", 0644,
epriv->root, epriv,
&stream_id_fops);
debugfs_create_file("stream_context_array", 0444,
epriv->root, epriv,
&xhci_stream_context_array_fops);
}
void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_slot_priv *priv;
struct xhci_virt_device *dev = xhci->devs[slot_id];
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return;
snprintf(priv->name, sizeof(priv->name), "%02d", slot_id);
priv->root = debugfs_create_dir(priv->name, xhci->debugfs_slots);
priv->dev = dev;
dev->debugfs_private = priv;
xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
"ep00", priv->root);
xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
}
void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
{
int i;
struct xhci_slot_priv *priv;
struct xhci_virt_device *dev = xhci->devs[slot_id];
if (!dev || !dev->debugfs_private)
return;
priv = dev->debugfs_private;
debugfs_remove_recursive(priv->root);
for (i = 0; i < 31; i++)
kfree(priv->eps[i]);
kfree(priv);
dev->debugfs_private = NULL;
}
static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
struct dentry *parent)
{
unsigned int num_ports;
char port_name[8];
struct xhci_port *port;
struct dentry *dir;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
parent = debugfs_create_dir("ports", parent);
while (num_ports--) {
scnprintf(port_name, sizeof(port_name), "port%02d",
num_ports + 1);
dir = debugfs_create_dir(port_name, parent);
port = &xhci->hw_ports[num_ports];
debugfs_create_file("portsc", 0644, dir, port, &port_fops);
}
}
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
xhci->debugfs_root = debugfs_create_dir(dev_name(dev),
xhci_debugfs_root);
INIT_LIST_HEAD(&xhci->regset_list);
xhci_debugfs_regset(xhci,
0,
xhci_cap_regs, ARRAY_SIZE(xhci_cap_regs),
xhci->debugfs_root, "reg-cap");
xhci_debugfs_regset(xhci,
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)),
xhci_op_regs, ARRAY_SIZE(xhci_op_regs),
xhci->debugfs_root, "reg-op");
xhci_debugfs_regset(xhci,
readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK,
xhci_runtime_regs, ARRAY_SIZE(xhci_runtime_regs),
xhci->debugfs_root, "reg-runtime");
xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_LEGACY,
xhci_extcap_legsup,
ARRAY_SIZE(xhci_extcap_legsup),
"reg-ext-legsup");
xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_PROTOCOL,
xhci_extcap_protocol,
ARRAY_SIZE(xhci_extcap_protocol),
"reg-ext-protocol");
xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_DEBUG,
xhci_extcap_dbc,
ARRAY_SIZE(xhci_extcap_dbc),
"reg-ext-dbc");
xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
"command-ring",
xhci->debugfs_root);
xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
"event-ring",
xhci->debugfs_root);
xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
}
void xhci_debugfs_exit(struct xhci_hcd *xhci)
{
struct xhci_regset *rgs, *tmp;
debugfs_remove_recursive(xhci->debugfs_root);
xhci->debugfs_root = NULL;
xhci->debugfs_slots = NULL;
list_for_each_entry_safe(rgs, tmp, &xhci->regset_list, list)
xhci_debugfs_free_regset(rgs);
}
void __init xhci_debugfs_create_root(void)
{
xhci_debugfs_root = debugfs_create_dir("xhci", usb_debug_root);
}
void __exit xhci_debugfs_remove_root(void)
{
debugfs_remove_recursive(xhci_debugfs_root);
xhci_debugfs_root = NULL;
}
| linux-master | drivers/usb/host/xhci-debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2020 Linaro Limited */
#include <linux/acpi.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-pci.h"
#define RENESAS_FW_VERSION 0x6C
#define RENESAS_ROM_CONFIG 0xF0
#define RENESAS_FW_STATUS 0xF4
#define RENESAS_FW_STATUS_MSB 0xF5
#define RENESAS_ROM_STATUS 0xF6
#define RENESAS_ROM_STATUS_MSB 0xF7
#define RENESAS_DATA0 0xF8
#define RENESAS_DATA1 0xFC
#define RENESAS_FW_VERSION_FIELD GENMASK(23, 7)
#define RENESAS_FW_VERSION_OFFSET 8
#define RENESAS_FW_STATUS_DOWNLOAD_ENABLE BIT(0)
#define RENESAS_FW_STATUS_LOCK BIT(1)
#define RENESAS_FW_STATUS_RESULT GENMASK(6, 4)
#define RENESAS_FW_STATUS_INVALID 0
#define RENESAS_FW_STATUS_SUCCESS BIT(4)
#define RENESAS_FW_STATUS_ERROR BIT(5)
#define RENESAS_FW_STATUS_SET_DATA0 BIT(8)
#define RENESAS_FW_STATUS_SET_DATA1 BIT(9)
#define RENESAS_ROM_STATUS_ACCESS BIT(0)
#define RENESAS_ROM_STATUS_ERASE BIT(1)
#define RENESAS_ROM_STATUS_RELOAD BIT(2)
#define RENESAS_ROM_STATUS_RESULT GENMASK(6, 4)
#define RENESAS_ROM_STATUS_NO_RESULT 0
#define RENESAS_ROM_STATUS_SUCCESS BIT(4)
#define RENESAS_ROM_STATUS_ERROR BIT(5)
#define RENESAS_ROM_STATUS_SET_DATA0 BIT(8)
#define RENESAS_ROM_STATUS_SET_DATA1 BIT(9)
#define RENESAS_ROM_STATUS_ROM_EXISTS BIT(15)
#define RENESAS_ROM_ERASE_MAGIC 0x5A65726F
#define RENESAS_ROM_WRITE_MAGIC 0x53524F4D
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
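/*
 * Hedged note (not in the original source): with RENESAS_RETRY polls of
 * RENESAS_DELAY microseconds each, the busy-wait loops below give up
 * after roughly 10000 * 10 us = 100 ms per step.
 */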
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
size_t i;
int err;
u8 fw_status;
bool data0_or_data1;
u32 status_reg;
if (rom)
status_reg = RENESAS_ROM_STATUS_MSB;
else
status_reg = RENESAS_FW_STATUS_MSB;
/*
* The hardware alternates between two 32-bit pages because each
* row of the firmware is 8 bytes:
*
* even steps use DATA0, odd steps use DATA1.
*/
data0_or_data1 = (step & 1) == 1;
/* step+1. Read "Set DATAX" and confirm it is cleared. */
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(dev, status_reg, &fw_status);
if (err) {
dev_err(&dev->dev, "Read Status failed: %d\n",
pcibios_err_to_errno(err));
return pcibios_err_to_errno(err);
}
if (!(fw_status & BIT(data0_or_data1)))
break;
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY) {
dev_err(&dev->dev, "Timeout for Set DATAX step: %zd\n", step);
return -ETIMEDOUT;
}
/*
* step+2. Write FW data to "DATAX".
* "LSB is left" => force little endian
*/
err = pci_write_config_dword(dev, data0_or_data1 ?
RENESAS_DATA1 : RENESAS_DATA0,
(__force u32)cpu_to_le32(fw[step]));
if (err) {
dev_err(&dev->dev, "Write to DATAX failed: %d\n",
pcibios_err_to_errno(err));
return pcibios_err_to_errno(err);
}
udelay(100);
/* step+3. Set "Set DATAX". */
err = pci_write_config_byte(dev, status_reg, BIT(data0_or_data1));
if (err) {
dev_err(&dev->dev, "Write config for DATAX failed: %d\n",
pcibios_err_to_errno(err));
return pcibios_err_to_errno(err);
}
return 0;
}
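/*
 * Illustrative sketch, not part of the original driver: the DATA0/DATA1
 * selection above is plain parity arithmetic. A helper like the one
 * below (hypothetical, unused here) makes the mapping explicit.
 */
static inline u32 renesas_fw_data_reg(size_t step)
{
	/* even steps write DATA0 (0xF8), odd steps DATA1 (0xFC) */
	return (step & 1) ? RENESAS_DATA1 : RENESAS_DATA0;
}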
static int renesas_fw_verify(const void *fw_data,
size_t length)
{
u16 fw_version_pointer;
/*
* The Firmware's Data Format is described in
* "6.3 Data Format" R19UH0078EJ0500 Rev.5.00 page 124
*/
/*
* The boot ROM chips used with this controller family hold up to 64k,
* so assume that is the largest the firmware image can be.
*/
if (length < 0x1000 || length >= 0x10000) {
pr_err("firmware is size %zd is not (4k - 64k).",
length);
return -EINVAL;
}
/* The first 2 bytes are a fixed value (55aa). "LSB on Left" */
if (get_unaligned_le16(fw_data) != 0x55aa) {
pr_err("no valid firmware header found.");
return -EINVAL;
}
/* verify the firmware version position and print it. */
fw_version_pointer = get_unaligned_le16(fw_data + 4);
if (fw_version_pointer + 2 >= length) {
pr_err("fw ver pointer is outside of the firmware image");
return -EINVAL;
}
return 0;
}
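/*
 * Hedged example (hypothetical helper, not in the original driver):
 * once renesas_fw_verify() succeeds, the 16-bit firmware version can
 * be read through the pointer stored at image offset 4, per the
 * "6.3 Data Format" section cited above.
 */
static inline u16 renesas_fw_image_version(const void *fw_data)
{
	u16 ptr = get_unaligned_le16(fw_data + 4);

	return get_unaligned_le16(fw_data + ptr);
}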
static bool renesas_check_rom(struct pci_dev *pdev)
{
u16 rom_status;
int retval;
/* Check if external ROM exists */
retval = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_status);
if (retval)
return false;
rom_status &= RENESAS_ROM_STATUS_ROM_EXISTS;
if (rom_status) {
dev_dbg(&pdev->dev, "External ROM exists\n");
return true; /* External ROM exists */
}
return false;
}
static int renesas_check_rom_state(struct pci_dev *pdev)
{
u16 rom_state;
u32 version;
int err;
/* check FW version */
err = pci_read_config_dword(pdev, RENESAS_FW_VERSION, &version);
if (err)
return pcibios_err_to_errno(err);
version &= RENESAS_FW_VERSION_FIELD;
version = version >> RENESAS_FW_VERSION_OFFSET;
dev_dbg(&pdev->dev, "Found ROM version: %x\n", version);
/*
* Test if the ROM is present and loaded; if so, we can skip everything
*/
err = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_state);
if (err)
return pcibios_err_to_errno(err);
if (rom_state & RENESAS_ROM_STATUS_ROM_EXISTS) {
/* ROM exists */
dev_dbg(&pdev->dev, "ROM exists\n");
/* Check the "Result Code" Bits (6:4) and act accordingly */
switch (rom_state & RENESAS_ROM_STATUS_RESULT) {
case RENESAS_ROM_STATUS_SUCCESS:
return 0;
case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
return -ENOENT;
case RENESAS_ROM_STATUS_ERROR: /* Error State */
default: /* All other states are marked as "Reserved states" */
dev_err(&pdev->dev, "Invalid ROM..");
break;
}
}
return -EIO;
}
static int renesas_fw_check_running(struct pci_dev *pdev)
{
u8 fw_state;
int err;
/*
* Test if the device actually needs the firmware; most BIOSes
* will already have initialized the device for us, in which case
* the firmware is running.
*/
err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_state);
if (err)
return pcibios_err_to_errno(err);
/*
* Check if "FW Download Lock" is locked. If it is and the FW is
* ready we can simply continue. If the FW is not ready, we have
* to give up.
*/
if (fw_state & RENESAS_FW_STATUS_LOCK) {
dev_dbg(&pdev->dev, "FW Download Lock is engaged.");
if (fw_state & RENESAS_FW_STATUS_SUCCESS)
return 0;
dev_err(&pdev->dev,
"FW Download Lock is set and FW is not ready. Giving Up.");
return -EIO;
}
/*
* Check if "FW Download Enable" is set. If someone (us?) tampered
* with it and it can't be reset, we have to give up too... and
* ask for forgiveness and a reboot.
*/
if (fw_state & RENESAS_FW_STATUS_DOWNLOAD_ENABLE) {
dev_err(&pdev->dev,
"FW Download Enable is stale. Giving Up (poweroff/reboot needed).");
return -EIO;
}
/* Otherwise, Check the "Result Code" Bits (6:4) and act accordingly */
switch (fw_state & RENESAS_FW_STATUS_RESULT) {
case 0: /* No result yet */
dev_dbg(&pdev->dev, "FW is not ready/loaded yet.");
/* tell the caller that this device needs the firmware. */
return 1;
case RENESAS_FW_STATUS_SUCCESS: /* Success, device should be working. */
dev_dbg(&pdev->dev, "FW is ready.");
return 0;
case RENESAS_FW_STATUS_ERROR: /* Error State */
dev_err(&pdev->dev,
"hardware is in an error state. Giving up (poweroff/reboot needed).");
return -ENODEV;
default: /* All other states are marked as "Reserved states" */
dev_err(&pdev->dev,
"hardware is in an invalid state %lx. Giving up (poweroff/reboot needed).",
(fw_state & RENESAS_FW_STATUS_RESULT) >> 4);
return -EINVAL;
}
}
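/*
 * Usage sketch (illustrative): renesas_fw_check_running() is tri-state.
 * Callers below treat 0 as "firmware already running", 1 as "device
 * wants a firmware download", and any negative value as a fatal errno.
 */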
static int renesas_fw_download(struct pci_dev *pdev,
const struct firmware *fw)
{
const u32 *fw_data = (const u32 *)fw->data;
size_t i;
int err;
u8 fw_status;
/*
* For more information and the big picture: please look at the
* "Firmware Download Sequence" in "7.1 FW Download Interface"
* of R19UH0078EJ0500 Rev.5.00 page 131
*/
/*
* 0. Set "FW Download Enable" bit in the
* "FW Download Control & Status Register" at 0xF4
*/
err = pci_write_config_byte(pdev, RENESAS_FW_STATUS,
RENESAS_FW_STATUS_DOWNLOAD_ENABLE);
if (err)
return pcibios_err_to_errno(err);
/* 1 - 10 follow one step after the other. */
for (i = 0; i < fw->size / 4; i++) {
err = renesas_fw_download_image(pdev, fw_data, i, false);
if (err) {
dev_err(&pdev->dev,
"Firmware Download Step %zd failed at position %zd bytes with (%d).",
i, i * 4, err);
return err;
}
}
/*
* This sequence continues until the last data is written to
* "DATA0" or "DATA1". Naturally, we wait until "SET DATA0/1"
* is cleared by the hardware beforehand.
*/
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(pdev, RENESAS_FW_STATUS_MSB,
&fw_status);
if (err)
return pcibios_err_to_errno(err);
if (!(fw_status & (BIT(0) | BIT(1))))
break;
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY)
dev_warn(&pdev->dev, "Final Firmware Download step timed out.");
/*
* 11. After finishing writing the last data of FW, the
* System Software must clear "FW Download Enable"
*/
err = pci_write_config_byte(pdev, RENESAS_FW_STATUS, 0);
if (err)
return pcibios_err_to_errno(err);
/* 12. Read "Result Code" and confirm it is good. */
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_status);
if (err)
return pcibios_err_to_errno(err);
if (fw_status & RENESAS_FW_STATUS_SUCCESS)
break;
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY) {
/* Timed out / Error - let's see if we can fix this */
err = renesas_fw_check_running(pdev);
switch (err) {
case 0: /*
* We shouldn't end up here; maybe the load just
* took a little longer, but all should be well.
*/
break;
case 1: /* No result yet */
dev_err(&pdev->dev, "FW Load timedout");
return -ETIMEDOUT;
default:
return err;
}
}
return 0;
}
static void renesas_rom_erase(struct pci_dev *pdev)
{
int retval, i;
u8 status;
dev_dbg(&pdev->dev, "Performing ROM Erase...\n");
retval = pci_write_config_dword(pdev, RENESAS_DATA0,
RENESAS_ROM_ERASE_MAGIC);
if (retval) {
dev_err(&pdev->dev, "ROM erase, magic word write failed: %d\n",
pcibios_err_to_errno(retval));
return;
}
retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
if (retval) {
dev_err(&pdev->dev, "ROM status read failed: %d\n",
pcibios_err_to_errno(retval));
return;
}
status |= RENESAS_ROM_STATUS_ERASE;
retval = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, status);
if (retval) {
dev_err(&pdev->dev, "ROM erase set word write failed\n");
return;
}
/* sleep a bit while ROM is erased */
msleep(20);
for (i = 0; i < RENESAS_RETRY; i++) {
retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS,
&status);
status &= RENESAS_ROM_STATUS_ERASE;
if (!status)
break;
mdelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY)
dev_dbg(&pdev->dev, "Chip erase timedout: %x\n", status);
dev_dbg(&pdev->dev, "ROM Erase... Done success\n");
}
static bool renesas_setup_rom(struct pci_dev *pdev, const struct firmware *fw)
{
const u32 *fw_data = (const u32 *)fw->data;
int err, i;
u8 status;
/* 2. Write magic word to Data0 */
err = pci_write_config_dword(pdev, RENESAS_DATA0,
RENESAS_ROM_WRITE_MAGIC);
if (err)
return false;
/* 3. Set External ROM access */
err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
RENESAS_ROM_STATUS_ACCESS);
if (err)
goto remove_bypass;
/* 4. Check the result */
err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
if (err)
goto remove_bypass;
status &= GENMASK(6, 4);
if (status) {
dev_err(&pdev->dev,
"setting external rom failed: %x\n", status);
goto remove_bypass;
}
/* 5 to 16 Write FW to DATA0/1 while checking SetData0/1 */
for (i = 0; i < fw->size / 4; i++) {
err = renesas_fw_download_image(pdev, fw_data, i, true);
if (err) {
dev_err(&pdev->dev,
"ROM Download Step %d failed at position %d bytes with (%d)\n",
i, i * 4, err);
goto remove_bypass;
}
}
/*
* wait till DATA0/1 is cleared
*/
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS_MSB,
&status);
if (err)
goto remove_bypass;
if (!(status & (BIT(0) | BIT(1))))
break;
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY) {
dev_err(&pdev->dev, "Final Firmware ROM Download step timed out\n");
goto remove_bypass;
}
/* 17. Remove bypass */
err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
if (err)
return false;
udelay(10);
/* 18. check result */
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
if (err) {
dev_err(&pdev->dev, "Read ROM status failed:%d\n",
pcibios_err_to_errno(err));
return false;
}
status &= RENESAS_ROM_STATUS_RESULT;
if (status == RENESAS_ROM_STATUS_SUCCESS) {
dev_dbg(&pdev->dev, "Download ROM success\n");
break;
}
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY) { /* Timed out */
dev_err(&pdev->dev,
"Download to external ROM TO: %x\n", status);
return false;
}
dev_dbg(&pdev->dev, "Download to external ROM succeeded\n");
/* Last step set Reload */
err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
RENESAS_ROM_STATUS_RELOAD);
if (err) {
dev_err(&pdev->dev, "Set ROM execute failed: %d\n",
pcibios_err_to_errno(err));
return false;
}
/*
* wait till Reload is cleared
*/
for (i = 0; i < RENESAS_RETRY; i++) {
err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
if (err)
return false;
if (!(status & RENESAS_ROM_STATUS_RELOAD))
break;
udelay(RENESAS_DELAY);
}
if (i == RENESAS_RETRY) {
dev_err(&pdev->dev, "ROM Exec timed out: %x\n", status);
return false;
}
return true;
remove_bypass:
pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
return false;
}
static int renesas_load_fw(struct pci_dev *pdev, const struct firmware *fw)
{
int err = 0;
bool rom;
/* Check if the device has external ROM */
rom = renesas_check_rom(pdev);
if (rom) {
/* perform chip erase first */
renesas_rom_erase(pdev);
/* let's try loading the fw into ROM first */
rom = renesas_setup_rom(pdev, fw);
if (!rom) {
dev_dbg(&pdev->dev,
"ROM load failed, falling back on FW load\n");
} else {
dev_dbg(&pdev->dev,
"ROM load success\n");
goto exit;
}
}
err = renesas_fw_download(pdev, fw);
exit:
if (err)
dev_err(&pdev->dev, "firmware failed to download (%d).", err);
return err;
}
int renesas_xhci_check_request_fw(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct xhci_driver_data *driver_data =
(struct xhci_driver_data *)id->driver_data;
const char *fw_name = driver_data->firmware;
const struct firmware *fw;
bool has_rom;
int err;
/* Check if the device has a ROM with firmware loaded; if so, skip everything */
has_rom = renesas_check_rom(pdev);
if (has_rom) {
err = renesas_check_rom_state(pdev);
if (!err)
return 0;
else if (err != -ENOENT)
has_rom = false;
}
err = renesas_fw_check_running(pdev);
/* Continue ahead, if the firmware is already running. */
if (!err)
return 0;
/* no firmware interface available */
if (err != 1)
return has_rom ? 0 : err;
pci_dev_get(pdev);
err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
pci_dev_put(pdev);
if (err) {
if (has_rom) {
dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
fw_name);
return 0;
}
dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
fw_name, err);
return err;
}
err = renesas_fw_verify(fw->data, fw->size);
if (err)
goto exit;
err = renesas_load_fw(pdev, fw);
exit:
release_firmware(fw);
return err;
}
EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/xhci-pci-renesas.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
* Bus Glue for PPC On-Chip EHCI driver on the of_platform bus
* Tested on AMCC PPC 440EPx
*
* Valentine Barshak <[email protected]>
*
* Based on "ehci-ppc-soc.c" by Stefan Roese <[email protected]>
* and "ohci-ppc-of.c" by Sylvain Munaut <[email protected]>
*
* This file is licenced under the GPL.
*/
#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
static const struct hc_driver ehci_ppc_of_hc_driver = {
.description = hcd_name,
.product_desc = "OF EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/*
* 440EPx Errata USBH_3
* Fix: Enable Break Memory Transfer (BMT) in INSNREG3
*/
#define PPC440EPX_EHCI0_INSREG_BMT (0x1 << 0)
static int
ppc44x_enable_bmt(struct device_node *dn)
{
u32 __iomem *insreg_virt;
insreg_virt = of_iomap(dn, 1);
if (!insreg_virt)
return -EINVAL;
out_be32(insreg_virt + 3, PPC440EPX_EHCI0_INSREG_BMT);
iounmap(insreg_virt);
return 0;
}
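/*
 * Hedged note: insreg_virt is a u32 pointer, so the "+ 3" above
 * addresses byte offset 0x0C of the mapped window, i.e. INSNREG3,
 * where the BMT enable bit lives per the USBH_3 erratum.
 */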
static int ehci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ehci_hcd *ehci = NULL;
struct resource res;
int irq;
int rv;
struct device_node *np;
if (usb_disabled())
return -ENODEV;
dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
rv = of_address_to_resource(dn, 0, &res);
if (rv)
return rv;
hcd = usb_create_hcd(&ehci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
__FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = devm_ioremap_resource(&op->dev, &res);
if (IS_ERR(hcd->regs)) {
rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
ehci = hcd_to_ehci(hcd);
np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
if (np != NULL) {
/* claim we are really affected by the usb23 erratum */
if (!of_address_to_resource(np, 0, &res))
ehci->ohci_hcctrl_reg =
devm_ioremap(&op->dev,
res.start + OHCI_HCCTRL_OFFSET,
OHCI_HCCTRL_LEN);
else
pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
} else {
ehci->has_amcc_usb23 = 1;
}
of_node_put(np);
}
if (of_property_read_bool(dn, "big-endian")) {
ehci->big_endian_mmio = 1;
ehci->big_endian_desc = 1;
}
if (of_property_read_bool(dn, "big-endian-regs"))
ehci->big_endian_mmio = 1;
if (of_property_read_bool(dn, "big-endian-desc"))
ehci->big_endian_desc = 1;
ehci->caps = hcd->regs;
if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) {
rv = ppc44x_enable_bmt(dn);
ehci_dbg(ehci, "Break Memory Transfer (BMT) is %senabled!\n",
rv ? "NOT ": "");
}
rv = usb_add_hcd(hcd, irq, 0);
if (rv)
goto err_ioremap;
device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
usb_put_hcd(hcd);
return rv;
}
static void ehci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct device_node *np;
struct resource res;
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
usb_remove_hcd(hcd);
irq_dispose_mapping(hcd->irq);
/* Use request_mem_region to test if the OHCI driver is loaded; if it
* is, ensure the OHCI core is operational.
*/
if (ehci->has_amcc_usb23) {
np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
if (np != NULL) {
if (!of_address_to_resource(np, 0, &res))
if (!request_mem_region(res.start,
0x4, hcd_name))
set_ohci_hcfs(ehci, 1);
else
release_mem_region(res.start, 0x4);
else
pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
}
usb_put_hcd(hcd);
}
static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
},
{},
};
MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
.remove_new = ehci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
.of_match_table = ehci_hcd_ppc_of_match,
},
};
| linux-master | drivers/usb/host/ehci-ppc-of.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains code to reset and initialize USB host controllers.
* Some of it includes work-arounds for PCI hardware and BIOS quirks.
* It may need to run early during booting -- before USB would normally
* initialize -- to ensure that Linux doesn't use any legacy modes.
*
* Copyright (c) 1999 Martin Mares <[email protected]>
* (and others)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
#define UHCI_USBLEGSUP 0xc0 /* legacy support */
#define UHCI_USBCMD 0 /* command register */
#define UHCI_USBINTR 4 /* interrupt register */
#define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
#define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */
#define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */
#define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */
#define OHCI_CONTROL 0x04
#define OHCI_CMDSTATUS 0x08
#define OHCI_INTRSTATUS 0x0c
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
#define OHCI_FMINTERVAL 0x34
#define OHCI_HCFS (3 << 6) /* hc functional state */
#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
#define OHCI_INTR_OC (1 << 30) /* ownership change */
#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */
#define EHCI_USBCMD 0 /* command register */
#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
#define EHCI_USBSTS 4 /* status register */
#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */
#define EHCI_USBINTR 8 /* interrupt register */
#define EHCI_CONFIGFLAG 0x40 /* configured flag register */
#define EHCI_USBLEGSUP 0 /* legacy support register */
#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
/* AMD quirk use */
#define AB_REG_BAR_LOW 0xe0
#define AB_REG_BAR_HIGH 0xe1
#define AB_REG_BAR_SB700 0xf0
#define AB_INDX(addr) ((addr) + 0x00)
#define AB_DATA(addr) ((addr) + 0x04)
#define AX_INDXC 0x30
#define AX_DATAC 0x34
#define PT_ADDR_INDX 0xE8
#define PT_READ_INDX 0xE4
#define PT_SIG_1_ADDR 0xA520
#define PT_SIG_2_ADDR 0xA521
#define PT_SIG_3_ADDR 0xA522
#define PT_SIG_4_ADDR 0xA523
#define PT_SIG_1_DATA 0x78
#define PT_SIG_2_DATA 0x56
#define PT_SIG_3_DATA 0x34
#define PT_SIG_4_DATA 0x12
#define PT4_P1_REG 0xB521
#define PT4_P2_REG 0xB522
#define PT2_P1_REG 0xD520
#define PT2_P2_REG 0xD521
#define PT1_P1_REG 0xD522
#define PT1_P2_REG 0xD523
#define NB_PCIE_INDX_ADDR 0xe0
#define NB_PCIE_INDX_DATA 0xe4
#define PCIE_P_CNTL 0x10040
#define BIF_NB 0x10002
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
#define USB_INTEL_XUSB2PR 0xD0
#define USB_INTEL_USB2PRM 0xD4
#define USB_INTEL_USB3_PSSEN 0xD8
#define USB_INTEL_USB3PRM 0xDC
/* ASMEDIA quirk use */
#define ASMT_DATA_WRITE0_REG 0xF8
#define ASMT_DATA_WRITE1_REG 0xFC
#define ASMT_CONTROL_REG 0xE0
#define ASMT_CONTROL_WRITE_BIT 0x02
#define ASMT_WRITEREG_CMD 0x10423
#define ASMT_FLOWCTL_ADDR 0xFA30
#define ASMT_FLOWCTL_DATA 0xBA
#define ASMT_PSEUDO_DATA 0
/*
* amd_chipset_gen values represent different AMD chipset generations
*/
enum amd_chipset_gen {
NOT_AMD_CHIPSET = 0,
AMD_CHIPSET_SB600,
AMD_CHIPSET_SB700,
AMD_CHIPSET_SB800,
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
struct amd_chipset_type {
enum amd_chipset_gen gen;
u8 rev;
};
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
int nb_type;
struct amd_chipset_type sb_type;
int isoc_reqs;
int probe_count;
bool need_pll_quirk;
} amd_chipset;
static DEFINE_SPINLOCK(amd_lock);
/*
* amd_chipset_sb_type_init - initialize amd chipset southbridge type
*
* AMD FCH/SB generation and revision is identified by SMBus controller
* vendor, device and revision IDs.
*
* Returns: 1 if it is an AMD chipset, 0 otherwise.
*/
static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
{
u8 rev = 0;
pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
if (pinfo->smbus_dev) {
rev = pinfo->smbus_dev->revision;
if (rev >= 0x10 && rev <= 0x1f)
pinfo->sb_type.gen = AMD_CHIPSET_SB600;
else if (rev >= 0x30 && rev <= 0x3f)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
if (pinfo->smbus_dev) {
rev = pinfo->smbus_dev->revision;
if (rev >= 0x11 && rev <= 0x14)
pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
else if (rev >= 0x15 && rev <= 0x18)
pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
else if (rev >= 0x39 && rev <= 0x3a)
pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x145c, NULL);
if (pinfo->smbus_dev) {
rev = pinfo->smbus_dev->revision;
pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->sb_type.gen = NOT_AMD_CHIPSET;
return 0;
}
}
}
pinfo->sb_type.rev = rev;
return 1;
}
void sb800_prefetch(struct device *dev, int on)
{
u16 misc;
struct pci_dev *pdev = to_pci_dev(dev);
pci_read_config_word(pdev, 0x50, &misc);
if (on == 0)
pci_write_config_word(pdev, 0x50, misc & 0xfcff);
else
pci_write_config_word(pdev, 0x50, misc | 0x0300);
}
EXPORT_SYMBOL_GPL(sb800_prefetch);
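/*
 * Hedged reading of the masks above: 0x0300 sets bits 8-9 of config
 * offset 0x50 (prefetch on), 0xfcff clears the same two bits
 * (prefetch off); all other bits of the register are preserved.
 */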
static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info = { };
spin_lock_irqsave(&amd_lock, flags);
/* probe only once */
if (amd_chipset.probe_count > 0) {
amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
goto commit;
}
switch (info.sb_type.gen) {
case AMD_CHIPSET_SB700:
info.need_pll_quirk = info.sb_type.rev <= 0x3B;
break;
case AMD_CHIPSET_SB800:
case AMD_CHIPSET_HUDSON2:
case AMD_CHIPSET_BOLTON:
info.need_pll_quirk = true;
break;
default:
info.need_pll_quirk = false;
break;
}
if (!info.need_pll_quirk) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
goto commit;
}
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
if (info.nb_dev) {
info.nb_type = 1;
} else {
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
if (info.nb_dev) {
info.nb_type = 2;
} else {
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x9600, NULL);
if (info.nb_dev)
info.nb_type = 3;
}
}
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
commit:
spin_lock_irqsave(&amd_lock, flags);
if (amd_chipset.probe_count > 0) {
/* race - someone else was faster - drop devices */
/* Mark that we were here */
amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
pci_dev_put(info.nb_dev);
pci_dev_put(info.smbus_dev);
} else {
/* no race - commit the result */
info.probe_count++;
amd_chipset = info;
spin_unlock_irqrestore(&amd_lock, flags);
}
}
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
bool usb_amd_hang_symptom_quirk(void)
{
u8 rev;
usb_amd_find_chipset_info();
rev = amd_chipset.sb_type.rev;
/* SB600 and old version of SB700 have hang symptom bug */
return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
rev >= 0x3a && rev <= 0x3b);
}
EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
bool usb_amd_prefetch_quirk(void)
{
usb_amd_find_chipset_info();
/* SB800 needs pre-fetch fix */
return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
bool usb_amd_quirk_pll_check(void)
{
usb_amd_find_chipset_info();
return amd_chipset.need_pll_quirk;
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
*
* This USB quirk prevents the link going into that lower power state
* during isochronous transfers.
*
* Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers
* of some AMD platforms may stutter or drop out occasionally.
*/
static void usb_amd_quirk_pll(int disable)
{
u32 addr, addr_low, addr_high, val;
u32 bit = disable ? 0 : 1;
unsigned long flags;
spin_lock_irqsave(&amd_lock, flags);
if (disable) {
amd_chipset.isoc_reqs++;
if (amd_chipset.isoc_reqs > 1) {
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
} else {
amd_chipset.isoc_reqs--;
if (amd_chipset.isoc_reqs > 0) {
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
}
if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
outb_p(AB_REG_BAR_LOW, 0xcd6);
addr_low = inb_p(0xcd7);
outb_p(AB_REG_BAR_HIGH, 0xcd6);
addr_high = inb_p(0xcd7);
addr = addr_high << 8 | addr_low;
outl_p(0x30, AB_INDX(addr));
outl_p(0x40, AB_DATA(addr));
outl_p(0x34, AB_INDX(addr));
val = inl_p(AB_DATA(addr));
} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
amd_chipset.sb_type.rev <= 0x3b) {
pci_read_config_dword(amd_chipset.smbus_dev,
AB_REG_BAR_SB700, &addr);
outl(AX_INDXC, AB_INDX(addr));
outl(0x40, AB_DATA(addr));
outl(AX_DATAC, AB_INDX(addr));
val = inl(AB_DATA(addr));
} else {
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
if (disable) {
val &= ~0x08;
val |= (1 << 4) | (1 << 9);
} else {
val |= 0x08;
val &= ~((1 << 4) | (1 << 9));
}
outl_p(val, AB_DATA(addr));
if (!amd_chipset.nb_dev) {
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
addr = PCIE_P_CNTL;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_ADDR, addr);
pci_read_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, &val);
val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
val |= bit | (bit << 3) | (bit << 12);
val |= ((!bit) << 4) | ((!bit) << 9);
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, val);
addr = BIF_NB;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_ADDR, addr);
pci_read_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, &val);
val &= ~(1 << 8);
val |= bit << 8;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, val);
} else if (amd_chipset.nb_type == 2) {
addr = NB_PIF0_PWRDOWN_0;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_ADDR, addr);
pci_read_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, &val);
if (disable)
val &= ~(0x3f << 7);
else
val |= 0x3f << 7;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, val);
addr = NB_PIF0_PWRDOWN_1;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_ADDR, addr);
pci_read_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, &val);
if (disable)
val &= ~(0x3f << 7);
else
val |= 0x3f << 7;
pci_write_config_dword(amd_chipset.nb_dev,
NB_PCIE_INDX_DATA, val);
}
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
void usb_amd_quirk_pll_disable(void)
{
usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
static int usb_asmedia_wait_write(struct pci_dev *pdev)
{
unsigned long retry_count;
unsigned char value;
for (retry_count = 1000; retry_count > 0; --retry_count) {
pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
if (value == 0xff) {
dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
return -EIO;
}
if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
return 0;
udelay(50);
}
dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
return -ETIMEDOUT;
}
void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
{
if (usb_asmedia_wait_write(pdev) != 0)
return;
/* send command and address to device */
pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
if (usb_asmedia_wait_write(pdev) != 0)
return;
/* send data to device */
pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
}
EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
void usb_amd_quirk_pll_enable(void)
{
usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
void usb_amd_dev_put(void)
{
struct pci_dev *nb, *smbus;
unsigned long flags;
spin_lock_irqsave(&amd_lock, flags);
amd_chipset.probe_count--;
if (amd_chipset.probe_count > 0) {
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
/* save them to pci_dev_put outside of spinlock */
nb = amd_chipset.nb_dev;
smbus = amd_chipset.smbus_dev;
amd_chipset.nb_dev = NULL;
amd_chipset.smbus_dev = NULL;
amd_chipset.nb_type = 0;
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
amd_chipset.need_pll_quirk = false;
spin_unlock_irqrestore(&amd_lock, flags);
pci_dev_put(nb);
pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);
/*
* Check if port is disabled in BIOS on AMD Promontory host.
* BIOS-disabled ports may wake on connect/disconnect and need a
* driver workaround to keep them disabled.
* Returns true if port is marked disabled.
*/
bool usb_amd_pt_check_port(struct device *device, int port)
{
unsigned char value, port_shift;
struct pci_dev *pdev;
u16 reg;
pdev = to_pci_dev(device);
pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
pci_read_config_byte(pdev, PT_READ_INDX, &value);
if (value != PT_SIG_1_DATA)
return false;
pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
pci_read_config_byte(pdev, PT_READ_INDX, &value);
if (value != PT_SIG_2_DATA)
return false;
pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
pci_read_config_byte(pdev, PT_READ_INDX, &value);
if (value != PT_SIG_3_DATA)
return false;
pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
pci_read_config_byte(pdev, PT_READ_INDX, &value);
if (value != PT_SIG_4_DATA)
return false;
/* Check the disabled-port setting; if the bit is set, the port is enabled */
switch (pdev->device) {
case 0x43b9:
case 0x43ba:
/*
* device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
* PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
* PT4_P2_REG bits[6..0] represents ports 13 to 7
*/
if (port > 6) {
reg = PT4_P2_REG;
port_shift = port - 7;
} else {
reg = PT4_P1_REG;
port_shift = port + 1;
}
break;
case 0x43bb:
/*
* device is AMD_PROMONTORYA_2(0x43bb)
* PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
* PT2_P2_REG bits[5..0] represents ports 9 to 3
*/
if (port > 2) {
reg = PT2_P2_REG;
port_shift = port - 3;
} else {
reg = PT2_P1_REG;
port_shift = port + 5;
}
break;
case 0x43bc:
/*
* device is AMD_PROMONTORYA_1(0x43bc)
* PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
* PT1_P2_REG[5..0] represents ports 9 to 4
*/
if (port > 3) {
reg = PT1_P2_REG;
port_shift = port - 4;
} else {
reg = PT1_P1_REG;
port_shift = port + 4;
}
break;
default:
return false;
}
pci_write_config_word(pdev, PT_ADDR_INDX, reg);
pci_read_config_byte(pdev, PT_READ_INDX, &value);
return !(value & BIT(port_shift));
}
EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
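/*
 * Worked example (illustrative): on an AMD_PROMONTORYA_4 (0x43b9),
 * usb_amd_pt_check_port(dev, 9) selects PT4_P2_REG and tests bit
 * 9 - 7 = 2; a clear bit there means the BIOS left the port disabled.
 */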
/*
* Make sure the controller is completely inactive, unable to
* generate interrupts or do DMA.
*/
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
/* Turn off PIRQ enable and SMI enable. (This also turns off the
* BIOS's USB Legacy Support.) Turn off all the R/WC bits too.
*/
pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
/* Reset the HC - this will force us to get a
* new notification of any already connected
* ports due to the virtual disconnect that it
* implies.
*/
outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
mb();
udelay(5);
if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
/* Just to be safe, disable interrupt requests and
* make sure the controller is stopped.
*/
outw(0, base + UHCI_USBINTR);
outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);
/*
* Initialize a controller that was newly discovered or has just been
* resumed. In either case we can't be sure of its previous state.
*
* Returns: 1 if the controller was reset, 0 otherwise.
*/
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
u16 legsup;
unsigned int cmd, intr;
/*
* When restarting a suspended controller, we expect all the
* settings to be the same as we left them:
*
* PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
* Controller is stopped and configured with EGSM set;
* No interrupts enabled except possibly Resume Detect.
*
* If any of these conditions are violated we do a complete reset.
*/
pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
__func__, legsup);
goto reset_needed;
}
cmd = inw(base + UHCI_USBCMD);
if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
!(cmd & UHCI_USBCMD_EGSM)) {
dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
__func__, cmd);
goto reset_needed;
}
intr = inw(base + UHCI_USBINTR);
if (intr & (~UHCI_USBINTR_RESUME)) {
dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
__func__, intr);
goto reset_needed;
}
return 0;
reset_needed:
dev_dbg(&pdev->dev, "Performing full reset\n");
uhci_reset_hc(pdev, base);
return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
u16 cmd;
return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}
#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
unsigned long base = 0;
int i;
if (!pio_enabled(pdev))
return;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
base = pci_resource_start(pdev, i);
break;
}
if (base)
uhci_check_and_reset_hc(pdev, base);
}
static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}
static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
u32 fminterval = 0;
bool no_fminterval = false;
int cnt;
if (!mmio_resource_enabled(pdev, 0))
return;
base = pci_ioremap_bar(pdev, 0);
if (base == NULL)
return;
/*
* ULi M5237 OHCI controller locks the whole system when accessing
* the OHCI_FMINTERVAL offset.
*/
if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
no_fminterval = true;
control = readl(base + OHCI_CONTROL);
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define OHCI_CTRL_MASK OHCI_CTRL_RWC
if (control & OHCI_CTRL_IR) {
int wait_time = 500; /* arbitrary; 5 seconds */
writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
writel(OHCI_OCR, base + OHCI_CMDSTATUS);
while (wait_time > 0 &&
readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
wait_time -= 10;
msleep(10);
}
if (wait_time <= 0)
dev_warn(&pdev->dev,
"OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
readl(base + OHCI_CONTROL));
}
#endif
/* disable interrupts */
writel((u32) ~0, base + OHCI_INTRDISABLE);
/* Go into the USB_RESET state, preserving RWC (and possibly IR) */
writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
readl(base + OHCI_CONTROL);
/* software reset of the controller, preserving HcFmInterval */
if (!no_fminterval)
fminterval = readl(base + OHCI_FMINTERVAL);
writel(OHCI_HCR, base + OHCI_CMDSTATUS);
/* reset requires max 10 us delay */
for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
break;
udelay(1);
}
if (!no_fminterval)
writel(fminterval, base + OHCI_FMINTERVAL);
/* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
}
static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
{
/* Pegatron Lucid (ExoPC) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
},
},
{
/* Pegatron Lucid (Ordissimo AIRIS) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
},
},
{
/* Pegatron Lucid (Ordissimo) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
},
},
{
/* HASEE E200 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
DMI_MATCH(DMI_BOARD_NAME, "E210"),
DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
},
},
{ }
};
static void ehci_bios_handoff(struct pci_dev *pdev,
void __iomem *op_reg_base,
u32 cap, u8 offset)
{
int try_handoff = 1, tried_handoff = 0;
/*
* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
* the handoff on its unused controller. Skip it.
*
* The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
*/
if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
pdev->device == 0x27cc)) {
if (dmi_check_system(ehci_dmi_nohandoff_table))
try_handoff = 0;
}
if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
#if 0
/* [email protected] reports that some systems need SMI forced on,
* but that seems dubious in general (the BIOS left it off intentionally)
* and is known to prevent some systems from booting. so we won't do this
* unless maybe we can determine when we're on a system that needs SMI forced.
*/
/* BIOS workaround (?): be sure the pre-Linux code
* receives the SMI
*/
pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
val | EHCI_USBLEGCTLSTS_SOOE);
#endif
/* some systems get upset if this semaphore is
* set for any other reason than forcing a BIOS
* handoff..
*/
pci_write_config_byte(pdev, offset + 3, 1);
}
/* if boot firmware now owns EHCI, spin till it hands it over. */
if (try_handoff) {
int msec = 1000;
while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
tried_handoff = 1;
msleep(10);
msec -= 10;
pci_read_config_dword(pdev, offset, &cap);
}
}
if (cap & EHCI_USBLEGSUP_BIOS) {
/* well, possibly buggy BIOS... try to shut it down,
* and hope nothing goes too wrong
*/
if (try_handoff)
dev_warn(&pdev->dev,
"EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
cap);
pci_write_config_byte(pdev, offset + 2, 0);
}
/* just in case, always disable EHCI SMIs */
pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
/* If the BIOS ever owned the controller then we can't expect
* any power sessions to remain intact.
*/
if (tried_handoff)
writel(0, op_reg_base + EHCI_CONFIGFLAG);
}
static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
void __iomem *base, *op_reg_base;
u32 hcc_params, cap, val;
u8 offset, cap_length;
int wait_time, count = 256/4;
if (!mmio_resource_enabled(pdev, 0))
return;
base = pci_ioremap_bar(pdev, 0);
if (base == NULL)
return;
cap_length = readb(base);
op_reg_base = base + cap_length;
/* EHCI 0.96 and later may have "extended capabilities";
* spec section 5.1 explains the BIOS handoff, e.g. for
* booting from a USB disk or using a USB keyboard.
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);
switch (cap & 0xff) {
case 1:
ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
case 0: /* Illegal reserved cap, set cap=0 so we exit */
cap = 0;
fallthrough;
default:
dev_warn(&pdev->dev,
"EHCI: unrecognized capability %02x\n",
cap & 0xff);
}
offset = (cap >> 8) & 0xff;
}
if (!count)
dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
/*
* halt EHCI & disable its interrupts in any case
*/
val = readl(op_reg_base + EHCI_USBSTS);
if ((val & EHCI_USBSTS_HALTED) == 0) {
val = readl(op_reg_base + EHCI_USBCMD);
val &= ~EHCI_USBCMD_RUN;
writel(val, op_reg_base + EHCI_USBCMD);
wait_time = 2000;
do {
writel(0x3f, op_reg_base + EHCI_USBSTS);
udelay(100);
wait_time -= 100;
val = readl(op_reg_base + EHCI_USBSTS);
if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
break;
}
} while (wait_time > 0);
}
writel(0, op_reg_base + EHCI_USBINTR);
writel(0x3f, op_reg_base + EHCI_USBSTS);
iounmap(base);
}
/*
* handshake - spin reading a register until handshake completes
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @wait_usec: timeout in microseconds
* @delay_usec: delay in microseconds to wait between polling
*
* Polls a register every delay_usec microseconds.
* Returns 0 when the mask bits have the value done.
* Returns -ETIMEDOUT if this condition is not true after
* wait_usec microseconds have passed.
*/
static int handshake(void __iomem *ptr, u32 mask, u32 done,
int wait_usec, int delay_usec)
{
u32 result;
return readl_poll_timeout_atomic(ptr, result,
((result & mask) == done),
delay_usec, wait_usec);
}
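/*
 * Usage sketch (mirrors the calls further down): wait up to one second,
 * polling every 10 us, for the BIOS ownership semaphore to clear:
 *
 *	rc = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
 *		       0, 1000000, 10);
 */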
/*
* Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
* share some number of ports. These ports can be switched between either
* controller. Not all of the ports under the EHCI host controller may be
* switchable.
*
* The ports should be switched over to xHCI before PCI probes for any device
* start. This avoids active devices under EHCI being disconnected during the
* port switchover, which could cause loss of data on USB storage devices, or
* failed boot when the root file system is on a USB mass storage device and is
* enumerated under EHCI first.
*
* We write into the xHC's PCI configuration space in some Intel-specific
* registers to switch the ports over. The USB 3.0 terminations and the USB
* 2.0 data wires are switched separately. We want to enable the SuperSpeed
* terminations before switching the USB 2.0 wires over, so that USB 3.0
* devices connect at SuperSpeed, rather than at USB 2.0 speeds.
*/
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
u32 ports_available;
bool ehci_found = false;
struct pci_dev *companion = NULL;
/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
* switching ports from EHCI to xHCI
*/
if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
xhci_pdev->subsystem_device == 0x90a8)
return;
/* make sure an intel EHCI controller exists */
for_each_pci_dev(companion) {
if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
companion->vendor == PCI_VENDOR_ID_INTEL) {
ehci_found = true;
break;
}
}
if (!ehci_found)
return;
/* Don't switch over the ports if the user hasn't compiled the xHCI
* driver. Otherwise they will see "dead" USB ports that don't power
* the devices.
*/
if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
dev_warn(&xhci_pdev->dev,
"CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
dev_warn(&xhci_pdev->dev,
"USB 3.0 devices will work at USB 2.0 speeds.\n");
usb_disable_xhci_ports(xhci_pdev);
return;
}
/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
* indicates the ports that the OS can change.
*/
pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
&ports_available);
dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
ports_available);
/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
* Register, to turn on SuperSpeed terminations for the
* switchable ports.
*/
pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
ports_available);
pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
&ports_available);
dev_dbg(&xhci_pdev->dev,
"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
ports_available);
/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
* indicates the USB 2.0 ports to be controlled by the xHCI host.
*/
pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
&ports_available);
dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n",
ports_available);
/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
* switch the USB 2.0 power and data lines over to the xHCI
* host.
*/
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
ports_available);
pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
&ports_available);
dev_dbg(&xhci_pdev->dev,
"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
{
pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
}
EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
/*
* PCI Quirks for xHCI.
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
* and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
void __iomem *base;
int ext_cap_offset;
void __iomem *op_reg_base;
u32 val;
int timeout;
int len = pci_resource_len(pdev, 0);
if (!mmio_resource_enabled(pdev, 0))
return;
base = ioremap(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
/*
* Find the Legacy Support Capability register -
* this is optional for xHCI host controllers.
*/
ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
if (!ext_cap_offset)
goto hc_init;
if ((ext_cap_offset + sizeof(val)) > len) {
/* We're reading garbage from the controller */
dev_warn(&pdev->dev, "xHCI controller failing to respond");
goto iounmap;
}
val = readl(base + ext_cap_offset);
/* Auto handoff never worked for these devices. Force it and continue */
if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
(pdev->vendor == PCI_VENDOR_ID_RENESAS
&& pdev->device == 0x0014)) {
val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
writel(val, base + ext_cap_offset);
}
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
/* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
dev_warn(&pdev->dev,
"xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
val);
writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
}
}
val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
/* Mask off (turn off) any enabled SMIs */
val &= XHCI_LEGACY_DISABLE_SMI;
/* Mask all SMI events bits, RW1C */
val |= XHCI_LEGACY_SMI_EVENTS;
/* Disable any BIOS SMIs and clear all SMI events */
writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
hc_init:
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
usb_enable_intel_xhci_ports(pdev);
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
/* Wait for the host controller to be ready before writing any
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
dev_warn(&pdev->dev,
"xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
val);
}
/* Send the halt and disable interrupts command */
val = readl(op_reg_base + XHCI_CMD_OFFSET);
val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
writel(val, op_reg_base + XHCI_CMD_OFFSET);
/* Wait for the HC to halt - poll every 125 usec (one microframe). */
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
XHCI_MAX_HALT_USEC, 125);
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
dev_warn(&pdev->dev,
"xHCI HW did not halt within %d usec status = 0x%x\n",
XHCI_MAX_HALT_USEC, val);
}
iounmap:
iounmap(base);
}
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
struct device_node *parent;
bool is_rpi;
/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
* This device does not need/support EHCI/OHCI handoff
*/
if (pdev->vendor == 0x184e) /* vendor Netlogic */
return;
/*
* Bypass the Raspberry Pi 4's xHCI controller; the handoff is
* taken care of by the board's co-processor.
*/
if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
parent = of_get_parent(pdev->bus->dev.of_node);
is_rpi = of_device_is_compatible(parent, "brcm,bcm2711-pcie");
of_node_put(parent);
if (is_rpi)
return;
}
if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
return;
if (pci_enable_device(pdev) < 0) {
dev_warn(&pdev->dev,
"Can't enable PCI device, BIOS handoff failed.\n");
return;
}
if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
quirk_usb_handoff_uhci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
quirk_usb_handoff_ohci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
quirk_usb_disable_ehci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
| linux-master | drivers/usb/host/pci-quirks.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI Root Hub ... the nonsharable stuff
*
* Registers don't need cpu_to_le32, that happens transparently
*/
/*-------------------------------------------------------------------------*/
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#ifdef CONFIG_PM
static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
{
return !udev->maxchild && udev->persist_enabled &&
udev->bus->root_hub->speed < USB_SPEED_HIGH;
}
/* After a power loss, ports that were owned by the companion must be
* reset so that the companion can still own them.
*/
static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
{
u32 __iomem *reg;
u32 status;
int port;
__le32 buf;
struct usb_hcd *hcd = ehci_to_hcd(ehci);
if (!ehci->owned_ports)
return;
/*
* USB 1.1 devices are mostly HIDs, which don't need to persist across
* suspends. If we ensure that none of our companion's devices have
* persist_enabled (by looking through all USB 1.1 buses in the system),
* we can skip this and avoid slowing resume down. Devices without
* persist will just get reenumerated shortly after resume anyway.
*/
if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
return;
/* Make sure the ports are powered */
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
if (!(status & PORT_POWER))
ehci_port_power(ehci, port, true);
}
}
/* Give the connections some time to appear */
msleep(20);
spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
/* Port already owned by companion? */
if (status & PORT_OWNER)
clear_bit(port, &ehci->owned_ports);
else if (test_bit(port, &ehci->companion_ports))
ehci_writel(ehci, status & ~PORT_PE, reg);
else {
spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_RESET, port + 1,
NULL, 0);
spin_lock_irq(&ehci->lock);
}
}
}
spin_unlock_irq(&ehci->lock);
if (!ehci->owned_ports)
return;
msleep(90); /* Wait for resets to complete */
spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, GetPortStatus,
0, port + 1,
(char *) &buf, sizeof(buf));
spin_lock_irq(&ehci->lock);
/* The companion should now own the port,
* but if something went wrong the port must not
* remain enabled.
*/
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
if (status & PORT_OWNER)
ehci_writel(ehci, status | PORT_CSC, reg);
else {
ehci_dbg(ehci, "failed handover port %d: %x\n",
port + 1, status);
ehci_writel(ehci, status & ~PORT_PE, reg);
}
}
}
ehci->owned_ports = 0;
spin_unlock_irq(&ehci->lock);
}
static int ehci_port_change(struct ehci_hcd *ehci)
{
int i = HCS_N_PORTS(ehci->hcs_params);
/* First check if the controller indicates a change event */
if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
return 1;
/*
* Not all controllers appear to update this while going from D3 to D0,
* so check the individual port status registers as well
*/
while (i--)
if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
return 1;
return 0;
}
void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
bool suspending, bool do_wakeup)
{
int port;
u32 temp;
/* If remote wakeup is enabled for the root hub but disabled
* for the controller, we must adjust all the port wakeup flags
* when the controller is suspended or resumed. In all other
* cases they don't need to be changed.
*/
if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
spin_lock_irq(&ehci->lock);
/* clear phy low-power mode before changing wakeup flags */
if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
}
spin_unlock_irq(&ehci->lock);
msleep(5);
spin_lock_irq(&ehci->lock);
}
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status[port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1 & ~PORT_WAKE_BITS;
/* If we are suspending the controller, clear the flags.
* If we are resuming the controller, set the wakeup flags.
*/
if (!suspending) {
if (t1 & PORT_CONNECT)
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
}
ehci_writel(ehci, t2, reg);
}
/* enter phy low-power mode again */
if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
}
}
/* Does the root hub have a port wakeup pending? */
if (!suspending && ehci_port_change(ehci))
usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
spin_unlock_irq(&ehci->lock);
}
EXPORT_SYMBOL_GPL(ehci_adjust_port_wakeup_flags);
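/* Illustrative usage (a sketch, not taken from this file): a bus-glue
* driver's suspend path would typically call
*
*	ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
*
* before halting the controller, and make the mirror-image call with
* suspending == false on resume.
*/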
static int ehci_bus_suspend (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
int changed;
bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
/* stop the schedules */
ehci_quiesce(ehci);
spin_lock_irq (&ehci->lock);
if (ehci->rh_state < EHCI_RH_RUNNING)
goto done;
/* Once the controller is stopped, port resumes that are already
* in progress won't complete. Hence if remote wakeup is enabled
* for the root hub and any ports are in the middle of a resume or
* remote wakeup, we must fail the suspend.
*/
if (hcd->self.root_hub->do_remote_wakeup) {
if (ehci->resuming_ports) {
spin_unlock_irq(&ehci->lock);
ehci_dbg(ehci, "suspend failed because a port is resuming\n");
return -EBUSY;
}
}
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
* then manually resume them in the bus_resume() routine.
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1 & ~PORT_WAKE_BITS;
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &ehci->bus_suspended);
}
/* enable remote wakeup on all ports, if told to do so */
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable the appropriate wake bits; otherwise the
* hardware cannot enter PHY low-power mode. If a race
* occurs here (a connection change while the bits are
* being set), port change detection will eventually fix it.
*/
if (t1 & PORT_CONNECT)
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
}
if (t1 != t2) {
/*
* On some controllers, Wake-On-Disconnect will
* generate false wakeup signals until the bus
* switches over to full-speed idle. For their
* sake, add a delay if we need one.
*/
if ((t2 & PORT_WKDISC_E) &&
ehci_port_speed(ehci, t2) ==
USB_PORT_STAT_HIGH_SPEED)
fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
spin_unlock_irq(&ehci->lock);
if (changed && ehci_has_fsl_susp_errata(ehci))
/*
* Wait at least 10 milliseconds to ensure the controller has
* entered the suspend state before initiating a port resume
* using the Force Port Resume bit (not EHCI compatible).
*/
usleep_range(10000, 20000);
if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
/*
* Wait for HCD to enter low-power mode or for the bus
* to switch to full-speed idle.
*/
usleep_range(5000, 5500);
}
if (changed && ehci->has_tdi_phy_lpm) {
spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
u32 t3;
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
spin_unlock_irq(&ehci->lock);
}
/* Apparently some devices need a >= 1-uframe (125 us) delay here */
if (ehci->bus_suspended)
udelay(150);
/* turn off now-idle HC */
ehci_halt (ehci);
spin_lock_irq(&ehci->lock);
if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
ehci_handle_controller_death(ehci);
if (ehci->rh_state != EHCI_RH_RUNNING)
goto done;
ehci->rh_state = EHCI_RH_SUSPENDED;
unlink_empty_async_suspended(ehci);
/* Some Synopsys controllers mistakenly leave IAA turned on */
ehci_writel(ehci, STS_IAA, &ehci->regs->status);
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
ehci_handle_intr_unlinks(ehci);
end_free_itds(ehci);
/* allow remote wakeup */
mask = INTR_MASK;
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
done:
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
ehci->enabled_hrtimer_events = 0;
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
spin_unlock_irq (&ehci->lock);
hrtimer_cancel(&ehci->hrtimer);
return 0;
}
/* caller has locked the root hub, and should reset/reinit on error */
static int ehci_bus_resume (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 power_okay;
int i;
unsigned long resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
goto shutdown;
if (unlikely(ehci->debug)) {
if (!dbgp_reset_prep(hcd))
ehci->debug = NULL;
else
dbgp_external_startup(hcd);
}
/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
ehci_dbg(ehci, "resume root hub%s\n",
power_okay ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
/* re-init operational registers */
ehci_writel(ehci, 0, &ehci->regs->segment);
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci->rh_state = EHCI_RH_RUNNING;
/*
* According to Bugzilla #8190, the port status for some controllers
* will be wrong without a delay. In that bogus state, the port
* is enabled but neither suspended nor resumed.
*/
i = HCS_N_PORTS(ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
if ((temp & PORT_PE) &&
!(temp & (PORT_SUSPEND | PORT_RESUME))) {
ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp);
spin_unlock_irq(&ehci->lock);
msleep(8);
spin_lock_irq(&ehci->lock);
break;
}
}
if (ehci->shutdown)
goto shutdown;
/* clear phy low-power mode before resume */
if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
i = HCS_N_PORTS(ehci->hcs_params);
while (i--) {
if (test_bit(i, &ehci->bus_suspended)) {
u32 __iomem *hostpc_reg =
&ehci->regs->hostpc[i];
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD,
hostpc_reg);
}
}
spin_unlock_irq(&ehci->lock);
msleep(5);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto shutdown;
}
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
set_bit(i, &resume_needed);
}
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
/*
* Sleep for USB_RESUME_TIMEOUT ms only if at least one port is
* actually being resumed.
*/
if (resume_needed) {
spin_unlock_irq(&ehci->lock);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto shutdown;
}
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
if (test_bit(i, &resume_needed)) {
temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
}
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
spin_unlock_irq(&ehci->lock);
ehci_handover_companion_ports(ehci);
/* Now we can safely re-enable irqs */
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto shutdown;
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
(void) ehci_readl(ehci, &ehci->regs->intr_enable);
spin_unlock_irq(&ehci->lock);
return 0;
shutdown:
spin_unlock_irq(&ehci->lock);
return -ESHUTDOWN;
}
static unsigned long ehci_get_resuming_ports(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
return ehci->resuming_ports;
}
#else
#define ehci_bus_suspend NULL
#define ehci_bus_resume NULL
#define ehci_get_resuming_ports NULL
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/*
* Sets the owner of a port
*/
static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
{
u32 __iomem *status_reg;
u32 port_status;
int try;
status_reg = &ehci->regs->port_status[portnum];
/*
* The controller won't set the OWNER bit if the port is
* enabled, so this loop will sometimes require at least two
* iterations: one to disable the port and one to set OWNER.
*/
for (try = 4; try > 0; --try) {
spin_lock_irq(&ehci->lock);
port_status = ehci_readl(ehci, status_reg);
if ((port_status & PORT_OWNER) == new_owner
|| (port_status & (PORT_OWNER | PORT_CONNECT))
== 0)
try = 0;
else {
port_status ^= PORT_OWNER;
port_status &= ~(PORT_PE | PORT_RWC_BITS);
ehci_writel(ehci, port_status, status_reg);
}
spin_unlock_irq(&ehci->lock);
if (try > 1)
msleep(5);
}
}
/*-------------------------------------------------------------------------*/
static int check_reset_complete (
struct ehci_hcd *ehci,
int index,
u32 __iomem *status_reg,
int port_status
) {
if (!(port_status & PORT_CONNECT))
return port_status;
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
/* with integrated TT, there's nobody to hand it to! */
if (ehci_is_TDI(ehci)) {
ehci_dbg (ehci,
"Failed to enable port %d on root hub TT\n",
index+1);
return port_status;
}
ehci_dbg (ehci, "port %d full speed --> companion\n",
index + 1);
// what happens if HCS_N_CC(params) == 0 ?
port_status |= PORT_OWNER;
port_status &= ~PORT_RWC_BITS;
ehci_writel(ehci, port_status, status_reg);
/* ensure 440EPX ohci controller state is operational */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 1);
} else {
ehci_dbg(ehci, "port %d reset complete, port enabled\n",
index + 1);
/* ensure 440EPx ohci controller state is suspended */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 0);
}
return port_status;
}
/*-------------------------------------------------------------------------*/
/* build "status change" packet (one or two bytes) from HC registers */
static int
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp, status;
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
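/* Assume every port changed; narrowed below when the controller has
* per-port change-detect (PPCD) bits.
*/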
u32 ppcd = ~0;
/* init status to no-changes */
buf [0] = 0;
ports = HCS_N_PORTS (ehci->hcs_params);
if (ports > 7) {
buf [1] = 0;
retval++;
}
/* Inform the core about resumes-in-progress by returning
* a non-zero value even if there are no status changes.
*/
status = ehci->resuming_ports;
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
if (!ignore_oc && !ehci->spurious_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
// PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
/* no hub change reports (bit 0) for now (power, ...) */
/* port N changes (bit N)? */
spin_lock_irqsave (&ehci->lock, flags);
/* get per-port change detect bits */
if (ehci->has_ppcd)
ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
for (i = 0; i < ports; i++) {
/* leverage per-port change bits feature */
if (ppcd & (1 << i))
temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
else
temp = 0;
/*
* Return status information even for ports with OWNER set.
* Otherwise hub_wq wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
|| (ehci->reset_done[i] && time_after_eq(
jiffies, ehci->reset_done[i]))
|| ehci_has_ci_pec_bug(ehci, temp)) {
if (i < 7)
buf [0] |= 1 << (i + 1);
else
buf [1] |= 1 << (i - 7);
status = STS_PCD;
}
}
/* If a resume is in progress, make sure it can finish */
if (ehci->resuming_ports)
mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
/*-------------------------------------------------------------------------*/
static void
ehci_hub_descriptor (
struct ehci_hcd *ehci,
struct usb_hub_descriptor *desc
) {
int ports = HCS_N_PORTS (ehci->hcs_params);
u16 temp;
desc->bDescriptorType = USB_DT_HUB;
desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
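/* temp is the size in bytes of each port bitmap: one bit per port plus
* the reserved bit 0, rounded up to whole bytes. The descriptor carries
* two such bitmaps after its 7 fixed bytes.
*/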
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
else
temp |= HUB_CHAR_NO_LPSM; /* no power switching */
#if 0
// re-enable when we support USB_PORT_FEAT_INDICATOR below.
if (HCS_INDICATOR (ehci->hcs_params))
temp |= HUB_CHAR_PORTIND; /* per-port indicators (LEDs) */
#endif
desc->wHubCharacteristics = cpu_to_le16(temp);
}
/*-------------------------------------------------------------------------*/
int ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg, *hostpc_reg;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
* Avoid out-of-bounds values while calculating the port index
* from wIndex. The compiler doesn't like pointers to invalid
* addresses, even if they are never used.
*/
temp = (wIndex - 1) & 0xff;
if (temp >= HCS_N_PORTS_MAX)
temp = 0;
status_reg = &ehci->regs->port_status[temp];
hostpc_reg = &ehci->regs->hostpc[temp];
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
spin_lock_irqsave (&ehci->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
temp &= ~PORT_RWC_BITS;
/*
* Even if OWNER is set, so the port is owned by the
* companion controller, hub_wq needs to be able to clear
* the port-change status bits (especially
* USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
ehci_writel(ehci, temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
ehci_writel(ehci, temp | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (ehci->no_selective_suspend)
break;
#ifdef CONFIG_USB_OTG
if ((hcd->self.otg_port == (wIndex + 1))
&& hcd->self.b_hnp_enable) {
otg_start_hnp(hcd->usb_phy->otg);
break;
}
#endif
if (!(temp & PORT_SUSPEND))
break;
if ((temp & PORT_PE) == 0)
goto error;
/* clear phy low-power mode before resume */
if (ehci->has_tdi_phy_lpm) {
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
hostpc_reg);
spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5); /* wait to leave low-power mode */
spin_lock_irqsave(&ehci->lock, flags);
}
/* resume signaling for 20 msec */
temp &= ~PORT_WAKE_BITS;
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(wIndex, &ehci->resuming_ports);
usb_hcd_start_port_resume(&hcd->self, wIndex);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(ehci->hcs_params)) {
spin_unlock_irqrestore(&ehci->lock, flags);
ehci_port_power(ehci, wIndex, false);
spin_lock_irqsave(&ehci->lock, flags);
}
break;
case USB_PORT_FEAT_C_CONNECTION:
ehci_writel(ehci, temp | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
ehci_writel(ehci, temp | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
break;
case GetHubDescriptor:
ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset (buf, 0, 4);
//cpu_to_le32s ((u32 *) buf);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
temp = ehci_readl(ehci, status_reg);
// wPortChange bits
if (temp & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
if (ehci_has_ci_pec_bug(ehci, temp)) {
status |= USB_PORT_STAT_C_ENABLE << 16;
ehci_info(ehci,
"PE is cleared by HW port:%d PORTSC:%08x\n",
wIndex + 1, temp);
}
if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)) {
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* Hubs should disable port power on over-current.
* However, not all EHCI implementations do this
* automatically, even if they _do_ support per-port
* power switching; they're allowed to just limit the
* current. hub_wq will turn the power back on.
*/
if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
&& HCS_PPC(ehci->hcs_params)) {
spin_unlock_irqrestore(&ehci->lock, flags);
ehci_port_power(ehci, wIndex, false);
spin_lock_irqsave(&ehci->lock, flags);
temp = ehci_readl(ehci, status_reg);
}
}
/* no reset or resume pending */
if (!ehci->reset_done[wIndex]) {
/* Remote Wakeup received? */
if (temp & PORT_RESUME) {
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
usb_hcd_start_port_resume(&hcd->self, wIndex);
set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
}
/* reset or resume not yet complete */
} else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
; /* wait until it is complete */
/* resume completed */
} else if (test_bit(wIndex, &ehci->resuming_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
set_bit(wIndex, &ehci->port_c_suspend);
ehci->reset_done[wIndex] = 0;
usb_hcd_end_port_resume(&hcd->self, wIndex);
/* stop resume signaling */
temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
ehci_writel(ehci, temp, status_reg);
clear_bit(wIndex, &ehci->resuming_ports);
retval = ehci_handshake(ehci, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
ehci_err(ehci, "port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
temp = ehci_readl(ehci, status_reg);
/* whoever resets must GetPortStatus to complete it!! */
} else {
status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
/* force reset to complete */
ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = ehci_handshake(ehci, status_reg,
PORT_RESET, 0, 1000);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
temp = check_reset_complete (ehci, wIndex, status_reg,
ehci_readl(ehci, status_reg));
}
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &ehci->companion_ports)) {
temp &= ~PORT_RWC_BITS;
temp |= PORT_OWNER;
ehci_writel(ehci, temp, status_reg);
ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
temp = ehci_readl(ehci, status_reg);
}
/*
* Even if OWNER is set, there's no harm letting hub_wq
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
// status may be from integrated TT
if (ehci->has_hostpc) {
temp1 = ehci_readl(ehci, hostpc_reg);
status |= ehci_port_speed(ehci, temp1);
} else
status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
/* maybe the port was unsuspended without our knowledge */
if (temp & (PORT_SUSPEND|PORT_RESUME)) {
status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
clear_bit(wIndex, &ehci->resuming_ports);
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
usb_hcd_end_port_resume(&hcd->self, wIndex);
}
if (temp & PORT_OC)
status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
status |= USB_PORT_STAT_POWER;
if (test_bit(wIndex, &ehci->port_c_suspend))
status |= USB_PORT_STAT_C_SUSPEND << 16;
if (status & ~0xffff) /* only if wPortChange is interesting */
dbg_port(ehci, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (unlikely(ehci->debug)) {
/* If the debug port is active, any port
* feature request must be denied */
if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
(readl(&ehci->debug->control) & DBGP_ENABLED)) {
retval = -ENODEV;
goto error_exit;
}
}
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
if (temp & PORT_OWNER)
break;
temp &= ~PORT_RWC_BITS;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (ehci->no_selective_suspend)
break;
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
/* After the above check the port must be connected.
* Set the appropriate wake bits so the PHY can enter
* low-power mode if the tdi_phy_lpm feature is present.
*/
temp &= ~PORT_WKCONN_E;
temp |= PORT_WKDISC_E | PORT_WKOC_E;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
if (ehci->has_tdi_phy_lpm) {
spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5); /* 5 ms for the HC to enter low-power mode */
spin_lock_irqsave(&ehci->lock, flags);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 | HOSTPC_PHCD,
hostpc_reg);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
if (ehci_has_fsl_susp_errata(ehci)) {
/* 10ms for HCD enter suspend */
spin_unlock_irqrestore(&ehci->lock, flags);
usleep_range(10000, 20000);
spin_lock_irqsave(&ehci->lock, flags);
}
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(ehci->hcs_params)) {
spin_unlock_irqrestore(&ehci->lock, flags);
ehci_port_power(ehci, wIndex, true);
spin_lock_irqsave(&ehci->lock, flags);
}
break;
case USB_PORT_FEAT_RESET:
if (temp & (PORT_SUSPEND|PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
&& !ehci_is_TDI(ehci)
&& PORT_USB11 (temp)) {
ehci_dbg (ehci,
"port %d low speed --> companion\n",
wIndex + 1);
temp |= PORT_OWNER;
} else {
temp |= PORT_RESET;
temp &= ~PORT_PE;
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (50);
/*
* Force full-speed connect for FSL high-speed
* erratum; disable HS Chirp by setting PFSC bit
*/
if (ehci_has_fsl_hs_errata(ehci))
temp |= (1 << PORTSC_FSL_PFSC);
}
ehci_writel(ehci, temp, status_reg);
break;
/* For downstream facing ports (these): one hub port is put
* into test mode according to USB2 11.24.2.13, then the hub
* must be reset (which for root hub now means rmmod+modprobe,
* or else system reboot). See EHCI 2.3.9 and 4.14 for info
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
#ifdef CONFIG_USB_HCD_TEST_MODE
if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
spin_unlock_irqrestore(&ehci->lock, flags);
retval = ehset_single_step_set_feature(hcd,
wIndex + 1);
spin_lock_irqsave(&ehci->lock, flags);
break;
}
#endif
if (!selector || selector > 5)
goto error;
spin_unlock_irqrestore(&ehci->lock, flags);
ehci_quiesce(ehci);
spin_lock_irqsave(&ehci->lock, flags);
/* Put all enabled ports into suspend */
while (ports--) {
u32 __iomem *sreg =
&ehci->regs->port_status[ports];
temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
if (temp & PORT_PE)
ehci_writel(ehci, temp | PORT_SUSPEND,
sreg);
}
spin_unlock_irqrestore(&ehci->lock, flags);
ehci_halt(ehci);
spin_lock_irqsave(&ehci->lock, flags);
temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
default:
goto error;
}
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(ehci_hub_control);
static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (ehci_is_TDI(ehci))
return;
set_owner(ehci, --portnum, PORT_OWNER);
}
static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 __iomem *reg;
if (ehci_is_TDI(ehci))
return 0;
reg = &ehci->regs->port_status[portnum - 1];
return ehci_readl(ehci, reg) & PORT_OWNER;
}
static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable)
{
struct usb_hcd *hcd = ehci_to_hcd(ehci);
u32 __iomem *status_reg = &ehci->regs->port_status[portnum];
u32 temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
if (enable)
ehci_writel(ehci, temp | PORT_POWER, status_reg);
else
ehci_writel(ehci, temp & ~PORT_POWER, status_reg);
if (hcd->driver->port_power)
hcd->driver->port_power(hcd, portnum, enable);
return 0;
}
| linux-master | drivers/usb/host/ehci-hub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ST EHCI driver
*
* Copyright (C) 2014 STMicroelectronics – All Rights Reserved
*
* Author: Peter Griffin <[email protected]>
*
* Derived from ehci-platform.c
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/pinctrl/consumer.h>
#include "ehci.h"
#define USB_MAX_CLKS 3
struct st_ehci_platform_priv {
struct clk *clks[USB_MAX_CLKS];
struct clk *clk48;
struct reset_control *rst;
struct reset_control *pwr;
struct phy *phy;
};
#define DRIVER_DESC "EHCI STMicroelectronics driver"
#define hcd_to_ehci_priv(h) \
((struct st_ehci_platform_priv *)hcd_to_ehci(h)->priv)
#define EHCI_CAPS_SIZE 0x10
#define AHB2STBUS_INSREG01 (EHCI_CAPS_SIZE + 0x84)
static int st_ehci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 threshold;
/* Set EHCI packet buffer IN/OUT threshold to 128 bytes */
threshold = 128 | (128 << 16);
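/* Assumption (not spelled out in this file): the low halfword is the IN
* threshold and the high halfword the OUT threshold, matching the IN/OUT
* order named above.
*/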
writel(threshold, hcd->regs + AHB2STBUS_INSREG01);
ehci->caps = hcd->regs + pdata->caps_offset;
return ehci_setup(hcd);
}
static int st_ehci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk, ret;
ret = reset_control_deassert(priv->pwr);
if (ret)
return ret;
ret = reset_control_deassert(priv->rst);
if (ret)
goto err_assert_power;
/* some SoCs don't have a dedicated 48 MHz clock, but those that do
need the rate to be explicitly set */
if (priv->clk48) {
ret = clk_set_rate(priv->clk48, 48000000);
if (ret)
goto err_assert_reset;
}
for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
if (ret)
goto err_disable_clks;
}
ret = phy_init(priv->phy);
if (ret)
goto err_disable_clks;
ret = phy_power_on(priv->phy);
if (ret)
goto err_exit_phy;
return 0;
err_exit_phy:
phy_exit(priv->phy);
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
err_assert_reset:
reset_control_assert(priv->rst);
err_assert_power:
reset_control_assert(priv->pwr);
return ret;
}
static void st_ehci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
reset_control_assert(priv->pwr);
reset_control_assert(priv->rst);
phy_power_off(priv->phy);
phy_exit(priv->phy);
for (clk = USB_MAX_CLKS - 1; clk >= 0; clk--)
if (priv->clks[clk])
clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ehci_platform_hc_driver;
static const struct ehci_driver_overrides platform_overrides __initconst = {
.reset = st_ehci_platform_reset,
.extra_priv_size = sizeof(struct st_ehci_platform_priv),
};
static struct usb_ehci_pdata ehci_platform_defaults = {
.power_on = st_ehci_platform_power_on,
.power_suspend = st_ehci_platform_power_off,
.power_off = st_ehci_platform_power_off,
};
static int st_ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ehci_pdata *pdata = &ehci_platform_defaults;
struct st_ehci_platform_priv *priv;
int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(dev, hcd);
dev->dev.platform_data = pdata;
priv = hcd_to_ehci_priv(hcd);
priv->phy = devm_phy_get(&dev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
goto err_put_hcd;
}
for (clk = 0; clk < USB_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
err = PTR_ERR(priv->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->clks[clk] = NULL;
break;
}
}
/* some SoCs don't have a dedicated 48 MHz clock, but those that
do need the rate to be explicitly set */
priv->clk48 = devm_clk_get(&dev->dev, "clk48");
if (IS_ERR(priv->clk48)) {
dev_info(&dev->dev, "48MHz clk not found\n");
priv->clk48 = NULL;
}
priv->pwr =
devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->pwr = NULL;
}
priv->rst =
devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->rst = NULL;
}
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
goto err_put_clks;
}
hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_put_clks;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_put_clks;
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
err_put_hcd:
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
usb_put_hcd(hcd);
return err;
}
static void st_ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
usb_put_hcd(hcd);
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
}
#ifdef CONFIG_PM_SLEEP
static int st_ehci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
ret = ehci_suspend(hcd, do_wakeup);
if (ret)
return ret;
if (pdata->power_suspend)
pdata->power_suspend(pdev);
pinctrl_pm_select_sleep_state(dev);
return ret;
}
static int st_ehci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
int err;
pinctrl_pm_select_default_state(dev);
if (pdata->power_on) {
err = pdata->power_on(pdev);
if (err < 0)
return err;
}
ehci_resume(hcd, false);
return 0;
}
static SIMPLE_DEV_PM_OPS(st_ehci_pm_ops, st_ehci_suspend, st_ehci_resume);
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id st_ehci_ids[] = {
{ .compatible = "st,st-ehci-300x", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_ehci_ids);
static struct platform_driver ehci_platform_driver = {
.probe = st_ehci_platform_probe,
.remove_new = st_ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ehci",
#ifdef CONFIG_PM_SLEEP
.pm = &st_ehci_pm_ops,
#endif
.of_match_table = st_ehci_ids,
}
};
static int __init ehci_platform_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
module_init(ehci_platform_init);
static void __exit ehci_platform_cleanup(void)
{
platform_driver_unregister(&ehci_platform_driver);
}
module_exit(ehci_platform_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Peter Griffin <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-st.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2008 Rodolfo Giometti <[email protected]>
* Copyright (c) 2008 Eurotech S.p.A. <[email protected]>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#define DRIVER_VERSION "0.0.50"
#define OXU_DEVICEID 0x00
#define OXU_REV_MASK 0xffff0000
#define OXU_REV_SHIFT 16
#define OXU_REV_2100 0x2100
#define OXU_BO_SHIFT 8
#define OXU_BO_MASK (0x3 << OXU_BO_SHIFT)
#define OXU_MAJ_REV_SHIFT 4
#define OXU_MAJ_REV_MASK (0xf << OXU_MAJ_REV_SHIFT)
#define OXU_MIN_REV_SHIFT 0
#define OXU_MIN_REV_MASK (0xf << OXU_MIN_REV_SHIFT)
#define OXU_HOSTIFCONFIG 0x04
#define OXU_SOFTRESET 0x08
#define OXU_SRESET (1 << 0)
#define OXU_PIOBURSTREADCTRL 0x0C
#define OXU_CHIPIRQSTATUS 0x10
#define OXU_CHIPIRQEN_SET 0x14
#define OXU_CHIPIRQEN_CLR 0x18
#define OXU_USBSPHLPWUI 0x00000080
#define OXU_USBOTGLPWUI 0x00000040
#define OXU_USBSPHI 0x00000002
#define OXU_USBOTGI 0x00000001
#define OXU_CLKCTRL_SET 0x1C
#define OXU_SYSCLKEN 0x00000008
#define OXU_USBSPHCLKEN 0x00000002
#define OXU_USBOTGCLKEN 0x00000001
#define OXU_ASO 0x68
#define OXU_SPHPOEN 0x00000100
#define OXU_OVRCCURPUPDEN 0x00000800
#define OXU_ASO_OP (1 << 10)
#define OXU_COMPARATOR 0x000004000
#define OXU_USBMODE 0x1A8
#define OXU_VBPS 0x00000020
#define OXU_ES_LITTLE 0x00000000
#define OXU_CM_HOST_ONLY 0x00000003
/*
* Proper EHCI structs & defines
*/
/* Magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
struct oxu_hcd;
/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
/* Section 2.2 Host Controller Capability Registers */
struct ehci_caps {
/* these fields are specified as 8 and 16 bit registers,
* but some hosts can't perform 8 or 16 bit PCI accesses.
*/
u32 hc_capbase;
#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
u32 hcs_params; /* HCSPARAMS - offset 0x4 */
#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */
#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */
#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */
#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
u32 hcc_params; /* HCCPARAMS - offset 0x8 */
#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */
u8 portroute[8]; /* nibbles for routing - offset 0xC */
} __packed;
/* Section 2.3 Host Controller Operational Registers */
struct ehci_regs {
/* USBCMD: offset 0x00 */
u32 command;
/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
#define CMD_PARK (1<<11) /* enable "park" on async qh */
#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
#define CMD_ASE (1<<5) /* async schedule enable */
#define CMD_PSE (1<<4) /* periodic schedule enable */
/* 3:2 is periodic frame list size */
#define CMD_RESET (1<<1) /* reset HC not bus */
#define CMD_RUN (1<<0) /* start/stop HC */
/* USBSTS: offset 0x04 */
u32 status;
#define STS_ASS (1<<15) /* Async Schedule Status */
#define STS_PSS (1<<14) /* Periodic Schedule Status */
#define STS_RECL (1<<13) /* Reclamation */
#define STS_HALT (1<<12) /* Not running (any reason) */
/* some bits reserved */
/* these STS_* flags are also intr_enable bits (USBINTR) */
#define STS_IAA (1<<5) /* Interrupted on async advance */
#define STS_FATAL (1<<4) /* such as some PCI access errors */
#define STS_FLR (1<<3) /* frame list rolled over */
#define STS_PCD (1<<2) /* port change detect */
#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
#define STS_INT (1<<0) /* "normal" completion (short, ...) */
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/* USBINTR: offset 0x08 */
u32 intr_enable;
/* FRINDEX: offset 0x0C */
u32 frame_index; /* current microframe number */
/* CTRLDSSEGMENT: offset 0x10 */
u32 segment; /* address bits 63:32 if needed */
/* PERIODICLISTBASE: offset 0x14 */
u32 frame_list; /* points to periodic list */
/* ASYNCLISTADDR: offset 0x18 */
u32 async_next; /* address of next async queue head */
u32 reserved[9];
/* CONFIGFLAG: offset 0x40 */
u32 configured_flag;
#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
/* PORTSC: offset 0x44 */
u32 port_status[]; /* up to N_PORTS */
/* 31:23 reserved */
#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
/* 19:16 for port testing */
#define PORT_LED_OFF (0<<14)
#define PORT_LED_AMBER (1<<14)
#define PORT_LED_GREEN (2<<14)
#define PORT_LED_MASK (3<<14)
#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
#define PORT_POWER (1<<12) /* true: has power (see PPC) */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
/* 9 reserved */
#define PORT_RESET (1<<8) /* reset port */
#define PORT_SUSPEND (1<<7) /* suspend port */
#define PORT_RESUME (1<<6) /* resume it */
#define PORT_OCC (1<<5) /* over current change */
#define PORT_OC (1<<4) /* over current active */
#define PORT_PEC (1<<3) /* port enable change */
#define PORT_PE (1<<2) /* port enable */
#define PORT_CSC (1<<1) /* connect status change */
#define PORT_CONNECT (1<<0) /* device connected */
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
} __packed;
/* Appendix C, Debug port ... intended for use with special "debug devices"
* that can help if there's no serial console. (nonstandard enumeration.)
*/
struct ehci_dbg_port {
u32 control;
#define DBGP_OWNER (1<<30)
#define DBGP_ENABLED (1<<28)
#define DBGP_DONE (1<<16)
#define DBGP_INUSE (1<<10)
#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
# define DBGP_ERR_BAD 1
# define DBGP_ERR_SIGNAL 2
#define DBGP_ERROR (1<<6)
#define DBGP_GO (1<<5)
#define DBGP_OUT (1<<4)
#define DBGP_LEN(x) (((x)>>0)&0x0f)
u32 pids;
#define DBGP_PID_GET(x) (((x)>>16)&0xff)
#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
u32 data03;
u32 data47;
u32 address;
#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
} __packed;
#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
/*
* EHCI Specification 0.95 Section 3.5
* QTD: describe data transfer components (buffer, direction, ...)
* See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
*
* These are associated only with "QH" (Queue Head) structures,
* used with control, bulk, and interrupt transfers.
*/
struct ehci_qtd {
/* first part defined by EHCI spec */
__le32 hw_next; /* see EHCI 3.5.1 */
__le32 hw_alt_next; /* see EHCI 3.5.2 */
__le32 hw_token; /* see EHCI 3.5.3 */
#define QTD_TOGGLE (1 << 31) /* data toggle */
#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
#define QTD_IOC (1 << 15) /* interrupt on complete */
#define QTD_CERR(tok) (((tok)>>10) & 0x3)
#define QTD_PID(tok) (((tok)>>8) & 0x3)
#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
#define QTD_STS_HALT (1 << 6) /* halted on error */
#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
#define QTD_STS_STS (1 << 1) /* split transaction state */
#define QTD_STS_PING (1 << 0) /* issue PING? */
__le32 hw_buf[5]; /* see EHCI 3.5.4 */
__le32 hw_buf_hi[5]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t qtd_dma; /* qtd address */
struct list_head qtd_list; /* sw qtd list */
struct urb *urb; /* qtd's urb */
size_t length; /* length of buffer */
u32 qtd_buffer_len;
void *buffer;
dma_addr_t buffer_dma;
void *transfer_buffer;
void *transfer_dma;
} __aligned(32);
/* mask NakCnt+T in qh->hw_alt_next */
#define QTD_MASK cpu_to_le32 (~0x1f)
#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
/* Type tag from {qh, itd, sitd, fstn}->hw_next */
#define Q_NEXT_TYPE(dma) ((dma) & cpu_to_le32 (3 << 1))
/* values for that type tag */
#define Q_TYPE_QH cpu_to_le32 (1 << 1)
/* next async queue entry, or pointer to interrupt/periodic QH */
#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
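/* (the low five bits are free for the type tag because QHs are 32-byte
* aligned)
*/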
/* for periodic/async schedules and qtd lists, mark end of list */
#define EHCI_LIST_END cpu_to_le32(1) /* "null pointer" to hw */
/*
* Entries in periodic shadow table are pointers to one of four kinds
* of data structure. That's dictated by the hardware; a type tag is
* encoded in the low bits of the hardware's periodic schedule. Use
* Q_NEXT_TYPE to get the tag.
*
* For entries in the async schedule, the type tag always says "qh".
*/
union ehci_shadow {
struct ehci_qh *qh; /* Q_TYPE_QH */
__le32 *hw_next; /* (all types) */
void *ptr;
};
/*
* EHCI Specification 0.95 Section 3.6
* QH: describes control/bulk/interrupt endpoints
* See Fig 3-7 "Queue Head Structure Layout".
*
* These appear in both the async and (for interrupt) periodic schedules.
*/
struct ehci_qh {
/* first part defined by EHCI spec */
__le32 hw_next; /* see EHCI 3.6.1 */
__le32 hw_info1; /* see EHCI 3.6.2 */
#define QH_HEAD 0x00008000
__le32 hw_info2; /* see EHCI 3.6.2 */
#define QH_SMASK 0x000000ff
#define QH_CMASK 0x0000ff00
#define QH_HUBADDR 0x007f0000
#define QH_HUBPORT 0x3f800000
#define QH_MULT 0xc0000000
__le32 hw_current; /* qtd list - see EHCI 3.6.4 */
/* qtd overlay (hardware parts of a struct ehci_qtd) */
__le32 hw_qtd_next;
__le32 hw_alt_next;
__le32 hw_token;
__le32 hw_buf[5];
__le32 hw_buf_hi[5];
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
struct list_head qtd_list; /* sw qtd list */
struct ehci_qtd *dummy;
struct ehci_qh *reclaim; /* next to reclaim */
struct oxu_hcd *oxu;
struct kref kref;
unsigned int stamp;
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
u8 c_usecs; /* ... split completion bw */
u16 tt_usecs; /* tt downstream bandwidth */
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
#define NO_FRAME ((unsigned short)~0) /* pick new start */
struct usb_device *dev; /* access to TT */
} __aligned(32);
/*
* Proper OXU210HP structs
*/
#define OXU_OTG_CORE_OFFSET 0x00400
#define OXU_OTG_CAP_OFFSET (OXU_OTG_CORE_OFFSET + 0x100)
#define OXU_SPH_CORE_OFFSET 0x00800
#define OXU_SPH_CAP_OFFSET (OXU_SPH_CORE_OFFSET + 0x100)
#define OXU_OTG_MEM 0xE000
#define OXU_SPH_MEM 0x16000
/* Only the number of elements and the element structure are specified here. */
/* 2 host controllers are enabled - total size <= 28 kbytes */
#define DEFAULT_I_TDPS 1024
#define QHEAD_NUM 16
#define QTD_NUM 32
#define SITD_NUM 8
#define MURB_NUM 8
#define BUFFER_NUM 8
#define BUFFER_SIZE 512
struct oxu_info {
struct usb_hcd *hcd[2];
};
struct oxu_buf {
u8 buffer[BUFFER_SIZE];
} __aligned(BUFFER_SIZE);
struct oxu_onchip_mem {
struct oxu_buf db_pool[BUFFER_NUM];
u32 frame_list[DEFAULT_I_TDPS];
struct ehci_qh qh_pool[QHEAD_NUM];
struct ehci_qtd qtd_pool[QTD_NUM];
} __aligned(4 << 10);
#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
struct oxu_murb {
struct urb urb;
struct urb *main;
u8 last;
};
struct oxu_hcd { /* one per controller */
unsigned int is_otg:1;
u8 qh_used[QHEAD_NUM];
u8 qtd_used[QTD_NUM];
u8 db_used[BUFFER_NUM];
u8 murb_used[MURB_NUM];
struct oxu_onchip_mem __iomem *mem;
spinlock_t mem_lock;
struct timer_list urb_timer;
struct ehci_caps __iomem *caps;
struct ehci_regs __iomem *regs;
u32 hcs_params; /* cached register copy */
spinlock_t lock;
/* async schedule support */
struct ehci_qh *async;
struct ehci_qh *reclaim;
unsigned int reclaim_ready:1;
unsigned int scanning:1;
/* periodic schedule support */
unsigned int periodic_size;
__le32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
unsigned int i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned int periodic_sched; /* periodic activity count */
/* per root hub port */
unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
/* bit vectors (one bit per port) */
unsigned long bus_suspended; /* which ports were
* already suspended at the
* start of a bus suspend
*/
unsigned long companion_ports;/* which ports are dedicated
* to the companion controller
*/
struct timer_list watchdog;
unsigned long actions;
unsigned int stamp;
unsigned long next_statechange;
u32 command;
/* SILICON QUIRKS */
struct list_head urb_list; /* this is the head to urb
* queue that didn't get enough
* resources
*/
struct oxu_murb *murb_pool; /* murb per split big urb */
unsigned int urb_len;
u8 sbrn; /* packed release number */
};
#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
enum ehci_timer_action {
TIMER_IO_WATCHDOG,
TIMER_IAA_WATCHDOG,
TIMER_ASYNC_SHRINK,
TIMER_ASYNC_OFF,
};
/*
* Main defines
*/
#define oxu_dbg(oxu, fmt, args...) \
dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}
static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
return (struct oxu_hcd *) (hcd->hcd_priv);
}
/*
* Debug stuff
*/
#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG
#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...) /* Nop */
#endif
#ifdef DEBUG
static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", status,
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
(status & STS_HALT) ? " Halt" : "",
(status & STS_IAA) ? " IAA" : "",
(status & STS_FATAL) ? " FATAL" : "",
(status & STS_FLR) ? " FLR" : "",
(status & STS_PCD) ? " PCD" : "",
(status & STS_ERR) ? " ERR" : "",
(status & STS_INT) ? " INT" : ""
);
}
static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
label, label[0] ? " " : "", enable,
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
(enable & STS_PCD) ? " PCD" : "",
(enable & STS_ERR) ? " ERR" : "",
(enable & STS_INT) ? " INT" : ""
);
}
static const char *const fls_strings[] =
{ "1024", "512", "256", "??" };
static int dbg_command_buf(char *buf, unsigned len,
const char *label, u32 command)
{
return scnprintf(buf, len,
"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
label, label[0] ? " " : "", command,
(command & CMD_PARK) ? "park" : "(park)",
CMD_PARK_CNT(command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
(command & CMD_IAAD) ? " IAAD" : "",
(command & CMD_ASE) ? " Async" : "",
(command & CMD_PSE) ? " Periodic" : "",
fls_strings[(command >> 2) & 0x3],
(command & CMD_RESET) ? " Reset" : "",
(command & CMD_RUN) ? "RUN" : "HALT"
);
}
static int dbg_port_buf(char *buf, unsigned len, const char *label,
int port, u32 status)
{
char *sig;
/* signaling state */
switch (status & (3 << 10)) {
case 0 << 10:
sig = "se0";
break;
case 1 << 10:
sig = "k"; /* low speed */
break;
case 2 << 10:
sig = "j";
break;
default:
sig = "?";
break;
}
return scnprintf(buf, len,
"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", port, status,
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
(status & PORT_OCC) ? " OCC" : "",
(status & PORT_OC) ? " OC" : "",
(status & PORT_PEC) ? " PEC" : "",
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
(status & PORT_CONNECT) ? " CONNECT" : ""
);
}
#else
static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
char _buf[80]; \
dbg_status_buf(_buf, sizeof _buf, label, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_cmd(oxu, label, command) { \
char _buf[80]; \
dbg_command_buf(_buf, sizeof _buf, label, command); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_port(oxu, label, port, status) { \
char _buf[80]; \
dbg_port_buf(_buf, sizeof _buf, label, port, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
/*
* Module parameters
*/
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh; /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
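/* Illustrative: "modprobe oxu210hp-hcd ignore_oc=1" (module name assumed
* from this file's name) suppresses those reports.
*/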
static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
/*
* Local functions
*/
/* Low level read/write registers functions */
static inline u32 oxu_readl(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
static inline void timer_action_done(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
clear_bit(action, &oxu->actions);
}
static inline void timer_action(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
if (!test_and_set_bit(action, &oxu->actions)) {
unsigned long t;
switch (action) {
case TIMER_IAA_WATCHDOG:
t = EHCI_IAA_JIFFIES;
break;
case TIMER_IO_WATCHDOG:
t = EHCI_IO_JIFFIES;
break;
case TIMER_ASYNC_OFF:
t = EHCI_ASYNC_JIFFIES;
break;
case TIMER_ASYNC_SHRINK:
default:
t = EHCI_SHRINK_JIFFIES;
break;
}
t += jiffies;
		/* All timings except the IAA watchdog can be overridden.
		 * Async queue SHRINK often precedes IAA; while the queue
		 * is ready to go OFF neither can matter, and afterwards
		 * the IO watchdog stops unless there's still periodic
		 * traffic.
		 */
if (action != TIMER_IAA_WATCHDOG
&& t > oxu->watchdog.expires
&& timer_pending(&oxu->watchdog))
return;
mod_timer(&oxu->watchdog, t);
}
}
/*
* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
int ret;
ret = readl_poll_timeout_atomic(ptr, result,
((result & mask) == done ||
result == U32_MAX),
1, usec);
if (result == U32_MAX) /* card removed */
return -ENODEV;
return ret;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
u32 temp = readl(&oxu->regs->status);
/* disable any irqs left enabled by previous code */
writel(0, &oxu->regs->intr_enable);
if ((temp & STS_HALT) != 0)
return 0;
temp = readl(&oxu->regs->command);
temp &= ~CMD_RUN;
writel(temp, &oxu->regs->command);
return handshake(oxu, &oxu->regs->status,
STS_HALT, STS_HALT, 16 * 125);
}
/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
u32 __iomem *reg_ptr;
u32 tmp;
reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
tmp = readl(reg_ptr);
tmp |= 0x3;
writel(tmp, reg_ptr);
}
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
int retval;
u32 command = readl(&oxu->regs->command);
command |= CMD_RESET;
dbg_cmd(oxu, "reset", command);
writel(command, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
oxu->next_statechange = jiffies;
retval = handshake(oxu, &oxu->regs->command,
CMD_RESET, 0, 250 * 1000);
if (retval)
return retval;
tdi_reset(oxu);
return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
u32 temp;
#ifdef DEBUG
BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state));
#endif
/* wait for any schedule enables/disables to take effect */
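	/* shifting CMD_ASE/CMD_PSE left by 10 lines them up with STS_ASS/STS_PSS */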
temp = readl(&oxu->regs->command) << 10;
temp &= STS_ASS | STS_PSS;
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
temp, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
/* then disable anything that's still active */
temp = readl(&oxu->regs->command);
temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
writel(temp, &oxu->regs->command);
/* hardware can take 16 microframes to turn off ... */
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
0, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
}
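/* After a port reset, log whether the port was successfully enabled */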
static int check_reset_complete(struct oxu_hcd *oxu, int index,
u32 __iomem *status_reg, int port_status)
{
if (!(port_status & PORT_CONNECT)) {
oxu->reset_done[index] = 0;
return port_status;
}
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
index+1);
return port_status;
} else
oxu_dbg(oxu, "port %d high speed\n", index + 1);
return port_status;
}
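/* Build the root hub's hub descriptor from the HC's structural parameters */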
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
struct usb_hub_descriptor *desc)
{
int ports = HCS_N_PORTS(oxu->hcs_params);
u16 temp;
desc->bDescriptorType = USB_DT_HUB;
desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
else
temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
/* Allocate an OXU210HP on-chip memory data buffer
*
* An on-chip memory data buffer is required for each OXU210HP USB transfer.
* Each transfer descriptor has one or more on-chip memory data buffers.
*
 * Data buffers are allocated from a fixed-size pool of data blocks.
 * To minimise fragmentation and give reasonable memory utilisation,
 * data buffers are allocated with sizes that are power-of-2 multiples of
 * the block size, starting at an address that is a multiple of the
 * allocated size.
 *
 * FIXME: callers of this function require a buffer to be allocated for
 * len=0. This is a waste of on-chip memory and should be fixed. Then this
 * function should be changed to not allocate a buffer for len=0.
*/
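/* For illustration only (the actual block geometry comes from the driver's
 * BUFFER_SIZE and BUFFER_NUM definitions, not shown here): assuming 1KB
 * blocks, a 3000-byte transfer needs n_blocks = 3, which is rounded up to
 * a_blocks = 4, so four contiguous free blocks are claimed for it.
 */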
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;
	/* Don't allocate bigger than supported */
if (len > BUFFER_SIZE * BUFFER_NUM) {
oxu_err(oxu, "buffer too big (%d)\n", len);
return -ENOMEM;
}
spin_lock(&oxu->mem_lock);
/* Number of blocks needed to hold len */
n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
/* Round the number of blocks up to the power of 2 */
for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
;
/* Find a suitable available data buffer */
for (i = 0; i < BUFFER_NUM;
i += max(a_blocks, (int)oxu->db_used[i])) {
/* Check all the required blocks are available */
for (j = 0; j < a_blocks; j++)
if (oxu->db_used[i + j])
break;
if (j != a_blocks)
continue;
/* Allocate blocks found! */
qtd->buffer = (void *) &oxu->mem->db_pool[i];
qtd->buffer_dma = virt_to_phys(qtd->buffer);
qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
oxu->db_used[i] = a_blocks;
spin_unlock(&oxu->mem_lock);
return 0;
}
/* Failed */
spin_unlock(&oxu->mem_lock);
return -ENOMEM;
}
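/* Return a qtd's on-chip data blocks to the pool */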
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
spin_lock(&oxu->mem_lock);
index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
/ BUFFER_SIZE;
oxu->db_used[index] = 0;
qtd->qtd_buffer_len = 0;
qtd->buffer_dma = 0;
qtd->buffer = NULL;
spin_unlock(&oxu->mem_lock);
}
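/* Reset a qtd to a halted, inactive state with terminated link pointers */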
static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
memset(qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
}
static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
if (qtd->buffer)
oxu_buf_free(oxu, qtd);
spin_lock(&oxu->mem_lock);
index = qtd - &oxu->mem->qtd_pool[0];
oxu->qtd_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
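/* Allocate a qtd from the on-chip pool; returns NULL when the pool is exhausted */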
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qtd *qtd = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QTD_NUM; i++)
if (!oxu->qtd_used[i])
break;
if (i < QTD_NUM) {
qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
memset(qtd, 0, sizeof *qtd);
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
qtd->qtd_dma = virt_to_phys(qtd);
oxu->qtd_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return qtd;
}
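/* Mark a qh's slot in the on-chip pool as free */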
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int index;
spin_lock(&oxu->mem_lock);
index = qh - &oxu->mem->qh_pool[0];
oxu->qh_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
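/* kref release callback: free a qh and its dummy qtd */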
static void qh_destroy(struct kref *kref)
{
struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
struct oxu_hcd *oxu = qh->oxu;
/* clean qtds first, and know this is not linked */
if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
oxu_dbg(oxu, "unused qh not empty!\n");
BUG();
}
if (qh->dummy)
oxu_qtd_free(oxu, qh->dummy);
oxu_qh_free(oxu, qh);
}
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qh *qh = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QHEAD_NUM; i++)
if (!oxu->qh_used[i])
break;
if (i < QHEAD_NUM) {
qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
memset(qh, 0, sizeof *qh);
kref_init(&qh->kref);
qh->oxu = oxu;
qh->qh_dma = virt_to_phys(qh);
INIT_LIST_HEAD(&qh->qtd_list);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc(oxu);
if (qh->dummy == NULL) {
oxu_dbg(oxu, "no dummy td\n");
oxu->qh_used[i] = 0;
qh = NULL;
goto unlock;
}
oxu->qh_used[i] = 1;
}
unlock:
spin_unlock(&oxu->mem_lock);
return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
kref_get(&qh->kref);
return qh;
}
static inline void qh_put(struct ehci_qh *qh)
{
kref_put(&qh->kref, qh_destroy);
}
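/* Release a micro-urb back to the pool */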
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
int index;
spin_lock(&oxu->mem_lock);
index = murb - &oxu->murb_pool[0];
oxu->murb_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
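/* Grab a free micro-urb from the pool, or NULL if none is available */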
static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
int i;
struct oxu_murb *murb = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < MURB_NUM; i++)
if (!oxu->murb_used[i])
break;
if (i < MURB_NUM) {
murb = &(oxu->murb_pool)[i];
oxu->murb_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
kfree(oxu->murb_pool);
oxu->murb_pool = NULL;
if (oxu->async)
qh_put(oxu->async);
oxu->async = NULL;
del_timer(&oxu->urb_timer);
oxu->periodic = NULL;
/* shadow periodic table */
kfree(oxu->pshadow);
oxu->pshadow = NULL;
}
/* Remember to add cleanup code (above) if you add anything here.
*/
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
int i;
for (i = 0; i < oxu->periodic_size; i++)
oxu->mem->frame_list[i] = EHCI_LIST_END;
for (i = 0; i < QHEAD_NUM; i++)
oxu->qh_used[i] = 0;
for (i = 0; i < QTD_NUM; i++)
oxu->qtd_used[i] = 0;
oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
if (!oxu->murb_pool)
goto fail;
for (i = 0; i < MURB_NUM; i++)
oxu->murb_used[i] = 0;
oxu->async = oxu_qh_alloc(oxu);
if (!oxu->async)
goto fail;
oxu->periodic = (__le32 *) &oxu->mem->frame_list;
oxu->periodic_dma = virt_to_phys(oxu->periodic);
for (i = 0; i < oxu->periodic_size; i++)
oxu->periodic[i] = EHCI_LIST_END;
/* software shadow of hardware table */
oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
if (oxu->pshadow != NULL)
return 0;
fail:
oxu_dbg(oxu, "couldn't init memory\n");
ehci_mem_cleanup(oxu);
return -ENOMEM;
}
/* Fill a qtd, returning how much of the buffer we were able to queue up.
*/
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely(len < count)) /* ... iff needed */
count = len;
else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_le32((count << 16) | token);
qtd->length = count;
return count;
}
static inline void qh_update(struct oxu_hcd *oxu,
struct ehci_qh *qh, struct ehci_qtd *qtd)
{
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END;
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
usb_settoggle(qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb();
qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* If it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
if (list_empty(&qh->qtd_list))
qtd = qh->dummy;
else {
qtd = list_entry(qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
if (qtd)
qh_update(oxu, qh, qtd);
}
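/* Update urb->actual_length and urb->status from a completed qtd's token */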
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
size_t length, u32 token)
{
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely(QTD_PID(token) != 2))
urb->actual_length += length - QTD_LENGTH(token);
/* don't modify error codes */
if (unlikely(urb->status != -EINPROGRESS))
return;
/* force cleanup after short read; not always an error */
if (unlikely(IS_SHORT_READ(token)))
urb->status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
urb->status = -EOVERFLOW;
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
urb->status = -EPROTO;
} else if (token & QTD_STS_DBE) {
urb->status = (QTD_PID(token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad crc, wrong PID, etc; retried */
if (QTD_CERR(token))
urb->status = -EPIPE;
else {
oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out");
urb->status = -EPROTO;
}
/* CERR nonzero + no errors + halt --> stall */
} else if (QTD_CERR(token))
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
token, urb->status);
}
}
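/* Finish an urb: drop its qh reference and give it back to its driver.
 * oxu->lock is dropped around the completion callback.
 */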
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
if (likely(urb->hcpriv != NULL)) {
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
}
qh_put(qh);
}
urb->hcpriv = NULL;
switch (urb->status) {
case -EINPROGRESS: /* success */
urb->status = 0;
break;
default: /* fault */
break;
case -EREMOTEIO: /* fault or normal */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = 0;
break;
case -ECONNRESET: /* canceled */
case -ENOENT:
break;
}
#ifdef OXU_URB_TRACE
oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->status,
urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
spin_unlock(&oxu->lock);
usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
/* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
struct ehci_qtd *qtd, *tmp;
int stopped;
unsigned count = 0;
int do_status = 0;
u8 state;
struct oxu_murb *murb = NULL;
if (unlikely(list_empty(&qh->qtd_list)))
return count;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
struct urb *urb;
u32 token = 0;
urb = qtd->urb;
/* Clean up any state from previous QTD ...*/
if (last) {
if (likely(last->urb != urb)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
}
oxu_qtd_free(oxu, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */
if (qtd == end)
break;
/* hardware copies qtd out of qh overlay */
rmb();
token = le32_to_cpu(qtd->hw_token);
/* always clean up qtds the hc de-activated */
if ((token & QTD_STS_ACTIVE) == 0) {
if ((token & QTD_STS_HALT) != 0) {
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
*/
} else if (IS_SHORT_READ(token) &&
!(qtd->hw_alt_next & EHCI_LIST_END)) {
stopped = 1;
goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely(!stopped &&
HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
break;
} else {
stopped = 1;
if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
urb->status = -ESHUTDOWN;
/* ignore active urbs unless some previous qtd
* for the urb faulted (including short read) or
* its urb was canceled. we may patch qh or qtds.
*/
if (likely(urb->status == -EINPROGRESS))
continue;
/* issue status after short control reads */
if (unlikely(do_status != 0)
&& QTD_PID(token) == 0 /* OUT */) {
do_status = 0;
continue;
}
/* token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_le32(qtd->qtd_dma)
== qh->hw_current)
token = le32_to_cpu(qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((HALT_BIT & qh->hw_token) == 0) {
halt:
qh->hw_token |= HALT_BIT;
wmb();
}
}
/* Remove it from the queue */
qtd_copy_status(oxu, urb->complete ?
urb : ((struct oxu_murb *) urb)->main,
qtd->length, token);
if ((usb_pipein(qtd->urb->pipe)) &&
(NULL != qtd->transfer_buffer))
memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
do_status = (urb->status == -EREMOTEIO)
&& usb_pipecontrol(urb->pipe);
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry(qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
list_del(&qtd->qtd_list);
last = qtd;
}
/* last urb's completion might still need calling */
if (likely(last != NULL)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_qtd_free(oxu, last);
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(oxu, qh);
break;
case QH_STATE_LINKED:
/* should be rare for periodic transfers,
* except maybe high bandwidth ...
*/
if ((cpu_to_le32(QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule(oxu, qh);
(void) qh_schedule(oxu, qh);
} else
unlink_async(oxu, qh);
break;
/* otherwise, unlink already started */
}
}
return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/* Reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *head)
{
struct ehci_qtd *qtd, *temp;
list_for_each_entry_safe(qtd, temp, head, qtd_list) {
list_del(&qtd->qtd_list);
oxu_qtd_free(oxu, qtd);
}
}
/* Create a list of filled qtds for this URB; won't link into qh.
*/
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
struct urb *urb,
struct list_head *head,
gfp_t flags)
{
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
int is_input;
u32 token;
void *transfer_buf = NULL;
int ret;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
return NULL;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein(urb->pipe);
if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
if (usb_pipecontrol(urb->pipe)) {
/* SETUP pid */
ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
if (ret)
goto cleanup;
qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
memcpy(qtd->buffer, qtd->urb->setup_packet,
sizeof(struct usb_ctrlrequest));
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
token |= (1 /* "in" */ << 8);
}
/*
* Data transfer stage: buffer setup
*/
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
buf = qtd->buffer_dma;
transfer_buf = urb->transfer_buffer;
if (!is_input)
memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
if (is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
for (;;) {
int this_qtd_len;
this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
qtd->transfer_buffer = transfer_buf;
len -= this_qtd_len;
buf += this_qtd_len;
transfer_buf += this_qtd_len;
if (is_input)
qtd->hw_alt_next = oxu->async->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely(len <= 0))
break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
if (likely(len > 0)) {
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
}
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
}
/* unless the bulk/interrupt caller wants a chance to clean
* up after short reads, hc should advance qh past this urb
*/
if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol(urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END;
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (likely(urb->transfer_buffer_length != 0)) {
int one_more = 0;
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill(qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
qtd->hw_token |= cpu_to_le32(QTD_IOC);
return head;
cleanup:
qtd_list_free(oxu, urb, head);
return NULL;
}
/* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
struct urb *urb, gfp_t flags)
{
struct ehci_qh *qh = oxu_qh_alloc(oxu);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint(urb->pipe) << 8;
info1 |= usb_pipedevice(urb->pipe) << 0;
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
maxp = usb_maxpacket(urb->dev, urb->pipe);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
qh->period = urb->interval >> 3;
if (qh->period == 0 && urb->interval != 1) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
urb->interval);
goto done;
}
} else {
struct usb_tt *tt = urb->dev->tt;
int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
qh->c_usecs = qh->usecs + HS_USECS(0);
qh->usecs = HS_USECS(1);
} else { /* SPLIT+DATA, gap, CSPLIT */
qh->usecs += HS_USECS(1);
qh->c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
qh->tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time(urb->dev->speed,
is_input, 0, max_packet(maxp)));
qh->period = urb->interval;
}
}
/* support for tt scheduling, and access to toggles */
qh->dev = urb->dev;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= (1 << 12); /* EPS "low" */
fallthrough;
case USB_SPEED_FULL:
/* EPS 0 means "full" */
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= max_packet(maxp) << 16;
info2 |= hb_mult(maxp) << 30;
}
break;
default:
oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
done:
qh_put(qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32(info1);
qh->hw_info2 = cpu_to_le32(info2);
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
qh_refresh(oxu, qh);
return qh;
}
/* Move qh (and its qtds) onto async queue; maybe enable queue.
*/
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
__le32 dma = QH_NEXT(qh->qh_dma);
struct ehci_qh *head;
/* (re)start the async schedule? */
head = oxu->async;
timer_action_done(oxu, TIMER_ASYNC_OFF);
if (!head->qh_next.qh) {
u32 cmd = readl(&oxu->regs->command);
if (!(cmd & CMD_ASE)) {
/* in case a clear of CMD_ASE didn't take yet */
(void)handshake(oxu, &oxu->regs->status,
STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
writel(cmd, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh(oxu, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw_next = head->hw_next;
wmb();
head->qh_next.qh = qh;
head->hw_next = dma;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
#define QH_ADDR_MASK cpu_to_le32(0x7f)
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
 * Returns NULL if it can't allocate the QH it needs.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *qtd_list,
int epnum, void **ptr)
{
struct ehci_qh *qh = NULL;
qh = (struct ehci_qh *) *ptr;
if (unlikely(qh == NULL)) {
/* can't sleep here, we have oxu->lock... */
qh = qh_make(oxu, urb, GFP_ATOMIC);
*ptr = qh;
}
if (likely(qh != NULL)) {
struct ehci_qtd *qtd;
if (unlikely(list_empty(qtd_list)))
qtd = NULL;
else
qtd = list_entry(qtd_list->next, struct ehci_qtd,
qtd_list);
/* control qh may need patching ... */
if (unlikely(epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice(urb->pipe) == 0)
qh->hw_info1 &= ~QH_ADDR_MASK;
}
/* just one way to queue requests: swap with the dummy qtd.
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely(qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
__le32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we're done, when the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT;
wmb();
dummy = qh->dummy;
dma = dummy->qtd_dma;
*dummy = *qtd;
dummy->qtd_dma = dma;
list_del(&qtd->qtd_list);
list_add(&dummy->qtd_list, qtd_list);
list_splice(qtd_list, qh->qtd_list.prev);
ehci_qtd_init(qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry(qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(dma);
/* let the hc process these next qtds */
dummy->hw_token = (token & ~(0x80));
wmb();
dummy->hw_token = token;
urb->hcpriv = qh_get(qh);
}
}
return qh;
}
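/* Queue a control/bulk urb's qtds onto its async qh, linking the qh if idle */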
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
int epnum = urb->ep->desc.bEndpointAddress;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc = 0;
#ifdef OXU_URB_TRACE
struct ehci_qtd *qtd;
qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
rc = -ESHUTDOWN;
goto done;
}
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
rc = -ENOMEM;
goto done;
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely(qh->qh_state == QH_STATE_IDLE))
qh_link_async(oxu, qh_get(qh));
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (unlikely(qh == NULL))
qtd_list_free(oxu, urb, qtd_list);
return rc;
}
/* The async qh for the qtds being reclaimed is now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh = oxu->reclaim;
struct ehci_qh *next;
timer_action_done(oxu, TIMER_IAA_WATCHDOG);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put(qh); /* refcount from reclaim */
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
oxu->reclaim = next;
oxu->reclaim_ready = 0;
qh->reclaim = NULL;
qh_completions(oxu, qh);
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
qh_link_async(oxu, qh);
else {
qh_put(qh); /* refcount from async list */
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
&& oxu->async->qh_next.qh == NULL)
timer_action(oxu, TIMER_ASYNC_OFF);
}
if (next) {
oxu->reclaim = NULL;
start_unlink_async(oxu, next);
}
}
/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int cmd = readl(&oxu->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
assert_spin_locked(&oxu->lock);
BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
&& qh->qh_state != QH_STATE_UNLINK_WAIT));
#endif
/* stop async schedule right now? */
if (unlikely(qh == oxu->async)) {
/* can't get here without STS_ASS set */
if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
&& !oxu->reclaim) {
/* ... and CMD_IAAD clear */
writel(cmd & ~CMD_ASE, &oxu->regs->command);
wmb();
/* handshake later, if we need to */
timer_action_done(oxu, TIMER_ASYNC_OFF);
}
return;
}
qh->qh_state = QH_STATE_UNLINK;
oxu->reclaim = qh = qh_get(qh);
prev = oxu->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb();
if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
/* if (unlikely(qh->reclaim != 0))
* this will recurse, probably not much
*/
end_unlink_async(oxu);
return;
}
oxu->reclaim_ready = 0;
cmd |= CMD_IAAD;
writel(cmd, &oxu->regs->command);
(void) readl(&oxu->regs->command);
timer_action(oxu, TIMER_IAA_WATCHDOG);
}
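/* Scan the async schedule: complete finished qtds, unlink idle qhs */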
static void scan_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
if (!++(oxu->stamp))
oxu->stamp++;
timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
qh = oxu->async->qh_next.qh;
if (likely(qh != NULL)) {
do {
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)
&& qh->stamp != oxu->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
*/
qh = qh_get(qh);
qh->stamp = oxu->stamp;
temp = qh_completions(oxu, qh);
qh_put(qh);
if (temp != 0)
goto rescan;
}
			/* unlink idle entries, reducing HC bus usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
if (list_empty(&qh->qtd_list)) {
if (qh->stamp == oxu->stamp)
action = TIMER_ASYNC_SHRINK;
else if (!oxu->reclaim
&& qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
qh = qh->qh_next.qh;
} while (qh);
}
if (action == TIMER_ASYNC_SHRINK)
timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
__le32 tag)
{
switch (tag) {
default:
case Q_TYPE_QH:
return &periodic->qh->qh_next;
}
}
/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &oxu->pshadow[frame];
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
hw_p = here.hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr)
return;
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
*hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe)
{
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow *q = &oxu->pshadow[frame];
unsigned usecs = 0;
while (q->ptr) {
switch (Q_NEXT_TYPE(*hw_p)) {
case Q_TYPE_QH:
default:
/* is it in the S-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
break;
}
}
#ifdef DEBUG
if (usecs > 100)
oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
#endif
return usecs;
}
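/* Turn on the periodic schedule, once any prior disable has taken effect */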
static int enable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
	/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) | CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... PSS happens later */
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* make sure ehci_work scans these */
oxu->next_uframe = readl(&oxu->regs->frame_index)
% (oxu->periodic_size << 3);
return 0;
}
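/* Turn off the periodic schedule, once any prior enable has taken effect */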
static int disable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) & ~CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... */
oxu->next_uframe = -1;
return 0;
}
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; oxu 0.96+)
*/
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->period;
dev_dbg(&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period) {
union ehci_shadow *prev = &oxu->pshadow[i];
__le32 *hw_p = &oxu->periodic[i];
union ehci_shadow here = *prev;
__le32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
type = Q_NEXT_TYPE(*hw_p);
if (type == Q_TYPE_QH)
break;
prev = periodic_next_shadow(prev, type);
hw_p = &here.qh->hw_next;
here = *prev;
}
/* sorting each branch by period (slow-->fast)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
if (qh->period > here.qh->period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh_get(qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
/* maybe enable periodic schedule processing */
if (!oxu->periodic_sched++)
return enable_periodic(oxu);
return 0;
}
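/* Remove a qh from every frame of the periodic schedule it occupies */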
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
/* FIXME:
* IF this isn't high speed
* and this qh is active in the current uframe
* (and overlay token SplitXstate is false?)
* THEN
* qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->period;
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period)
periodic_unlink(oxu, i, qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
dev_dbg(&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
qh_put(qh);
/* maybe turn off periodic schedule */
oxu->periodic_sched--;
if (!oxu->periodic_sched)
(void) disable_periodic(oxu);
}
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned wait;
qh_unlink_periodic(oxu, qh);
/* simple/paranoid: always delay, expecting the HC needs to read
* qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
* expect hub_wq to clean up after any CSPLITs we won't issue.
* active high speed queues may need bigger delays...
*/
if (list_empty(&qh->qtd_list)
|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay(wait);
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
wmb();
}
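/* Does "usecs" of periodic bandwidth fit in this uframe for the given
 * period? Budget is the EHCI 80% rule: 100 usecs per 125-usec uframe.
 */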
static int check_period(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
unsigned period, unsigned usecs)
{
int claimed;
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/*
* 80% periodic == 100 usec/uframe available
* convert "usecs we need" to "max already claimed"
*/
usecs = 100 - usecs;
/* we "know" 2 and 4 uframe intervals were rejected; so
* for period 0, check _every_ microframe in the schedule.
*/
if (unlikely(period == 0)) {
do {
for (uframe = 0; uframe < 7; uframe++) {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
}
} while ((frame += 1) < oxu->periodic_size);
/* just check the specified uframe, at that period */
} else {
do {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
} while ((frame += period) < oxu->periodic_size);
}
return 1;
}
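/* Can this qh's interrupt transfer start at this frame/uframe?
 * Transfers that would need a CSPLIT (qh->c_usecs != 0) are always
 * rejected here.
 */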
static int check_intr_schedule(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
const struct ehci_qh *qh, __le32 *c_maskp)
{
int retval = -ENOSPC;
if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
done:
return retval;
}
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int status;
unsigned uframe;
__le32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh_refresh(oxu, qh);
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
status = check_intr_schedule(oxu, frame, --uframe,
qh, &c_mask);
} else {
uframe = 0;
c_mask = 0;
status = -ENOSPC;
}
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
frame = qh->period - 1;
do {
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule(oxu,
frame, uframe, qh,
&c_mask);
if (status == 0)
break;
}
} while (status && frame--);
/* qh->period == 0 means every uframe */
} else {
frame = 0;
status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
}
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period
? cpu_to_le32(1 << uframe)
: cpu_to_le32(QH_SMASK);
qh->hw_info2 |= c_mask;
} else
oxu_dbg(oxu, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
status = qh_link_periodic(oxu, qh);
done:
return status;
}
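/* Submit an interrupt urb: schedule the qh if it is idle, then queue the qtds */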
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
status = -ESHUTDOWN;
goto done;
}
/* get qh and force any scheduling errors */
INIT_LIST_HEAD(&empty);
qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
status = qh_schedule(oxu, qh);
if (status != 0)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON(qh == NULL);
/* ... update usbfs periodic stats */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (status)
qtd_list_free(oxu, urb, qtd_list);
return status;
}
static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "iso support is missing!\n");
return -ENOSYS;
}
static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "split iso support is missing!\n");
return -ENOSYS;
}
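/* Scan the periodic schedule for completions, from the last scan point
 * up to the current frame index.
 */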
static void scan_periodic(struct oxu_hcd *oxu)
{
unsigned frame, clock, now_uframe, mod;
unsigned modified;
mod = oxu->periodic_size << 3;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = oxu->next_uframe;
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
clock = readl(&oxu->regs->frame_index);
else
clock = now_uframe + mod - 1;
clock %= mod;
for (;;) {
union ehci_shadow q, *q_p;
__le32 type, *hw_p;
/* don't scan past the live uframe */
frame = now_uframe >> 3;
if (frame != (clock >> 3)) {
/* safe to scan the whole frame at once */
now_uframe |= 0x07;
}
restart:
/* scan each element in frame's queue for completions */
q_p = &oxu->pshadow[frame];
hw_p = &oxu->periodic[frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(*hw_p);
modified = 0;
while (q.ptr != NULL) {
union ehci_shadow temp;
switch (type) {
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get(q.qh);
type = Q_NEXT_TYPE(q.qh->hw_next);
q = q.qh->qh_next;
modified = qh_completions(oxu, temp.qh);
if (unlikely(list_empty(&temp.qh->qtd_list)))
intr_deschedule(oxu, temp.qh);
qh_put(temp.qh);
break;
default:
oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
q.ptr = NULL;
}
/* assume completion callbacks modify the queue */
if (unlikely(modified))
goto restart;
}
/* Stop when we catch up to the HC */
/* FIXME: this assumes we won't get lapped when
* latencies climb; that should be rare, but...
* detect it, and just go all the way around.
* FLR might help detect this case, so long as latencies
* don't exceed periodic_size msec (default 1.024 sec).
*/
/* FIXME: likewise assumes HC doesn't halt mid-scan */
if (now_uframe == clock) {
unsigned now;
if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
break;
oxu->next_uframe = now_uframe;
now = readl(&oxu->regs->frame_index) % mod;
if (now_uframe == now)
break;
/* rescan the rest of this frame, then ... */
clock = now;
} else {
now_uframe++;
now_uframe %= mod;
}
}
}
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
int port = HCS_N_PORTS(oxu->hcs_params);
while (port--)
writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}
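/* Switch port power on or off for all root hub ports, if the HC has
 * port power control.
 */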
static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
unsigned port;
if (!HCS_PPC(oxu->hcs_params))
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
if (is_on)
oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
USB_PORT_FEAT_POWER, port--, NULL, 0);
else
oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
USB_PORT_FEAT_POWER, port--, NULL, 0);
}
msleep(20);
}
/* Called from some interrupts, timers, and so on.
* It calls driver completion functions, after dropping oxu->lock.
*/
static void ehci_work(struct oxu_hcd *oxu)
{
timer_action_done(oxu, TIMER_IO_WATCHDOG);
if (oxu->reclaim_ready)
end_unlink_async(oxu);
/* another CPU may drop oxu->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
if (oxu->scanning)
return;
oxu->scanning = 1;
scan_async(oxu);
if (oxu->next_uframe != -1)
scan_periodic(oxu);
oxu->scanning = 0;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
(oxu->async->qh_next.ptr != NULL ||
oxu->periodic_sched != 0))
timer_action(oxu, TIMER_IO_WATCHDOG);
}
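/* Unlink a qh from the async schedule, deferring behind any unlink
 * already in progress.
 */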
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
/* if we need to use IAA and it's busy, defer */
if (qh->qh_state == QH_STATE_LINKED
&& oxu->reclaim
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
struct ehci_qh *last;
for (last = oxu->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
/* bypass IAA if the hc can't care */
} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
end_unlink_async(oxu);
/* something else might have unlinked the qh by now */
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
/*
* USB host controller methods
*/
static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 status, pcd_status = 0;
int bh;
spin_lock(&oxu->lock);
status = readl(&oxu->regs->status);
/* e.g. cardbus physical eject */
if (status == ~(u32) 0) {
oxu_dbg(oxu, "device removed\n");
goto dead;
}
/* Shared IRQ? */
status &= INTR_MASK;
if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
spin_unlock(&oxu->lock);
return IRQ_NONE;
}
/* clear (just) interrupts */
writel(status, &oxu->regs->status);
readl(&oxu->regs->command); /* unblock posted write */
bh = 0;
#ifdef OXU_VERBOSE_DEBUG
/* unrequested/ignored: Frame List Rollover */
dbg_status(oxu, "irq", status);
#endif
/* INT, ERR, and IAA interrupt rates can be throttled */
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0))
bh = 1;
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
oxu->reclaim_ready = 1;
bh = 1;
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS(oxu->hcs_params);
pcd_status = status;
/* resume root hub? */
if (!(readl(&oxu->regs->command) & CMD_RUN))
usb_hcd_resume_root_hub(hcd);
while (i--) {
int pstatus = readl(&oxu->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
if (!(pstatus & PORT_RESUME)
|| oxu->reset_done[i] != 0)
continue;
/* start USB_RESUME_TIMEOUT resume signaling from this
* port, and make hub_wq collect PORT_STAT_C_SUSPEND to
* stop that signaling.
*/
oxu->reset_done[i] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
}
}
	/* host system errors [4.15.2.4] */
if (unlikely((status & STS_FATAL) != 0)) {
/* bogus "fatal" IRQs appear on some chips... why? */
status = readl(&oxu->regs->status);
dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
dbg_status(oxu, "fatal", status);
if (status & STS_HALT) {
oxu_err(oxu, "fatal error\n");
dead:
ehci_reset(oxu);
writel(0, &oxu->regs->configured_flag);
usb_hc_died(hcd);
/* generic layer kills/unlinks all urbs, then
* uses oxu_stop to clean up the rest
*/
bh = 1;
}
}
if (bh)
ehci_work(oxu);
spin_unlock(&oxu->lock);
if (pcd_status & STS_PCD)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
}
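/* Top-level chip IRQ handler: mask chip interrupts, dispatch to the
 * EHCI-level handler for the OTG or SPH core, then unmask them again.
 */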
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ret = IRQ_HANDLED;
u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
	/* Disable all interrupts */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
(!oxu->is_otg && (status & OXU_USBSPHI)))
oxu210_hcd_irq(hcd);
else
ret = IRQ_NONE;
	/* Re-enable all interrupts */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
return ret;
}
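/* Watchdog: recover from lost IAA irqs and run any deferred work */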
static void oxu_watchdog(struct timer_list *t)
{
struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
/* lost IAA irqs wedge things badly; seen with a vt8235 */
if (oxu->reclaim) {
u32 status = readl(&oxu->regs->status);
if (status & STS_IAA) {
oxu_vdbg(oxu, "lost IAA\n");
writel(STS_IAA, &oxu->regs->status);
oxu->reclaim_ready = 1;
}
}
/* stop async processing after it's idled a bit */
if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
start_unlink_async(oxu, oxu->async);
/* oxu could run by timer, without IRQs ... */
ehci_work(oxu);
spin_unlock_irqrestore(&oxu->lock, flags);
}
/* One-time init, only for memory state.
*/
static int oxu_hcd_init(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int retval;
u32 hcc_params;
spin_lock_init(&oxu->lock);
timer_setup(&oxu->watchdog, oxu_watchdog, 0);
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
oxu->periodic_size = DEFAULT_I_TDPS;
retval = ehci_mem_init(oxu, GFP_KERNEL);
if (retval < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */
oxu->i_thresh = 8;
else /* N microframes cached */
oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
oxu->reclaim = NULL;
oxu->reclaim_ready = 0;
oxu->next_uframe = -1;
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
oxu->async->qh_next.qh = NULL;
oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
oxu->async->hw_qtd_next = EHCI_LIST_END;
oxu->async->qh_state = QH_STATE_LINKED;
oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
* schedule by avoiding QH fetches between transfers.
*
* With fast usb storage devices and NForce2, "park" seems to
		 * cause problems: throughput reduction (!), data errors...
*/
if (park) {
park = min(park, (unsigned) 3);
temp |= CMD_PARK;
temp |= park << 8;
}
oxu_dbg(oxu, "park %d\n", park);
}
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
}
oxu->command = temp;
return 0;
}
/* Called during probe() after chip reset completes.
*/
static int oxu_reset(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
spin_lock_init(&oxu->mem_lock);
INIT_LIST_HEAD(&oxu->urb_list);
oxu->urb_len = 0;
if (oxu->is_otg) {
oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET +
			HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_SPH_MEM;
} else {
oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET +
			HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_OTG_MEM;
}
oxu->hcs_params = readl(&oxu->caps->hcs_params);
oxu->sbrn = 0x20;
return oxu_hcd_init(hcd);
}
static int oxu_run(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int retval;
u32 temp, hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
retval = ehci_reset(oxu);
if (retval != 0) {
ehci_mem_cleanup(oxu);
return retval;
}
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* hcc_params controls whether oxu->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* dma_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like dma_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
*/
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params))
writel(0, &oxu->regs->segment);
oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
CMD_ASE | CMD_RESET);
oxu->command |= CMD_RUN;
writel(oxu->command, &oxu->regs->command);
dbg_cmd(oxu, "init", oxu->command);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
* involved with the root hub. (Except where one is integrated,
* and there's no companion controller unless maybe for USB OTG.)
*/
hcd->state = HC_STATE_RUNNING;
writel(FLAG_CF, &oxu->regs->configured_flag);
readl(&oxu->regs->command); /* unblock posted writes */
temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
temp >> 8, temp & 0xff, DRIVER_VERSION,
ignore_oc ? ", overcurrent ignored" : "");
writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
return 0;
}
static void oxu_stop(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
/* Turn off port power on all root hub ports. */
ehci_port_power(oxu, 0);
/* no more interrupts ... */
del_timer_sync(&oxu->watchdog);
spin_lock_irq(&oxu->lock);
if (HC_IS_RUNNING(hcd->state))
ehci_quiesce(oxu);
ehci_reset(oxu);
writel(0, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
/* let companion controllers work when we aren't */
writel(0, &oxu->regs->configured_flag);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq(&oxu->lock);
if (oxu->async)
ehci_work(oxu);
spin_unlock_irq(&oxu->lock);
ehci_mem_cleanup(oxu);
dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}
/* Kicks in for silicon on any bus (not just PCI).
 * This forcibly disables DMA and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void oxu_shutdown(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
(void) ehci_halt(oxu);
ehci_turn_off_all_ports(oxu);
/* make BIOS/etc use companion controller during reboot */
writel(0, &oxu->regs->configured_flag);
/* unblock posted writes */
readl(&oxu->regs->configured_flag);
}
/* Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
*
* urb + dev is in hcd.self.controller.urb_list
* we're queueing TDs onto software and hardware lists
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct list_head qtd_list;
INIT_LIST_HEAD(&qtd_list);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async(oxu, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit(oxu, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit(oxu, urb, mem_flags);
else
return sitd_submit(oxu, urb, mem_flags);
}
}
/* Break URBs with large transfer buffers into 4 KiB micro URBs and
 * process them in sequence.
 */
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int num, rem;
void *transfer_buffer;
struct urb *murb;
int i, ret;
/* If not bulk pipe just enqueue the URB */
if (!usb_pipebulk(urb->pipe))
return __oxu_urb_enqueue(hcd, urb, mem_flags);
	/* Otherwise check whether the transfer buffer must be split */
transfer_buffer = urb->transfer_buffer;
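	/* Number of 4 KiB chunks, rounded up to cover any remainder */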
num = urb->transfer_buffer_length / 4096;
rem = urb->transfer_buffer_length % 4096;
if (rem != 0)
num++;
	/* If the URB fits in a single 4 KiB chunk, just enqueue it! */
if (num == 1)
return __oxu_urb_enqueue(hcd, urb, mem_flags);
	/* Ok, we have more work to do! :) */
for (i = 0; i < num - 1; i++) {
		/* Get a free micro URB; poll until one becomes available */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
		/* Copy the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = 4096;
murb->transfer_buffer = transfer_buffer + i * 4096;
		/* A NULL complete callback encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 0;
		/* Retry until the micro urb is accepted; the needed
		 * resources may be exhausted at any particular time.
		 */
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
}
/* Last urb requires special handling */
	/* Get a free micro URB; poll until one becomes available */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
	/* Copy the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = rem > 0 ? rem : 4096;
murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
	/* A NULL complete callback encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 1;
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
return ret;
}
/* Remove from hardware lists.
* Completions normally happen asynchronously
*/
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct ehci_qh *qh;
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
unlink_async(oxu, qh);
break;
case PIPE_INTERRUPT:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
intr_deschedule(oxu, qh);
fallthrough;
case QH_STATE_IDLE:
qh_completions(oxu, qh);
break;
default:
oxu_dbg(oxu, "bogus qh %p state %d\n",
qh, qh->qh_state);
goto done;
}
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(hcd->state)) {
			int sched_status;	/* don't shadow the status argument */
			sched_status = qh_schedule(oxu, qh);
			spin_unlock_irqrestore(&oxu->lock, flags);
			if (sched_status != 0) {
				/* shouldn't happen often, but ...
				 * FIXME kill those tds' urbs
				 */
				dev_err(hcd->self.controller,
					"can't reschedule qh %p, err %d\n", qh,
					sched_status);
			}
			return sched_status;
}
break;
}
done:
spin_unlock_irqrestore(&oxu->lock, flags);
return 0;
}
/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
unsigned long flags;
struct ehci_qh *qh, *tmp;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
rescan:
spin_lock_irqsave(&oxu->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw_info1 == 0) {
oxu_vdbg(oxu, "iso delay\n");
goto idle_timeout;
}
if (!HC_IS_RUNNING(hcd->state))
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
for (tmp = oxu->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
/* periodic qh self-unlinks on empty */
if (!tmp)
goto nogood;
unlink_async(oxu, qh);
fallthrough;
case QH_STATE_UNLINK: /* wait for hw to finish? */
idle_timeout:
spin_unlock_irqrestore(&oxu->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (list_empty(&qh->qtd_list)) {
qh_put(qh);
break;
}
fallthrough;
default:
nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty(&qh->qtd_list) ? "" : "(has tds)");
break;
}
ep->hcpriv = NULL;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
}
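/* Current frame number: the hardware counts microframes, so drop the
 * low three bits and wrap at the periodic schedule size.
 */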
static int oxu_get_frame(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
return (readl(&oxu->regs->frame_index) >> 3) %
oxu->periodic_size;
}
/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp, mask, status = 0;
int ports, i, retval = 1;
unsigned long flags;
/* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
/* init status to no-changes */
buf[0] = 0;
ports = HCS_N_PORTS(oxu->hcs_params);
if (ports > 7) {
buf[1] = 0;
retval++;
}
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
if (!ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
/* no hub change reports (bit 0) for now (power, ...) */
/* port N changes (bit N)? */
spin_lock_irqsave(&oxu->lock, flags);
for (i = 0; i < ports; i++) {
temp = readl(&oxu->regs->port_status[i]);
/*
* Return status information even for ports with OWNER set.
* Otherwise hub_wq wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if (!(temp & PORT_CONNECT))
oxu->reset_done[i] = 0;
if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
time_after_eq(jiffies, oxu->reset_done[i]))) {
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
status = STS_PCD;
}
}
/* FIXME autosuspend idle root hubs */
spin_unlock_irqrestore(&oxu->lock, flags);
return status ? retval : 0;
}
/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
unsigned int portsc)
{
switch ((portsc >> 26) & 3) {
case 0:
return 0;
case 1:
return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
return USB_PORT_STAT_HIGH_SPEED;
}
}
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
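/* Root-hub control requests; modeled on the EHCI hub_control handler */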
static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ports = HCS_N_PORTS(oxu->hcs_params);
u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
u32 temp, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
spin_lock_irqsave(&oxu->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
		/*
		 * Even if OWNER is set (i.e. the port is owned by the
		 * companion controller), hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
writel(temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (temp & PORT_SUSPEND) {
if ((temp & PORT_PE) == 0)
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
writel(temp | PORT_RESUME, status_reg);
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
/* we auto-clear this feature */
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted write */
break;
case GetHubDescriptor:
ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset(buf, 0, 4);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
temp = readl(status_reg);
/* wPortChange bits */
if (temp & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc)
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
/* Remote Wakeup received? */
if (!oxu->reset_done[wIndex]) {
/* resume signaling for 20 msec */
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
mod_timer(&oxu_to_hcd(oxu)->rh_timer,
oxu->reset_done[wIndex]);
}
/* resume completed? */
else if (time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_SUSPEND << 16;
oxu->reset_done[wIndex] = 0;
/* stop resume signaling */
temp = readl(status_reg);
writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
status_reg);
retval = handshake(oxu, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
oxu_err(oxu,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
}
}
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
oxu->reset_done[wIndex] = 0;
/* force reset to complete */
writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = handshake(oxu, status_reg,
PORT_RESET, 0, 750);
if (retval != 0) {
oxu_err(oxu, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
temp = check_reset_complete(oxu, wIndex, status_reg,
readl(status_reg));
}
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &oxu->companion_ports)) {
temp &= ~PORT_RWC_BITS;
temp |= PORT_OWNER;
writel(temp, status_reg);
oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
temp = readl(status_reg);
}
/*
* Even if OWNER is set, there's no harm letting hub_wq
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= oxu_port_speed(oxu, temp);
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_OC)
status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
status |= USB_PORT_STAT_POWER;
#ifndef OXU_VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
#endif
dbg_port(oxu, "GetStatus", wIndex + 1, temp);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
if (temp & PORT_OWNER)
break;
temp &= ~PORT_RWC_BITS;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
if (device_may_wakeup(&hcd->self.root_hub->dev))
temp |= PORT_WAKE_BITS;
writel(temp | PORT_SUSPEND, status_reg);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp | PORT_POWER, status_reg);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(50);
writel(temp, status_reg);
break;
/* For downstream facing ports (these): one hub port is put
* into test mode according to USB2 11.24.2.13, then the hub
* must be reset (which for root hub now means rmmod+modprobe,
* or else system reboot). See EHCI 2.3.9 and 4.14 for info
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
if (!selector || selector > 5)
goto error;
ehci_quiesce(oxu);
ehci_halt(oxu);
temp |= selector << 16;
writel(temp, status_reg);
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted writes */
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&oxu->lock, flags);
return retval;
}
#ifdef CONFIG_PM
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int port;
int mask;
oxu_dbg(oxu, "suspend root hub\n");
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
port = HCS_N_PORTS(oxu->hcs_params);
spin_lock_irq(&oxu->lock);
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce(oxu);
hcd->state = HC_STATE_QUIESCING;
}
oxu->command = readl(&oxu->regs->command);
if (oxu->reclaim)
oxu->reclaim_ready = 1;
ehci_work(oxu);
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
* then manually resume them in the bus_resume() routine.
*/
oxu->bus_suspended = 0;
while (port--) {
u32 __iomem *reg = &oxu->regs->port_status[port];
u32 t1 = readl(reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
/* keep track of which ports we suspend */
if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
!(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &oxu->bus_suspended);
}
/* enable remote wakeup on all ports */
if (device_may_wakeup(&hcd->self.root_hub->dev))
t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
else
t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
if (t1 != t2) {
oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
writel(t2, reg);
}
}
spin_unlock_irq(&oxu->lock);
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
spin_lock_irq(&oxu->lock);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
/* allow remote wakeup */
mask = INTR_MASK;
if (!device_may_wakeup(&hcd->self.root_hub->dev))
mask &= ~STS_PCD;
writel(mask, &oxu->regs->intr_enable);
readl(&oxu->regs->intr_enable);
oxu->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq(&oxu->lock);
return 0;
}
/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int i;
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
spin_lock_irq(&oxu->lock);
	/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
temp = readl(&oxu->regs->intr_enable);
oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
writel(0, &oxu->regs->intr_enable);
/* re-init operational registers */
writel(0, &oxu->regs->segment);
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
writel(oxu->command, &oxu->regs->command);
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
mdelay(8);
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS(oxu->hcs_params);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
temp &= ~(PORT_RWC_BITS
| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
temp |= PORT_RESUME;
}
writel(temp, &oxu->regs->port_status[i]);
}
i = HCS_N_PORTS(oxu->hcs_params);
mdelay(20);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
temp &= ~(PORT_RWC_BITS | PORT_RESUME);
writel(temp, &oxu->regs->port_status[i]);
oxu_vdbg(oxu, "resumed port %d\n", i + 1);
}
}
(void) readl(&oxu->regs->command);
/* maybe re-activate the schedule(s) */
temp = 0;
if (oxu->async->qh_next.qh)
temp |= CMD_ASE;
if (oxu->periodic_sched)
temp |= CMD_PSE;
if (temp) {
oxu->command |= temp;
writel(oxu->command, &oxu->regs->command);
}
oxu->next_statechange = jiffies + msecs_to_jiffies(5);
hcd->state = HC_STATE_RUNNING;
/* Now we can safely re-enable irqs */
writel(INTR_MASK, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
return 0;
}
#else
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
return 0;
}
static int oxu_bus_resume(struct usb_hcd *hcd)
{
return 0;
}
#endif /* CONFIG_PM */
static const struct hc_driver oxu_hc_driver = {
.description = "oxu210hp_hcd",
.product_desc = "oxu210hp HCD",
.hcd_priv_size = sizeof(struct oxu_hcd),
/*
* Generic hardware linkage
*/
.irq = oxu_irq,
.flags = HCD_MEMORY | HCD_USB2,
/*
* Basic lifecycle operations
*/
.reset = oxu_reset,
.start = oxu_run,
.stop = oxu_stop,
.shutdown = oxu_shutdown,
/*
* Managing i/o requests and associated device resources
*/
.urb_enqueue = oxu_urb_enqueue,
.urb_dequeue = oxu_urb_dequeue,
.endpoint_disable = oxu_endpoint_disable,
/*
* Scheduling support
*/
.get_frame_number = oxu_get_frame,
/*
* Root hub support
*/
.hub_status_data = oxu_hub_status_data,
.hub_control = oxu_hub_control,
.bus_suspend = oxu_bus_suspend,
.bus_resume = oxu_bus_resume,
};
/*
* Module stuff
*/
static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
{
u32 tmp;
/* Initialize top level registers.
* First write ever
*/
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
OXU_COMPARATOR | OXU_ASO_OP);
tmp = oxu_readl(base, OXU_CLKCTRL_SET);
oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
	/* Clear all top-level interrupt enables */
oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
	/* Clear all top-level interrupt status bits */
oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
	/* Enable all needed top-level interrupts except the OTG/SPH cores */
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
{
u32 id;
static const char * const bo[] = {
"reserved",
"128-pin LQFP",
"84-pin TFBGA",
"reserved",
};
/* Read controller signature register to find a match */
id = oxu_readl(base, OXU_DEVICEID);
dev_info(&pdev->dev, "device ID %x\n", id);
if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
		return -ENODEV;
dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
id >> OXU_REV_SHIFT,
bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
return 0;
}
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void __iomem *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd;
struct oxu_hcd *oxu;
int ret;
/* Set endian mode and host mode */
oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
OXU_USBMODE,
OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
hcd = usb_create_hcd(&oxu_hc_driver, dev,
otg ? "oxu210hp_otg" : "oxu210hp_sph");
if (!hcd)
return ERR_PTR(-ENOMEM);
hcd->rsrc_start = memstart;
hcd->rsrc_len = memlen;
hcd->regs = base;
hcd->irq = irq;
hcd->state = HC_STATE_HALT;
oxu = hcd_to_oxu(hcd);
oxu->is_otg = otg;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret < 0) {
usb_put_hcd(hcd);
return ERR_PTR(ret);
}
device_wakeup_enable(hcd->self.controller);
return hcd;
}
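/* Probe-time bring-up: configure the chip, verify its ID, then create
 * the OTG and SPH host controllers sharing one register window.
 */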
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void __iomem *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
int ret;
	/* First-time configuration at start-up */
oxu_configuration(pdev, base);
ret = oxu_verify_id(pdev, base);
if (ret) {
dev_err(&pdev->dev, "no devices found!\n");
return -ENODEV;
}
/* Create the OTG controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create OTG controller!\n");
ret = PTR_ERR(hcd);
goto error_create_otg;
}
info->hcd[0] = hcd;
/* Create the SPH host controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create SPH controller!\n");
ret = PTR_ERR(hcd);
goto error_create_sph;
}
info->hcd[1] = hcd;
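	/* Both HCDs are up: also enable the two host-core interrupt
	 * sources (low two bits), presumably the OTG and SPH cores.
	 */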
oxu_writel(base, OXU_CHIPIRQEN_SET,
oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
return 0;
error_create_sph:
usb_remove_hcd(info->hcd[0]);
usb_put_hcd(info->hcd[0]);
error_create_otg:
return ret;
}
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
void __iomem *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
if (usb_disabled())
return -ENODEV;
/*
* Get the platform resources
*/
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto error;
}
memstart = res->start;
memlen = resource_size(res);
ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
goto error;
}
/* Allocate a driver data struct to hold useful info for both
* SPH & OTG devices
*/
info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
if (!info) {
		ret = -ENOMEM;
goto error;
}
platform_set_drvdata(pdev, info);
ret = oxu_init(pdev, memstart, memlen, base, irq);
if (ret < 0) {
dev_dbg(&pdev->dev, "cannot init USB devices\n");
goto error;
}
dev_info(&pdev->dev, "devices enabled and running\n");
return 0;
error:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
return ret;
}
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
static void oxu_drv_remove(struct platform_device *pdev)
{
struct oxu_info *info = platform_get_drvdata(pdev);
oxu_remove(pdev, info->hcd[0]);
oxu_remove(pdev, info->hcd[1]);
}
static void oxu_drv_shutdown(struct platform_device *pdev)
{
oxu_drv_remove(pdev);
}
#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
static int oxu_drv_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
#else
#define oxu_drv_suspend NULL
#define oxu_drv_resume NULL
#endif
static struct platform_driver oxu_driver = {
.probe = oxu_drv_probe,
.remove_new = oxu_drv_remove,
.shutdown = oxu_drv_shutdown,
.suspend = oxu_drv_suspend,
.resume = oxu_drv_resume,
.driver = {
.name = "oxu210hp-hcd",
.bus = &platform_bus_type
}
};
module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/oxu210hp-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCMCIA driver for SL811HS (as found in REX-CFU1U)
* Filename: sl811_cs.c
* Author: Yukio Yamamoto
*
* Port to sl811-hcd and 2.6.x by
* Botond Botyanszki <[email protected]>
* Simon Pickering
*
* Last update: 2005-05-12
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <linux/usb/sl811.h>
MODULE_AUTHOR("Botond Botyanszki");
MODULE_DESCRIPTION("REX-CFU1U PCMCIA driver for 2.6");
MODULE_LICENSE("GPL");
/*====================================================================*/
/* MACROS */
/*====================================================================*/
#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)
/*====================================================================*/
/* VARIABLES */
/*====================================================================*/
typedef struct local_info_t {
struct pcmcia_device *p_dev;
} local_info_t;
static void sl811_cs_release(struct pcmcia_device *link);
/*====================================================================*/
static void release_platform_dev(struct device *dev)
{
dev_dbg(dev, "sl811_cs platform_dev release\n");
dev->parent = NULL;
}
static struct sl811_platform_data platform_data = {
.potpg = 100,
.power = 50, /* == 100mA */
// .reset = ... FIXME: invoke CF reset on the card
};
static struct resource resources[] = {
[0] = {
.flags = IORESOURCE_IRQ,
},
[1] = {
// .name = "address",
.flags = IORESOURCE_IO,
},
[2] = {
// .name = "data",
.flags = IORESOURCE_IO,
},
};
extern struct platform_driver sl811h_driver;
static struct platform_device platform_dev = {
.id = -1,
.dev = {
.platform_data = &platform_data,
.release = release_platform_dev,
},
.resource = resources,
.num_resources = ARRAY_SIZE(resources),
};
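/* Bind the generic sl811-hcd platform device to this card's window:
 * one IRQ plus two adjacent I/O ports (address register, then data).
 */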
static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
int irq)
{
if (platform_dev.dev.parent)
return -EBUSY;
platform_dev.dev.parent = parent;
/* finish setting up the platform device */
resources[0].start = irq;
resources[1].start = base_addr;
resources[1].end = base_addr;
resources[2].start = base_addr + 1;
resources[2].end = base_addr + 1;
/* The driver core will probe for us. We know sl811-hcd has been
* initialized already because of the link order dependency created
* by referencing "sl811h_driver".
*/
platform_dev.name = sl811h_driver.driver.name;
return platform_device_register(&platform_dev);
}
/*====================================================================*/
static void sl811_cs_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "sl811_cs_detach\n");
sl811_cs_release(link);
/* This points to the parent local_info_t struct */
kfree(link->priv);
}
static void sl811_cs_release(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "sl811_cs_release\n");
pcmcia_disable_device(link);
platform_device_unregister(&platform_dev);
}
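/* Reject config entries without a config index; any other entry just
 * needs its I/O window requested.
 */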
static int sl811_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
if (p_dev->config_index == 0)
return -EINVAL;
return pcmcia_request_io(p_dev);
}
static int sl811_cs_config(struct pcmcia_device *link)
{
struct device *parent = &link->dev;
int ret;
dev_dbg(&link->dev, "sl811_cs_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO;
if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
goto failed;
/* require an IRQ and two registers */
if (resource_size(link->resource[0]) < 2)
goto failed;
if (!link->irq)
goto failed;
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
if (sl811_hc_init(parent, link->resource[0]->start, link->irq)
< 0) {
failed:
printk(KERN_WARNING "sl811_cs_config failed\n");
sl811_cs_release(link);
return -ENODEV;
}
return 0;
}
static int sl811_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local)
return -ENOMEM;
local->p_dev = link;
link->priv = local;
return sl811_cs_config(link);
}
static const struct pcmcia_device_id sl811_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, sl811_ids);
static struct pcmcia_driver sl811_cs_driver = {
.owner = THIS_MODULE,
.name = "sl811_cs",
.probe = sl811_cs_probe,
.remove = sl811_cs_detach,
.id_table = sl811_ids,
};
module_pcmcia_driver(sl811_cs_driver);
| linux-master | drivers/usb/host/sl811_cs.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2004 David Brownell <[email protected]>
*
* This file is licenced under GPL
*/
/*-------------------------------------------------------------------------*/
/*
* OHCI Root Hub ... the nonsharable stuff
*/
#define dbg_port(hc,label,num,value) \
ohci_dbg (hc, \
"%s roothub.portstatus [%d] " \
"= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
label, num, value, \
(value & RH_PS_PRSC) ? " PRSC" : "", \
(value & RH_PS_OCIC) ? " OCIC" : "", \
(value & RH_PS_PSSC) ? " PSSC" : "", \
(value & RH_PS_PESC) ? " PESC" : "", \
(value & RH_PS_CSC) ? " CSC" : "", \
\
(value & RH_PS_LSDA) ? " LSDA" : "", \
(value & RH_PS_PPS) ? " PPS" : "", \
(value & RH_PS_PRS) ? " PRS" : "", \
(value & RH_PS_POCI) ? " POCI" : "", \
(value & RH_PS_PSS) ? " PSS" : "", \
\
(value & RH_PS_PES) ? " PES" : "", \
(value & RH_PS_CCS) ? " CCS" : "" \
);
/*-------------------------------------------------------------------------*/
#define OHCI_SCHED_ENABLES \
(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
static void update_done_list(struct ohci_hcd *);
static void ohci_work(struct ohci_hcd *);
#ifdef CONFIG_PM
static int ohci_rh_suspend (struct ohci_hcd *ohci, int autostop)
__releases(ohci->lock)
__acquires(ohci->lock)
{
int status = 0;
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_RESUME:
ohci_dbg (ohci, "resume/suspend?\n");
ohci->hc_control &= ~OHCI_CTRL_HCFS;
ohci->hc_control |= OHCI_USB_RESET;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
(void) ohci_readl (ohci, &ohci->regs->control);
fallthrough;
case OHCI_USB_RESET:
status = -EBUSY;
ohci_dbg (ohci, "needs reinit!\n");
goto done;
case OHCI_USB_SUSPEND:
if (!ohci->autostop) {
ohci_dbg (ohci, "already suspended\n");
goto done;
}
}
ohci_dbg (ohci, "%s root hub\n",
autostop ? "auto-stop" : "suspend");
/* First stop any processing */
if (!autostop && (ohci->hc_control & OHCI_SCHED_ENABLES)) {
ohci->hc_control &= ~OHCI_SCHED_ENABLES;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
/* sched disables take effect on the next frame,
* then the last WDH could take 6+ msec
*/
ohci_dbg (ohci, "stopping schedules ...\n");
ohci->autostop = 0;
spin_unlock_irq (&ohci->lock);
msleep (8);
spin_lock_irq (&ohci->lock);
}
update_done_list(ohci);
ohci_work(ohci);
/* All ED unlinks should be finished, no need for SOF interrupts */
ohci_writel(ohci, OHCI_INTR_SF, &ohci->regs->intrdisable);
/*
* Some controllers don't handle "global" suspend properly if
* there are unsuspended ports. For these controllers, put all
* the enabled ports into suspend before suspending the root hub.
*/
if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
__hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
int i;
unsigned temp;
for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
temp = ohci_readl(ohci, portstat);
if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
RH_PS_PES)
ohci_writel(ohci, RH_PS_PSS, portstat);
}
}
/* maybe resume can wake root hub */
if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
ohci->hc_control |= OHCI_CTRL_RWE;
} else {
ohci_writel(ohci, OHCI_INTR_RHSC | OHCI_INTR_RD,
&ohci->regs->intrdisable);
ohci->hc_control &= ~OHCI_CTRL_RWE;
}
/* Suspend hub ... this is the "global (to this bus) suspend" mode,
* which doesn't imply ports will first be individually suspended.
*/
ohci->hc_control &= ~OHCI_CTRL_HCFS;
ohci->hc_control |= OHCI_USB_SUSPEND;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
(void) ohci_readl (ohci, &ohci->regs->control);
/* no resumes until devices finish suspending */
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
return status;
}
static inline struct ed *find_head (struct ed *ed)
{
/* for bulk and control lists */
while (ed->ed_prev)
ed = ed->ed_prev;
return ed;
}
/* caller has locked the root hub */
static int ohci_rh_resume (struct ohci_hcd *ohci)
__releases(ohci->lock)
__acquires(ohci->lock)
{
struct usb_hcd *hcd = ohci_to_hcd (ohci);
u32 temp, enables;
int status = -EINPROGRESS;
int autostopped = ohci->autostop;
ohci->autostop = 0;
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
/* this happens when pmcore resumes HC then root */
} else {
ohci_dbg (ohci, "duplicate resume\n");
status = 0;
}
} else switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_SUSPEND:
ohci->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES);
ohci->hc_control |= OHCI_USB_RESUME;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
(void) ohci_readl (ohci, &ohci->regs->control);
ohci_dbg (ohci, "%s root hub\n",
autostopped ? "auto-start" : "resume");
break;
case OHCI_USB_RESUME:
/* HCFS changes sometime after INTR_RD */
ohci_dbg(ohci, "%swakeup root hub\n",
autostopped ? "auto-" : "");
break;
case OHCI_USB_OPER:
/* this can happen after resuming a swsusp snapshot */
ohci_dbg (ohci, "snapshot resume? reinit\n");
status = -EBUSY;
break;
default: /* RESET, we lost power */
ohci_dbg (ohci, "lost power\n");
status = -EBUSY;
}
if (status == -EBUSY) {
if (!autostopped) {
spin_unlock_irq (&ohci->lock);
status = ohci_restart (ohci);
usb_root_hub_lost_power(hcd->self.root_hub);
spin_lock_irq (&ohci->lock);
}
return status;
}
if (status != -EINPROGRESS)
return status;
if (autostopped)
goto skip_resume;
spin_unlock_irq (&ohci->lock);
/* Some controllers (lucent erratum) need extra-long delays */
msleep (20 /* usb 11.5.1.10 */ + 12 /* 32 msec counter */ + 1);
temp = ohci_readl (ohci, &ohci->regs->control);
temp &= OHCI_CTRL_HCFS;
if (temp != OHCI_USB_RESUME) {
ohci_err (ohci, "controller won't resume\n");
spin_lock_irq(&ohci->lock);
return -EBUSY;
}
/* disable old schedule state, reinit from scratch */
ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
ohci_writel (ohci, 0, &ohci->regs->ed_periodcurrent);
ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
/* Sometimes PCI D3 suspend trashes frame timings ... */
periodic_reinit (ohci);
/*
* The following code is executed with ohci->lock held and
* irqs disabled if and only if autostopped is true. This
* will cause sparse to warn about a "context imbalance".
*/
skip_resume:
/* interrupts might have been disabled */
ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
if (ohci->ed_rm_list)
ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
/* Then re-enable operations */
ohci_writel (ohci, OHCI_USB_OPER, &ohci->regs->control);
(void) ohci_readl (ohci, &ohci->regs->control);
if (!autostopped)
msleep (3);
temp = ohci->hc_control;
temp &= OHCI_CTRL_RWC;
temp |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci->hc_control = temp;
ohci_writel (ohci, temp, &ohci->regs->control);
(void) ohci_readl (ohci, &ohci->regs->control);
/* TRSMRCY */
if (!autostopped) {
msleep (10);
spin_lock_irq (&ohci->lock);
}
/* now ohci->lock is always held and irqs are always disabled */
/* keep it alive for more than ~5x suspend + resume costs */
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
/* maybe turn schedules back on */
enables = 0;
temp = 0;
if (!ohci->ed_rm_list) {
if (ohci->ed_controltail) {
ohci_writel (ohci,
find_head (ohci->ed_controltail)->dma,
&ohci->regs->ed_controlhead);
enables |= OHCI_CTRL_CLE;
temp |= OHCI_CLF;
}
if (ohci->ed_bulktail) {
ohci_writel (ohci, find_head (ohci->ed_bulktail)->dma,
&ohci->regs->ed_bulkhead);
enables |= OHCI_CTRL_BLE;
temp |= OHCI_BLF;
}
}
if (hcd->self.bandwidth_isoc_reqs || hcd->self.bandwidth_int_reqs)
enables |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
if (enables) {
ohci_dbg (ohci, "restarting schedules ... %08x\n", enables);
ohci->hc_control |= enables;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
if (temp)
ohci_writel (ohci, temp, &ohci->regs->cmdstatus);
(void) ohci_readl (ohci, &ohci->regs->control);
}
ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
static int ohci_bus_suspend (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int rc;
spin_lock_irq (&ohci->lock);
if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_suspend (ohci, 0);
spin_unlock_irq (&ohci->lock);
if (rc == 0) {
del_timer_sync(&ohci->io_watchdog);
ohci->prev_frame_no = IO_WATCHDOG_OFF;
}
return rc;
}
static int ohci_bus_resume (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int rc;
if (time_before (jiffies, ohci->next_statechange))
msleep(5);
spin_lock_irq (&ohci->lock);
if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_resume (ohci);
spin_unlock_irq (&ohci->lock);
/* poll until we know a device is connected or we autostop */
if (rc == 0)
usb_hcd_poll_rh_status(hcd);
return rc;
}
/* Carry out polling-, autostop-, and autoresume-related state changes */
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected, int rhsc_status)
{
int poll_rh = 1;
int rhsc_enable;
/* Some broken controllers never turn off RHSC in the interrupt
* status register. For their sake we won't re-enable RHSC
* interrupts if the interrupt bit is already active.
*/
rhsc_enable = ohci_readl(ohci, &ohci->regs->intrenable) &
OHCI_INTR_RHSC;
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
/* If no status changes are pending, enable RHSC interrupts. */
if (!rhsc_enable && !rhsc_status && !changed) {
rhsc_enable = OHCI_INTR_RHSC;
ohci_writel(ohci, rhsc_enable, &ohci->regs->intrenable);
}
/* Keep on polling until we know a device is connected
* and RHSC is enabled, or until we autostop.
*/
if (!ohci->autostop) {
if (any_connected ||
!device_may_wakeup(&ohci_to_hcd(ohci)
->self.root_hub->dev)) {
if (rhsc_enable)
poll_rh = 0;
} else {
ohci->autostop = 1;
ohci->next_statechange = jiffies + HZ;
}
/* if no devices have been attached for one second, autostop */
} else {
if (changed || any_connected) {
ohci->autostop = 0;
ohci->next_statechange = jiffies +
STATECHANGE_DELAY;
} else if (time_after_eq(jiffies,
ohci->next_statechange)
&& !ohci->ed_rm_list
&& !(ohci->hc_control &
OHCI_SCHED_ENABLES)) {
ohci_rh_suspend(ohci, 1);
if (rhsc_enable)
poll_rh = 0;
}
}
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
/* if there is a port change, autostart or ask to be resumed */
if (changed) {
if (ohci->autostop)
ohci_rh_resume(ohci);
else
usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
/* If remote wakeup is disabled, stop polling */
} else if (!ohci->autostop &&
!ohci_to_hcd(ohci)->self.root_hub->
do_remote_wakeup) {
poll_rh = 0;
} else {
/* If no status changes are pending,
* enable RHSC interrupts
*/
if (!rhsc_enable && !rhsc_status) {
rhsc_enable = OHCI_INTR_RHSC;
ohci_writel(ohci, rhsc_enable,
&ohci->regs->intrenable);
}
/* Keep polling until RHSC is enabled */
if (rhsc_enable)
poll_rh = 0;
}
break;
}
return poll_rh;
}
#else /* CONFIG_PM */
static inline int ohci_rh_resume(struct ohci_hcd *ohci)
{
return 0;
}
/* Carry out polling-related state changes.
* autostop isn't used when CONFIG_PM is turned off.
*/
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected, int rhsc_status)
{
/* If RHSC is enabled, don't poll */
if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
return 0;
/* If status changes are pending, continue polling.
* Conversely, if no status changes are pending but the RHSC
* status bit was set, then RHSC may be broken so continue polling.
*/
if (changed || rhsc_status)
return 1;
/* It's safe to re-enable RHSC interrupts */
ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
return 0;
}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* build "status change" packet (one or two bytes) from HC registers */
int ohci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int i, changed = 0, length = 1;
int any_connected = 0;
int rhsc_status;
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
/* undocumented erratum seen on at least rev D */
if ((ohci->flags & OHCI_QUIRK_AMD756)
&& (roothub_a (ohci) & RH_A_NDP) > MAX_ROOT_PORTS) {
ohci_warn (ohci, "bogus NDP, rereads as NDP=%d\n",
ohci_readl (ohci, &ohci->regs->roothub.a) & RH_A_NDP);
/* retry later; "should not happen" */
goto done;
}
/* init status */
if (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC))
buf [0] = changed = 1;
else
buf [0] = 0;
if (ohci->num_ports > 7) {
buf [1] = 0;
length++;
}
/* Clear the RHSC status flag before reading the port statuses */
ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrstatus);
rhsc_status = ohci_readl(ohci, &ohci->regs->intrstatus) &
OHCI_INTR_RHSC;
/* look at each port */
for (i = 0; i < ohci->num_ports; i++) {
u32 status = roothub_portstatus (ohci, i);
/* can't autostop if ports are connected */
any_connected |= (status & RH_PS_CCS);
if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
| RH_PS_OCIC | RH_PS_PRSC)) {
changed = 1;
if (i < 7)
buf [0] |= 1 << (i + 1);
else
buf [1] |= 1 << (i - 7);
}
}
if (ohci_root_hub_state_changes(ohci, changed,
any_connected, rhsc_status))
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
else
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
done:
spin_unlock_irqrestore (&ohci->lock, flags);
return changed ? length : 0;
}
EXPORT_SYMBOL_GPL(ohci_hub_status_data);
/*-------------------------------------------------------------------------*/
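/* Build the root hub's hub descriptor from the roothub A/B registers */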
static void
ohci_hub_descriptor (
struct ohci_hcd *ohci,
struct usb_hub_descriptor *desc
) {
u32 rh = roothub_a (ohci);
u16 temp;
desc->bDescriptorType = USB_DT_HUB;
desc->bPwrOn2PwrGood = (rh & RH_A_POTPGT) >> 24;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ohci->num_ports;
temp = 1 + (ohci->num_ports / 8);
desc->bDescLength = 7 + 2 * temp;
temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
if (rh & RH_A_NPS) /* no power switching? */
temp |= HUB_CHAR_NO_LPSM;
if (rh & RH_A_PSM) /* per-port power switching? */
temp |= HUB_CHAR_INDV_PORT_LPSM;
if (rh & RH_A_NOCP) /* no overcurrent reporting? */
temp |= HUB_CHAR_NO_OCPM;
else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
temp |= HUB_CHAR_INDV_PORT_OCPM;
desc->wHubCharacteristics = cpu_to_le16(temp);
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
rh = roothub_b (ohci);
memset(desc->u.hs.DeviceRemovable, 0xff,
sizeof(desc->u.hs.DeviceRemovable));
desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR;
if (ohci->num_ports > 7) {
desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8;
desc->u.hs.DeviceRemovable[2] = 0xff;
} else
desc->u.hs.DeviceRemovable[1] = 0xff;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_OTG
static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
u32 status;
if (!port)
return -EINVAL;
port--;
/* start port reset before HNP protocol times out */
status = ohci_readl(ohci, &ohci->regs->roothub.portstatus [port]);
if (!(status & RH_PS_CCS))
return -ENODEV;
/* hub_wq will finish the reset later */
ohci_writel(ohci, RH_PS_PRS, &ohci->regs->roothub.portstatus [port]);
return 0;
}
#else
#define ohci_start_port_reset NULL
#endif
/*-------------------------------------------------------------------------*/
/* See usb 7.1.7.5: root hubs must issue at least 50 msec reset signaling,
* not necessarily continuous ... to guard against resume signaling.
*/
#define PORT_RESET_MSEC 50
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1,t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
/* called from some task, normally hub_wq */
static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
{
__hc32 __iomem *portstat = &ohci->regs->roothub.portstatus [port];
u32 temp = 0;
u16 now = ohci_readl(ohci, &ohci->regs->fmnumber);
u16 reset_done = now + PORT_RESET_MSEC;
int limit_1 = DIV_ROUND_UP(PORT_RESET_MSEC, PORT_RESET_HW_MSEC);
/* build a "continuous enough" reset signal, with up to
* 3msec gap between pulses. scheduler HZ==100 must work;
* this might need to be deadline-scheduled.
*/
do {
int limit_2;
/* spin until any current reset finishes */
limit_2 = PORT_RESET_HW_MSEC * 2;
while (--limit_2 >= 0) {
temp = ohci_readl (ohci, portstat);
/* handle e.g. CardBus eject */
if (temp == ~(u32)0)
return -ESHUTDOWN;
if (!(temp & RH_PS_PRS))
break;
udelay (500);
}
/* timeout (a hardware error) has been observed when
* EHCI sets CF while this driver is resetting a port;
* presumably other disconnect paths might do it too.
*/
if (limit_2 < 0) {
ohci_dbg(ohci,
"port[%d] reset timeout, stat %08x\n",
port, temp);
break;
}
if (!(temp & RH_PS_CCS))
break;
if (temp & RH_PS_PRSC)
ohci_writel (ohci, RH_PS_PRSC, portstat);
/* start the next reset, sleep till it's probably done */
ohci_writel (ohci, RH_PS_PRS, portstat);
msleep(PORT_RESET_HW_MSEC);
now = ohci_readl(ohci, &ohci->regs->fmnumber);
} while (tick_before(now, reset_done) && --limit_1 >= 0);
/* caller synchronizes using PRSC ... and handles PRS
* still being set when this returns.
*/
return 0;
}
int ohci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ports = ohci->num_ports;
u32 temp;
int retval = 0;
if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
return -ESHUTDOWN;
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
ohci_writel (ohci, RH_HS_OCIC,
&ohci->regs->roothub.status);
break;
case C_HUB_LOCAL_POWER:
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
temp = RH_PS_CCS;
break;
case USB_PORT_FEAT_C_ENABLE:
temp = RH_PS_PESC;
break;
case USB_PORT_FEAT_SUSPEND:
temp = RH_PS_POCI;
break;
case USB_PORT_FEAT_C_SUSPEND:
temp = RH_PS_PSSC;
break;
case USB_PORT_FEAT_POWER:
temp = RH_PS_LSDA;
break;
case USB_PORT_FEAT_C_CONNECTION:
temp = RH_PS_CSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
temp = RH_PS_OCIC;
break;
case USB_PORT_FEAT_C_RESET:
temp = RH_PS_PRSC;
break;
default:
goto error;
}
ohci_writel (ohci, temp,
&ohci->regs->roothub.portstatus [wIndex]);
// ohci_readl (ohci, &ohci->regs->roothub.portstatus [wIndex]);
break;
case GetHubDescriptor:
ohci_hub_descriptor (ohci, (struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
put_unaligned_le32(temp, buf);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = roothub_portstatus (ohci, wIndex);
put_unaligned_le32(temp, buf);
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
dbg_port(ohci, "GetStatus", wIndex, temp);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
// FIXME: this can be cleared, yes?
case C_HUB_LOCAL_POWER:
break;
default:
goto error;
}
break;
case SetPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
#ifdef CONFIG_USB_OTG
if (hcd->self.otg_port == (wIndex + 1)
&& hcd->self.b_hnp_enable)
ohci->start_hnp(ohci);
else
#endif
ohci_writel (ohci, RH_PS_PSS,
&ohci->regs->roothub.portstatus [wIndex]);
break;
case USB_PORT_FEAT_POWER:
ohci_writel (ohci, RH_PS_PPS,
&ohci->regs->roothub.portstatus [wIndex]);
break;
case USB_PORT_FEAT_RESET:
retval = root_port_reset (ohci, wIndex);
break;
default:
goto error;
}
break;
default:
error:
/* "protocol stall" on error */
retval = -EPIPE;
}
return retval;
}
EXPORT_SYMBOL_GPL(ohci_hub_control);
| linux-master | drivers/usb/host/ohci-hub.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
*
* [ Initialisation is based on Linus' ]
* [ uhci code and gregs ohci fragments ]
* [ (C) Copyright 1999 Linus Torvalds ]
* [ (C) Copyright 1999 Gregory P. Smith]
*
* PCI Bus Glue
*
* This file is licenced under the GPL.
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#include "pci-quirks.h"
#define DRIVER_DESC "OHCI PCI platform driver"
static const char hcd_name[] = "ohci-pci";
/*-------------------------------------------------------------------------*/
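/* Quirk: keep root-hub wakeup disabled on controllers with broken
 * suspend/resume support.
 */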
static int broken_suspend(struct usb_hcd *hcd)
{
device_init_wakeup(&hcd->self.root_hub->dev, 0);
return 0;
}
/* AMD 756, for most chips (early revs), corrupts register
* values on read ... so enable the vendor workaround.
*/
static int ohci_quirk_amd756(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->flags = OHCI_QUIRK_AMD756;
ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
/* also erratum 10 (suspend/resume issues) */
return broken_suspend(hcd);
}
/* Apple's OHCI driver has a lot of bizarre workarounds
* for this chip. Evidently control and bulk lists
* can get confused. (B&W G3 models, and ...)
*/
static int ohci_quirk_opti(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci_dbg (ohci, "WARNING: OPTi workarounds unavailable\n");
return 0;
}
/* Check for NSC87560. We have to look at the bridge (fn1) to
* identify the USB (fn2). This quirk might apply to more or
* even all NSC stuff.
*/
static int ohci_quirk_ns(struct usb_hcd *hcd)
{
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
struct pci_dev *b;
b = pci_get_slot (pdev->bus, PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
&& b->vendor == PCI_VENDOR_ID_NS) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->flags |= OHCI_QUIRK_SUPERIO;
ohci_dbg (ohci, "Using NSC SuperIO setup\n");
}
pci_dev_put(b);
return 0;
}
/* Check for Compaq's ZFMicro chipset, which needs short
* delays before control or bulk queues get re-activated
* in finish_unlinks()
*/
static int ohci_quirk_zfmicro(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->flags |= OHCI_QUIRK_ZFMICRO;
ohci_dbg(ohci, "enabled Compaq ZFMicro chipset quirks\n");
return 0;
}
/* Check for Toshiba SCC OHCI which has big endian registers
* and little endian in memory data structures
*/
static int ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
/* That chip is only present in the southbridge of some
* cell based platforms which are supposed to select
	 * CONFIG_USB_OHCI_BIG_ENDIAN_MMIO. We verify here whether
	 * that was actually the case.
*/
#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
ohci->flags |= OHCI_QUIRK_BE_MMIO;
ohci_dbg (ohci, "enabled big endian Toshiba quirk\n");
return 0;
#else
ohci_err (ohci, "unsupported big endian Toshiba quirk\n");
return -ENXIO;
#endif
}
/* Check for NEC chip and apply quirk for allegedly lost interrupts.
*/
static void ohci_quirk_nec_worker(struct work_struct *work)
{
struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
int status;
status = ohci_restart(ohci);
if (status != 0)
ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
"ohci_restart", status);
}
static int ohci_quirk_nec(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->flags |= OHCI_QUIRK_NEC;
INIT_WORK(&ohci->nec_work, ohci_quirk_nec_worker);
ohci_dbg (ohci, "enabled NEC chipset lost interrupt quirk\n");
return 0;
}
static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
if (usb_amd_quirk_pll_check())
ohci->flags |= OHCI_QUIRK_AMD_PLL;
/* SB800 needs pre-fetch fix */
if (usb_amd_prefetch_quirk()) {
ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
}
ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
return 0;
}
static int ohci_quirk_qemu(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
ohci->flags |= OHCI_QUIRK_QEMU;
ohci_dbg(ohci, "enabled qemu quirk\n");
return 0;
}
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x740c),
.driver_data = (unsigned long)ohci_quirk_amd756,
},
{
PCI_DEVICE(PCI_VENDOR_ID_OPTI, 0xc861),
.driver_data = (unsigned long)ohci_quirk_opti,
},
{
PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_ANY_ID),
.driver_data = (unsigned long)ohci_quirk_ns,
},
{
PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xa0f8),
.driver_data = (unsigned long)ohci_quirk_zfmicro,
},
{
PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6),
.driver_data = (unsigned long)ohci_quirk_toshiba_scc,
},
{
PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB),
.driver_data = (unsigned long)ohci_quirk_nec,
},
{
/* Toshiba portege 4000 */
.vendor = PCI_VENDOR_ID_AL,
.device = 0x5237,
.subvendor = PCI_VENDOR_ID_TOSHIBA,
.subdevice = 0x0004,
.driver_data = (unsigned long) broken_suspend,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
.driver_data = (unsigned long) broken_suspend,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
{
.vendor = PCI_VENDOR_ID_APPLE,
.device = 0x003f,
.subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
.subdevice = PCI_SUBDEVICE_ID_QEMU,
.driver_data = (unsigned long)ohci_quirk_qemu,
},
{},
};
static int ohci_pci_reset (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int ret = 0;
if (hcd->self.controller) {
const struct pci_device_id *quirk_id;
quirk_id = pci_match_id(ohci_pci_quirks, pdev);
if (quirk_id != NULL) {
int (*quirk)(struct usb_hcd *ohci);
quirk = (void *)quirk_id->driver_data;
ret = quirk(hcd);
}
}
if (ret == 0)
ret = ohci_setup(hcd);
/*
	 * After OHCI setup, RWC may not be set for add-in PCI cards.
* This transfers PCI PM wakeup capabilities.
*/
if (device_can_wakeup(&pdev->dev))
ohci->hc_control |= OHCI_CTRL_RWC;
return ret;
}
static struct hc_driver __read_mostly ohci_pci_hc_driver;
static const struct ohci_driver_overrides pci_overrides __initconst = {
.product_desc = "OHCI PCI host controller",
.reset = ohci_pci_reset,
};
static const struct pci_device_id pci_ids[] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
}, {
/* The device in the ConneXT I/O hub has no class reg */
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
static int ohci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
return usb_hcd_pci_probe(dev, &ohci_pci_hc_driver);
}
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ohci_pci_driver = {
.name = hcd_name,
.id_table = pci_ids,
.probe = ohci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
#endif
};
#ifdef CONFIG_PM
static int ohci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
return ohci_resume(hcd, msg.event == PM_EVENT_RESTORE);
}
#endif
static int __init ohci_pci_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
#ifdef CONFIG_PM
/* Entries for the PCI suspend/resume callbacks are special */
ohci_pci_hc_driver.pci_suspend = ohci_suspend;
ohci_pci_hc_driver.pci_resume = ohci_pci_resume;
#endif
return pci_register_driver(&ohci_pci_driver);
}
module_init(ohci_pci_init);
static void __exit ohci_pci_cleanup(void)
{
pci_unregister_driver(&ohci_pci_driver);
}
module_exit(ohci_pci_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: ehci_pci");
| linux-master | drivers/usb/host/ohci-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
struct xhci_segment *seg = ring->first_seg;
if (!td || !td->start_seg)
return false;
do {
if (seg == td->start_seg)
return true;
seg = seg->next;
} while (seg && seg != ring->first_seg);
return false;
}
/*
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" has passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
u32 result;
int ret;
ret = readl_poll_timeout_atomic(ptr, result,
(result & mask) == done ||
result == U32_MAX,
1, timeout_us);
if (result == U32_MAX) /* card removed */
return -ENODEV;
return ret;
}
/*
* Disable interrupts and begin the xHCI halting process.
*/
void xhci_quiesce(struct xhci_hcd *xhci)
{
u32 halted;
u32 cmd;
u32 mask;
mask = ~(XHCI_IRQS);
halted = readl(&xhci->op_regs->status) & STS_HALT;
if (!halted)
mask &= ~CMD_RUN;
cmd = readl(&xhci->op_regs->command);
cmd &= mask;
writel(cmd, &xhci->op_regs->command);
}
/*
* Force HC into halt state.
*
* Disable any IRQs and clear the run/stop bit.
* HC will complete any current and actively pipelined transactions, and
* should halt within 16 ms of the run/stop bit being cleared.
* Read HC Halted bit in the status register to see when the HC is finished.
*/
int xhci_halt(struct xhci_hcd *xhci)
{
int ret;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
xhci_quiesce(xhci);
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (ret) {
xhci_warn(xhci, "Host halt failed, %d\n", ret);
return ret;
}
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
return ret;
}
/*
* Set the run bit and wait for the host to be running.
*/
int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
temp = readl(&xhci->op_regs->command);
temp |= (CMD_RUN);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
temp);
writel(temp, &xhci->op_regs->command);
/*
* Wait for the HCHalted Status bit to be 0 to indicate the host is
* running.
*/
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, 0, XHCI_MAX_HALT_USEC);
if (ret == -ETIMEDOUT)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
}
return ret;
}
/*
* Reset a halted HC.
*
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
* will be set to their defaults.
*/
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
u32 command;
u32 state;
int ret;
state = readl(&xhci->op_regs->status);
if (state == ~(u32)0) {
xhci_warn(xhci, "Host not accessible, reset failed.\n");
return -ENODEV;
}
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
return 0;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = readl(&xhci->op_regs->command);
command |= CMD_RESET;
writel(command, &xhci->op_regs->command);
	/* Existing Intel xHCI controllers require a delay of 1 ms
	 * after setting the CMD_RESET bit and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may very rarely result in a system hang.
*/
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
if (ret)
return ret;
if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wait for controller to be ready for doorbell rings");
/*
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
xhci->usb2_rhub.bus_state.port_c_suspend = 0;
xhci->usb2_rhub.bus_state.suspended_ports = 0;
xhci->usb2_rhub.bus_state.resuming_ports = 0;
xhci->usb3_rhub.bus_state.port_c_suspend = 0;
xhci->usb3_rhub.bus_state.suspended_ports = 0;
xhci->usb3_rhub.bus_state.resuming_ports = 0;
return ret;
}
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct iommu_domain *domain;
int err, i;
u64 val;
u32 intrs;
/*
* Some Renesas controllers get into a weird state if they are
* reset while programmed with 64bit addresses (they will preserve
* the top half of the address in internal, non visible
* registers). You end up with half the address coming from the
* kernel, and the other half coming from the firmware. Also,
* changing the programming leads to extra accesses even if the
* controller is supposed to be halted. The controller ends up with
* a fatal fault, and is then ripe for being properly reset.
*
* Special care is taken to only apply this if the device is behind
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
domain = iommu_get_domain_for_dev(dev);
if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
domain->type == IOMMU_DOMAIN_IDENTITY)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
/* Clear HSEIE so that faults do not get signaled */
val = readl(&xhci->op_regs->command);
val &= ~CMD_HSEIE;
writel(val, &xhci->op_regs->command);
/* Clear HSE (aka FATAL) */
val = readl(&xhci->op_regs->status);
val |= STS_FATAL;
writel(val, &xhci->op_regs->status);
/* Now zero the registers, and brace for impact */
val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
ARRAY_SIZE(xhci->run_regs->ir_set));
for (i = 0; i < intrs; i++) {
struct xhci_intr_reg __iomem *ir;
ir = &xhci->run_regs->ir_set[i];
val = xhci_read_64(xhci, &ir->erst_base);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_dequeue);
}
/* Wait for the fault to appear. It will be cleared on reset */
err = xhci_handshake(&xhci->op_regs->status,
STS_FATAL, STS_FATAL,
XHCI_MAX_HALT_USEC);
if (!err)
xhci_info(xhci, "Fault detected\n");
}
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
iman = readl(&ir->ir_set->irq_pending);
writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
return 0;
}
static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
iman = readl(&ir->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
return 0;
}
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
struct xhci_hub *rhub;
u32 temp;
int i;
xhci = from_timer(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
hcd = rhub->hcd;
if (!hcd)
return;
for (i = 0; i < rhub->num_ports; i++) {
temp = readl(rhub->ports[i]->addr);
if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
/*
* Compliance Mode Detected. Letting USB Core
* handle the Warm Reset
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode detected->port %d",
i + 1);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Attempting compliance mode recovery");
if (hcd->state == HC_STATE_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
usb_hcd_poll_rh_status(hcd);
}
}
if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
mod_timer(&xhci->comp_mode_recovery_timer,
jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
/*
* Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
* that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers the port by issuing a Warm
 * Reset if Compliance Mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering Compliance Mode (per
 * the xhci spec), this quirk is needed on systems that have the failing
 * hardware installed.
*/
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
0);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
}
/*
* This function identifies the systems that have installed the SN65LVPE502CP
* USB3.0 re-driver and that need the Compliance Mode Quirk.
* Systems:
* Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
*/
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
const char *dmi_product_name, *dmi_sys_vendor;
dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
if (!dmi_product_name || !dmi_sys_vendor)
return false;
if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
return false;
if (strstr(dmi_product_name, "Z420") ||
strstr(dmi_product_name, "Z620") ||
strstr(dmi_product_name, "Z820") ||
strstr(dmi_product_name, "Z1 Workstation"))
return true;
return false;
}
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
/*
* Initialize memory for HCD and xHC (one-time init).
*
* Program the PAGESIZE register, initialize the device context array, create
* device contexts (?), set up a command ring segment (or two?), create event
* ring (one for now).
*/
static int xhci_init(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
if (xhci->hci_version == 0x95 && link_quirk) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI doesn't need link TRB QUIRK");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
}
return retval;
}
/*-------------------------------------------------------------------------*/
static int xhci_run_finished(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
unsigned long flags;
u32 temp;
/*
* Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
* Protect the short window before host is running with a lock
*/
spin_lock_irqsave(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
temp = readl(&xhci->op_regs->command);
temp |= (CMD_EIE);
writel(temp, &xhci->op_regs->command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
xhci_enable_interrupter(ir);
if (xhci_start(xhci)) {
xhci_halt(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
return -ENODEV;
}
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/*
* Start the HC after it was halted.
*
* This function is called by the USB core when the HC driver is added.
* Its opposite is xhci_stop().
*
* xhci_init() must be called once before this function can be called.
* Reset the HC, enable device slot contexts, program DCBAAP, and
* set command ring pointer and event ring pointer.
*
* Setup MSI-X vectors and enable interrupts.
*/
int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupter;
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
*/
hcd->uses_new_polling = 1;
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
temp = readl(&ir->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(temp, &ir->ir_set->irq_control);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
command = xhci_alloc_command(xhci, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
if (ret)
xhci_free_command(xhci, command);
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished %s for main hcd", __func__);
xhci_create_dbc_dev(xhci);
xhci_debugfs_init(xhci);
if (xhci_has_one_roothub(xhci))
return xhci_run_finished(xhci);
set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
/*
* Stop xHCI driver.
*
* This function is called by the USB core when the HC driver is removed.
* Its opposite is xhci_run().
*
* Disable device contexts, disable IRQs, and quiesce the HC.
* Reset the HC, finish any completed transactions, and cleanup memory.
*/
void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupter;
mutex_lock(&xhci->mutex);
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
mutex_unlock(&xhci->mutex);
return;
}
xhci_remove_dbc_dev(xhci);
spin_lock_irq(&xhci->lock);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
xhci_halt(xhci);
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
spin_unlock_irq(&xhci->lock);
/* Deleting Compliance Mode Recovery Timer */
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
__func__);
}
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
xhci_disable_interrupter(ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
xhci_debugfs_exit(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
readl(&xhci->op_regs->status));
mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);
/*
* Shutdown HC (not bus-specific)
*
* This is called when the machine is rebooting or halting. We assume that the
* machine will be powered off, and the HC's internal state will be reset.
* Don't bother to free memory.
*
* This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
/* Don't poll the roothubs after shutdown. */
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
*/
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
xhci->quirks & XHCI_RESET_TO_DEFAULT)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
spin_unlock_irq(&xhci->lock);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_shutdown completed - status = %x",
readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
if (!ir)
return;
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
u64 val_64;
/* step 2: initialize command ring buffer */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
(u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
* The whole command ring must be cleared to zero when we suspend the host.
*
 * The host doesn't preserve the command ring pointer across suspend, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
* aligned, because of the reserved bits in the command ring dequeue pointer
* register. Therefore, we can't just set the dequeue pointer back in the
* middle of the ring (TRBs are 16-byte aligned).
*/
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
struct xhci_ring *ring;
struct xhci_segment *seg;
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
/* Reset the software enqueue and dequeue pointers */
ring->deq_seg = ring->first_seg;
ring->dequeue = ring->first_seg->trbs;
ring->enq_seg = ring->deq_seg;
ring->enqueue = ring->dequeue;
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
/*
* Ring is now zeroed, so the HW should look for change of ownership
* when the cycle bit is set to 1.
*/
ring->cycle_state = 1;
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
* and want to make sure the hardware doesn't access bogus memory
* because, say, the BIOS or an SMI started the host without changing
* the command ring pointers.
*/
xhci_set_cmd_ring_deq(xhci);
}
/*
* Disable port wake bits if do_wakeup is not set.
*
* Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes an immediate xHCI wake after suspend. The PORT_CSC
 * write done at enumeration clears this wake; force one here as well for
 * unconnected ports.
*/
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
struct xhci_hub *rhub,
bool do_wakeup)
{
unsigned long flags;
u32 t1, t2, portsc;
int i;
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < rhub->num_ports; i++) {
portsc = readl(rhub->ports[i]->addr);
t1 = xhci_port_state_to_neutral(portsc);
t2 = t1;
/* clear wake bits if do_wake is not set */
if (!do_wakeup)
t2 &= ~PORT_WAKE_BITS;
/* Don't touch csc bit if connected or connect change is set */
if (!(portsc & (PORT_CSC | PORT_CONNECT)))
t2 |= PORT_CSC;
if (t1 != t2) {
writel(t2, rhub->ports[i]->addr);
xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
rhub->hcd->self.busnum, i + 1, portsc, t2);
}
}
spin_unlock_irqrestore(&xhci->lock, flags);
}
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
struct xhci_port **ports;
int port_index;
u32 status;
u32 portsc;
status = readl(&xhci->op_regs->status);
if (status & STS_EINT)
return true;
/*
* Checking STS_EINT is not enough as there is a lag between a change
* bit being set and the Port Status Change Event that it generated
* being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
*/
port_index = xhci->usb2_rhub.num_ports;
ports = xhci->usb2_rhub.ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
if (portsc & PORT_CHANGE_MASK ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
return true;
}
port_index = xhci->usb3_rhub.num_ports;
ports = xhci->usb3_rhub.ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
return true;
}
return false;
}
/*
* Stop HC (not bus-specific)
*
* This is called when the machine transition into S3/S4 mode.
*
*/
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
int rc = 0;
unsigned int delay = XHCI_MAX_HALT_USEC * 2;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
u32 res;
if (!hcd->state)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
(xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
return -EINVAL;
/* Clear root port wake on bits if wakeup not allowed. */
xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
if (!HCD_HW_ACCESSIBLE(hcd))
return 0;
xhci_dbc_suspend(xhci);
/* Don't poll the roothubs on bus suspend. */
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
if (xhci->quirks & XHCI_SUSPEND_DELAY)
usleep_range(1000, 1500);
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (xhci->shared_hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */
/* step 2: clear Run/Stop bit */
command = readl(&xhci->op_regs->command);
command &= ~CMD_RUN;
writel(command, &xhci->op_regs->command);
/* Some chips from Fresco Logic need an extraordinary delay */
delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
if (xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, delay)) {
xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
xhci_clear_command_ring(xhci);
/* step 3: save registers */
xhci_save_registers(xhci);
/* step 4: set CSS flag */
command = readl(&xhci->op_regs->command);
command |= CMD_CSS;
writel(command, &xhci->op_regs->command);
xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
STS_SAVE, 0, 20 * 1000)) {
/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so when the driver polls to see
		 * if the xHC clears BIT(8), that never happens, the
		 * driver assumes the controller is not responding,
		 * and times out. To work around this, check that the
		 * SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
*/
res = readl(&xhci->op_regs->status);
if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
(((res & STS_SRE) == 0) &&
((res & STS_HCE) == 0))) {
xhci->broken_suspend = 1;
} else {
xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
}
spin_unlock_irq(&xhci->lock);
/*
* Deleting Compliance Mode Recovery Timer because the xHCI Host
* is about to be suspended.
*/
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
__func__);
}
return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
/*
* start xHC (not bus-specific)
*
* This is called when the machine transition from S3/S4 mode.
*
*/
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool reinit_xhc = false;
if (!hcd->state)
return 0;
	/* Wait a bit if either of the roothubs needs to settle from the
* transition into bus suspend.
*/
if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
msleep(100);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (xhci->shared_hcd)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
reinit_xhc = true;
if (!reinit_xhc) {
/*
* Some controllers might lose power during suspend, so wait
		 * for the Controller Not Ready bit to clear, just as in xHC init.
*/
retval = xhci_handshake(&xhci->op_regs->status,
STS_CNR, 0, 10 * 1000 * 1000);
if (retval) {
xhci_warn(xhci, "Controller not ready at resume %d\n",
retval);
spin_unlock_irq(&xhci->lock);
return retval;
}
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
/* step 3: set CRS flag */
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
*/
if (xhci_handshake(&xhci->op_regs->status,
STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
}
temp = readl(&xhci->op_regs->status);
/* re-initialize the HC on Restore Error, or Host Controller Error */
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
reinit_xhc = true;
if (!xhci->broken_suspend)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance Mode Recovery Timer deleted!");
}
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
if (xhci->shared_hcd)
usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
spin_unlock_irq(&xhci->lock);
if (retval)
return retval;
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
xhci_disable_interrupter(xhci->interrupter);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
xhci_debugfs_exit(xhci);
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
readl(&xhci->op_regs->status));
/* USB core calls the PCI reinit and start functions twice:
* first with the primary HCD, and then with the secondary HCD.
* If we don't do the same, the host will never be started.
*/
xhci_dbg(xhci, "Initialize the xhci_hcd\n");
retval = xhci_init(hcd);
if (retval)
return retval;
comp_timer_running = true;
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd);
if (!retval && xhci->shared_hcd) {
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(xhci->shared_hcd);
}
hcd->state = HC_STATE_SUSPENDED;
if (xhci->shared_hcd)
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
goto done;
}
/* step 4: set Run/Stop bit */
command = readl(&xhci->op_regs->command);
command |= CMD_RUN;
writel(command, &xhci->op_regs->command);
xhci_handshake(&xhci->op_regs->status, STS_HALT,
0, 250 * 1000);
/* step 5: walk topology and initialize portsc,
* portpmsc and portli
*/
/* this is done in bus_resume */
/* step 6: restart each of the previously
* Running endpoints by ringing their doorbells
*/
spin_unlock_irq(&xhci->lock);
xhci_dbc_resume(xhci);
done:
if (retval == 0) {
/*
* Resume roothubs only if there are pending events.
* USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed; give them that chance.
*/
pending_portevent = xhci_pending_portevent(xhci);
if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
if (pending_portevent) {
if (xhci->shared_hcd)
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
}
/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since the
	 * ports may suffer the Compliance Mode issue again. It doesn't
	 * matter whether the ports entered U0 before the system was suspended.
*/
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
__func__, hcd->self.busnum);
if (xhci->shared_hcd) {
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
}
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
void *temp;
int ret = 0;
unsigned int buf_len;
enum dma_data_direction dir;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
buf_len = urb->transfer_buffer_length;
temp = kzalloc_node(buf_len, GFP_ATOMIC,
			dev_to_node(hcd->self.sysdev));
	/* bail out early if the atomic allocation failed */
	if (!temp)
		return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
temp, buf_len, 0);
urb->transfer_buffer = temp;
urb->transfer_dma = dma_map_single(hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma)) {
ret = -EAGAIN;
kfree(temp);
} else {
urb->transfer_flags |= URB_DMA_MAP_SINGLE;
}
return ret;
}
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
struct urb *urb)
{
bool ret = false;
unsigned int i;
unsigned int len = 0;
unsigned int trb_size;
unsigned int max_pkt;
struct scatterlist *sg;
struct scatterlist *tail_sg;
tail_sg = urb->sg;
max_pkt = usb_endpoint_maxp(&urb->ep->desc);
if (!urb->num_sgs)
return ret;
if (urb->dev->speed >= USB_SPEED_SUPER)
trb_size = TRB_CACHE_SIZE_SS;
else
trb_size = TRB_CACHE_SIZE_HS;
if (urb->transfer_buffer_length != 0 &&
!(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
len = len + sg->length;
if (i > trb_size - 2) {
len = len - tail_sg->length;
if (len < max_pkt) {
ret = true;
break;
}
tail_sg = sg_next(tail_sg);
}
}
}
return ret;
}
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
unsigned int len;
unsigned int buf_len;
enum dma_data_direction dir;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
buf_len = urb->transfer_buffer_length;
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SINGLE))
dma_unmap_single(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
if (usb_urb_dir_in(urb)) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
urb->transfer_buffer,
buf_len,
0);
if (len != buf_len) {
xhci_dbg(hcd_to_xhci(hcd),
"Copy from tmp buf to urb sg list failed\n");
urb->actual_length = len;
}
}
urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
}
/*
* Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
* we'll copy the actual data into the TRB address register. This is limited to
* transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed.
*/
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(hcd);
if (xhci_urb_suitable_for_idt(urb))
return 0;
if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
if (xhci_urb_temp_buffer_required(hcd, urb))
return xhci_map_temp_buffer(hcd, urb);
}
return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}
static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
struct xhci_hcd *xhci;
bool unmap_temp_buf = false;
xhci = hcd_to_xhci(hcd);
if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
unmap_temp_buf = true;
if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
xhci_unmap_temp_buf(hcd, urb);
else
usb_hcd_unmap_urb_for_dma(hcd, urb);
}
/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor. Used for passing endpoint bitmasks between the core and
 * HCDs. Use the return value to right shift 1 for the bitmask.
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
* For control endpoints, the IN index is used (OUT index is unused), so
* index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
*/
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
unsigned int index;
if (usb_endpoint_xfer_control(desc))
index = (unsigned int) (usb_endpoint_num(desc)*2);
else
index = (unsigned int) (usb_endpoint_num(desc)*2) +
(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
* address from the XHCI endpoint index.
*/
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
unsigned int number = DIV_ROUND_UP(ep_index, 2);
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
return direction | number;
}
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
* e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
* fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
*/
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
if (!hcd || (check_ep && !ep) || !udev) {
pr_debug("xHCI %s called with invalid args\n", func);
return -EINVAL;
}
if (!udev->parent) {
pr_debug("xHCI %s called for root hub\n", func);
return 0;
}
xhci = hcd_to_xhci(hcd);
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
func);
return -EINVAL;
}
virt_dev = xhci->devs[udev->slot_id];
if (virt_dev->udev != udev) {
xhci_dbg(xhci, "xHCI %s called with udev and "
"virt_dev does not match\n", func);
return -EINVAL;
}
}
if (xhci->xhc_state & XHCI_STATE_HALTED)
return -ENODEV;
return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_command *command,
bool ctx_change, bool must_succeed);
/*
* Full speed devices may have a max packet size greater than 8 bytes, but the
* USB core doesn't know that until it reads the first 8 bytes of the
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
int max_packet_size;
int hw_max_packet_size;
int ret = 0;
out_ctx = xhci->devs[slot_id]->out_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
if (hw_max_packet_size != max_packet_size) {
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max Packet Size for ep 0 changed.");
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max packet size in usb_device = %d",
max_packet_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max packet size in xHCI HW = %d",
hw_max_packet_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Issuing evaluate context command.");
/* Set up the input context flags for the command */
/* FIXME: This won't work if a non-default control endpoint
* changes max packet sizes.
*/
command = xhci_alloc_command(xhci, true, mem_flags);
if (!command)
return -ENOMEM;
command->in_ctx = xhci->devs[slot_id]->in_ctx;
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
ret = -ENOMEM;
goto command_cleanup;
}
/* Set up the modified control endpoint 0 */
xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, ep_index);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
ctrl_ctx->drop_flags = 0;
ret = xhci_configure_endpoint(xhci, urb->dev, command,
true, false);
/* Clean up the input context for later use by bandwidth
* functions.
*/
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
kfree(command->completion);
kfree(command);
}
return ret;
}
/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
*/
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
int ret = 0;
unsigned int slot_id, ep_index;
unsigned int *ep_state;
struct urb_priv *urb_priv;
int num_tds;
if (!urb)
return -EINVAL;
ret = xhci_check_args(hcd, urb->dev, urb->ep,
true, true, __func__);
if (ret <= 0)
return ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
if (!HCD_HW_ACCESSIBLE(hcd))
return -ESHUTDOWN;
if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
return -ENODEV;
}
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
urb->transfer_buffer_length > 0 &&
urb->transfer_flags & URB_ZERO_PACKET &&
!(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
num_tds = 2;
else
num_tds = 1;
urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
if (!urb_priv)
return -ENOMEM;
urb_priv->num_tds = num_tds;
urb_priv->num_tds_done = 0;
urb->hcpriv = urb_priv;
trace_xhci_urb_enqueue(urb);
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
/* Check to see if the max packet size for the default control
* endpoint changed during FS device enumeration
*/
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb, mem_flags);
if (ret < 0) {
xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
return ret;
}
}
}
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
ret = -ESHUTDOWN;
goto free_priv;
}
if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
*ep_state);
ret = -EINVAL;
goto free_priv;
}
if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
ret = -EINVAL;
goto free_priv;
}
switch (usb_endpoint_type(&urb->ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
break;
case USB_ENDPOINT_XFER_BULK:
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
break;
case USB_ENDPOINT_XFER_INT:
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
break;
case USB_ENDPOINT_XFER_ISOC:
ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
}
if (ret) {
free_priv:
xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
}
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
/*
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
* should pick up where it left off in the TD, unless a Set Transfer Ring
* Dequeue Pointer is issued.
*
* The TRBs that make up the buffers for the canceled URB will be "removed" from
* the ring. Since the ring is a contiguous structure, they can't be physically
* removed. Instead, there are two options:
*
* 1) If the HC is in the middle of processing the URB to be canceled, we
* simply move the ring's dequeue pointer past those TRBs using the Set
* Transfer Ring Dequeue Pointer command. This will be the common case,
* when drivers timeout on the last submitted URB and attempt to cancel.
*
* 2) If the HC is in the middle of a different TD, we turn the TRBs into a
* series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
* endpoint command, as noted in the xHCI 0.95 errata.
*
* 3) The TD may have completed by the time the Stop Endpoint Command
* completes, so software needs to handle that case too.
*
* This function should protect against the TD enqueueing code ringing the
* doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
* time for the same endpoint.
*
* Note that this function can be called in any context, or so says
* usb_hcd_unlink_urb()
*/
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
int ret, i;
u32 temp;
struct xhci_hcd *xhci;
struct urb_priv *urb_priv;
struct xhci_td *td;
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct xhci_command *command;
struct xhci_virt_device *vdev;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
trace_xhci_urb_dequeue(urb);
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret)
goto done;
/* give back URB now if we can't queue it for cancel */
vdev = xhci->devs[urb->dev->slot_id];
urb_priv = urb->hcpriv;
if (!vdev || !urb_priv)
goto err_giveback;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &vdev->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep || !ep_ring)
goto err_giveback;
/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
temp = readl(&xhci->op_regs->status);
if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
xhci_hc_died(xhci);
goto done;
}
/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued. If it has, make sure none of the ring-related pointers in
	 * this URB's private data (such as td_list) are touched; otherwise we
	 * would overwrite freed data.
*/
if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
xhci_err(xhci, "Canceled URB td not found on endpoint ring");
for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
}
goto err_giveback;
}
if (xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HC halted, freeing TD manually.");
for (i = urb_priv->num_tds_done;
i < urb_priv->num_tds;
i++) {
td = &urb_priv->td[i];
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
}
goto err_giveback;
}
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i].start_seg,
urb_priv->td[i].first_trb));
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
/* TD can already be on cancelled list if ep halted on it */
if (list_empty(&td->cancelled_td_list)) {
td->cancel_status = TD_DIRTY;
list_add_tail(&td->cancelled_td_list,
&ep->cancelled_td_list);
}
}
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ret = -ENOMEM;
goto done;
}
ep->ep_state |= EP_STOP_CMD_PENDING;
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
xhci_ring_cmd_db(xhci);
}
done:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
err_giveback:
if (urb_priv)
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
*
* The USB core will not allow URBs to be queued to an endpoint that is being
* disabled, so there's no need for mutual exclusion to protect
* the xhci->devs[slot_id] structure.
*/
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx, *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 drop_flag;
u32 new_add_flags, new_drop_flags;
int ret;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
if (xhci->xhc_state & XHCI_STATE_DYING)
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
__func__, drop_flag);
return 0;
}
in_ctx = xhci->devs[udev->slot_id]->in_ctx;
out_ctx = xhci->devs[udev->slot_id]->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return 0;
}
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
*/
if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
__func__, ep);
return 0;
}
ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags);
return 0;
}
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
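/*
 * Illustrative call-order sketch (not part of the driver): this is how
 * the USB core is expected to drive the drop/add/check sequence when a
 * configuration or alt setting changes. The surrounding core code is
 * simplified and the variable names are hypothetical.
 *
 *	xhci_drop_endpoint(hcd, udev, old_ep);	 // set D-flag in input ctx
 *	xhci_add_endpoint(hcd, udev, new_ep);	 // set A-flag, stage new ring
 *	ret = xhci_check_bandwidth(hcd, udev);	 // one configure ep command
 *	if (ret)
 *		xhci_reset_bandwidth(hcd, udev); // discard staged changes
 */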
/* Add an endpoint to a new possible bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
*
* The USB core will not allow URBs to be queued to an endpoint until the
* configuration or alt setting is installed in the device, so there's no need
* for mutual exclusion to protect the xhci->devs[slot_id] structure.
*/
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx;
unsigned int ep_index;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
u32 new_add_flags, new_drop_flags;
struct xhci_virt_device *virt_dev;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0) {
/* So we won't queue a reset ep command for a root hub */
ep->hcpriv = NULL;
return ret;
}
xhci = hcd_to_xhci(hcd);
if (xhci->xhc_state & XHCI_STATE_DYING)
return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
/* FIXME when we have to issue an evaluate endpoint command to
* deal with ep0 max packet size changing once we get the
* descriptors
*/
xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
__func__, added_ctxs);
return 0;
}
virt_dev = xhci->devs[udev->slot_id];
in_ctx = virt_dev->in_ctx;
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return 0;
}
ep_index = xhci_get_endpoint_index(&ep->desc);
/* If this endpoint is already in use, and the upper layers are trying
* to add it again without dropping it, reject the addition.
*/
if (virt_dev->eps[ep_index].ring &&
!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
xhci_warn(xhci, "Trying to add endpoint 0x%x "
"without dropping it.\n",
(unsigned int) ep->desc.bEndpointAddress);
return -EINVAL;
}
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
*/
if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
__func__, ep);
return 0;
}
/*
* Configuration and alternate setting changes must be done in
* process context, not interrupt context (or so the documentation
* for usb_set_interface() and usb_set_configuration() claims).
*/
if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
}
ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
/* If xhci_endpoint_disable() was called for this endpoint, but the
* xHC hasn't been notified yet through the check_bandwidth() call,
* this re-adds a new state for the endpoint from the new endpoint
* descriptors. We must drop and re-add this endpoint, so we leave the
* drop flags alone.
*/
new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
trace_xhci_add_endpoint(ep_ctx);
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags);
return 0;
}
EXPORT_SYMBOL_GPL(xhci_add_endpoint);
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
int i;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return;
}
/* When a device's add flag and drop flag are zero, any subsequent
* configure endpoint command will leave that endpoint's state
* untouched. Make sure we don't leave any old state in the input
* endpoint contexts.
*/
ctrl_ctx->drop_flags = 0;
ctrl_ctx->add_flags = 0;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
/* Endpoint 0 is always valid */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
for (i = 1; i < 31; i++) {
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->deq = 0;
ep_ctx->tx_info = 0;
}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
struct usb_device *udev, u32 *cmd_status)
{
int ret;
switch (*cmd_status) {
case COMP_COMMAND_ABORTED:
case COMP_COMMAND_RING_STOPPED:
xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
ret = -ETIME;
break;
case COMP_RESOURCE_ERROR:
dev_warn(&udev->dev,
"Not enough host controller resources for new device state.\n");
ret = -ENOMEM;
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BANDWIDTH_ERROR:
case COMP_SECONDARY_BANDWIDTH_ERROR:
dev_warn(&udev->dev,
"Not enough bandwidth for new device state.\n");
ret = -ENOSPC;
/* FIXME: can we go back to the old state? */
break;
case COMP_TRB_ERROR:
/* the HCD set up something wrong */
dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
"add flag = 1, "
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
dev_warn(&udev->dev,
"ERROR: Incompatible device for endpoint configure command.\n");
ret = -ENODEV;
break;
case COMP_SUCCESS:
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Successful Endpoint Configure command");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
*cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
struct usb_device *udev, u32 *cmd_status)
{
int ret;
switch (*cmd_status) {
case COMP_COMMAND_ABORTED:
case COMP_COMMAND_RING_STOPPED:
xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
ret = -ETIME;
break;
case COMP_PARAMETER_ERROR:
dev_warn(&udev->dev,
"WARN: xHCI driver setup invalid evaluate context command.\n");
ret = -EINVAL;
break;
case COMP_SLOT_NOT_ENABLED_ERROR:
dev_warn(&udev->dev,
"WARN: slot not enabled for evaluate context command.\n");
ret = -EINVAL;
break;
case COMP_CONTEXT_STATE_ERROR:
dev_warn(&udev->dev,
"WARN: invalid context state for evaluate context command.\n");
ret = -EINVAL;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
dev_warn(&udev->dev,
"ERROR: Incompatible device for evaluate context command.\n");
ret = -ENODEV;
break;
case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
/* Max Exit Latency too large error */
dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
ret = -EINVAL;
break;
case COMP_SUCCESS:
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Successful evaluate context command");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
*cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *ctrl_ctx)
{
u32 valid_add_flags;
u32 valid_drop_flags;
/* Ignore the slot flag (bit 0), and the default control endpoint flag
* (bit 1). The default control endpoint is added during the Address
* Device command and is never removed until the slot is disabled.
*/
valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
/* Use hweight32 to count the number of ones in the add flags, or
* number of endpoints added. Don't count endpoints that are changed
* (both added and dropped).
*/
return hweight32(valid_add_flags) -
hweight32(valid_add_flags & valid_drop_flags);
}
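/*
 * Worked example (illustrative numbers only): if valid_add_flags = 0x7
 * and valid_drop_flags = 0x4, one endpoint is both added and dropped
 * (changed), so the helper above returns
 * hweight32(0x7) - hweight32(0x7 & 0x4) = 3 - 1 = 2 truly new endpoints;
 * the dropped-endpoint helper below likewise returns 1 - 1 = 0.
 */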
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *ctrl_ctx)
{
u32 valid_add_flags;
u32 valid_drop_flags;
valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
return hweight32(valid_drop_flags) -
hweight32(valid_add_flags & valid_drop_flags);
}
/*
* We need to reserve the new number of endpoints before the configure endpoint
* command completes. We can't subtract the dropped endpoints from the number
* of active endpoints until the command completes because we can oversubscribe
* the host in this case:
*
* - the first configure endpoint command drops more endpoints than it adds
* - a second configure endpoint command that adds more endpoints is queued
* - the first configure endpoint command fails, so the config is unchanged
* - the second command may succeed, even though there aren't enough resources
*
* Must be called with xhci->lock held.
*/
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *ctrl_ctx)
{
u32 added_eps;
added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Not enough ep ctxs: "
"%u active, need to add %u, limit is %u.",
xhci->num_active_eps, added_eps,
xhci->limit_active_eps);
return -ENOMEM;
}
xhci->num_active_eps += added_eps;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Adding %u ep ctxs, %u now active.", added_eps,
xhci->num_active_eps);
return 0;
}
/*
* The configure endpoint command failed in the xHC for some other reason, so
* we need to revert the resources that the failed configuration would have
* used.
*
* Must be called with xhci->lock held.
*/
static void xhci_free_host_resources(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *ctrl_ctx)
{
u32 num_failed_eps;
num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
xhci->num_active_eps -= num_failed_eps;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Removing %u failed ep ctxs, %u now active.",
num_failed_eps,
xhci->num_active_eps);
}
/*
* Now that the command has completed, clean up the active endpoint count by
* subtracting out the endpoints that were dropped (but not changed).
*
* Must be called with xhci->lock held.
*/
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *ctrl_ctx)
{
u32 num_dropped_eps;
num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
xhci->num_active_eps -= num_dropped_eps;
if (num_dropped_eps)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Removing %u dropped ep ctxs, %u now active.",
num_dropped_eps,
xhci->num_active_eps);
}
static unsigned int xhci_get_block_size(struct usb_device *udev)
{
switch (udev->speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
return FS_BLOCK;
case USB_SPEED_HIGH:
return HS_BLOCK;
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
return SS_BLOCK;
case USB_SPEED_UNKNOWN:
default:
/* Should never happen */
return 1;
}
}
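/*
 * For orientation (block sizes assumed from their definitions in
 * xhci.h, not restated authoritatively here): a "block" is the unit
 * the bandwidth tables count in, and it scales with bus speed. E.g. a
 * 1024-byte SuperSpeed packet costs DIV_ROUND_UP(1024, SS_BLOCK)
 * blocks, while the same payload on a full-speed bus costs
 * DIV_ROUND_UP(1024, FS_BLOCK) of its smaller blocks.
 */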
static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
if (interval_bw->overhead[LS_OVERHEAD_TYPE])
return LS_OVERHEAD;
if (interval_bw->overhead[FS_OVERHEAD_TYPE])
return FS_OVERHEAD;
return HS_OVERHEAD;
}
/* If we are changing a LS/FS device under a HS hub,
* make sure (if we are activating a new TT) that the HS bus has enough
* bandwidth for this new TT.
*/
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int old_active_eps)
{
struct xhci_interval_bw_table *bw_table;
struct xhci_tt_bw_info *tt_info;
/* Find the bandwidth table for the root port this TT is attached to. */
bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
tt_info = virt_dev->tt_info;
/* If this TT already had active endpoints, the bandwidth for this TT
* has already been added. Removing all periodic endpoints (and thus
* making the TT inactive) will only decrease the bandwidth used.
*/
if (old_active_eps)
return 0;
if (old_active_eps == 0 && tt_info->active_eps != 0) {
if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
return -ENOMEM;
return 0;
}
/* Not sure why we would have no new active endpoints...
*
* Maybe because of an Evaluate Context change for a hub update or a
* control endpoint 0 max packet size change?
* FIXME: skip the bandwidth calculation in that case.
*/
return 0;
}
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev)
{
unsigned int bw_reserved;
bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
return -ENOMEM;
bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
return -ENOMEM;
return 0;
}
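/*
 * Illustrative arithmetic for the check above (the real SS_BW_RESERVED
 * and SS_BW_LIMIT_* constants live in xhci.h; the numbers here are
 * invented): with a reserved share of 20 percent and an IN limit of
 * 3906 blocks, bw_reserved = DIV_ROUND_UP(20 * 3906, 100) = 782, so
 * periodic IN traffic may use at most 3906 - 782 = 3124 blocks.
 */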
/*
* This algorithm is a very conservative estimate of the worst-case scheduling
* scenario for any one interval. The hardware dynamically schedules the
* packets, so we can't tell which microframe could be the limiting factor in
* the bandwidth scheduling. This only takes into account periodic endpoints.
*
* Obviously, we can't solve an NP complete problem to find the minimum worst
* case scenario. Instead, we come up with an estimate that is no less than
* the worst case bandwidth used for any one microframe, but may be an
* over-estimate.
*
* We walk the requirements for each endpoint by interval, starting with the
* smallest interval, and place packets in the schedule where there is only one
* possible way to schedule packets for that interval. In order to simplify
* this algorithm, we record the largest max packet size for each interval, and
* assume all packets will be that size.
*
* For interval 0, we obviously must schedule all packets for each interval.
* The bandwidth for interval 0 is just the amount of data to be transmitted
* (the sum of all max ESIT payload sizes, plus any overhead per packet times
* the number of packets).
*
* For interval 1, we have two possible microframes to schedule those packets
* in. For this algorithm, if we can schedule the same number of packets for
* each possible scheduling opportunity (each microframe), we will do so. The
* remaining number of packets will be saved to be transmitted in the gaps in
* the next interval's scheduling sequence.
*
* As we move those remaining packets to be scheduled with interval 2 packets,
* we have to double the number of remaining packets to transmit. This is
* because the intervals are actually powers of 2, and we would be transmitting
* the previous interval's packets twice in this interval. We also have to be
* sure that when we look at the largest max packet size for this interval, we
* also look at the largest max packet size for the remaining packets and take
* the greater of the two.
*
* The algorithm continues to evenly distribute packets in each scheduling
* opportunity, and push the remaining packets out, until we get to the last
* interval. Then those packets and their associated overhead are just added
* to the bandwidth used.
*/
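/*
 * Worked example of the carry-over described above (numbers invented
 * for illustration): say interval 1 contributes 3 packets and interval
 * 2 contributes 1. At i = 1 there are 1 << 2 = 4 scheduling slots, so
 * packets_transmitted = 3 >> 2 = 0 and packets_remaining = 3 % 4 = 3.
 * At i = 2 the leftovers double: packets_remaining = 2 * 3 + 1 = 7,
 * and with 1 << 3 = 8 slots all 7 carry over again, each accounted at
 * the largest (overhead + packet_size) seen so far.
 */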
static int xhci_check_bw_table(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int old_active_eps)
{
unsigned int bw_reserved;
unsigned int max_bandwidth;
unsigned int bw_used;
unsigned int block_size;
struct xhci_interval_bw_table *bw_table;
unsigned int packet_size = 0;
unsigned int overhead = 0;
unsigned int packets_transmitted = 0;
unsigned int packets_remaining = 0;
unsigned int i;
if (virt_dev->udev->speed >= USB_SPEED_SUPER)
return xhci_check_ss_bw(xhci, virt_dev);
if (virt_dev->udev->speed == USB_SPEED_HIGH) {
max_bandwidth = HS_BW_LIMIT;
/* Convert percent of bus BW reserved to blocks reserved */
bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
} else {
max_bandwidth = FS_BW_LIMIT;
bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
}
bw_table = virt_dev->bw_table;
/* We need to translate the max packet size and max ESIT payloads into
* the units the hardware uses.
*/
block_size = xhci_get_block_size(virt_dev->udev);
/* If we are manipulating a LS/FS device under a HS hub, double check
* that the HS bus has enough bandwidth if we are activating a new TT.
*/
if (virt_dev->tt_info) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
virt_dev->real_port);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
return -ENOMEM;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for TT slot %u port %u",
virt_dev->tt_info->slot_id,
virt_dev->tt_info->ttport);
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
virt_dev->real_port);
}
/* Add in how much bandwidth will be used for interval zero, or the
* rounded max ESIT payload + number of packets * largest overhead.
*/
bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
bw_table->interval_bw[0].num_packets *
xhci_get_largest_overhead(&bw_table->interval_bw[0]);
for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
unsigned int bw_added;
unsigned int largest_mps;
unsigned int interval_overhead;
/*
* How many packets could we transmit in this interval?
* If packets didn't fit in the previous interval, we will need
* to transmit that many packets twice within this interval.
*/
packets_remaining = 2 * packets_remaining +
bw_table->interval_bw[i].num_packets;
/* Find the largest max packet size of this or the previous
* interval.
*/
if (list_empty(&bw_table->interval_bw[i].endpoints))
largest_mps = 0;
else {
struct xhci_virt_ep *virt_ep;
struct list_head *ep_entry;
ep_entry = bw_table->interval_bw[i].endpoints.next;
virt_ep = list_entry(ep_entry,
struct xhci_virt_ep, bw_endpoint_list);
/* Convert to blocks, rounding up */
largest_mps = DIV_ROUND_UP(
virt_ep->bw_info.max_packet_size,
block_size);
}
if (largest_mps > packet_size)
packet_size = largest_mps;
/* Use the larger overhead of this or the previous interval. */
interval_overhead = xhci_get_largest_overhead(
&bw_table->interval_bw[i]);
if (interval_overhead > overhead)
overhead = interval_overhead;
/* How many packets can we evenly distribute across
* (1 << (i + 1)) possible scheduling opportunities?
*/
packets_transmitted = packets_remaining >> (i + 1);
/* Add in the bandwidth used for those scheduled packets */
bw_added = packets_transmitted * (overhead + packet_size);
/* How many packets do we have remaining to transmit? */
packets_remaining = packets_remaining % (1 << (i + 1));
/* What largest max packet size should those packets have? */
/* If we've transmitted all packets, don't carry over the
* largest packet size.
*/
if (packets_remaining == 0) {
packet_size = 0;
overhead = 0;
} else if (packets_transmitted > 0) {
/* Otherwise if we do have remaining packets, and we've
* scheduled some packets in this interval, take the
* largest max packet size from endpoints with this
* interval.
*/
packet_size = largest_mps;
overhead = interval_overhead;
}
/* Otherwise carry over packet_size and overhead from the last
* time we had a remainder.
*/
bw_used += bw_added;
if (bw_used > max_bandwidth) {
xhci_warn(xhci, "Not enough bandwidth. "
"Proposed: %u, Max: %u\n",
bw_used, max_bandwidth);
return -ENOMEM;
}
}
/*
* Ok, we know we have some packets left over after even-handedly
* scheduling interval 15. We don't know which microframes they will
* fit into, so we over-schedule and say they will be scheduled every
* microframe.
*/
if (packets_remaining > 0)
bw_used += overhead + packet_size;
if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
unsigned int port_index = virt_dev->real_port - 1;
/* OK, we're manipulating a HS device attached to a
* root port bandwidth domain. Include the number of active TTs
* in the bandwidth used.
*/
bw_used += TT_HS_OVERHEAD *
xhci->rh_bw[port_index].num_active_tts;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Final bandwidth: %u, Limit: %u, Reserved: %u, "
"Available: %u percent",
bw_used, max_bandwidth, bw_reserved,
(max_bandwidth - bw_used - bw_reserved) * 100 /
max_bandwidth);
bw_used += bw_reserved;
if (bw_used > max_bandwidth) {
xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
bw_used, max_bandwidth);
return -ENOMEM;
}
bw_table->bw_used = bw_used;
return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
ep_type != ISOC_IN_EP &&
ep_type != INT_IN_EP);
}
static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
if (ep_bw->ep_interval == 0)
return SS_OVERHEAD_BURST +
(ep_bw->mult * ep_bw->num_packets *
(SS_OVERHEAD + mps));
return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
1 << ep_bw->ep_interval);
}
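/*
 * Illustrative plug-in of the formula above (SS_BLOCK, SS_OVERHEAD and
 * SS_OVERHEAD_BURST are taken as defined in xhci.h and not restated
 * here): an isoc IN endpoint with max_packet_size = 1024, mult = 1,
 * num_packets = 2 and ep_interval = 3 consumes
 *	DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 1 << 3)
 * blocks, with mps = DIV_ROUND_UP(1024, SS_BLOCK).
 */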
static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
struct xhci_bw_info *ep_bw,
struct xhci_interval_bw_table *bw_table,
struct usb_device *udev,
struct xhci_virt_ep *virt_ep,
struct xhci_tt_bw_info *tt_info)
{
struct xhci_interval_bw *interval_bw;
int normalized_interval;
if (xhci_is_async_ep(ep_bw->type))
return;
if (udev->speed >= USB_SPEED_SUPER) {
if (xhci_is_sync_in_ep(ep_bw->type))
xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
xhci_get_ss_bw_consumed(ep_bw);
else
xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
xhci_get_ss_bw_consumed(ep_bw);
return;
}
/* SuperSpeed endpoints never get added to intervals in the table, so
* this check is only valid for HS/FS/LS devices.
*/
if (list_empty(&virt_ep->bw_endpoint_list))
return;
/* For LS/FS devices, we need to translate the interval expressed in
* microframes to frames.
*/
if (udev->speed == USB_SPEED_HIGH)
normalized_interval = ep_bw->ep_interval;
else
normalized_interval = ep_bw->ep_interval - 3;
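/*
 * Example: a full-speed interrupt endpoint with ep_interval = 3
 * (2^3 = 8 microframes = one 1 ms frame) normalizes to interval 0,
 * while a high-speed endpoint keeps its microframe-based interval
 * unchanged.
 */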
if (normalized_interval == 0)
bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
interval_bw = &bw_table->interval_bw[normalized_interval];
interval_bw->num_packets -= ep_bw->num_packets;
switch (udev->speed) {
case USB_SPEED_LOW:
interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
break;
case USB_SPEED_FULL:
interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
break;
case USB_SPEED_HIGH:
interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
break;
default:
/* Should never happen because only LS/FS/HS endpoints will get
* added to the endpoint list.
*/
return;
}
if (tt_info)
tt_info->active_eps -= 1;
list_del_init(&virt_ep->bw_endpoint_list);
}
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
struct xhci_bw_info *ep_bw,
struct xhci_interval_bw_table *bw_table,
struct usb_device *udev,
struct xhci_virt_ep *virt_ep,
struct xhci_tt_bw_info *tt_info)
{
struct xhci_interval_bw *interval_bw;
struct xhci_virt_ep *smaller_ep;
int normalized_interval;
if (xhci_is_async_ep(ep_bw->type))
return;
if (udev->speed >= USB_SPEED_SUPER) {
if (xhci_is_sync_in_ep(ep_bw->type))
xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
xhci_get_ss_bw_consumed(ep_bw);
else
xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
xhci_get_ss_bw_consumed(ep_bw);
return;
}
/* For LS/FS devices, we need to translate the interval expressed in
* microframes to frames.
*/
if (udev->speed == USB_SPEED_HIGH)
normalized_interval = ep_bw->ep_interval;
else
normalized_interval = ep_bw->ep_interval - 3;
if (normalized_interval == 0)
bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
interval_bw = &bw_table->interval_bw[normalized_interval];
interval_bw->num_packets += ep_bw->num_packets;
switch (udev->speed) {
case USB_SPEED_LOW:
interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
break;
case USB_SPEED_FULL:
interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
break;
case USB_SPEED_HIGH:
interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
break;
default:
/* Should never happen because only LS/FS/HS endpoints will get
* added to the endpoint list.
*/
return;
}
if (tt_info)
tt_info->active_eps += 1;
/* Insert the endpoint into the list, largest max packet size first. */
list_for_each_entry(smaller_ep, &interval_bw->endpoints,
bw_endpoint_list) {
if (ep_bw->max_packet_size >=
smaller_ep->bw_info.max_packet_size) {
/* Add the new ep before the smaller endpoint */
list_add_tail(&virt_ep->bw_endpoint_list,
&smaller_ep->bw_endpoint_list);
return;
}
}
/* Add the new endpoint at the end of the list. */
list_add_tail(&virt_ep->bw_endpoint_list,
&interval_bw->endpoints);
}
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int old_active_eps)
{
struct xhci_root_port_bw_info *rh_bw_info;
if (!virt_dev->tt_info)
return;
rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
} else if (old_active_eps != 0 &&
virt_dev->tt_info->active_eps == 0) {
rh_bw_info->num_active_tts -= 1;
rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
}
}
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct xhci_container_ctx *in_ctx)
{
struct xhci_bw_info ep_bw_info[31];
int i;
struct xhci_input_control_ctx *ctrl_ctx;
int old_active_eps = 0;
if (virt_dev->tt_info)
old_active_eps = virt_dev->tt_info->active_eps;
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return -ENOMEM;
}
for (i = 0; i < 31; i++) {
if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
continue;
/* Make a copy of the BW info in case we need to revert this */
memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
sizeof(ep_bw_info[i]));
/* Drop the endpoint from the interval table if the endpoint is
* being dropped or changed.
*/
if (EP_IS_DROPPED(ctrl_ctx, i))
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
}
/* Overwrite the information stored in the endpoints' bw_info */
xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
for (i = 0; i < 31; i++) {
/* Add any changed or added endpoints to the interval table */
if (EP_IS_ADDED(ctrl_ctx, i))
xhci_add_ep_to_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
}
if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
/* Ok, this fits in the bandwidth we have.
* Update the number of active TTs.
*/
xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
return 0;
}
/* We don't have enough bandwidth for this, revert the stored info. */
for (i = 0; i < 31; i++) {
if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
continue;
/* Drop the new copies of any added or changed endpoints from
* the interval table.
*/
if (EP_IS_ADDED(ctrl_ctx, i)) {
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
}
/* Revert the endpoint back to its old information */
memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
sizeof(ep_bw_info[i]));
/* Add any changed or dropped endpoints back into the table */
if (EP_IS_DROPPED(ctrl_ctx, i))
xhci_add_ep_to_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
virt_dev->udev,
&virt_dev->eps[i],
virt_dev->tt_info);
}
return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev,
struct xhci_command *command,
bool ctx_change, bool must_succeed)
{
int ret;
unsigned long flags;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
if (!command)
return -EINVAL;
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->xhc_state & XHCI_STATE_DYING) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -ESHUTDOWN;
}
virt_dev = xhci->devs[udev->slot_id];
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return -ENOMEM;
}
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
xhci_reserve_host_resources(xhci, ctrl_ctx)) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough host resources, "
"active endpoint contexts = %u\n",
xhci->num_active_eps);
return -ENOMEM;
}
if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough bandwidth\n");
return -ENOMEM;
}
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
trace_xhci_configure_endpoint(slot_ctx);
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, command,
command->in_ctx->dma,
udev->slot_id, must_succeed);
else
ret = xhci_queue_evaluate_context(xhci, command,
command->in_ctx->dma,
udev->slot_id, must_succeed);
if (ret < 0) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"FIXME allocate a new ring segment");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
wait_for_completion(command->completion);
if (!ctx_change)
ret = xhci_configure_endpoint_result(xhci, udev,
&command->status);
else
ret = xhci_evaluate_context_result(xhci, udev,
&command->status);
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
/* If the command failed, remove the reserved resources.
* Otherwise, clean up the estimate to include dropped eps.
*/
if (ret)
xhci_free_host_resources(xhci, ctrl_ctx);
else
xhci_finish_resource_reservation(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
}
return ret;
}
static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
struct xhci_virt_device *vdev, int i)
{
struct xhci_virt_ep *ep = &vdev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
}
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
*
* Since we are in the middle of changing either configuration or
* installing a new alt setting, the USB core won't allow URBs to be
* enqueued for any endpoint on the old config or interface. Nothing
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_command *command;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
if ((xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
command->in_ctx = virt_dev->in_ctx;
/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
ret = -ENOMEM;
goto command_cleanup;
}
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
/* Don't issue the command if there are no endpoints to update. */
if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
ctrl_ctx->drop_flags == 0) {
ret = 0;
goto command_cleanup;
}
/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
for (i = 31; i >= 1; i--) {
__le32 le32 = cpu_to_le32(BIT(i));
if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
|| (ctrl_ctx->add_flags & le32) || i == 1) {
slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
break;
}
}
ret = xhci_configure_endpoint(xhci, udev, command,
false, false);
if (ret)
/* Callee should call reset_bandwidth() */
goto command_cleanup;
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; i++) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
xhci_free_endpoint_ring(xhci, virt_dev, i);
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
}
}
xhci_zero_in_ctx(xhci, virt_dev);
/*
* Install any rings for completely new endpoints or changed endpoints,
* and free any old rings from changed endpoints.
*/
for (i = 1; i < 31; i++) {
if (!virt_dev->eps[i].new_ring)
continue;
/* Only free the old ring if it exists.
* It may not if this is the first add of an endpoint.
*/
if (virt_dev->eps[i].ring) {
xhci_free_endpoint_ring(xhci, virt_dev, i);
}
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
xhci_debugfs_create_endpoint(xhci, virt_dev, i);
}
command_cleanup:
kfree(command->completion);
kfree(command);
return ret;
}
EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
int i, ret;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
if (ret <= 0)
return;
xhci = hcd_to_xhci(hcd);
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; i++) {
if (virt_dev->eps[i].new_ring) {
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->eps[i].new_ring = NULL;
}
}
xhci_zero_in_ctx(xhci, virt_dev);
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
struct xhci_input_control_ctx *ctrl_ctx,
u32 add_flags, u32 drop_flags)
{
ctrl_ctx->add_flags = cpu_to_le32(add_flags);
ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
xhci_slot_copy(xhci, in_ctx, out_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
}
static void xhci_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *host_ep)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_virt_ep *ep;
struct usb_device *udev;
unsigned long flags;
unsigned int ep_index;
xhci = hcd_to_xhci(hcd);
rescan:
spin_lock_irqsave(&xhci->lock, flags);
udev = (struct usb_device *)host_ep->hcpriv;
if (!udev || !udev->slot_id)
goto done;
vdev = xhci->devs[udev->slot_id];
if (!vdev)
goto done;
ep_index = xhci_get_endpoint_index(&host_ep->desc);
ep = &vdev->eps[ep_index];
/* wait for hub_tt_work to finish clearing hub TT */
if (ep->ep_state & EP_CLEARING_TT) {
spin_unlock_irqrestore(&xhci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
}
if (ep->ep_state)
xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
ep->ep_state);
done:
host_ep->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
}
/*
* Called after usb core issues a clear halt control message.
* The host side of the halt should already be cleared by a reset endpoint
* command issued when the STALL event was received.
*
* The reset endpoint command may only be issued to endpoints in the halted
* state. For software that wishes to reset the data toggle or sequence number
* of an endpoint that isn't in the halted state, this function will issue a
* configure endpoint command with the Drop and Add bits set for the target
* endpoint. Refer to the additional note in xHCI specification section 4.6.8.
*/
static void xhci_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *host_ep)
{
struct xhci_hcd *xhci;
struct usb_device *udev;
struct xhci_virt_device *vdev;
struct xhci_virt_ep *ep;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_command *stop_cmd, *cfg_cmd;
unsigned int ep_index;
unsigned long flags;
u32 ep_flag;
int err;
xhci = hcd_to_xhci(hcd);
if (!host_ep->hcpriv)
return;
udev = (struct usb_device *) host_ep->hcpriv;
vdev = xhci->devs[udev->slot_id];
/*
* vdev may be lost due to xHC restore error and re-initialization
* during S3/S4 resume. A new vdev will be allocated later by
* xhci_discover_or_reset_device()
*/
if (!udev->slot_id || !vdev)
return;
ep_index = xhci_get_endpoint_index(&host_ep->desc);
ep = &vdev->eps[ep_index];
/* Bail out if toggle is already being cleared by an endpoint reset */
spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
spin_unlock_irqrestore(&xhci->lock, flags);
/* Only interrupt and bulk endpoints use data toggle; see USB 2.0 spec section 5.5.4 onward */
if (usb_endpoint_xfer_control(&host_ep->desc) ||
usb_endpoint_xfer_isoc(&host_ep->desc))
return;
ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
return;
stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
if (!stop_cmd)
return;
cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
if (!cfg_cmd)
goto cleanup;
spin_lock_irqsave(&xhci->lock, flags);
/* block queuing new trbs and ringing ep doorbell */
ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
/*
* Make sure the endpoint ring is empty before resetting the toggle/seq.
* The driver is required to synchronously cancel all transfer requests.
* Stop the endpoint to force the xHC to update the output context.
*/
if (!list_empty(&ep->ring->td_list)) {
dev_err(&udev->dev, "EP not empty, refuse reset\n");
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
ep_index, 0);
if (err < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cfg_cmd);
xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
__func__, err);
goto cleanup;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(stop_cmd->completion);
spin_lock_irqsave(&xhci->lock, flags);
/* config ep command clears toggle if add and drop ep flags are set */
ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cfg_cmd);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
goto cleanup;
}
xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
ctrl_ctx, ep_flag, ep_flag);
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
udev->slot_id, false);
if (err < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cfg_cmd);
xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
__func__, err);
goto cleanup;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(cfg_cmd->completion);
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct usb_host_endpoint *ep,
unsigned int slot_id)
{
int ret;
unsigned int ep_index;
unsigned int ep_state;
if (!ep)
return -EINVAL;
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return ret ? ret : -EINVAL;
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
return -EINVAL;
}
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
if (ep_state & EP_HAS_STREAMS ||
ep_state & EP_GETTING_STREAMS) {
xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
"already has streams set up.\n",
ep->desc.bEndpointAddress);
xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
"dynamic stream context array reallocation.\n");
return -EINVAL;
}
if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
"endpoint 0x%x; URBs are pending.\n",
ep->desc.bEndpointAddress);
return -EINVAL;
}
return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
unsigned int max_streams;
/* The stream context array size must be a power of two */
*num_stream_ctxs = roundup_pow_of_two(*num_streams);
/*
* Find out how many primary stream array entries the host controller
* supports. Later we may use secondary stream arrays (similar to 2nd
* level page entries), but that's an optional feature for xHCI host
* controllers. xHCs must support at least 4 stream IDs.
*/
max_streams = HCC_MAX_PSA(xhci->hcc_params);
if (*num_stream_ctxs > max_streams) {
xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
max_streams);
*num_stream_ctxs = max_streams;
*num_streams = max_streams;
}
}
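/*
 * Example: a request for 5 stream IDs rounds the context array up to 8
 * entries; if HCC_MAX_PSA reported only 4, both the array size and the
 * usable stream count would be clamped down to 4.
 */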
/* Returns an error code if one of the endpoints already has streams.
* This does not change any data structures, it only checks and gathers
* information.
*/
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int *num_streams, u32 *changed_ep_bitmask)
{
unsigned int max_streams;
unsigned int endpoint_flag;
int i;
int ret;
for (i = 0; i < num_eps; i++) {
ret = xhci_check_streams_endpoint(xhci, udev,
eps[i], udev->slot_id);
if (ret < 0)
return ret;
max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
if (max_streams < (*num_streams - 1)) {
xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
eps[i]->desc.bEndpointAddress,
max_streams);
*num_streams = max_streams+1;
}
endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
if (*changed_ep_bitmask & endpoint_flag)
return -EINVAL;
*changed_ep_bitmask |= endpoint_flag;
}
return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps)
{
u32 changed_ep_bitmask = 0;
unsigned int slot_id;
unsigned int ep_index;
unsigned int ep_state;
int i;
slot_id = udev->slot_id;
if (!xhci->devs[slot_id])
return 0;
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
/* Are streams already being freed for the endpoint? */
if (ep_state & EP_GETTING_NO_STREAMS) {
xhci_warn(xhci, "WARN Can't disable streams for "
"endpoint 0x%x, "
"streams are being disabled already\n",
eps[i]->desc.bEndpointAddress);
return 0;
}
/* Are there actually any streams to free? */
if (!(ep_state & EP_HAS_STREAMS) &&
!(ep_state & EP_GETTING_STREAMS)) {
xhci_warn(xhci, "WARN Can't disable streams for "
"endpoint 0x%x, "
"streams are already disabled!\n",
eps[i]->desc.bEndpointAddress);
xhci_warn(xhci, "WARN xhci_free_streams() called "
"with non-streams endpoint\n");
return 0;
}
changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
}
return changed_ep_bitmask;
}
/*
* The USB device drivers use this function (through the HCD interface in USB
* core) to prepare a set of bulk endpoints to use streams. Streams are used to
* coordinate mass storage command queueing across multiple endpoints (basically
* a stream ID == a task ID).
*
* Setting up streams involves allocating the same size stream context array
* for each endpoint and issuing a configure endpoint command for all endpoints.
*
* Don't allow the call to succeed if one endpoint only supports one stream
* (which means it doesn't support streams at all).
*
* Drivers may get less stream IDs than they asked for, if the host controller
* hardware or endpoints claim they can't support the number of requested
* stream IDs.
*/
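/*
 * Rough usage sketch from a class driver's perspective (simplified and
 * hypothetical; "intf", "bulk_in_ep" and "bulk_out_ep" are stand-ins).
 * The real entry point is usb_alloc_streams() in the USB core, which
 * reaches this function through the hc_driver ops:
 *
 *	struct usb_host_endpoint *eps[2] = { bulk_in_ep, bulk_out_ep };
 *	int streams = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *	if (streams < 0)
 *		return streams;	// fall back to plain bulk transfers
 *
 * On success, urb->stream_id selects which stream ring a URB rides on.
 */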
static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags)
{
int i, ret;
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *config_cmd;
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned int num_stream_ctxs;
unsigned int max_packet;
unsigned long flags;
u32 changed_ep_bitmask = 0;
if (!eps)
return -EINVAL;
/* Add one to the number of streams requested to account for
* stream 0 that is reserved for xHCI usage.
*/
num_streams += 1;
xhci = hcd_to_xhci(hcd);
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
/* MaxPSASize value 0 (2 streams) means streams are not supported */
if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
HCC_MAX_PSA(xhci->hcc_params) < 4) {
xhci_dbg(xhci, "xHCI controller does not support streams.\n");
return -ENOSYS;
}
config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!config_cmd)
return -ENOMEM;
ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
xhci_free_command(xhci, config_cmd);
return -ENOMEM;
}
/* Check to make sure all endpoints are not already configured for
* streams. While we're at it, find the maximum number of streams that
* all the endpoints will support and check for duplicate endpoints.
*/
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
num_eps, &num_streams, &changed_ep_bitmask);
if (ret < 0) {
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
if (num_streams <= 1) {
xhci_warn(xhci, "WARN: endpoints can't handle "
"more than one stream.\n");
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
return -EINVAL;
}
vdev = xhci->devs[udev->slot_id];
/* Mark each endpoint as being in transition, so
* xhci_urb_enqueue() will reject all URBs.
*/
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
}
spin_unlock_irqrestore(&xhci->lock, flags);
/* Setup internal data structures and allocate HW data structures for
* streams (but don't install the HW structures in the input context
* until we're sure all memory allocation succeeded).
*/
xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
num_stream_ctxs, num_streams);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
max_packet = usb_endpoint_maxp(&eps[i]->desc);
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
num_streams,
max_packet, mem_flags);
if (!vdev->eps[ep_index].stream_info)
goto cleanup;
/* Set maxPstreams in endpoint context and update deq ptr to
* point to stream context array. FIXME
*/
}
/* Set up the input context for a configure endpoint command. */
for (i = 0; i < num_eps; i++) {
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
xhci_endpoint_copy(xhci, config_cmd->in_ctx,
vdev->out_ctx, ep_index);
xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
vdev->eps[ep_index].stream_info);
}
/* Tell the HW to drop its old copy of the endpoint context info
* and add the updated copy from the input context.
*/
xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
vdev->out_ctx, ctrl_ctx,
changed_ep_bitmask, changed_ep_bitmask);
/* Issue and wait for the configure endpoint command */
ret = xhci_configure_endpoint(xhci, udev, config_cmd,
false, false);
/* xHC rejected the configure endpoint command for some reason, so we
* leave the old ring intact and free our internal streams data
* structure.
*/
if (ret < 0)
goto cleanup;
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
udev->slot_id, ep_index);
vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
}
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
}
/* Subtract 1 for stream 0, which drivers can't use */
return num_streams - 1;
cleanup:
/* If it didn't work, free the streams! */
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
vdev->eps[ep_index].stream_info = NULL;
/* FIXME Unset maxPstreams in endpoint context and
* update deq ptr to point to the normal endpoint ring.
*/
vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
xhci_endpoint_zero(xhci, vdev, eps[i]);
}
xhci_free_command(xhci, config_cmd);
return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
* without streams.
*
* Modify the endpoint context state, submit a configure endpoint command,
* and free all endpoint rings for streams if that completes successfully.
*/
static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
int i, ret;
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *command;
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned long flags;
u32 changed_ep_bitmask;
xhci = hcd_to_xhci(hcd);
vdev = xhci->devs[udev->slot_id];
/* Set up a configure endpoint command to remove the streams rings */
spin_lock_irqsave(&xhci->lock, flags);
changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
udev, eps, num_eps);
if (changed_ep_bitmask == 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -EINVAL;
}
/* Use the xhci_command structure from the first endpoint. We may have
* allocated too many, but the driver may call xhci_free_streams() for
* each endpoint it grouped into one call to xhci_alloc_streams().
*/
ep_index = xhci_get_endpoint_index(&eps[0]->desc);
command = vdev->eps[ep_index].stream_info->free_streams_command;
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return -EINVAL;
}
for (i = 0; i < num_eps; i++) {
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
EP_GETTING_NO_STREAMS;
xhci_endpoint_copy(xhci, command->in_ctx,
vdev->out_ctx, ep_index);
xhci_setup_no_streams_ep_input_ctx(ep_ctx,
&vdev->eps[ep_index]);
}
xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
vdev->out_ctx, ctrl_ctx,
changed_ep_bitmask, changed_ep_bitmask);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Issue and wait for the configure endpoint command,
* which must succeed.
*/
ret = xhci_configure_endpoint(xhci, udev, command,
false, true);
/* xHC rejected the configure endpoint command for some reason, so we
* leave the streams rings intact.
*/
if (ret < 0)
return ret;
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
vdev->eps[ep_index].stream_info = NULL;
/* FIXME Unset maxPstreams in endpoint context and
* update deq ptr to point to the normal endpoint ring.
*/
vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
}
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/*
* Deletes endpoint resources for endpoints that were active before a Reset
* Device command, or a Disable Slot command. The Reset Device command leaves
* the control endpoint intact, whereas the Disable Slot command deletes it.
*
* Must be called with xhci->lock held.
*/
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
int i;
unsigned int num_dropped_eps = 0;
unsigned int drop_flags = 0;
for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
if (virt_dev->eps[i].ring) {
drop_flags |= 1 << i;
num_dropped_eps++;
}
}
xhci->num_active_eps -= num_dropped_eps;
if (num_dropped_eps)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Dropped %u ep ctxs, flags = 0x%x, "
"%u now active.",
num_dropped_eps, drop_flags,
xhci->num_active_eps);
}
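/*
 * Example: if endpoint indexes 1 and 3 still had rings when a Reset
 * Device command completed, the loop above produces drop_flags = 0x0a
 * and num_active_eps shrinks by 2 (index 0, the default control
 * endpoint, is skipped unless drop_control_ep is set).
 */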
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
* xhci_address_device(), and then re-set up the configuration. If this is
* called because of a usb_reset_and_verify_device(), then the old alternate
* settings will be re-installed through the normal bandwidth allocation
* functions.
*
* Wait for the Reset Device command to finish. Remove all structures
* associated with the endpoints that were disabled. Clear the input device
* structure? Reset the control endpoint 0 max packet size?
*
* If the virt_dev to be reset does not exist or does not match the udev,
* it means the device is lost, possibly due to the xHC restore error and
* re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
* re-allocate the device.
*/
static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
struct usb_device *udev)
{
int ret, i;
unsigned long flags;
struct xhci_hcd *xhci;
unsigned int slot_id;
struct xhci_virt_device *virt_dev;
struct xhci_command *reset_device_cmd;
struct xhci_slot_ctx *slot_ctx;
int old_active_eps = 0;
ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
slot_id = udev->slot_id;
virt_dev = xhci->devs[slot_id];
if (!virt_dev) {
xhci_dbg(xhci, "The device to be reset with slot ID %u does "
"not exist. Re-allocate the device\n", slot_id);
ret = xhci_alloc_dev(hcd, udev);
if (ret == 1)
return 0;
else
return -EINVAL;
}
if (virt_dev->tt_info)
old_active_eps = virt_dev->tt_info->active_eps;
if (virt_dev->udev != udev) {
/* If the virt_dev and the udev do not match, this virt_dev
* may belong to another udev.
* Re-allocate the device.
*/
xhci_dbg(xhci, "The device to be reset with slot ID %u does "
"not match the udev. Re-allocate the device\n",
slot_id);
ret = xhci_alloc_dev(hcd, udev);
if (ret == 1)
return 0;
else
return -EINVAL;
}
/* If device is not setup, there is no point in resetting it */
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
SLOT_STATE_DISABLED)
return 0;
trace_xhci_discover_or_reset_device(slot_ctx);
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
/* Allocate the command structure that holds the struct completion.
* Assume we're in process context, since the normal device reset
* process has to wait for the device anyway. Storage devices are
* reset as part of error handling, so use GFP_NOIO instead of
* GFP_KERNEL.
*/
reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!reset_device_cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
return -ENOMEM;
}
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
if (ret) {
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
spin_unlock_irqrestore(&xhci->lock, flags);
goto command_cleanup;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the Reset Device command to finish */
wait_for_completion(reset_device_cmd->completion);
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
* or the device wasn't in the addressed or configured state.
*/
ret = reset_device_cmd->status;
switch (ret) {
case COMP_COMMAND_ABORTED:
case COMP_COMMAND_RING_STOPPED:
xhci_warn(xhci, "Timeout waiting for reset device command\n");
ret = -ETIME;
goto command_cleanup;
case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
slot_id,
xhci_get_slot_state(xhci, virt_dev->out_ctx));
xhci_dbg(xhci, "Not freeing device rings.\n");
/* Don't treat this as an error. May change my mind later. */
ret = 0;
goto command_cleanup;
case COMP_SUCCESS:
xhci_dbg(xhci, "Successful reset device command.\n");
break;
default:
if (xhci_is_vendor_info_code(xhci, ret))
break;
xhci_warn(xhci, "Unknown completion code %u for "
"reset device command.\n", ret);
ret = -EINVAL;
goto command_cleanup;
}
/* Free up host controller endpoint resources */
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
/* Don't delete the default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, false);
spin_unlock_irqrestore(&xhci->lock, flags);
}
/* Everything but endpoint 0 is disabled, so free the rings. */
for (i = 1; i < 31; i++) {
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
if (ep->ring) {
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
}
if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
udev,
&virt_dev->eps[i],
virt_dev->tt_info);
xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
}
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
virt_dev->flags = 0;
ret = 0;
command_cleanup:
xhci_free_command(xhci, reset_device_cmd);
return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, all traffic has been stopped, and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
*/
static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
int i, ret;
/*
* We called pm_runtime_get_noresume when the device was attached.
* Decrement the counter here to allow controller to runtime suspend
* if no devices remain.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_put_noidle(hcd->self.controller);
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
* device.
*/
if (ret <= 0 && ret != -ENODEV)
return;
virt_dev = xhci->devs[udev->slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_free_dev(slot_ctx);
/* Stop any wayward timer functions (which may grab the lock) */
for (i = 0; i < 31; i++)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
spin_lock_irqsave(&xhci->lock, flags);
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
}
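/*
 * Issue a Disable Slot command and wait for it to complete. This returns an
 * error only if the command could not be allocated or queued, or the host is
 * dead; an unsuccessful command completion status is merely logged.
 */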
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
struct xhci_command *command;
unsigned long flags;
u32 state;
int ret;
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
xhci_debugfs_remove_slot(xhci, slot_id);
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command);
return -ENODEV;
}
ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command);
return ret;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
if (command->status != COMP_SUCCESS)
xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
slot_id, command->status);
xhci_free_command(xhci, command);
return 0;
}
/*
* Checks if we have enough host controller resources for the default control
* endpoint.
*
* Must be called with xhci->lock held.
*/
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Not enough ep ctxs: "
"%u active, need to add 1, limit is %u.",
xhci->num_active_eps, xhci->limit_active_eps);
return -ENOMEM;
}
xhci->num_active_eps += 1;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Adding 1 ep ctx, %u now active.",
xhci->num_active_eps);
return 0;
}
/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *vdev;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
int ret, slot_id;
struct xhci_command *command;
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return 0;
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
xhci_free_command(xhci, command);
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
slot_id = command->slot_id;
if (!slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID: %s\n",
xhci_trb_comp_code_string(command->status));
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
readl(&xhci->cap_regs->hcs_params1)));
xhci_free_command(xhci, command);
return 0;
}
xhci_free_command(xhci, command);
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_reserve_host_control_ep_resources(xhci);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough host resources, "
"active endpoint contexts = %u\n",
xhci->num_active_eps);
goto disable_slot;
}
spin_unlock_irqrestore(&xhci->lock, flags);
}
/* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
vdev = xhci->devs[slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_alloc_dev(slot_ctx);
udev->slot_id = slot_id;
xhci_debugfs_create_slot(xhci, slot_id);
/*
* If resetting upon resume, we can't put the controller into runtime
* suspend if there is a device attached.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_get_noresume(hcd->self.controller);
/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
return 1;
disable_slot:
xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
return 0;
}
/*
* Issue an Address Device command and optionally send a corresponding
* SetAddress request to the device.
*/
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
enum xhci_setup_dev setup)
{
const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
struct xhci_virt_device *virt_dev;
int ret = 0;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
struct xhci_command *command = NULL;
mutex_lock(&xhci->mutex);
if (xhci->xhc_state) { /* dying, removing or halted */
ret = -ESHUTDOWN;
goto out;
}
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Bad Slot ID %d", udev->slot_id);
ret = -EINVAL;
goto out;
}
virt_dev = xhci->devs[udev->slot_id];
if (WARN_ON(!virt_dev)) {
/*
		 * In a plug/unplug torture test with an NEC controller,
		 * a NULL-pointer dereference was observed once because
		 * virt_dev was 0. Print useful debug info rather than
		 * crash if it is observed again!
*/
xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
udev->slot_id);
ret = -EINVAL;
goto out;
}
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_setup_device_slot(slot_ctx);
if (setup == SETUP_CONTEXT_ONLY) {
if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
SLOT_STATE_DEFAULT) {
xhci_dbg(xhci, "Slot already in default state\n");
goto out;
}
}
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command) {
ret = -ENOMEM;
goto out;
}
command->in_ctx = virt_dev->in_ctx;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
ret = -EINVAL;
goto out;
}
/*
* If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
* then set up the slot context.
*/
if (!slot_ctx->dev_info)
xhci_setup_addressable_virt_dev(xhci, udev);
/* Otherwise, update the control endpoint ring enqueue pointer. */
else
xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
ctrl_ctx->drop_flags = 0;
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
trace_xhci_address_ctrl_ctx(ctrl_ctx);
spin_lock_irqsave(&xhci->lock, flags);
trace_xhci_setup_device(virt_dev);
ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
udev->slot_id, setup);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"FIXME: allocate a command ring segment");
goto out;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
wait_for_completion(command->completion);
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
*/
switch (command->status) {
case COMP_COMMAND_ABORTED:
case COMP_COMMAND_RING_STOPPED:
xhci_warn(xhci, "Timeout while waiting for setup device command\n");
ret = -ETIME;
break;
case COMP_CONTEXT_STATE_ERROR:
case COMP_SLOT_NOT_ENABLED_ERROR:
xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
act, udev->slot_id);
ret = -EINVAL;
break;
case COMP_USB_TRANSACTION_ERROR:
dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
if (!ret)
xhci_alloc_dev(hcd, udev);
kfree(command->completion);
kfree(command);
return -EPROTO;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
dev_warn(&udev->dev,
"ERROR: Incompatible device for setup %s command\n", act);
ret = -ENODEV;
break;
case COMP_SUCCESS:
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Successful setup %s command", act);
break;
default:
xhci_err(xhci,
"ERROR: unexpected setup %s command completion code 0x%x.\n",
act, command->status);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
}
if (ret)
goto out;
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Slot ID %d dcbaa entry @%p = %#016llx",
udev->slot_id,
&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
(unsigned long long)
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Output Context DMA address = %#08llx",
(unsigned long long)virt_dev->out_ctx->dma);
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
/*
* USB core uses address 1 for the roothubs, so we add one to the
* address given back to us by the HC.
*/
trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Internal device address = %d",
le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
mutex_unlock(&xhci->mutex);
if (command) {
kfree(command->completion);
kfree(command);
}
return ret;
}
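/*
 * Thin wrappers around xhci_setup_device(): SETUP_CONTEXT_ADDRESS makes the
 * xHC actually send a USB SET_ADDRESS to the device, while SETUP_CONTEXT_ONLY
 * sets the Block Set Address Request (BSR) flag so only the slot context is
 * set up and the device is left in the Default state.
 */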
static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}
/*
 * Transfer the port index into the real index in the HW port status
 * registers. Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index. The raw port number is 1-based.
*/
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
struct xhci_hub *rhub;
rhub = xhci_get_rhub(hcd);
return rhub->ports[port1 - 1]->hw_portnum + 1;
}
/*
* Issue an Evaluate Context command to change the Maximum Exit Latency in the
* slot context. If that succeeds, store the new MEL in the xhci_virt_device.
*/
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
struct usb_device *udev, u16 max_exit_latency)
{
struct xhci_virt_device *virt_dev;
struct xhci_command *command;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
int ret;
command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
/*
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
	 * and was re-initialized. Exit latency will be set later, after
	 * hub_port_finish_reset() is done and xhci->devs[] is re-allocated.
*/
if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, command);
return 0;
}
/* Attempt to issue an Evaluate Context command to change the MEL. */
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, command);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
return -ENOMEM;
}
xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
slot_ctx->dev_state = 0;
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Set up evaluate context for LPM MEL change.");
/* Issue and wait for the evaluate context command. */
ret = xhci_configure_endpoint(xhci, udev, command,
true, true);
if (!ret) {
spin_lock_irqsave(&xhci->lock, flags);
virt_dev->current_mel = max_exit_latency;
spin_unlock_irqrestore(&xhci->lock, flags);
}
xhci_free_command(xhci, command);
return ret;
}
#ifdef CONFIG_PM
/* BESL to HIRD Encoding array for USB2 LPM */
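/* Each 4-bit BESL index maps to a Best Effort Service Latency in microseconds */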
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
struct usb_device *udev)
{
int u2del, besl, besl_host;
int besl_device = 0;
u32 field;
u2del = HCS_U2_LATENCY(xhci->hcs_params3);
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
if (field & USB_BESL_SUPPORT) {
for (besl_host = 0; besl_host < 16; besl_host++) {
if (xhci_besl_encoding[besl_host] >= u2del)
break;
}
/* Use baseline BESL value as default */
if (field & USB_BESL_BASELINE_VALID)
besl_device = USB_GET_BESL_BASELINE(field);
else if (field & USB_BESL_DEEP_VALID)
besl_device = USB_GET_BESL_DEEP(field);
} else {
if (u2del <= 50)
besl_host = 0;
else
besl_host = (u2del - 51) / 75 + 1;
}
besl = besl_host + besl_device;
if (besl > 15)
besl = 15;
return besl;
}
/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
u32 field;
int l1;
int besld = 0;
int hirdm = 0;
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
l1 = udev->l1_params.timeout / 256;
/* device has preferred BESLD */
if (field & USB_BESL_DEEP_VALID) {
besld = USB_GET_BESL_DEEP(field);
hirdm = 1;
}
return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port **ports;
__le32 __iomem *pm_addr, *hlpm_addr;
u32 pm_val, hlpm_val, field;
unsigned int port_num;
unsigned long flags;
int hird, exit_latency;
int ret;
if (xhci->quirks & XHCI_HW_LPM_DISABLE)
return -EPERM;
if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
if (!udev->parent || udev->parent->parent ||
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
if (udev->usb2_hw_lpm_capable != 1)
return -EPERM;
spin_lock_irqsave(&xhci->lock, flags);
ports = xhci->usb2_rhub.ports;
port_num = udev->portnum - 1;
pm_addr = ports[port_num]->addr + PORTPMSC;
pm_val = readl(pm_addr);
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
* default one which works with mixed HIRD and BESL
* systems. See XHCI_DEFAULT_BESL definition in xhci.h
*/
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
if ((field & USB_BESL_SUPPORT) &&
(field & USB_BESL_BASELINE_VALID))
hird = USB_GET_BESL_BASELINE(field);
else
hird = udev->l1_params.besl;
exit_latency = xhci_besl_encoding[hird];
spin_unlock_irqrestore(&xhci->lock, flags);
ret = xhci_change_max_exit_latency(xhci, udev,
exit_latency);
if (ret < 0)
return ret;
spin_lock_irqsave(&xhci->lock, flags);
hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
writel(hlpm_val, hlpm_addr);
/* flush write */
readl(hlpm_addr);
} else {
hird = xhci_calculate_hird_besl(xhci, udev);
}
pm_val &= ~PORT_HIRD_MASK;
pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
writel(pm_val, pm_addr);
pm_val = readl(pm_addr);
pm_val |= PORT_HLE;
writel(pm_val, pm_addr);
/* flush write */
readl(pm_addr);
} else {
pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
writel(pm_val, pm_addr);
/* flush write */
readl(pm_addr);
if (udev->usb2_hw_lpm_besl_capable) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_change_max_exit_latency(xhci, udev, 0);
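			/* Wait up to 10 ms for the port link state to return to U0 */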
readl_poll_timeout(ports[port_num]->addr, pm_val,
(pm_val & PORT_PLS_MASK) == XDEV_U0,
100, 10000);
return 0;
}
}
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/* Check if a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
unsigned capability)
{
u32 port_offset, port_count;
int i;
for (i = 0; i < xhci->num_ext_caps; i++) {
if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
if (port >= port_offset &&
port < port_offset + port_count)
return 1;
}
}
return 0;
}
static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int portnum = udev->portnum - 1;
if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
return 0;
/* we only support lpm for non-hub device connected to root hub yet */
if (!udev->parent || udev->parent->parent ||
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
if (xhci->hw_lpm_support == 1 &&
xhci_check_usb2_port_capability(
xhci, portnum, XHCI_HLC)) {
udev->usb2_hw_lpm_capable = 1;
udev->l1_params.timeout = XHCI_L1_TIMEOUT;
udev->l1_params.besl = XHCI_DEFAULT_BESL;
if (xhci_check_usb2_port_capability(xhci, portnum,
XHCI_BLC))
udev->usb2_hw_lpm_besl_capable = 1;
}
return 0;
}
/*---------------------- USB 3.0 Link PM functions ------------------------*/
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
struct usb_endpoint_descriptor *desc)
{
return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
enum usb3_link_state state)
{
unsigned long long sel;
unsigned long long pel;
unsigned int max_sel_pel;
char *state_name;
switch (state) {
case USB3_LPM_U1:
/* Convert SEL and PEL stored in nanoseconds to microseconds */
sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
state_name = "U1";
break;
case USB3_LPM_U2:
sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
state_name = "U2";
break;
default:
dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
__func__);
return USB3_LPM_DISABLED;
}
if (sel <= max_sel_pel && pel <= max_sel_pel)
return USB3_LPM_DEVICE_INITIATED;
	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us\n",
			state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long PEL %llu us\n",
			state_name, pel);
return USB3_LPM_DISABLED;
}
/* The U1 timeout should be the maximum of the following values:
* - For control endpoints, U1 system exit latency (SEL) * 3
* - For bulk endpoints, U1 SEL * 5
* - For interrupt endpoints:
* - Notification EPs, U1 SEL * 3
* - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
* - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
*/
static unsigned long long xhci_calculate_intel_u1_timeout(
struct usb_device *udev,
struct usb_endpoint_descriptor *desc)
{
unsigned long long timeout_ns;
int ep_type;
int intr_type;
ep_type = usb_endpoint_type(desc);
switch (ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
timeout_ns = udev->u1_params.sel * 3;
break;
case USB_ENDPOINT_XFER_BULK:
timeout_ns = udev->u1_params.sel * 5;
break;
case USB_ENDPOINT_XFER_INT:
intr_type = usb_endpoint_interrupt_type(desc);
if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
timeout_ns = udev->u1_params.sel * 3;
break;
}
/* Otherwise the calculation is the same as isoc eps */
fallthrough;
case USB_ENDPOINT_XFER_ISOC:
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
if (timeout_ns < udev->u1_params.sel * 2)
timeout_ns = udev->u1_params.sel * 2;
break;
default:
return 0;
}
return timeout_ns;
}
/* Returns the hub-encoded U1 timeout value. */
static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_endpoint_descriptor *desc)
{
unsigned long long timeout_ns;
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
else
timeout_ns = udev->u1_params.sel;
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
if (timeout_ns == USB3_LPM_DISABLED)
timeout_ns = 1;
else
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
/* If the necessary timeout value is bigger than what we can set in the
* USB 3.0 hub, we have to disable hub-initiated U1.
*/
if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
return timeout_ns;
dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
"due to long timeout %llu ms\n", timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
/* The U2 timeout should be the maximum of:
* - 10 ms (to avoid the bandwidth impact on the scheduler)
* - largest bInterval of any active periodic endpoint (to avoid going
* into lower power link states between intervals).
* - the U2 Exit Latency of the device
*/
static unsigned long long xhci_calculate_intel_u2_timeout(
struct usb_device *udev,
struct usb_endpoint_descriptor *desc)
{
unsigned long long timeout_ns;
unsigned long long u2_del_ns;
timeout_ns = 10 * 1000 * 1000;
if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
(xhci_service_interval_to_ns(desc) > timeout_ns))
timeout_ns = xhci_service_interval_to_ns(desc);
u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
if (u2_del_ns > timeout_ns)
timeout_ns = u2_del_ns;
return timeout_ns;
}
/* Returns the hub-encoded U2 timeout value. */
static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_endpoint_descriptor *desc)
{
unsigned long long timeout_ns;
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
else
timeout_ns = udev->u2_params.sel;
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
* USB 3.0 hub, we have to disable hub-initiated U2.
*/
if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
return timeout_ns;
dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
"due to long timeout %llu ms\n", timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
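/*
 * Dispatch to the U1 or U2 timeout calculation for this endpoint; the
 * @timeout parameter is unused here and is aggregated by the caller,
 * xhci_update_timeout_for_endpoint().
 */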
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_endpoint_descriptor *desc,
enum usb3_link_state state,
u16 *timeout)
{
if (state == USB3_LPM_U1)
return xhci_calculate_u1_timeout(xhci, udev, desc);
else if (state == USB3_LPM_U2)
return xhci_calculate_u2_timeout(xhci, udev, desc);
return USB3_LPM_DISABLED;
}
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_endpoint_descriptor *desc,
enum usb3_link_state state,
u16 *timeout)
{
u16 alt_timeout;
alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
desc, state, timeout);
/* If we found we can't enable hub-initiated LPM, and
* the U1 or U2 exit latency was too high to allow
* device-initiated LPM as well, then we will disable LPM
* for this device, so stop searching any further.
*/
if (alt_timeout == USB3_LPM_DISABLED) {
*timeout = alt_timeout;
return -E2BIG;
}
if (alt_timeout > *timeout)
*timeout = alt_timeout;
return 0;
}
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_interface *alt,
enum usb3_link_state state,
u16 *timeout)
{
int j;
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
if (xhci_update_timeout_for_endpoint(xhci, udev,
&alt->endpoint[j].desc, state, timeout))
return -E2BIG;
}
return 0;
}
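/*
 * Enforce per-vendor tier limits for U1/U2: counting the roothub as
 * tier 1, Intel hosts allow LPM only up to tier 3 and Zhaoxin hosts
 * only up to tier 2.
 */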
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
struct usb_device *udev,
enum usb3_link_state state)
{
struct usb_device *parent = udev->parent;
int tier = 1; /* roothub is tier1 */
while (parent) {
parent = parent->parent;
tier++;
}
if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
goto fail;
if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
goto fail;
return 0;
fail:
dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
tier);
return -E2BIG;
}
/* Returns the U1 or U2 timeout that should be enabled.
* If the tier check or timeout setting functions return with a non-zero exit
* code, that means the timeout value has been finalized and we shouldn't look
* at any more endpoints.
*/
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_host_config *config;
char *state_name;
int i;
u16 timeout = USB3_LPM_DISABLED;
if (state == USB3_LPM_U1)
state_name = "U1";
else if (state == USB3_LPM_U2)
state_name = "U2";
else {
dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
state);
return timeout;
}
/* Gather some information about the currently installed configuration
* and alternate interface settings.
*/
if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
state, &timeout))
return timeout;
config = udev->actconfig;
if (!config)
return timeout;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_driver *driver;
struct usb_interface *intf = config->interface[i];
if (!intf)
continue;
/* Check if any currently bound drivers want hub-initiated LPM
* disabled.
*/
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
if (driver && driver->disable_hub_initiated_lpm) {
dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
state_name, driver->name);
timeout = xhci_get_timeout_no_hub_lpm(udev,
state);
if (timeout == USB3_LPM_DISABLED)
return timeout;
}
}
/* Not sure how this could happen... */
if (!intf->cur_altsetting)
continue;
if (xhci_update_timeout_for_interface(xhci, udev,
intf->cur_altsetting,
state, &timeout))
return timeout;
}
return timeout;
}
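/*
 * Compute the Max Exit Latency (in microseconds) that the slot context
 * must advertise: the worst-case U1/U2 exit latency over the link states
 * that remain enabled after this change. Returns -E2BIG if the result
 * does not fit in the 16-bit MEL field.
 */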
static int calculate_max_exit_latency(struct usb_device *udev,
enum usb3_link_state state_changed,
u16 hub_encoded_timeout)
{
unsigned long long u1_mel_us = 0;
unsigned long long u2_mel_us = 0;
unsigned long long mel_us = 0;
bool disabling_u1;
bool disabling_u2;
bool enabling_u1;
bool enabling_u2;
disabling_u1 = (state_changed == USB3_LPM_U1 &&
hub_encoded_timeout == USB3_LPM_DISABLED);
disabling_u2 = (state_changed == USB3_LPM_U2 &&
hub_encoded_timeout == USB3_LPM_DISABLED);
enabling_u1 = (state_changed == USB3_LPM_U1 &&
hub_encoded_timeout != USB3_LPM_DISABLED);
enabling_u2 = (state_changed == USB3_LPM_U2 &&
hub_encoded_timeout != USB3_LPM_DISABLED);
/* If U1 was already enabled and we're not disabling it,
* or we're going to enable U1, account for the U1 max exit latency.
*/
if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
enabling_u1)
u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
enabling_u2)
u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
mel_us = max(u1_mel_us, u2_mel_us);
/* xHCI host controller max exit latency field is only 16 bits wide. */
if (mel_us > MAX_EXIT) {
dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
"is too big.\n", mel_us);
return -E2BIG;
}
return mel_us;
}
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
struct xhci_port *port;
u16 hub_encoded_timeout;
int mel;
int ret;
xhci = hcd_to_xhci(hcd);
/* The LPM timeout values are pretty host-controller specific, so don't
* enable hub-initiated timeouts unless the vendor has provided
* information about their timeout algorithm.
*/
if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
!xhci->devs[udev->slot_id])
return USB3_LPM_DISABLED;
if (xhci_check_tier_policy(xhci, udev, state) < 0)
return USB3_LPM_DISABLED;
/* If connected to root port then check port can handle lpm */
if (udev->parent && !udev->parent->parent) {
port = xhci->usb3_rhub.ports[udev->portnum - 1];
if (port->lpm_incapable)
return USB3_LPM_DISABLED;
}
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
/* Max Exit Latency is too big, disable LPM. */
hub_encoded_timeout = USB3_LPM_DISABLED;
mel = 0;
}
ret = xhci_change_max_exit_latency(xhci, udev, mel);
if (ret)
return ret;
return hub_encoded_timeout;
}
static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
u16 mel;
xhci = hcd_to_xhci(hcd);
if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
!xhci->devs[udev->slot_id])
return 0;
mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */
static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
return 0;
}
static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return 0;
}
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
return USB3_LPM_DISABLED;
}
static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
return 0;
}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *vdev;
struct xhci_command *config_cmd;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
unsigned think_time;
int ret;
/* Ignore root hubs */
if (!hdev->parent)
return 0;
vdev = xhci->devs[hdev->slot_id];
if (!vdev) {
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!config_cmd)
return -ENOMEM;
ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
xhci_free_command(xhci, config_cmd);
return -ENOMEM;
}
spin_lock_irqsave(&xhci->lock, flags);
if (hdev->speed == USB_SPEED_HIGH &&
xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
return -ENOMEM;
}
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
/*
	 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
	 * but it may already be set to 1 when an xHCI virtual device is
	 * set up, so clear it anyway.
*/
if (tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
else if (hdev->speed == USB_SPEED_FULL)
slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
if (xhci->hci_version > 0x95) {
xhci_dbg(xhci, "xHCI version %x needs hub "
"TT think time and number of ports\n",
(unsigned int) xhci->hci_version);
slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
/* Set TT think time - convert from ns to FS bit times.
* 0 = 8 FS bit times, 1 = 16 FS bit times,
* 2 = 24 FS bit times, 3 = 32 FS bit times.
*
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
*/
think_time = tt->think_time;
if (think_time != 0)
think_time = (think_time / 666) - 1;
if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
slot_ctx->tt_info |=
cpu_to_le32(TT_THINK_TIME(think_time));
} else {
xhci_dbg(xhci, "xHCI version %x doesn't need hub "
"TT think time or number of ports\n",
(unsigned int) xhci->hci_version);
}
slot_ctx->dev_state = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Set up %s for hub device.\n",
(xhci->hci_version > 0x95) ?
"configure endpoint" : "evaluate context");
/* Issue and wait for the configure endpoint or
* evaluate context command.
*/
if (xhci->hci_version > 0x95)
ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
false, false);
else
ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
true, false);
xhci_free_command(xhci, config_cmd);
return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);
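/*
 * MFINDEX counts 125 us microframes; shift right by 3 to convert it to
 * 1 ms frames for the USB core.
 */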
static int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* EHCI mods by the periodic size. Why? */
return readl(&xhci->run_regs->microframe_index) >> 3;
}
static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
xhci->usb2_rhub.hcd = hcd;
hcd->speed = HCD_USB2;
hcd->self.root_hub->speed = USB_SPEED_HIGH;
/*
	 * A USB 2.0 roothub under xHCI has an integrated TT (rate
	 * matching hub), as opposed to having an OHCI/UHCI
* companion controller.
*/
hcd->has_tt = 1;
}
static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
unsigned int minor_rev;
/*
* Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
* should return 0x31 for sbrn, or that the minor revision
	 * is a two digit BCD containing minor and sub-minor numbers.
* This was later clarified in xHCI 1.2.
*
* Some USB 3.1 capable hosts therefore have sbrn 0x30, and
* minor revision set to 0x1 instead of 0x10.
*/
if (xhci->usb3_rhub.min_rev == 0x1)
minor_rev = 1;
else
minor_rev = xhci->usb3_rhub.min_rev / 0x10;
switch (minor_rev) {
case 2:
hcd->speed = HCD_USB32;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
hcd->self.root_hub->rx_lanes = 2;
hcd->self.root_hub->tx_lanes = 2;
hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
break;
case 1:
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
break;
}
xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
minor_rev, minor_rev ? "Enhanced " : "");
xhci->usb3_rhub.hcd = hcd;
}
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
/*
* TODO: Check with DWC3 clients for sysdev according to
* quirks
*/
struct device *dev = hcd->self.sysdev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
/* support to build packet from discontinuous buffers */
hcd->self.no_sg_constraint = 1;
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
xhci = hcd_to_xhci(hcd);
if (!usb_hcd_is_primary_hcd(hcd)) {
xhci_hcd_init_usb3_data(xhci, hcd);
return 0;
}
mutex_init(&xhci->mutex);
xhci->main_hcd = hcd;
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
xhci->run_regs = hcd->regs +
(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
/* Cache read-only capability registers */
xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
/* xhci-plat or xhci-pci might have set max_interrupters already */
if ((!xhci->max_interrupters) ||
xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
xhci->quirks |= quirks;
if (get_quirks)
get_quirks(dev, xhci);
	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious events.
	 */
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
return retval;
xhci_zero_64b_regs(xhci);
xhci_dbg(xhci, "Resetting HCD\n");
/* Reset the internal HC memory state and registers. */
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
if (retval)
return retval;
xhci_dbg(xhci, "Reset complete\n");
/*
* On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
* of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
* address memory pointers actually. So, this driver clears the AC64
* bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
* DMA_BIT_MASK(32)) in this xhci_gen_setup().
*/
if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
xhci->hcc_params &= ~BIT(0);
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
!dma_set_mask(dev, DMA_BIT_MASK(64))) {
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
} else {
/*
* This is to avoid error in cases where a 32-bit USB
* controller is used on a 64-bit capable system.
*/
retval = dma_set_mask(dev, DMA_BIT_MASK(32));
if (retval)
return retval;
xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}
xhci_dbg(xhci, "Calling HCD init\n");
/* Initialize HCD and host controller data structures. */
retval = xhci_init(hcd);
if (retval)
return retval;
xhci_dbg(xhci, "Called HCD init\n");
if (xhci_hcd_is_usb3(hcd))
xhci_hcd_init_usb3_data(xhci, hcd);
else
xhci_hcd_init_usb2_data(xhci, hcd);
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
xhci->hcc_params, xhci->hci_version, xhci->quirks);
return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
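/*
 * Called once a Clear-TT-Buffer request for this endpoint has completed;
 * clear EP_CLEARING_TT and restart any rings that were held off while
 * the TT buffer was being cleaned up.
 */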
static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
struct usb_device *udev;
unsigned int slot_id;
unsigned int ep_index;
unsigned long flags;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
udev = (struct usb_device *)ep->hcpriv;
slot_id = udev->slot_id;
ep_index = xhci_get_endpoint_index(&ep->desc);
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
}
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
.hcd_priv_size = sizeof(struct xhci_hcd),
/*
* generic hardware linkage
*/
.irq = xhci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
HCD_BH,
/*
* basic lifecycle operations
*/
.reset = NULL, /* set in xhci_init_driver() */
.start = xhci_run,
.stop = xhci_stop,
.shutdown = xhci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.map_urb_for_dma = xhci_map_urb_for_dma,
.unmap_urb_for_dma = xhci_unmap_urb_for_dma,
.urb_enqueue = xhci_urb_enqueue,
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
.alloc_streams = xhci_alloc_streams,
.free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
.endpoint_disable = xhci_endpoint_disable,
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.enable_device = xhci_enable_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
/*
* scheduling support
*/
.get_frame_number = xhci_get_frame,
/*
* root hub support
*/
.hub_control = xhci_hub_control,
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
.get_resuming_ports = xhci_get_resuming_ports,
/*
* call back when device connected and addressed
*/
.update_device = xhci_update_device,
.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
.find_raw_port_number = xhci_find_raw_port_number,
.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
};
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over)
{
BUG_ON(!over);
/* Copy the generic table to drv then apply the overrides */
*drv = xhci_hc_driver;
if (over) {
drv->hcd_priv_size += over->extra_priv_size;
if (over->reset)
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
if (over->add_endpoint)
drv->add_endpoint = over->add_endpoint;
if (over->drop_endpoint)
drv->drop_endpoint = over->drop_endpoint;
if (over->check_bandwidth)
drv->check_bandwidth = over->check_bandwidth;
if (over->reset_bandwidth)
drv->reset_bandwidth = over->reset_bandwidth;
if (over->update_hub_device)
drv->update_hub_device = over->update_hub_device;
if (over->hub_control)
drv->hub_control = over->hub_control;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
*/
BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
/* xhci_device_control has eight fields, and also
* embeds one xhci_slot_ctx and 31 xhci_ep_ctx
*/
BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
if (usb_disabled())
return -ENODEV;
xhci_debugfs_create_root();
xhci_dbc_init();
return 0;
}
/*
* If an init function is provided, an exit function must also be provided
* to allow module unload.
*/
static void __exit xhci_hcd_fini(void)
{
xhci_debugfs_remove_root();
xhci_dbc_exit();
}
module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);
| linux-master | drivers/usb/host/xhci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/host/ehci-orion.c
*
* Tzachi Perelstein <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include "ehci.h"
#define rdl(off) readl_relaxed(hcd->regs + (off))
#define wrl(off, val) writel_relaxed((val), hcd->regs + (off))
#define USB_CMD 0x140
#define USB_CMD_RUN BIT(0)
#define USB_CMD_RESET BIT(1)
#define USB_MODE 0x1a8
#define USB_MODE_MASK GENMASK(1, 0)
#define USB_MODE_DEVICE 0x2
#define USB_MODE_HOST 0x3
#define USB_MODE_SDIS BIT(4)
#define USB_CAUSE 0x310
#define USB_MASK 0x314
#define USB_WINDOW_CTRL(i) (0x320 + ((i) << 4))
#define USB_WINDOW_BASE(i) (0x324 + ((i) << 4))
#define USB_IPG 0x360
#define USB_PHY_PWR_CTRL 0x400
#define USB_PHY_TX_CTRL 0x420
#define USB_PHY_RX_CTRL 0x430
#define USB_PHY_IVREF_CTRL 0x440
#define USB_PHY_TST_GRP_CTRL 0x450
#define USB_SBUSCFG 0x90
/* BAWR = BARD = 3 : Align read/write bursts packets larger than 128 bytes */
#define USB_SBUSCFG_BAWR_ALIGN_128B (0x3 << 6)
#define USB_SBUSCFG_BARD_ALIGN_128B (0x3 << 3)
/* AHBBRST = 3 : Align AHB Burst to INCR16 (64 bytes) */
#define USB_SBUSCFG_AHBBRST_INCR16 (0x3 << 0)
#define USB_SBUSCFG_DEF_VAL (USB_SBUSCFG_BAWR_ALIGN_128B \
| USB_SBUSCFG_BARD_ALIGN_128B \
| USB_SBUSCFG_AHBBRST_INCR16)
#define DRIVER_DESC "EHCI orion driver"
#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
struct orion_ehci_hcd {
struct clk *clk;
struct phy *phy;
};
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
* Implement Orion USB controller specification guidelines
*/
static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
{
/* The below GLs are according to the Orion Errata document */
/*
* Clear interrupt cause and mask
*/
wrl(USB_CAUSE, 0);
wrl(USB_MASK, 0);
/*
* Reset controller
*/
wrl(USB_CMD, rdl(USB_CMD) | USB_CMD_RESET);
while (rdl(USB_CMD) & USB_CMD_RESET);
/*
	 * GL# USB-10: Set IPG for non-start-of-frame packets
* Bits[14:8]=0xc
*/
wrl(USB_IPG, (rdl(USB_IPG) & ~0x7f00) | 0xc00);
/*
* GL# USB-9: USB 2.0 Power Control
* BG_VSEL[7:6]=0x1
*/
	wrl(USB_PHY_PWR_CTRL, (rdl(USB_PHY_PWR_CTRL) & ~0xc0) | 0x40);
/*
* GL# USB-1: USB PHY Tx Control - force calibration to '8'
* TXDATA_BLOCK_EN[21]=0x1, EXT_RCAL_EN[13]=0x1, IMP_CAL[6:3]=0x8
*/
wrl(USB_PHY_TX_CTRL, (rdl(USB_PHY_TX_CTRL) & ~0x78) | 0x202040);
/*
* GL# USB-3 GL# USB-9: USB PHY Rx Control
	 * RXDATA_BLOCK_LENGTH[31:30]=0x3, EDGE_DET_SEL[27:26]=0,
* CDR_FASTLOCK_EN[21]=0, DISCON_THRESHOLD[9:8]=0, SQ_THRESH[7:4]=0x1
*/
wrl(USB_PHY_RX_CTRL, (rdl(USB_PHY_RX_CTRL) & ~0xc2003f0) | 0xc0000010);
/*
* GL# USB-3 GL# USB-9: USB PHY IVREF Control
* PLLVDD12[1:0]=0x2, RXVDD[5:4]=0x3, Reserved[19]=0
*/
	wrl(USB_PHY_IVREF_CTRL, (rdl(USB_PHY_IVREF_CTRL) & ~0x80003) | 0x32);
/*
* GL# USB-3 GL# USB-9: USB PHY Test Group Control
* REG_FIFO_SQ_RST[15]=0
*/
wrl(USB_PHY_TST_GRP_CTRL, rdl(USB_PHY_TST_GRP_CTRL) & ~0x8000);
/*
* Stop and reset controller
*/
wrl(USB_CMD, rdl(USB_CMD) & ~USB_CMD_RUN);
wrl(USB_CMD, rdl(USB_CMD) | USB_CMD_RESET);
while (rdl(USB_CMD) & USB_CMD_RESET);
/*
* GL# USB-5 Streaming disable REG_USB_MODE[4]=1
	 * TBD: This needs to be done after each reset!
* GL# USB-4 Setup USB Host mode
*/
wrl(USB_MODE, USB_MODE_SDIS | USB_MODE_HOST);
}
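/*
 * Program the controller's MBUS address decoding windows so USB DMA can
 * reach each DRAM chip select: clear all four windows first, then map
 * one window per chip select.
 */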
static void
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
const struct mbus_dram_target_info *dram)
{
int i;
for (i = 0; i < 4; i++) {
wrl(USB_WINDOW_CTRL(i), 0);
wrl(USB_WINDOW_BASE(i), 0);
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1);
wrl(USB_WINDOW_BASE(i), cs->base);
}
}
static int ehci_orion_drv_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
int ret;
ret = ehci_setup(hcd);
if (ret)
return ret;
/*
	 * For SoCs without hlock, the sbuscfg value needs to be programmed
	 * to guarantee that the AHB master's bursts do not overrun or
	 * underrun the FIFO.
	 *
	 * The sbuscfg register has to be set after the usb controller reset,
	 * otherwise the value would be overridden with 0.
*/
if (of_device_is_compatible(dev->of_node, "marvell,armada-3700-ehci"))
wrl(USB_SBUSCFG, USB_SBUSCFG_DEF_VAL);
return ret;
}
static int __maybe_unused ehci_orion_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
return ehci_suspend(hcd, device_may_wakeup(dev));
}
static int __maybe_unused ehci_orion_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
return ehci_resume(hcd, false);
}
static SIMPLE_DEV_PM_OPS(ehci_orion_pm_ops, ehci_orion_drv_suspend,
ehci_orion_drv_resume);
static const struct ehci_driver_overrides orion_overrides __initconst = {
.extra_priv_size = sizeof(struct orion_ehci_hcd),
.reset = ehci_orion_drv_reset,
};
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
void __iomem *regs;
int irq, err;
enum orion_ehci_phy_ver phy_version;
struct orion_ehci_hcd *priv;
if (usb_disabled())
return -ENODEV;
pr_debug("Initializing Orion-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto err;
}
/*
* Right now device-tree probed devices don't get dma_mask
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
goto err;
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs)) {
err = PTR_ERR(regs);
goto err;
}
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
goto err;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
hcd->has_tt = 1;
priv = hcd_to_orion_priv(hcd);
/*
	 * Not all platforms can gate the clock, so it is not an error if
	 * the clock does not exist.
*/
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(priv->clk)) {
err = clk_prepare_enable(priv->clk);
if (err)
goto err_put_hcd;
}
priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
if (err != -ENOSYS)
goto err_dis_clk;
}
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
*/
if (pdev->dev.of_node)
phy_version = EHCI_PHY_NA;
else
phy_version = pd->phy_version;
switch (phy_version) {
	case EHCI_PHY_NA:	/* don't change USB PHY settings */
break;
case EHCI_PHY_ORION:
orion_usb_phy_v1_setup(hcd);
break;
case EHCI_PHY_DD:
case EHCI_PHY_KW:
default:
dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_dis_clk;
device_wakeup_enable(hcd->self.controller);
return 0;
err_dis_clk:
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
err_put_hcd:
usb_put_hcd(hcd);
err:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
return err;
}
static void ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
usb_remove_hcd(hcd);
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
}
static const struct of_device_id ehci_orion_dt_ids[] = {
{ .compatible = "marvell,orion-ehci", },
{ .compatible = "marvell,armada-3700-ehci", },
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
.remove_new = ehci_orion_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "orion-ehci",
.of_match_table = ehci_orion_dt_ids,
.pm = &ehci_orion_pm_ops,
},
};
static int __init ehci_orion_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
return platform_driver_register(&ehci_orion_driver);
}
module_init(ehci_orion_init);
static void __exit ehci_orion_cleanup(void)
{
platform_driver_unregister(&ehci_orion_driver);
}
module_exit(ehci_orion_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:orion-ehci");
MODULE_AUTHOR("Tzachi Perelstein");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/ehci-orion.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
* (C) Copyright 2002 Hewlett-Packard Company
* (C) Copyright 2006 Sylvain Munaut <[email protected]>
*
* Bus glue for OHCI HC on the of_platform bus
*
* Modified for of_platform bus from ohci-sa1111.c
*
* This file is licenced under the GPL.
*/
#include <linux/signal.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
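/* Initialize the OHCI core and start the controller, stopping it again on failure */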
static int
ohci_ppc_of_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
if ((ret = ohci_init(ohci)) < 0)
return ret;
if ((ret = ohci_run(ohci)) < 0) {
dev_err(hcd->self.controller, "can't start %s\n",
hcd->self.bus_name);
ohci_stop(hcd);
return ret;
}
return 0;
}
static const struct hc_driver ohci_ppc_of_hc_driver = {
.description = hcd_name,
.product_desc = "OF OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_DMA | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.start = ohci_ppc_of_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
static int ohci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct resource res;
int irq;
int rv;
int is_bigendian;
struct device_node *np;
if (usb_disabled())
return -ENODEV;
is_bigendian =
of_device_is_compatible(dn, "ohci-bigendian") ||
of_device_is_compatible(dn, "ohci-be");
dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
rv = of_address_to_resource(dn, 0, &res);
if (rv)
return rv;
hcd = usb_create_hcd(&ohci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
hcd->regs = devm_ioremap_resource(&op->dev, &res);
if (IS_ERR(hcd->regs)) {
rv = PTR_ERR(hcd->regs);
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
__FILE__);
rv = -EBUSY;
goto err_rmr;
}
ohci = hcd_to_ohci(hcd);
if (is_bigendian) {
ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
if (of_device_is_compatible(dn, "fsl,mpc5200-ohci"))
ohci->flags |= OHCI_QUIRK_FRAME_NO;
if (of_device_is_compatible(dn, "mpc5200-ohci"))
ohci->flags |= OHCI_QUIRK_FRAME_NO;
}
ohci_hcd_init(ohci);
rv = usb_add_hcd(hcd, irq, 0);
if (rv == 0) {
device_wakeup_enable(hcd->self.controller);
return 0;
}
/* by now, 440epx is known to show usb_23 erratum */
np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
/* Workaround - At this point ohci_run has executed, the
* controller is running, and everything (the root ports etc.) is
* set up. If the ehci driver is loaded, put the ohci core in
* the suspended state. The ehci driver will bring it out of
* suspended state when / if a non-high speed USB device is
* attached to the USB Host port. If the ehci driver is not
* loaded, do nothing. request_mem_region is used to test if
* the ehci driver is loaded.
*/
if (np != NULL) {
if (!of_address_to_resource(np, 0, &res)) {
if (!request_mem_region(res.start, 0x4, hcd_name)) {
writel_be((readl_be(&ohci->regs->control) |
OHCI_USB_SUSPEND), &ohci->regs->control);
(void) readl_be(&ohci->regs->control);
} else
release_mem_region(res.start, 0x4);
} else
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
of_node_put(np);
}
irq_dispose_mapping(irq);
err_rmr:
usb_put_hcd(hcd);
return rv;
}
static void ohci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
usb_remove_hcd(hcd);
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
}
static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
.name = "usb",
.compatible = "ohci-bigendian",
},
{
.name = "usb",
.compatible = "ohci-be",
},
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
{
.name = "usb",
.compatible = "ohci-littledian",
},
{
.name = "usb",
.compatible = "ohci-le",
},
#endif
{},
};
MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
#if !defined(CONFIG_USB_OHCI_HCD_PPC_OF_BE) && \
!defined(CONFIG_USB_OHCI_HCD_PPC_OF_LE)
#error "No endianness selected for ppc-of-ohci"
#endif
static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
.remove_new = ohci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
.of_match_table = ohci_hcd_ppc_of_match,
},
};
| linux-master | drivers/usb/host/ohci-ppc-of.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
*
* This file is licenced under the GPL.
*/
/*-------------------------------------------------------------------------*/
/*
* OHCI deals with three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... the hcd glue or the
* device driver provides us with dma addresses
*
* There's also "register" data, which is memory mapped.
* No memory seen by this driver (or any HCD) may be paged out.
*/
/*-------------------------------------------------------------------------*/
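/*
 * Editorial aside, not part of the original driver: a minimal sketch of
 * the dma_pool lifecycle that ohci_mem_init() below relies on for the
 * HC-shared TD/ED memory, assuming a hypothetical 16-byte aligned
 * "struct foo":
 *
 *	pool = dma_pool_create("foo", dev, sizeof(struct foo), 16, 0);
 *	foo = dma_pool_zalloc(pool, GFP_ATOMIC, &dma);	// CPU ptr + DMA addr
 *	... hand "dma" to the HC while the CPU uses "foo" ...
 *	dma_pool_free(pool, foo, dma);
 *	dma_pool_destroy(pool);
 */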
static void ohci_hcd_init (struct ohci_hcd *ohci)
{
ohci->next_statechange = jiffies;
spin_lock_init (&ohci->lock);
INIT_LIST_HEAD (&ohci->pending);
INIT_LIST_HEAD(&ohci->eds_in_use);
}
/*-------------------------------------------------------------------------*/
static int ohci_mem_init (struct ohci_hcd *ohci)
{
/*
* HCs with local memory allocate from localmem_pool so there's
* no need to create the below dma pools.
*/
if (ohci_to_hcd(ohci)->localmem_pool)
return 0;
ohci->td_cache = dma_pool_create ("ohci_td",
ohci_to_hcd(ohci)->self.controller,
sizeof (struct td),
32 /* byte alignment */,
0 /* no page-crossing issues */);
if (!ohci->td_cache)
return -ENOMEM;
ohci->ed_cache = dma_pool_create ("ohci_ed",
ohci_to_hcd(ohci)->self.controller,
sizeof (struct ed),
16 /* byte alignment */,
0 /* no page-crossing issues */);
if (!ohci->ed_cache) {
dma_pool_destroy (ohci->td_cache);
return -ENOMEM;
}
return 0;
}
static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
dma_pool_destroy(ohci->td_cache);
ohci->td_cache = NULL;
dma_pool_destroy(ohci->ed_cache);
ohci->ed_cache = NULL;
}
/*-------------------------------------------------------------------------*/
/* ohci "done list" processing needs this mapping */
static inline struct td *
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
{
struct td *td;
td_dma &= TD_MASK;
td = hc->td_hash [TD_HASH_FUNC(td_dma)];
while (td && td->td_dma != td_dma)
td = td->td_hash;
return td;
}
/* TDs ... */
static struct td *
td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct td *td;
struct usb_hcd *hcd = ohci_to_hcd(hc);
if (hcd->localmem_pool)
td = gen_pool_dma_zalloc_align(hcd->localmem_pool,
sizeof(*td), &dma, 32);
else
td = dma_pool_zalloc(hc->td_cache, mem_flags, &dma);
if (td) {
/* in case hc fetches it, make it look dead */
td->hwNextTD = cpu_to_hc32 (hc, dma);
td->td_dma = dma;
/* hashed in td_fill */
}
return td;
}
static void
td_free (struct ohci_hcd *hc, struct td *td)
{
struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];
struct usb_hcd *hcd = ohci_to_hcd(hc);
while (*prev && *prev != td)
prev = &(*prev)->td_hash;
if (*prev)
*prev = td->td_hash;
else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
ohci_dbg (hc, "no hash for td %p\n", td);
if (hcd->localmem_pool)
gen_pool_free(hcd->localmem_pool, (unsigned long)td,
sizeof(*td));
else
dma_pool_free(hc->td_cache, td, td->td_dma);
}
/*-------------------------------------------------------------------------*/
/* EDs ... */
static struct ed *
ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct ed *ed;
struct usb_hcd *hcd = ohci_to_hcd(hc);
if (hcd->localmem_pool)
ed = gen_pool_dma_zalloc_align(hcd->localmem_pool,
sizeof(*ed), &dma, 16);
else
ed = dma_pool_zalloc(hc->ed_cache, mem_flags, &dma);
if (ed) {
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
}
return ed;
}
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
struct usb_hcd *hcd = ohci_to_hcd(hc);
if (hcd->localmem_pool)
gen_pool_free(hcd->localmem_pool, (unsigned long)ed,
sizeof(*ed));
else
dma_pool_free(hc->ed_cache, ed, ed->dma);
}
| linux-master | drivers/usb/host/ohci-mem.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
*
* This file is licenced under the GPL.
*/
#include <linux/irq.h>
#include <linux/slab.h>
static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
int last = urb_priv->length - 1;
if (last >= 0) {
int i;
struct td *td;
for (i = 0; i <= last; i++) {
td = urb_priv->td [i];
if (td)
td_free (hc, td);
}
}
list_del (&urb_priv->pending);
kfree (urb_priv);
}
/*-------------------------------------------------------------------------*/
/*
* URB goes back to driver, and isn't reissued.
* It's completely gone from HC data structures.
* PRECONDITION: ohci lock held, irqs blocked.
*/
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
struct device *dev = ohci_to_hcd(ohci)->self.controller;
struct usb_host_endpoint *ep = urb->ep;
struct urb_priv *urb_priv;
// ASSERT (urb->hcpriv != 0);
restart:
urb_free_priv (ohci, urb->hcpriv);
urb->hcpriv = NULL;
if (likely(status == -EINPROGRESS))
status = 0;
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
usb_amd_quirk_pll_enable();
if (quirk_amdprefetch(ohci))
sb800_prefetch(dev, 0);
}
break;
case PIPE_INTERRUPT:
ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
break;
}
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
spin_lock (&ohci->lock);
/* stop periodic dma if it's not needed */
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
/*
* An isochronous URB that is submitted too late won't have any TDs
* (marked by the fact that the td_cnt value is larger than the
* actual number of TDs). If the next URB on this endpoint is like
* that, give it back now.
*/
if (!list_empty(&ep->urb_list)) {
urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
urb_priv = urb->hcpriv;
if (urb_priv->td_cnt > urb_priv->length) {
status = 0;
goto restart;
}
}
}
/*-------------------------------------------------------------------------*
* ED handling functions
*-------------------------------------------------------------------------*/
/* search for the right schedule branch to use for a periodic ed.
* does some load balancing; returns the branch, or negative errno.
*/
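/*
 * Editorial example, not from the original source: with interval 8 and
 * load 50, the candidate branches are slots 0..7; slot i qualifies only
 * if load[i], load[i+8], load[i+16], ... all stay at or below 900 us
 * (90% of a 1 msec frame, per the USB 1.1 budget) after adding 50.
 * Among the qualifying slots, the least loaded one is returned.
 */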
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
int i, branch = -ENOSPC;
/* iso periods can be huge; iso tds specify frame numbers */
if (interval > NUM_INTS)
interval = NUM_INTS;
/* search for the least loaded schedule branch of that period
* that has enough bandwidth left unreserved.
*/
for (i = 0; i < interval ; i++) {
if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
int j;
/* usb 1.1 says 90% of one frame */
for (j = i; j < NUM_INTS; j += interval) {
if ((ohci->load [j] + load) > 900)
break;
}
if (j < NUM_INTS)
continue;
branch = i;
}
}
return branch;
}
/*-------------------------------------------------------------------------*/
/* both iso and interrupt requests have periods; this routine puts them
* into the schedule tree in the appropriate place. most iso devices use
* 1msec periods, but that's not required.
*/
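/*
 * Editorial illustration (4 slots instead of NUM_INTS): with an
 * interval-4 ED "A" on branch 0, an interval-2 ED "B" on branch 0, and
 * an interval-1 ED "C", the slow-before-fast sorting below produces
 * chains that merge toward the faster EDs:
 *
 *	int_table[0] -> A -> B -> C
 *	int_table[1] ----------> C
 *	int_table[2] ------> B -> C
 *	int_table[3] ----------> C
 */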
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
unsigned i;
ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed **prev = &ohci->periodic [i];
__hc32 *prev_p = &ohci->hcca->int_table [i];
struct ed *here = *prev;
/* sorting each branch by period (slow before fast)
* lets us share the faster parts of the tree.
* (plus maybe: put interrupt eds before iso)
*/
while (here && ed != here) {
if (ed->interval > here->interval)
break;
prev = &here->ed_next;
prev_p = &here->hwNextED;
here = *prev;
}
if (ed != here) {
ed->ed_next = here;
if (here)
ed->hwNextED = *prev_p;
wmb ();
*prev = ed;
*prev_p = cpu_to_hc32(ohci, ed->dma);
wmb();
}
ohci->load [i] += ed->load;
}
ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
/* link an ed into one of the HC chains */
static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int branch;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
wmb ();
/* we care about rm_list when setting CLE/BLE in case the HC was at
* work on some TD when CLE/BLE was turned off, and isn't quiesced
* yet. finish_unlinks() restarts as needed, at some upcoming INTR_SF.
*
* control and bulk EDs are doubly linked (ed_next, ed_prev), but
* periodic ones are singly linked (ed_next). that's because the
* periodic schedule encodes a tree like figure 3-5 in the ohci
* spec: each qh can have several "previous" nodes, and the tree
* doesn't have unused/idle descriptors.
*/
switch (ed->type) {
case PIPE_CONTROL:
if (ohci->ed_controltail == NULL) {
WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
ohci_writel (ohci, ed->dma,
&ohci->regs->ed_controlhead);
} else {
ohci->ed_controltail->ed_next = ed;
ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
ed->dma);
}
ed->ed_prev = ohci->ed_controltail;
if (!ohci->ed_controltail && !ohci->ed_rm_list) {
wmb();
ohci->hc_control |= OHCI_CTRL_CLE;
ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
ohci_writel (ohci, ohci->hc_control,
&ohci->regs->control);
}
ohci->ed_controltail = ed;
break;
case PIPE_BULK:
if (ohci->ed_bulktail == NULL) {
WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
} else {
ohci->ed_bulktail->ed_next = ed;
ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
ed->dma);
}
ed->ed_prev = ohci->ed_bulktail;
if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
wmb();
ohci->hc_control |= OHCI_CTRL_BLE;
ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
ohci_writel (ohci, ohci->hc_control,
&ohci->regs->control);
}
ohci->ed_bulktail = ed;
break;
// case PIPE_INTERRUPT:
// case PIPE_ISOCHRONOUS:
default:
branch = balance (ohci, ed->interval, ed->load);
if (branch < 0) {
ohci_dbg (ohci,
"ERR %d, interval %d msecs, load %d\n",
branch, ed->interval, ed->load);
// FIXME if there are TDs queued, fail them!
return branch;
}
ed->branch = branch;
periodic_link (ohci, ed);
}
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
ed->state = ED_OPER;
return 0;
}
/*-------------------------------------------------------------------------*/
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
int i;
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed *temp;
struct ed **prev = &ohci->periodic [i];
__hc32 *prev_p = &ohci->hcca->int_table [i];
while (*prev && (temp = *prev) != ed) {
prev_p = &temp->hwNextED;
prev = &temp->ed_next;
}
if (*prev) {
*prev_p = ed->hwNextED;
*prev = ed->ed_next;
}
ohci->load [i] -= ed->load;
}
ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
/* unlink an ed from one of the HC chains.
* just the link to the ed is unlinked.
* the link from the ed still points to another operational ed or 0
* so the HC can eventually finish the processing of the unlinked ed
* (assuming it already started that, which needn't be true).
*
* ED_UNLINK is a transient state: the HC may still see this ED, but soon
* it won't. ED_SKIP means the HC will finish its current transaction,
* but won't start anything new. The TD queue may still grow; device
* drivers don't know about this HCD-internal state.
*
* When the HC can't see the ED, something changes ED_UNLINK to one of:
*
* - ED_OPER: when there's any request queued, the ED gets rescheduled
* immediately. HC should be working on them.
*
* - ED_IDLE: when there's no TD queue or the HC isn't running.
*
* When finish_unlinks() runs later, after SOF interrupt, it will often
* complete one or more URB unlinks before making that state change.
*/
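/*
 * Editorial summary of the state flow described above:
 *
 *	ED_OPER --start_ed_unlink()--> ED_UNLINK --finish_unlinks()-->
 *		ED_OPER (TDs still queued and the HC is running), or
 *		ED_IDLE (no TDs queued, or the HC is stopped)
 */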
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
wmb ();
ed->state = ED_UNLINK;
/* To deschedule something from the control or bulk list, just
* clear CLE/BLE and wait. There's no safe way to scrub out list
* head/current registers until later, and "later" isn't very
* tightly specified. Figure 6-5 and Section 6.4.2.2 show how
* the HC is reading the ED queues (while we modify them).
*
* For now, ed_schedule() is "later". It might be good paranoia
* to scrub those registers in finish_unlinks(), in case of bugs
* that make the HC try to use them.
*/
switch (ed->type) {
case PIPE_CONTROL:
/* remove ED from the HC's list: */
if (ed->ed_prev == NULL) {
if (!ed->hwNextED) {
ohci->hc_control &= ~OHCI_CTRL_CLE;
ohci_writel (ohci, ohci->hc_control,
&ohci->regs->control);
// an ohci_readl() later syncs CLE with the HC
} else
ohci_writel (ohci,
hc32_to_cpup (ohci, &ed->hwNextED),
&ohci->regs->ed_controlhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
/* remove ED from the HCD's list: */
if (ohci->ed_controltail == ed) {
ohci->ed_controltail = ed->ed_prev;
if (ohci->ed_controltail)
ohci->ed_controltail->ed_next = NULL;
} else if (ed->ed_next) {
ed->ed_next->ed_prev = ed->ed_prev;
}
break;
case PIPE_BULK:
/* remove ED from the HC's list: */
if (ed->ed_prev == NULL) {
if (!ed->hwNextED) {
ohci->hc_control &= ~OHCI_CTRL_BLE;
ohci_writel (ohci, ohci->hc_control,
&ohci->regs->control);
// an ohci_readl() later syncs BLE with the HC
} else
ohci_writel (ohci,
hc32_to_cpup (ohci, &ed->hwNextED),
&ohci->regs->ed_bulkhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
/* remove ED from the HCD's list: */
if (ohci->ed_bulktail == ed) {
ohci->ed_bulktail = ed->ed_prev;
if (ohci->ed_bulktail)
ohci->ed_bulktail->ed_next = NULL;
} else if (ed->ed_next) {
ed->ed_next->ed_prev = ed->ed_prev;
}
break;
// case PIPE_INTERRUPT:
// case PIPE_ISOCHRONOUS:
default:
periodic_unlink (ohci, ed);
break;
}
}
/*-------------------------------------------------------------------------*/
/* get and maybe (re)init an endpoint. init _should_ be done only as part
* of enumeration, usb_set_configuration() or usb_set_interface().
*/
static struct ed *ed_get (
struct ohci_hcd *ohci,
struct usb_host_endpoint *ep,
struct usb_device *udev,
unsigned int pipe,
int interval
) {
struct ed *ed;
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
ed = ep->hcpriv;
if (!ed) {
struct td *td;
int is_out;
u32 info;
ed = ed_alloc (ohci, GFP_ATOMIC);
if (!ed) {
/* out of memory */
goto done;
}
/* dummy td; end of td list for ed */
td = td_alloc (ohci, GFP_ATOMIC);
if (!td) {
/* out of memory */
ed_free (ohci, ed);
ed = NULL;
goto done;
}
ed->dummy = td;
ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
ed->state = ED_IDLE;
is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
/* FIXME usbcore changes dev->devnum before SET_ADDRESS
* succeeds ... otherwise we wouldn't need "pipe".
*/
info = usb_pipedevice (pipe);
ed->type = usb_pipetype(pipe);
info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
info |= usb_endpoint_maxp(&ep->desc) << 16;
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* only control transfers store pids in tds */
if (ed->type != PIPE_CONTROL) {
info |= is_out ? ED_OUT : ED_IN;
if (ed->type != PIPE_BULK) {
/* periodic transfers... */
if (ed->type == PIPE_ISOCHRONOUS)
info |= ED_ISO;
else if (interval > 32) /* iso can be bigger */
interval = 32;
ed->interval = interval;
ed->load = usb_calc_bus_time (
udev->speed, !is_out,
ed->type == PIPE_ISOCHRONOUS,
usb_endpoint_maxp(&ep->desc))
/ 1000;
}
}
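/*
 * Editorial example: for a full-speed device at address 3, bulk IN
 * endpoint 1 with a 64-byte maxpacket, the dword built above is
 * 3 | (1 << 7) | ED_IN | (64 << 16), matching the OHCI ED dword 0
 * layout (FA in bits 0-6, EN in 7-10, direction, speed, MPS in 16-26).
 */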
ed->hwINFO = cpu_to_hc32(ohci, info);
ep->hcpriv = ed;
}
done:
spin_unlock_irqrestore (&ohci->lock, flags);
return ed;
}
/*-------------------------------------------------------------------------*/
/* request unlinking of an endpoint from an operational HC.
* put the ep on the rm_list
* real work is done at the next start frame (SF) hardware interrupt
* caller guarantees HCD is running, so hardware access is safe,
* and that ed->state is ED_OPER
*/
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
ed_deschedule (ohci, ed);
/* rm_list is just singly linked, for simplicity */
ed->ed_next = ohci->ed_rm_list;
ed->ed_prev = NULL;
ohci->ed_rm_list = ed;
/* enable SOF interrupt */
ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
// flush those writes, and get latest HCCA contents
(void) ohci_readl (ohci, &ohci->regs->control);
/* SF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
* behave. frame_no wraps every 2^16 msec, and changes right before
* SF is triggered.
*/
ed->tick = ohci_frame_no(ohci) + 1;
}
/*-------------------------------------------------------------------------*
* TD handling functions
*-------------------------------------------------------------------------*/
/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill (struct ohci_hcd *ohci, u32 info,
dma_addr_t data, int len,
struct urb *urb, int index)
{
struct td *td, *td_pt;
struct urb_priv *urb_priv = urb->hcpriv;
int is_iso = info & TD_ISO;
int hash;
// ASSERT (index < urb_priv->length);
/* aim for only one interrupt per urb. mostly applies to control
* and iso; other urbs rarely need more than one TD per urb.
* this way, only final tds (or ones with an error) cause IRQs.
* at least immediately; use DI=6 in case any control request is
* tempted to die part way through. (and to force the hc to flush
* its donelist soonish, even on unlink paths.)
*
* NOTE: could delay interrupts even for the last TD, and get fewer
* interrupts ... increasing per-urb latency by sharing interrupts.
* Drivers that queue bulk urbs may request that behavior.
*/
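/*
 * Editorial example: a 3-TD bulk URB is thus filled DI=6, DI=6, DI=0,
 * so only its final TD (or one that errors) raises a prompt done-queue
 * interrupt; the HC may defer the others for up to six frames.
 */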
if (index != (urb_priv->length - 1)
|| (urb->transfer_flags & URB_NO_INTERRUPT))
info |= TD_DI_SET (6);
/* use this td as the next dummy */
td_pt = urb_priv->td [index];
/* fill the old dummy TD */
td = urb_priv->td [index] = urb_priv->ed->dummy;
urb_priv->ed->dummy = td_pt;
td->ed = urb_priv->ed;
td->next_dl_td = NULL;
td->index = index;
td->urb = urb;
td->data_dma = data;
if (!len)
data = 0;
td->hwINFO = cpu_to_hc32 (ohci, info);
if (is_iso) {
td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
(data & 0x0FFF) | 0xE000);
} else {
td->hwCBP = cpu_to_hc32 (ohci, data);
}
if (data)
td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
else
td->hwBE = 0;
td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);
/* append to queue */
list_add_tail (&td->td_list, &td->ed->td_list);
/* hash it for later reverse mapping */
hash = TD_HASH_FUNC (td->td_dma);
td->td_hash = ohci->td_hash [hash];
ohci->td_hash [hash] = td;
/* HC might read the TD (or cachelines) right away ... */
wmb ();
td->ed->hwTailP = td->hwNextTD;
}
/*-------------------------------------------------------------------------*/
/* Prepare all TDs of a transfer, and queue them onto the ED.
* Caller guarantees HC is active.
* Usually the ED is already on the schedule, so TDs might be
* processed as soon as they're queued.
*/
static void td_submit_urb (
struct ohci_hcd *ohci,
struct urb *urb
) {
struct urb_priv *urb_priv = urb->hcpriv;
struct device *dev = ohci_to_hcd(ohci)->self.controller;
dma_addr_t data;
int data_len = urb->transfer_buffer_length;
int cnt = 0;
u32 info = 0;
int is_out = usb_pipeout (urb->pipe);
int periodic = 0;
int i, this_sg_len, n;
struct scatterlist *sg;
/* OHCI handles the bulk/interrupt data toggles itself. We just
* use the device toggle bits for resetting, and rely on the fact
* that resetting toggle is meaningless if the endpoint is active.
*/
if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
is_out, 1);
urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
}
list_add (&urb_priv->pending, &ohci->pending);
i = urb->num_mapped_sgs;
if (data_len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
/*
* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
this_sg_len = min_t(int, sg_dma_len(sg), data_len);
} else {
sg = NULL;
if (data_len)
data = urb->transfer_dma;
else
data = 0;
this_sg_len = data_len;
}
/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
* using TD_CC_GET, as well as by seeing them on the done list.
* (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
*/
switch (urb_priv->ed->type) {
/* Bulk and interrupt are identical except for where in the schedule
* their EDs live.
*/
case PIPE_INTERRUPT:
/* ... and periodic urbs have extra accounting */
periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
fallthrough;
case PIPE_BULK:
info = is_out
? TD_T_TOGGLE | TD_CC | TD_DP_OUT
: TD_T_TOGGLE | TD_CC | TD_DP_IN;
/* TDs _could_ transfer up to 8K each */
for (;;) {
n = min(this_sg_len, 4096);
/* maybe avoid ED halt on final TD short read */
if (n >= data_len || (i == 1 && n >= this_sg_len)) {
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
info |= TD_R;
}
td_fill(ohci, info, data, n, urb, cnt);
this_sg_len -= n;
data_len -= n;
data += n;
cnt++;
if (this_sg_len <= 0) {
if (--i <= 0 || data_len <= 0)
break;
sg = sg_next(sg);
data = sg_dma_address(sg);
this_sg_len = min_t(int, sg_dma_len(sg),
data_len);
}
}
if ((urb->transfer_flags & URB_ZERO_PACKET)
&& cnt < urb_priv->length) {
td_fill (ohci, info, 0, 0, urb, cnt);
cnt++;
}
/* maybe kickstart bulk list */
if (urb_priv->ed->type == PIPE_BULK) {
wmb ();
ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
}
break;
/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
* any DATA phase works normally, and the STATUS ack is special.
*/
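/*
 * Editorial example: an 18-byte GET_DESCRIPTOR IN request becomes three
 * TDs below: SETUP (DATA0, 8 bytes), an IN DATA stage (DATA1, 18 bytes,
 * with TD_R set so a short read doesn't halt the ED), and a zero-length
 * OUT STATUS ack (DATA1).
 */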
case PIPE_CONTROL:
info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
if (data_len > 0) {
info = TD_CC | TD_R | TD_T_DATA1;
info |= is_out ? TD_DP_OUT : TD_DP_IN;
/* NOTE: mishandles transfers >8K, some >4K */
td_fill (ohci, info, data, data_len, urb, cnt++);
}
info = (is_out || data_len == 0)
? TD_CC | TD_DP_IN | TD_T_DATA1
: TD_CC | TD_DP_OUT | TD_T_DATA1;
td_fill (ohci, info, data, 0, urb, cnt++);
/* maybe kickstart control list */
wmb ();
ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
break;
/* ISO has no retransmit, so no toggle; and it uses special TDs.
* Each TD could handle multiple consecutive frames (interval 1);
* we could often reduce the number of TDs here.
*/
case PIPE_ISOCHRONOUS:
for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
cnt++) {
int frame = urb->start_frame;
// FIXME scheduling should handle frame counter
// roll-around ... exotic case (and OHCI has
// a 2^16 iso range, vs other HCs max of 2^10)
frame += cnt * urb->interval;
frame &= 0xffff;
td_fill (ohci, TD_CC | TD_ISO | frame,
data + urb->iso_frame_desc [cnt].offset,
urb->iso_frame_desc [cnt].length, urb, cnt);
}
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
usb_amd_quirk_pll_disable();
if (quirk_amdprefetch(ohci))
sb800_prefetch(dev, 1);
}
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
break;
}
/* start periodic dma if needed */
if (periodic) {
wmb ();
ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
// ASSERT (urb_priv->length == cnt);
}
/*-------------------------------------------------------------------------*
* Done List handling functions
*-------------------------------------------------------------------------*/
/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
int cc = 0;
int status = -EINPROGRESS;
list_del (&td->td_list);
/* ISO ... drivers see per-TD length/status */
if (tdINFO & TD_ISO) {
u16 tdPSW = ohci_hwPSW(ohci, td, 0);
int dlen = 0;
/* NOTE: assumes FC in tdINFO == 0, and that
* only the first of 0..MAXPSW psws is used.
*/
cc = (tdPSW >> 12) & 0xF;
if (tdINFO & TD_CC) /* hc didn't touch? */
return status;
if (usb_pipeout (urb->pipe))
dlen = urb->iso_frame_desc [td->index].length;
else {
/* short reads are always OK for ISO */
if (cc == TD_DATAUNDERRUN)
cc = TD_CC_NOERROR;
dlen = tdPSW & 0x3ff;
}
urb->actual_length += dlen;
urb->iso_frame_desc [td->index].actual_length = dlen;
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != TD_CC_NOERROR)
ohci_dbg(ohci,
"urb %p iso td %p (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
/* BULK, INT, CONTROL ... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
* might not be reported as errors.
*/
} else {
int type = usb_pipetype (urb->pipe);
u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);
cc = TD_CC_GET (tdINFO);
/* update packet status if needed (short is normally ok) */
if (cc == TD_DATAUNDERRUN
&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
cc = TD_CC_NOERROR;
if (cc != TD_CC_NOERROR && cc < 0x0E)
status = cc_to_error[cc];
/* count all non-empty packets except control SETUP packet */
if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
if (td->hwCBP == 0)
urb->actual_length += tdBE - td->data_dma + 1;
else
urb->actual_length +=
hc32_to_cpup (ohci, &td->hwCBP)
- td->data_dma;
}
if (cc != TD_CC_NOERROR && cc < 0x0E)
ohci_dbg(ohci,
"urb %p td %p (%d) cc %d, len=%d/%d\n",
urb, td, 1 + td->index, cc,
urb->actual_length,
urb->transfer_buffer_length);
}
return status;
}
/*-------------------------------------------------------------------------*/
static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
struct urb *urb = td->urb;
urb_priv_t *urb_priv = urb->hcpriv;
struct ed *ed = td->ed;
struct list_head *tmp = td->td_list.next;
__hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
/* clear ed halt; this is the td that caused it, but keep it inactive
* until its urb->complete() has a chance to clean up.
*/
ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
wmb ();
ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
/* Get rid of all later tds from this urb. We don't have
* to be careful: no errors and nothing was transferred.
* Also patch the ed so it looks as if those tds completed normally.
*/
while (tmp != &ed->td_list) {
struct td *next;
next = list_entry (tmp, struct td, td_list);
tmp = next->td_list.next;
if (next->urb != urb)
break;
/* NOTE: if multi-td control DATA segments get supported,
* this urb had one of them, this td wasn't the last td
* in that segment (TD_R clear), this ed halted because
* of a short read, _and_ URB_SHORT_NOT_OK is clear ...
* then we need to leave the control STATUS packet queued
* and clear ED_SKIP.
*/
list_del(&next->td_list);
urb_priv->td_cnt++;
ed->hwHeadP = next->hwNextTD | toggle;
}
/* help for troubleshooting: report anything that
* looks odd ... that doesn't include protocol stalls
* (or maybe some other things)
*/
switch (cc) {
case TD_DATAUNDERRUN:
if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
break;
fallthrough;
case TD_CC_STALL:
if (usb_pipecontrol (urb->pipe))
break;
fallthrough;
default:
ohci_dbg (ohci,
"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
hc32_to_cpu (ohci, td->hwINFO),
cc, cc_to_error [cc]);
}
}
/* Add a TD to the done list */
static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
{
struct td *td2, *td_prev;
struct ed *ed;
if (td->next_dl_td)
return; /* Already on the list */
/* Add all the TDs going back until we reach one that's on the list */
ed = td->ed;
td2 = td_prev = td;
list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
if (td2->next_dl_td)
break;
td2->next_dl_td = td_prev;
td_prev = td2;
}
if (ohci->dl_end)
ohci->dl_end->next_dl_td = td_prev;
else
ohci->dl_start = td_prev;
/*
* Make td->next_dl_td point to td itself, to mark the fact
* that td is on the done list.
*/
ohci->dl_end = td->next_dl_td = td;
/* Did we just add the latest pending TD? */
td2 = ed->pending_td;
if (td2 && td2->next_dl_td)
ed->pending_td = NULL;
}
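/*
 * Editorial note: a TD is "on the done list" exactly when next_dl_td is
 * non-NULL; the tail TD points at itself so that even the last entry
 * passes the membership test at the top of add_to_done_list().
 */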
/* Get the entries on the hardware done queue and put them on our list */
static void update_done_list(struct ohci_hcd *ohci)
{
u32 td_dma;
struct td *td = NULL;
td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
ohci->hcca->done_head = 0;
wmb();
/* get TD from hc's singly linked list, and
* add to ours. ed->td_list changes later.
*/
while (td_dma) {
int cc;
td = dma_to_td (ohci, td_dma);
if (!td) {
ohci_err (ohci, "bad entry %8x\n", td_dma);
break;
}
td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));
/* Non-iso endpoints can halt on error; un-halt,
* and dequeue any other TDs from this urb.
* No other TD could have caused the halt.
*/
if (cc != TD_CC_NOERROR
&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
ed_halted(ohci, td, cc);
td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
add_to_done_list(ohci, td);
}
}
/*-------------------------------------------------------------------------*/
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void finish_unlinks(struct ohci_hcd *ohci)
{
unsigned tick = ohci_frame_no(ohci);
struct ed *ed, **last;
rescan_all:
for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
struct list_head *entry, *tmp;
int completed, modified;
__hc32 *prev;
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
tick_before(tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
continue;
}
if (!list_empty(&ed->td_list)) {
struct td *td;
u32 head;
td = list_first_entry(&ed->td_list, struct td, td_list);
/* INTR_WDH may need to clean up first */
head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
if (td->td_dma != head &&
ohci->rh_state == OHCI_RH_RUNNING)
goto skip_ed;
/* Don't mess up anything already on the done list */
if (td->next_dl_td)
goto skip_ed;
}
/* ED's now officially unlinked, hc doesn't see */
ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
ed->hwNextED = 0;
wmb();
ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
*last = ed->ed_next;
ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
* we call a completion since it might have unlinked
* another (earlier) urb
*
* When we get here, the HC doesn't see this ed. But it
* must not be rescheduled until all completed URBs have
* been given back to the driver.
*/
rescan_this:
completed = 0;
prev = &ed->hwHeadP;
list_for_each_safe (entry, tmp, &ed->td_list) {
struct td *td;
struct urb *urb;
urb_priv_t *urb_priv;
__hc32 savebits;
u32 tdINFO;
td = list_entry (entry, struct td, td_list);
urb = td->urb;
urb_priv = td->urb->hcpriv;
if (!urb->unlinked) {
prev = &td->hwNextTD;
continue;
}
/* patch pointer hc uses */
savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
*prev = td->hwNextTD | savebits;
/* If this was unlinked, the TD may not have been
* retired ... so manually save the data toggle.
* The controller ignores the value we save for
* control and ISO endpoints.
*/
tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
if ((tdINFO & TD_T) == TD_T_DATA0)
ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
else if ((tdINFO & TD_T) == TD_T_DATA1)
ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
/* HC may have partly processed this TD */
td_done (ohci, urb, td);
urb_priv->td_cnt++;
/* if URB is done, clean up */
if (urb_priv->td_cnt >= urb_priv->length) {
modified = completed = 1;
finish_urb(ohci, urb, 0);
}
}
if (completed && !list_empty (&ed->td_list))
goto rescan_this;
/*
* If no TDs are queued, ED is now idle.
* Otherwise, if the HC is running, reschedule.
* If the HC isn't running, add ED back to the
* start of the list for later processing.
*/
if (list_empty(&ed->td_list)) {
ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
ed_schedule(ohci, ed);
} else {
ed->ed_next = ohci->ed_rm_list;
ohci->ed_rm_list = ed;
/* Don't loop on the same ED */
if (last == &ohci->ed_rm_list)
last = &ed->ed_next;
}
if (modified)
goto rescan_all;
}
/* maybe reenable control and bulk lists */
if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
command |= OHCI_CLF;
if (quirk_zfmicro(ohci))
mdelay(1);
if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
control |= OHCI_CTRL_CLE;
ohci_writel (ohci, 0,
&ohci->regs->ed_controlcurrent);
}
}
if (ohci->ed_bulktail) {
command |= OHCI_BLF;
if (quirk_zfmicro(ohci))
mdelay(1);
if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
control |= OHCI_CTRL_BLE;
ohci_writel (ohci, 0,
&ohci->regs->ed_bulkcurrent);
}
}
/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
if (control) {
ohci->hc_control |= control;
if (quirk_zfmicro(ohci))
mdelay(1);
ohci_writel (ohci, ohci->hc_control,
&ohci->regs->control);
}
if (command) {
if (quirk_zfmicro(ohci))
mdelay(1);
ohci_writel (ohci, command, &ohci->regs->cmdstatus);
}
}
}
/*-------------------------------------------------------------------------*/
/* Take back a TD from the host controller */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
struct urb *urb = td->urb;
urb_priv_t *urb_priv = urb->hcpriv;
struct ed *ed = td->ed;
int status;
/* update URB's length and status from TD */
status = td_done(ohci, urb, td);
urb_priv->td_cnt++;
/* If all this urb's TDs are done, call complete() */
if (urb_priv->td_cnt >= urb_priv->length)
finish_urb(ohci, urb, status);
/* clean schedule: unlink EDs that are no longer busy */
if (list_empty(&ed->td_list)) {
if (ed->state == ED_OPER)
start_ed_unlink(ohci, ed);
/* ... reenabling halted EDs only after fault cleanup */
} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
== cpu_to_hc32(ohci, ED_SKIP)) {
td = list_entry(ed->td_list.next, struct td, td_list);
if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
/* ... hc may need waking-up */
switch (ed->type) {
case PIPE_CONTROL:
ohci_writel(ohci, OHCI_CLF,
&ohci->regs->cmdstatus);
break;
case PIPE_BULK:
ohci_writel(ohci, OHCI_BLF,
&ohci->regs->cmdstatus);
break;
}
}
}
}
/*
* Process normal completions (error or success) and clean the schedules.
*
* This is the main path for handing urbs back to drivers. The only other
* normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
* instead of scanning the (re-reversed) donelist as this does.
*/
static void process_done_list(struct ohci_hcd *ohci)
{
struct td *td;
while (ohci->dl_start) {
td = ohci->dl_start;
if (td == ohci->dl_end)
ohci->dl_start = ohci->dl_end = NULL;
else
ohci->dl_start = td->next_dl_td;
takeback_td(ohci, td);
}
}
/*
* TD takeback and URB giveback must be single-threaded.
* This routine takes care of it all.
*/
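/*
 * Editorial sketch of the pattern below: "working" is a reentrancy
 * latch and "restart_work" a deferred wakeup. A nested caller merely
 * flags a restart, and the outermost invocation keeps looping until no
 * new work arrived while it ran, keeping givebacks single-threaded.
 */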
static void ohci_work(struct ohci_hcd *ohci)
{
if (ohci->working) {
ohci->restart_work = 1;
return;
}
ohci->working = 1;
restart:
process_done_list(ohci);
if (ohci->ed_rm_list)
finish_unlinks(ohci);
if (ohci->restart_work) {
ohci->restart_work = 0;
goto restart;
}
ohci->working = 0;
}
| linux-master | drivers/usb/host/ohci-q.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
* Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"
void fhci_start_sof_timer(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
/* clear frame_n */
out_be16(&fhci->pram->frame_num, 0);
out_be16(&fhci->regs->usb_ussft, 0);
setbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
fhci_dbg(fhci, "<- %s\n", __func__);
}
void fhci_stop_sof_timer(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
clrbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
gtm_stop_timer16(fhci->timer);
fhci_dbg(fhci, "<- %s\n", __func__);
}
u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
{
return be16_to_cpu(in_be16(&usb->fhci->regs->usb_ussft) / 12);
}
/* initialize the endpoint zero */
static u32 endpoint_zero_init(struct fhci_usb *usb,
enum fhci_mem_alloc data_mem,
u32 ring_len)
{
u32 rc;
rc = fhci_create_ep(usb, data_mem, ring_len);
if (rc)
return rc;
/* initialize endpoint registers */
fhci_init_ep_registers(usb, usb->ep0, data_mem);
return 0;
}
/* enable the USB interrupts */
void fhci_usb_enable_interrupt(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
if (usb->intr_nesting_cnt == 1) {
/* initialize the USB interrupt */
enable_irq(fhci_to_hcd(fhci)->irq);
/* initialize the event register and mask register */
out_be16(&usb->fhci->regs->usb_usber, 0xffff);
out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* enable the timer interrupts */
enable_irq(fhci->timer->irq);
} else if (usb->intr_nesting_cnt > 1)
fhci_info(fhci, "unbalanced USB interrupts nesting\n");
usb->intr_nesting_cnt--;
}
/* disable the usb interrupt */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
if (usb->intr_nesting_cnt == 0) {
/* disable the timer interrupt */
disable_irq_nosync(fhci->timer->irq);
/* disable the usb interrupt */
disable_irq_nosync(fhci_to_hcd(fhci)->irq);
out_be16(&usb->fhci->regs->usb_usbmr, 0);
}
usb->intr_nesting_cnt++;
}
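/*
 * Editorial note: intr_nesting_cnt lets disable/enable pairs nest like
 * local_irq_save()/local_irq_restore(); only the outermost disable
 * masks the USB and timer interrupts, and only the matching outermost
 * enable unmasks them again.
 */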
/* enable the USB controller */
static u32 fhci_usb_enable(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
out_be16(&usb->fhci->regs->usb_usber, 0xffff);
out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
mdelay(100);
return 0;
}
/* disable the USB controller */
static u32 fhci_usb_disable(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
fhci_usb_disable_interrupt(usb);
fhci_port_disable(fhci);
/* disable the usb controller */
if (usb->port_status == FHCI_PORT_FULL ||
usb->port_status == FHCI_PORT_LOW)
fhci_device_disconnected_interrupt(fhci);
clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
return 0;
}
/* check the bus state by polling the QE bit on the IO ports */
int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
{
u8 bits = 0;
/* check USBOE; if transmitting, exit */
if (!gpiod_get_value(fhci->gpiods[GPIO_USBOE]))
return -1;
/* check USBRP */
if (gpiod_get_value(fhci->gpiods[GPIO_USBRP]))
bits |= 0x2;
/* check USBRN */
if (gpiod_get_value(fhci->gpiods[GPIO_USBRN]))
bits |= 0x1;
return bits;
}
static void fhci_mem_free(struct fhci_hcd *fhci)
{
struct ed *ed;
struct ed *next_ed;
struct td *td;
struct td *next_td;
list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
list_del(&ed->node);
kfree(ed);
}
list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
list_del(&td->node);
kfree(td);
}
kfree(fhci->vroot_hub);
fhci->vroot_hub = NULL;
kfree(fhci->hc_list);
fhci->hc_list = NULL;
}
static int fhci_mem_init(struct fhci_hcd *fhci)
{
int i;
fhci->hc_list = kzalloc(sizeof(*fhci->hc_list), GFP_KERNEL);
if (!fhci->hc_list)
goto err;
INIT_LIST_HEAD(&fhci->hc_list->ctrl_list);
INIT_LIST_HEAD(&fhci->hc_list->bulk_list);
INIT_LIST_HEAD(&fhci->hc_list->iso_list);
INIT_LIST_HEAD(&fhci->hc_list->intr_list);
INIT_LIST_HEAD(&fhci->hc_list->done_list);
fhci->vroot_hub = kzalloc(sizeof(*fhci->vroot_hub), GFP_KERNEL);
if (!fhci->vroot_hub)
goto err;
INIT_LIST_HEAD(&fhci->empty_eds);
INIT_LIST_HEAD(&fhci->empty_tds);
/* initialize the tasklet that processes the done list */
fhci_tasklet.data = (unsigned long)fhci;
fhci->process_done_task = &fhci_tasklet;
for (i = 0; i < MAX_TDS; i++) {
struct td *td;
td = kmalloc(sizeof(*td), GFP_KERNEL);
if (!td)
goto err;
fhci_recycle_empty_td(fhci, td);
}
for (i = 0; i < MAX_EDS; i++) {
struct ed *ed;
ed = kmalloc(sizeof(*ed), GFP_KERNEL);
if (!ed)
goto err;
fhci_recycle_empty_ed(fhci, ed);
}
fhci->active_urbs = 0;
return 0;
err:
fhci_mem_free(fhci);
return -ENOMEM;
}
/* destroy the fhci_usb structure */
static void fhci_usb_free(void *lld)
{
struct fhci_usb *usb = lld;
struct fhci_hcd *fhci;
if (usb) {
fhci = usb->fhci;
fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
fhci_ep0_free(usb);
kfree(usb->actual_frame);
kfree(usb);
}
}
/* initialize the USB */
static int fhci_usb_init(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
memset_io(usb->fhci->pram, 0, FHCI_PRAM_SIZE);
usb->port_status = FHCI_PORT_DISABLED;
usb->max_frame_usage = FRAME_TIME_USAGE;
usb->sw_transaction_time = SW_FIX_TIME_BETWEEN_TRANSACTION;
usb->actual_frame = kzalloc(sizeof(*usb->actual_frame), GFP_KERNEL);
if (!usb->actual_frame) {
fhci_usb_free(usb);
return -ENOMEM;
}
INIT_LIST_HEAD(&usb->actual_frame->tds_list);
/* initializing registers on chip, clear frame number */
out_be16(&fhci->pram->frame_num, 0);
/* clear rx state */
out_be32(&fhci->pram->rx_state, 0);
/* set mask register */
usb->saved_msk = (USB_E_TXB_MASK |
USB_E_TXE1_MASK |
USB_E_IDLE_MASK |
USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);
out_8(&usb->fhci->regs->usb_usmod, USB_MODE_HOST | USB_MODE_EN);
/* clearing the mask register */
out_be16(&usb->fhci->regs->usb_usbmr, 0);
/* initializing the event register */
out_be16(&usb->fhci->regs->usb_usber, 0xffff);
if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
fhci_usb_free(usb);
return -EINVAL;
}
return 0;
}
/* initialize the fhci_usb struct and the corresponding data structures */
static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
{
struct fhci_usb *usb;
/* allocate memory for SCC data structure */
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
return NULL;
usb->fhci = fhci;
usb->hc_list = fhci->hc_list;
usb->vroot_hub = fhci->vroot_hub;
usb->transfer_confirm = fhci_transfer_confirm_callback;
return usb;
}
static int fhci_start(struct usb_hcd *hcd)
{
int ret;
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
ret = fhci_mem_init(fhci);
if (ret) {
fhci_err(fhci, "failed to allocate memory\n");
goto err;
}
fhci->usb_lld = fhci_create_lld(fhci);
if (!fhci->usb_lld) {
fhci_err(fhci, "low level driver config failed\n");
ret = -ENOMEM;
goto err;
}
ret = fhci_usb_init(fhci);
if (ret) {
fhci_err(fhci, "low level driver initialize failed\n");
goto err;
}
spin_lock_init(&fhci->lock);
/* connect the virtual root hub */
fhci->vroot_hub->dev_num = 1; /* this field may need fixing */
fhci->vroot_hub->hub.wHubStatus = 0;
fhci->vroot_hub->hub.wHubChange = 0;
fhci->vroot_hub->port.wPortStatus = 0;
fhci->vroot_hub->port.wPortChange = 0;
hcd->state = HC_STATE_RUNNING;
/*
* From here on, hub_wq concurrently accesses the root
* hub; drivers will be talking to enumerated devices.
* (On restart paths, hub_wq already knows about the root
* hub and could find work as soon as we wrote FLAG_CF.)
*
* Before this point the HC was idle/ready. After, hub_wq
* and device drivers may start it running.
*/
fhci_usb_enable(fhci);
return 0;
err:
fhci_mem_free(fhci);
return ret;
}
static void fhci_stop(struct usb_hcd *hcd)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
fhci_usb_disable_interrupt(fhci->usb_lld);
fhci_usb_disable(fhci);
fhci_usb_free(fhci->usb_lld);
fhci->usb_lld = NULL;
fhci_mem_free(fhci);
}
static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
u32 pipe = urb->pipe;
int ret;
int i;
int size = 0;
struct urb_priv *urb_priv;
unsigned long flags;
switch (usb_pipetype(pipe)) {
case PIPE_CONTROL:
/* 1 td for setup, 1 for ack */
size = 2;
fallthrough;
case PIPE_BULK:
/* one td for every 4096 bytes (can be up to 8k) */
size += urb->transfer_buffer_length / 4096;
/* ...add for any remaining bytes... */
if ((urb->transfer_buffer_length % 4096) != 0)
size++;
/* ..and maybe a zero length packet to wrap it up */
if (size == 0)
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
% usb_maxpacket(urb->dev, pipe)) != 0)
size++;
break;
case PIPE_ISOCHRONOUS:
size = urb->number_of_packets;
if (size <= 0)
return -EINVAL;
for (i = 0; i < urb->number_of_packets; i++) {
urb->iso_frame_desc[i].actual_length = 0;
urb->iso_frame_desc[i].status = (u32) (-EXDEV);
}
break;
case PIPE_INTERRUPT:
size = 1;
}
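/*
 * Editorial example of the sizing above: a 10000-byte bulk transfer
 * needs 10000 / 4096 = 2 TDs plus one for the 1808-byte remainder,
 * i.e. 3 TDs (plus one more only on the URB_ZERO_PACKET path).
 */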
/* allocate the private part of the URB */
urb_priv = kzalloc(sizeof(*urb_priv), mem_flags);
if (!urb_priv)
return -ENOMEM;
/* allocate the TD array for the private part of the URB */
urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
kfree(urb_priv);
return -ENOMEM;
}
spin_lock_irqsave(&fhci->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto err;
/* fill the private part of the URB */
urb_priv->num_of_tds = size;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
urb->error_count = 0;
urb->hcpriv = urb_priv;
fhci_queue_urb(fhci, urb);
err:
if (ret) {
kfree(urb_priv->tds);
kfree(urb_priv);
}
spin_unlock_irqrestore(&fhci->lock, flags);
return ret;
}
/* dequeue FHCI URB */
static int fhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
struct fhci_usb *usb = fhci->usb_lld;
int ret = -EINVAL;
unsigned long flags;
if (!urb || !urb->dev || !urb->dev->bus)
goto out;
spin_lock_irqsave(&fhci->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret)
goto out2;
if (usb->port_status != FHCI_PORT_DISABLED) {
struct urb_priv *urb_priv;
/*
* flag the urb's data for deletion in some upcoming
* SF interrupt's delete list processing
*/
urb_priv = urb->hcpriv;
if (!urb_priv || (urb_priv->state == URB_DEL))
goto out2;
urb_priv->state = URB_DEL;
/* already pending? */
urb_priv->ed->state = FHCI_ED_URB_DEL;
} else {
fhci_urb_complete_free(fhci, urb);
}
out2:
spin_unlock_irqrestore(&fhci->lock, flags);
out:
return ret;
}
static void fhci_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct fhci_hcd *fhci;
struct ed *ed;
unsigned long flags;
fhci = hcd_to_fhci(hcd);
spin_lock_irqsave(&fhci->lock, flags);
ed = ep->hcpriv;
if (ed) {
while (ed->td_head != NULL) {
struct td *td = fhci_remove_td_from_ed(ed);
fhci_urb_complete_free(fhci, td->urb);
}
fhci_recycle_empty_ed(fhci, ed);
ep->hcpriv = NULL;
}
spin_unlock_irqrestore(&fhci->lock, flags);
}
static int fhci_get_frame_number(struct usb_hcd *hcd)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
return get_frame_num(fhci);
}
static const struct hc_driver fhci_driver = {
.description = "fsl,usb-fhci",
.product_desc = "FHCI HOST Controller",
.hcd_priv_size = sizeof(struct fhci_hcd),
/* generic hardware linkage */
.irq = fhci_irq,
.flags = HCD_DMA | HCD_USB11 | HCD_MEMORY,
/* basic lifecycle operation */
.start = fhci_start,
.stop = fhci_stop,
/* managing i/o requests and associated device resources */
.urb_enqueue = fhci_urb_enqueue,
.urb_dequeue = fhci_urb_dequeue,
.endpoint_disable = fhci_endpoint_disable,
/* scheduling support */
.get_frame_number = fhci_get_frame_number,
/* root hub support */
.hub_status_data = fhci_hub_status_data,
.hub_control = fhci_hub_control,
};
static int of_fhci_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *node = dev->of_node;
struct usb_hcd *hcd;
struct fhci_hcd *fhci;
struct resource usb_regs;
unsigned long pram_addr;
unsigned int usb_irq;
const char *sprop;
const u32 *iprop;
int size;
int ret;
int i;
int j;
if (usb_disabled())
return -ENODEV;
sprop = of_get_property(node, "mode", NULL);
if (sprop && strcmp(sprop, "host"))
return -ENODEV;
hcd = usb_create_hcd(&fhci_driver, dev, dev_name(dev));
if (!hcd) {
dev_err(dev, "could not create hcd\n");
return -ENOMEM;
}
fhci = hcd_to_fhci(hcd);
hcd->self.controller = dev;
dev_set_drvdata(dev, hcd);
iprop = of_get_property(node, "hub-power-budget", &size);
if (iprop && size == sizeof(*iprop))
hcd->power_budget = *iprop;
/* FHCI registers. */
ret = of_address_to_resource(node, 0, &usb_regs);
if (ret) {
dev_err(dev, "could not get regs\n");
goto err_regs;
}
hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
if (!hcd->regs) {
dev_err(dev, "could not ioremap regs\n");
ret = -ENOMEM;
goto err_regs;
}
fhci->regs = hcd->regs;
/* Parameter RAM. */
iprop = of_get_property(node, "reg", &size);
if (!iprop || size < sizeof(*iprop) * 4) {
dev_err(dev, "can't get pram offset\n");
ret = -EINVAL;
goto err_pram;
}
pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
if (IS_ERR_VALUE(pram_addr)) {
dev_err(dev, "failed to allocate usb pram\n");
ret = -ENOMEM;
goto err_pram;
}
qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
fhci->pram = cpm_muram_addr(pram_addr);
/* GPIOs and pins */
for (i = 0; i < NUM_GPIOS; i++) {
if (i < GPIO_SPEED)
fhci->gpiods[i] = devm_gpiod_get_index(dev,
NULL, i, GPIOD_IN);
else
fhci->gpiods[i] = devm_gpiod_get_index_optional(dev,
NULL, i, GPIOD_OUT_LOW);
if (IS_ERR(fhci->gpiods[i])) {
ret = PTR_ERR(fhci->gpiods[i]);
dev_err(dev, "incorrect GPIO%d: %d\n", i, ret);
goto err_gpios;
}
if (!fhci->gpiods[i]) {
dev_info(dev, "assuming board doesn't have "
"%s gpio\n", i == GPIO_SPEED ?
"speed" : "power");
}
}
for (j = 0; j < NUM_PINS; j++) {
fhci->pins[j] = qe_pin_request(dev, j);
if (IS_ERR(fhci->pins[j])) {
ret = PTR_ERR(fhci->pins[j]);
dev_err(dev, "can't get pin %d: %d\n", j, ret);
goto err_pins;
}
}
/* Frame limit timer and its interrupt. */
fhci->timer = gtm_get_timer16();
if (IS_ERR(fhci->timer)) {
ret = PTR_ERR(fhci->timer);
dev_err(dev, "failed to request qe timer: %i", ret);
goto err_get_timer;
}
ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
0, "qe timer (usb)", hcd);
if (ret) {
dev_err(dev, "failed to request timer irq");
goto err_timer_irq;
}
/* USB Host interrupt. */
usb_irq = irq_of_parse_and_map(node, 0);
if (!usb_irq) {
dev_err(dev, "could not get usb irq\n");
ret = -EINVAL;
goto err_usb_irq;
}
/* Clocks. */
sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
if (sprop) {
fhci->fullspeed_clk = qe_clock_source(sprop);
if (fhci->fullspeed_clk == QE_CLK_DUMMY) {
dev_err(dev, "wrong fullspeed-clock\n");
ret = -EINVAL;
goto err_clocks;
}
}
sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
if (sprop) {
fhci->lowspeed_clk = qe_clock_source(sprop);
if (fhci->lowspeed_clk == QE_CLK_DUMMY) {
dev_err(dev, "wrong lowspeed-clock\n");
ret = -EINVAL;
goto err_clocks;
}
}
if (fhci->fullspeed_clk == QE_CLK_NONE &&
fhci->lowspeed_clk == QE_CLK_NONE) {
dev_err(dev, "no clocks specified\n");
ret = -EINVAL;
goto err_clocks;
}
dev_info(dev, "at 0x%p, irq %d\n", hcd->regs, usb_irq);
fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
/* Start with full-speed, if possible. */
if (fhci->fullspeed_clk != QE_CLK_NONE) {
fhci_config_transceiver(fhci, FHCI_PORT_FULL);
qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
} else {
fhci_config_transceiver(fhci, FHCI_PORT_LOW);
qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
}
/* Clear and disable any pending interrupts. */
out_be16(&fhci->regs->usb_usber, 0xffff);
out_be16(&fhci->regs->usb_usbmr, 0);
ret = usb_add_hcd(hcd, usb_irq, 0);
if (ret < 0)
goto err_add_hcd;
device_wakeup_enable(hcd->self.controller);
fhci_dfs_create(fhci);
return 0;
err_add_hcd:
err_clocks:
irq_dispose_mapping(usb_irq);
err_usb_irq:
free_irq(fhci->timer->irq, hcd);
err_timer_irq:
gtm_put_timer16(fhci->timer);
err_get_timer:
err_pins:
while (--j >= 0)
qe_pin_free(fhci->pins[j]);
err_gpios:
cpm_muram_free(pram_addr);
err_pram:
iounmap(hcd->regs);
err_regs:
usb_put_hcd(hcd);
return ret;
}
static void fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
int j;
usb_remove_hcd(hcd);
free_irq(fhci->timer->irq, hcd);
gtm_put_timer16(fhci->timer);
cpm_muram_free(cpm_muram_offset(fhci->pram));
for (j = 0; j < NUM_PINS; j++)
qe_pin_free(fhci->pins[j]);
fhci_dfs_destroy(fhci);
usb_put_hcd(hcd);
}
static void of_fhci_remove(struct platform_device *ofdev)
{
fhci_remove(&ofdev->dev);
}
static const struct of_device_id of_fhci_match[] = {
{ .compatible = "fsl,mpc8323-qe-usb", },
{},
};
MODULE_DEVICE_TABLE(of, of_fhci_match);
static struct platform_driver of_fhci_driver = {
.driver = {
.name = "fsl,usb-fhci",
.of_match_table = of_fhci_match,
},
.probe = of_fhci_probe,
.remove_new = of_fhci_remove,
};
module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <[email protected]>, "
"Jerry Huang <[email protected]>, "
"Anton Vorontsov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/fhci-hcd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) PCI Bus Glue.
*
* Copyright (c) 2000-2004 by David Brownell
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ehci.h"
#include "pci-quirks.h"
#define DRIVER_DESC "EHCI PCI platform driver"
static const char hcd_name[] = "ehci-pci";
/* defined here to avoid adding to pci_ids.h for single instance use */
#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
#define PCI_VENDOR_ID_ASPEED 0x1a03
#define PCI_DEVICE_ID_ASPEED_EHCI 0x2603
/*-------------------------------------------------------------------------*/
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
{
return pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
}
/*
 * This is the list of PCI IDs for devices that have the EHCI USB class but
 * specific drivers of their own. One example is a ChipIdea device installed
* on some Intel MID platforms.
*/
static const struct pci_device_id bypass_pci_id_table[] = {
/* ChipIdea on Intel MID platform */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), },
{}
};
static inline bool is_bypassed_id(struct pci_dev *pdev)
{
return !!pci_match_id(bypass_pci_id_table, pdev);
}
/*
 * 0x84 is the offset of the in/out threshold register,
 * which is the same offset as that of the 'hostpc' register.
*/
#define intel_quark_x1000_insnreg01 hostpc
/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
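/*
 * Decomposition sketch (an assumption for illustration, consistent with the
 * 0x7f per-direction maximum above): the OUT threshold appears to occupy
 * bits 22:16 and the IN threshold bits 6:0, so a hypothetical helper could
 * build the value as:
 *
 *	#define QUARK_EHCI_THRESHOLD(out, in) \
 *		((((out) & 0x7f) << 16) | ((in) & 0x7f))
 *
 * giving QUARK_EHCI_THRESHOLD(0x7f, 0x7f) == 0x007f007f.
 */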
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
int retval;
/* we expect static quirk code to handle the "extended capabilities"
* (currently just BIOS handoff) allowed starting with EHCI 0.96
*/
/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
retval = pci_set_mwi(pdev);
if (!retval)
ehci_dbg(ehci, "MWI active\n");
/* Reset the threshold limit */
if (is_intel_quark_x1000(pdev)) {
/*
* For the Intel QUARK X1000, raise the I/O threshold to the
* maximum usable value in order to improve performance.
*/
ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
ehci->regs->intel_quark_x1000_insnreg01);
}
return 0;
}
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 temp;
int retval;
ehci->caps = hcd->regs;
/*
* ehci_init() causes memory for DMA transfers to be
* allocated. Thus, any vendor-specific workarounds based on
* limiting the type of memory used for DMA transfers must
* happen before ehci_setup() is called.
*
* Most other workarounds can be done either before or after
* init and reset; they are located here too.
*/
switch (pdev->vendor) {
case PCI_VENDOR_ID_TOSHIBA_2:
/* celleb's companion chip */
if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
ehci->big_endian_mmio = 1;
#else
ehci_warn(ehci,
"unsupported big endian Toshiba quirk\n");
#endif
}
break;
case PCI_VENDOR_ID_NVIDIA:
/* NVidia reports that certain chips don't handle
* QH, ITD, or SITD addresses above 2GB. (But TD,
* data buffer, and periodic schedule are normal.)
*/
switch (pdev->device) {
case 0x003c: /* MCP04 */
case 0x005b: /* CK804 */
case 0x00d8: /* CK8 */
case 0x00e8: /* CK8S */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(31)) < 0)
ehci_warn(ehci, "can't enable NVidia "
"workaround for >2GB RAM\n");
break;
/* Some NForce2 chips have problems with selective suspend;
* fixed in newer silicon.
*/
case 0x0068:
if (pdev->revision < 0xa4)
ehci->no_selective_suspend = 1;
break;
}
break;
case PCI_VENDOR_ID_INTEL:
if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
hcd->has_tt = 1;
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
hcd->has_tt = 1;
break;
case PCI_VENDOR_ID_AMD:
/* AMD PLL quirk */
if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
ehci_info(ehci, "ignoring AMD8111 (errata)\n");
retval = -EIO;
goto done;
}
/*
* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
* read/write memory space which does not belong to it when
		 * there is a NULL pointer with the T-bit set to 1 in the frame
		 * list table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to an inactive qh.
*/
if (pdev->device == 0x7808) {
ehci->use_dummy_qh = 1;
ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
}
break;
case PCI_VENDOR_ID_VIA:
if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
u8 tmp;
/* The VT6212 defaults to a 1 usec EHCI sleep time which
* hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
* that sleep time use the conventional 10 usec.
*/
pci_read_config_byte(pdev, 0x4b, &tmp);
if (tmp & 0x20)
break;
pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
}
break;
case PCI_VENDOR_ID_ATI:
/* AMD PLL quirk */
if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/*
* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
* read/write memory space which does not belong to it when
		 * there is a NULL pointer with the T-bit set to 1 in the frame
		 * list table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to an inactive qh.
*/
if (pdev->device == 0x4396) {
ehci->use_dummy_qh = 1;
ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
}
		/* SB600 and older versions of SB700 have a bug in the EHCI
		 * controller which causes USB devices to lose response in
		 * some cases.
*/
if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
usb_amd_hang_symptom_quirk()) {
u8 tmp;
ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
pci_read_config_byte(pdev, 0x53, &tmp);
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
break;
case PCI_VENDOR_ID_NETMOS:
/* MosChip frame-index-register bug */
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
case PCI_VENDOR_ID_HUAWEI:
/* Synopsys HC bug */
if (pdev->device == 0xa239) {
ehci_info(ehci, "applying Synopsys HC workaround\n");
ehci->has_synopsys_hc_bug = 1;
}
break;
case PCI_VENDOR_ID_ASPEED:
if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) {
ehci_info(ehci, "applying Aspeed HC workaround\n");
ehci->is_aspeed = 1;
}
break;
case PCI_VENDOR_ID_ZHAOXIN:
if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90)
ehci->zx_wakeup_clear_needed = 1;
break;
}
/* optional debug port, normally in the first BAR */
temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
if (temp) {
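		/*
		 * Per the EHCI debug-port capability layout, the dword read
		 * below holds the BAR number in bits 31:29 and the register
		 * offset in bits 28:16; after the >>= 16 they land in bits
		 * 15:13 and 12:0 respectively.
		 */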
pci_read_config_dword(pdev, temp, &temp);
temp >>= 16;
if (((temp >> 13) & 7) == 1) {
u32 hcs_params = ehci_readl(ehci,
&ehci->caps->hcs_params);
temp &= 0x1fff;
ehci->debug = hcd->regs + temp;
temp = ehci_readl(ehci, &ehci->debug->control);
ehci_info(ehci, "debug port %d%s\n",
HCS_DEBUG_PORT(hcs_params),
(temp & DBGP_ENABLED) ? " IN USE" : "");
if (!(temp & DBGP_ENABLED))
ehci->debug = NULL;
}
}
retval = ehci_setup(hcd);
if (retval)
return retval;
/* These workarounds need to be applied after ehci_setup() */
switch (pdev->vendor) {
case PCI_VENDOR_ID_NEC:
case PCI_VENDOR_ID_INTEL:
case PCI_VENDOR_ID_AMD:
ehci->need_io_watchdog = 0;
break;
case PCI_VENDOR_ID_NVIDIA:
switch (pdev->device) {
/* MCP89 chips on the MacBookAir3,1 give EPROTO when
* fetching device descriptors unless LPM is disabled.
* There are also intermittent problems enumerating
* devices with PPCD enabled.
*/
case 0x0d9d:
ehci_info(ehci, "disable ppcd for nvidia mcp89\n");
ehci->has_ppcd = 0;
ehci->command &= ~CMD_PPCEE;
break;
}
break;
}
/* at least the Genesys GL880S needs fixup here */
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
temp &= 0x0f;
if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
ehci_dbg(ehci, "bogus port configuration: "
"cc=%d x pcc=%d < ports=%d\n",
HCS_N_CC(ehci->hcs_params),
HCS_N_PCC(ehci->hcs_params),
HCS_N_PORTS(ehci->hcs_params));
switch (pdev->vendor) {
case 0x17a0: /* GENESYS */
/* GL880S: should be PORTS=2 */
temp |= (ehci->hcs_params & ~0xf);
ehci->hcs_params = temp;
break;
case PCI_VENDOR_ID_NVIDIA:
/* NF4: should be PCC=10 */
break;
}
}
/* Serial Bus Release Number is at PCI 0x60 offset */
if (pdev->vendor == PCI_VENDOR_ID_STMICRO
&& pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
; /* ConneXT has no sbrn register */
else if (pdev->vendor == PCI_VENDOR_ID_HUAWEI
&& pdev->device == 0xa239)
; /* HUAWEI Kunpeng920 USB EHCI has no sbrn register */
else
pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
/* Keep this around for a while just in case some EHCI
* implementation uses legacy PCI PM support. This test
* can be removed on 17 Dec 2009 if the dev_warn() hasn't
* been triggered by then.
*/
if (!device_can_wakeup(&pdev->dev)) {
u16 port_wake;
pci_read_config_word(pdev, 0x62, &port_wake);
if (port_wake & 0x0001) {
dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
device_set_wakeup_capable(&pdev->dev, 1);
}
}
#ifdef CONFIG_PM
if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif
retval = ehci_pci_reinit(ehci, pdev);
done:
return retval;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* suspend/resume, section 4.3 */
/* These routines rely on the PCI bus glue
* to handle powerdown and wakeup, and currently also on
* transceivers that don't need any software attention to set up
* the right sort of wakeup.
* Also they depend on separate root hub suspend/resume.
*/
static int ehci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
bool hibernated = (msg.event == PM_EVENT_RESTORE);
if (ehci_resume(hcd, hibernated) != 0)
(void) ehci_pci_reinit(ehci, pdev);
return 0;
}
#else
#define ehci_suspend NULL
#define ehci_pci_resume NULL
#endif /* CONFIG_PM */
static struct hc_driver __read_mostly ehci_pci_hc_driver;
static const struct ehci_driver_overrides pci_overrides __initconst = {
.reset = ehci_pci_setup,
};
/*-------------------------------------------------------------------------*/
static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
if (is_bypassed_id(pdev))
return -ENODEV;
return usb_hcd_pci_probe(pdev, &ehci_pci_hc_driver);
}
static void ehci_pci_remove(struct pci_dev *pdev)
{
pci_clear_mwi(pdev);
usb_hcd_pci_remove(pdev);
}
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
}, {
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ehci_pci_driver = {
.name = hcd_name,
.id_table = pci_ids,
.probe = ehci_pci_probe,
.remove = ehci_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
.driver = {
#ifdef CONFIG_PM
.pm = &usb_hcd_pci_pm_ops,
#endif
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init ehci_pci_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
/* Entries for the PCI suspend/resume callbacks are special */
ehci_pci_hc_driver.pci_suspend = ehci_suspend;
ehci_pci_hc_driver.pci_resume = ehci_pci_resume;
return pci_register_driver(&ehci_pci_driver);
}
module_init(ehci_pci_init);
static void __exit ehci_pci_cleanup(void)
{
pci_unregister_driver(&ehci_pci_driver);
}
module_exit(ehci_pci_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Cavium Networks
*
* Some parts of the code were originally released under BSD license:
*
* Copyright (c) 2003-2010 Cavium Networks ([email protected]). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Networks nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
* OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
*/
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/hcd.h>
#include <linux/prefetch.h>
#include <linux/irqdomain.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <asm/octeon/octeon.h>
#include "octeon-hcd.h"
/**
* enum cvmx_usb_speed - the possible USB device speeds
*
 * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
 * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
 * @CVMX_USB_SPEED_LOW:  Device is operating at 1.5Mbps
*/
enum cvmx_usb_speed {
CVMX_USB_SPEED_HIGH = 0,
CVMX_USB_SPEED_FULL = 1,
CVMX_USB_SPEED_LOW = 2,
};
/**
* enum cvmx_usb_transfer - the possible USB transfer types
*
* @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
* transfers
* @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
* priority periodic transfers
* @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
* transfers
* @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
* periodic transfers
*/
enum cvmx_usb_transfer {
CVMX_USB_TRANSFER_CONTROL = 0,
CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
CVMX_USB_TRANSFER_BULK = 2,
CVMX_USB_TRANSFER_INTERRUPT = 3,
};
/**
* enum cvmx_usb_direction - the transfer directions
*
* @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
* @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
*/
enum cvmx_usb_direction {
CVMX_USB_DIRECTION_OUT,
CVMX_USB_DIRECTION_IN,
};
/**
* enum cvmx_usb_status - possible callback function status codes
*
* @CVMX_USB_STATUS_OK: The transaction / operation finished without
* any errors
* @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented
* @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight
* by a user call to cvmx_usb_cancel
* @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected
* error status
* @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response
* from the device
* @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the
* device even after a number of retries
* @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
* error even after a number of retries
* @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error
* @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error
* even after a number of retries
*/
enum cvmx_usb_status {
CVMX_USB_STATUS_OK,
CVMX_USB_STATUS_SHORT,
CVMX_USB_STATUS_CANCEL,
CVMX_USB_STATUS_ERROR,
CVMX_USB_STATUS_STALL,
CVMX_USB_STATUS_XACTERR,
CVMX_USB_STATUS_DATATGLERR,
CVMX_USB_STATUS_BABBLEERR,
CVMX_USB_STATUS_FRAMEERR,
};
/**
* struct cvmx_usb_port_status - the USB port status information
*
 * @port_enabled: 1 = USB port is enabled, 0 = disabled
* @port_over_current: 1 = Over current detected, 0 = Over current not
* detected. Octeon doesn't support over current detection.
* @port_powered: 1 = Port power is being supplied to the device, 0 =
* power is off. Octeon doesn't support turning port power
* off.
* @port_speed: Current port speed.
* @connected: 1 = A device is connected to the port, 0 = No device is
* connected.
* @connect_change: 1 = Device connected state changed since the last set
* status call.
*/
struct cvmx_usb_port_status {
u32 reserved : 25;
u32 port_enabled : 1;
u32 port_over_current : 1;
u32 port_powered : 1;
enum cvmx_usb_speed port_speed : 2;
u32 connected : 1;
u32 connect_change : 1;
};
/**
* struct cvmx_usb_iso_packet - descriptor for Isochronous packets
*
* @offset: This is the offset in bytes into the main buffer where this data
* is stored.
* @length: This is the length in bytes of the data.
* @status: This is the status of this individual packet transfer.
*/
struct cvmx_usb_iso_packet {
int offset;
int length;
enum cvmx_usb_status status;
};
/**
* enum cvmx_usb_initialize_flags - flags used by the initialization function
*
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
* as clock source at USB_XO and
* USB_XI.
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
* board clock source at USB_XO.
* USB_XI should be tied to GND.
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
* crystal
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
* @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
 * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled I/O for
 *                                    USB data transfers
*/
enum cvmx_usb_initialize_flags {
CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
/* Bits 3-4 used to encode the clock frequency */
CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
};
/**
* enum cvmx_usb_pipe_flags - internal flags for a pipe.
*
* @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
* actively using hardware.
* @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
* pipe is in the ping state.
*/
enum cvmx_usb_pipe_flags {
CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
};
/* Maximum number of times to retry failed transactions */
#define MAX_RETRIES 3
/* Maximum number of hardware channels supported by the USB block */
#define MAX_CHANNELS 8
/*
* The low level hardware can transfer a maximum of this number of bytes in each
* transfer. The field is 19 bits wide
*/
#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
/*
* The low level hardware can transfer a maximum of this number of packets in
* each transfer. The field is 10 bits wide
*/
#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
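/*
 * Worked example: with a 512-byte bulk max packet, MAX_TRANSFER_BYTES
 * rounded down to a packet multiple is 1023 * 512 = 523776 bytes, i.e.
 * exactly MAX_TRANSFER_PACKETS full packets, so the two hardware limits
 * coincide for that packet size.
 */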
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
* enum represents all of the possible stages a transaction can
* be in. Note that split completes are always even. This is so
* the NAK handler can backup to the previous low level
* transaction with a simple clearing of bit 0.
*/
enum cvmx_usb_stage {
CVMX_USB_STAGE_NON_CONTROL,
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
CVMX_USB_STAGE_SETUP,
CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
CVMX_USB_STAGE_DATA,
CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
CVMX_USB_STAGE_STATUS,
CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
};
/**
* struct cvmx_usb_transaction - describes each pending USB transaction
* regardless of type. These are linked together
* to form a list of pending requests for a pipe.
*
* @node: List node for transactions in the pipe.
 * @type: Type of transaction, duplicated from the pipe.
* @buffer: User's physical buffer address to read/write.
* @buffer_length: Size of the user's buffer in bytes.
* @control_header: For control transactions, physical address of the 8
* byte standard header.
* @iso_start_frame: For ISO transactions, the starting frame number.
* @iso_number_packets: For ISO transactions, the number of packets in the
* request.
* @iso_packets: For ISO transactions, the sub packets in the request.
 * @actual_bytes: Actual bytes transferred for this transaction.
* @stage: For control transactions, the current stage.
* @urb: URB.
*/
struct cvmx_usb_transaction {
struct list_head node;
enum cvmx_usb_transfer type;
u64 buffer;
int buffer_length;
u64 control_header;
int iso_start_frame;
int iso_number_packets;
struct cvmx_usb_iso_packet *iso_packets;
int xfersize;
int pktcnt;
int retries;
int actual_bytes;
enum cvmx_usb_stage stage;
struct urb *urb;
};
/**
* struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
* and some USB device. It contains a list of pending
 * requests to the device.
*
 * @node: List node for pipe list
* @transactions: List of pending transactions
* @interval: For periodic pipes, the interval between packets in
* frames
* @next_tx_frame: The next frame this pipe is allowed to transmit on
* @flags: State flags for this pipe
* @device_speed: Speed of device connected to this pipe
* @transfer_type: Type of transaction supported by this pipe
* @transfer_dir: IN or OUT. Ignored for Control
* @multi_count: Max packet in a row for the device
* @max_packet: The device's maximum packet size in bytes
* @device_addr: USB device address at other end of pipe
* @endpoint_num: USB endpoint number at other end of pipe
* @hub_device_addr: Hub address this device is connected to
* @hub_port: Hub port this device is connected to
* @pid_toggle: This toggles between 0/1 on every packet send to track
* the data pid needed
* @channel: Hardware DMA channel for this pipe
* @split_sc_frame: The low order bits of the frame number the split
* complete should be sent on
*/
struct cvmx_usb_pipe {
struct list_head node;
struct list_head transactions;
u64 interval;
u64 next_tx_frame;
enum cvmx_usb_pipe_flags flags;
enum cvmx_usb_speed device_speed;
enum cvmx_usb_transfer transfer_type;
enum cvmx_usb_direction transfer_dir;
int multi_count;
u16 max_packet;
u8 device_addr;
u8 endpoint_num;
u8 hub_device_addr;
u8 hub_port;
u8 pid_toggle;
u8 channel;
s8 split_sc_frame;
};
struct cvmx_usb_tx_fifo {
struct {
int channel;
int size;
u64 address;
} entry[MAX_CHANNELS + 1];
int head;
int tail;
};
/**
* struct octeon_hcd - the state of the USB block
*
 * @lock: Serialization lock.
 * @init_flags: Flags passed to initialize.
 * @index: Which USB block this is for.
 * @idle_hardware_channels: Bit set for every idle hardware channel.
 * @usbcx_hprt: Stored port status so we don't need to read a CSR to
 *              determine splits.
 * @pipe_for_channel: Map channels to pipes.
 * @indent: Used by debug output to indent functions.
 * @port_status: Last port status used for change notification.
 * @idle_pipes: List of open pipes that have no transactions.
 * @active_pipes: Active pipes indexed by transfer type.
 * @frame_number: Increments every SOF interrupt for time keeping.
 * @active_split: Points to the current active split, or NULL.
 * @periodic: Software FIFO for periodic TX data.
 * @nonperiodic: Software FIFO for non-periodic TX data.
*/
struct octeon_hcd {
spinlock_t lock; /* serialization lock */
int init_flags;
int index;
int idle_hardware_channels;
union cvmx_usbcx_hprt usbcx_hprt;
struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
int indent;
struct cvmx_usb_port_status port_status;
struct list_head idle_pipes;
struct list_head active_pipes[4];
u64 frame_number;
struct cvmx_usb_transaction *active_split;
struct cvmx_usb_tx_fifo periodic;
struct cvmx_usb_tx_fifo nonperiodic;
};
/*
* This macro logically sets a single field in a CSR. It does the sequence
* read, modify, and write
*/
#define USB_SET_FIELD32(address, _union, field, value) \
do { \
union _union c; \
\
c.u32 = cvmx_usb_read_csr32(usb, address); \
c.s.field = value; \
cvmx_usb_write_csr32(usb, address, c.u32); \
} while (0)
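/*
 * Example usage, mirroring calls later in this file: set only the prtrst
 * field of HPRT while preserving all neighbouring fields:
 *
 *	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
 *			prtrst, 1);
 */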
/* Returns the IO address used to push/pop data to/from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) \
(CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
/**
* struct octeon_temp_buffer - a bounce buffer for USB transfers
* @orig_buffer: the original buffer passed by the USB stack
* @data: the newly allocated temporary buffer (excluding meta-data)
*
 * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
 * the buffer length is not a whole number of words, we bounce it through a
 * padded temporary buffer, which this struct represents.
*/
struct octeon_temp_buffer {
void *orig_buffer;
u8 data[];
};
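/*
 * For example, a 6-byte transfer is not a whole number of words, so it is
 * bounced through a temporary buffer padded to 8 bytes; see
 * octeon_alloc_temp_buffer() below.
 */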
static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
{
return container_of((void *)p, struct usb_hcd, hcd_priv);
}
/**
* octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
* (if needed)
* @urb: URB.
* @mem_flags: Memory allocation flags.
*
* This function allocates a temporary bounce buffer whenever it's needed
* due to HW limitations.
*/
static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
struct octeon_temp_buffer *temp;
if (urb->num_sgs || urb->sg ||
(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
!(urb->transfer_buffer_length % sizeof(u32)))
return 0;
temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
sizeof(*temp), mem_flags);
if (!temp)
return -ENOMEM;
temp->orig_buffer = urb->transfer_buffer;
if (usb_urb_dir_out(urb))
memcpy(temp->data, urb->transfer_buffer,
urb->transfer_buffer_length);
urb->transfer_buffer = temp->data;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
return 0;
}
/**
* octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
* @urb: URB.
*
* Frees a buffer allocated by octeon_alloc_temp_buffer().
*/
static void octeon_free_temp_buffer(struct urb *urb)
{
struct octeon_temp_buffer *temp;
size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
data);
if (usb_urb_dir_in(urb)) {
if (usb_pipeisoc(urb->pipe))
length = urb->transfer_buffer_length;
else
length = urb->actual_length;
memcpy(temp->orig_buffer, urb->transfer_buffer, length);
}
urb->transfer_buffer = temp->orig_buffer;
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
kfree(temp);
}
/**
* octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
* @hcd: USB HCD structure.
* @urb: URB.
* @mem_flags: Memory allocation flags.
*/
static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
int ret;
ret = octeon_alloc_temp_buffer(urb, mem_flags);
if (ret)
return ret;
ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
if (ret)
octeon_free_temp_buffer(urb);
return ret;
}
/**
* octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
* @hcd: USB HCD structure.
* @urb: URB.
*/
static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
usb_hcd_unmap_urb_for_dma(hcd, urb);
octeon_free_temp_buffer(urb);
}
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
* debugging is on.
*
* @usb: USB block this access is for
* @address: 64bit address to read
*
* Returns: Result of the read
*/
static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
{
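	/* Flipping bit 2 selects the other 32-bit half of the 64-bit CSR. */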
return cvmx_read64_uint32(address ^ 4);
}
/**
* Write a USB 32bit CSR. It performs the necessary address
* swizzle for 32bit CSRs and logs the value in a readable format
* if debugging is on.
*
* @usb: USB block this access is for
* @address: 64bit address to write
* @value: Value to write
*/
static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
u64 address, u32 value)
{
cvmx_write64_uint32(address ^ 4, value);
cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
}
/**
 * Return non-zero if this pipe connects to a non-high-speed
 * device through a high speed hub.
*
* @usb: USB block this access is for
* @pipe: Pipe to check
*
* Returns: Non zero if we need to do split transactions
*/
static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
}
/**
* Trivial utility function to return the correct PID for a pipe
*
* @pipe: pipe to check
*
* Returns: PID for pipe
*/
static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
{
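	/*
	 * These values match the HCTSIZ PID field encoding used elsewhere in
	 * this file (see cvmx_usb_start_channel_control(), where 3 = SETUP):
	 * 0 = DATA0, 2 = DATA1.
	 */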
if (pipe->pid_toggle)
return 2; /* Data1 */
return 0; /* Data0 */
}
/* Loops through register until txfflsh or rxfflsh become zero.*/
static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type)
{
int result;
u64 address = CVMX_USBCX_GRSTCTL(usb->index);
	/* Time out after roughly 100 microseconds worth of core cycles. */
	u64 done = cvmx_get_cycle() + 100 *
		   octeon_get_clock_rate() / 1000000;
union cvmx_usbcx_grstctl c;
while (1) {
c.u32 = cvmx_usb_read_csr32(usb, address);
if (fflsh_type == 0 && c.s.txfflsh == 0) {
result = 0;
break;
} else if (fflsh_type == 1 && c.s.rxfflsh == 0) {
result = 0;
break;
} else if (cvmx_get_cycle() > done) {
result = -1;
break;
}
__delay(100);
}
return result;
}
static void cvmx_fifo_setup(struct octeon_hcd *usb)
{
union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
union cvmx_usbcx_gnptxfsiz npsiz;
union cvmx_usbcx_hptxfsiz psiz;
usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GHWCFG3(usb->index));
/*
* Program the USBC_GRXFSIZ register to select the size of the receive
* FIFO (25%).
*/
USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
/*
* Program the USBC_GNPTXFSIZ register to select the size and the start
* address of the non-periodic transmit FIFO for nonperiodic
* transactions (50%).
*/
npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
/*
* Program the USBC_HPTXFSIZ register to select the size and start
* address of the periodic transmit FIFO for periodic transactions
* (25%).
*/
psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
/* Flush all FIFOs */
USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
cvmx_usbcx_grstctl, txfnum, 0x10);
USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
cvmx_usbcx_grstctl, txfflsh, 1);
cvmx_wait_tx_rx(usb, 0);
USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
cvmx_usbcx_grstctl, rxfflsh, 1);
cvmx_wait_tx_rx(usb, 1);
}
/**
* Shutdown a USB port after a call to cvmx_usb_initialize().
* The port should be disabled with all pipes closed when this
* function is called.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_shutdown(struct octeon_hcd *usb)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
/* Make sure all pipes are closed */
if (!list_empty(&usb->idle_pipes) ||
!list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
!list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
!list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
!list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
return -EBUSY;
/* Disable the clocks and put them in power on reset */
usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.enable = 1;
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hclk_rst = 1;
usbn_clk_ctl.s.prst = 0;
usbn_clk_ctl.s.hrst = 0;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
return 0;
}
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
* off in the disabled state.
*
* @dev: Pointer to struct device for logging purposes.
* @usb: Pointer to struct octeon_hcd.
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct device *dev,
struct octeon_hcd *usb)
{
int channel;
int divisor;
int retries = 0;
union cvmx_usbcx_hcfg usbcx_hcfg;
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbcx_gintsts usbc_gintsts;
union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
union cvmx_usbcx_gintmsk usbcx_gintmsk;
union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
retry:
/*
* Power On Reset and PHY Initialization
*
* 1. Wait for DCOK to assert (nothing to do)
*
* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
* USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
*/
usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hrst = 0;
usbn_clk_ctl.s.prst = 0;
usbn_clk_ctl.s.hclk_rst = 0;
usbn_clk_ctl.s.enable = 0;
/*
* 2b. Select the USB reference clock/crystal parameters by writing
* appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
*/
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
/*
* The USB port uses 12/24/48MHz 2.5V board clock
* source at USB_XO. USB_XI should be tied to GND.
* Most Octeon evaluation boards require this setting
*/
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN50XX))
/* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
else
/* From CN52XX manual */
usbn_clk_ctl.s.p_rtype = 1;
switch (usb->init_flags &
CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
usbn_clk_ctl.s.p_c_sel = 0;
break;
case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
usbn_clk_ctl.s.p_c_sel = 1;
break;
case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
usbn_clk_ctl.s.p_c_sel = 2;
break;
}
} else {
/*
* The USB port uses a 12MHz crystal as clock source
* at USB_XO and USB_XI
*/
if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
/* From CN31XX,CN30XX manual */
usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
else
/* From CN56XX,CN52XX,CN50XX manuals. */
usbn_clk_ctl.s.p_rtype = 0;
usbn_clk_ctl.s.p_c_sel = 0;
}
/*
* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
* setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
* such that USB is as close as possible to 125Mhz
*/
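	/* For example, a 500 MHz core clock gives divisor = 4, i.e. 125 MHz. */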
divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
/* Lower than 4 doesn't seem to work properly */
if (divisor < 4)
divisor = 4;
usbn_clk_ctl.s.divide = divisor;
usbn_clk_ctl.s.divide2 = 0;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
usbn_clk_ctl.s.hclk_rst = 1;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
__delay(64);
/*
* 3. Program the power-on reset field in the USBN clock-control
* register:
* USBN_CLK_CTL[POR] = 0
*/
usbn_clk_ctl.s.por = 0;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/* 4. Wait 1 ms for PHY clock to start */
mdelay(1);
/*
* 5. Program the Reset input from automatic test equipment field in the
* USBP control and status register:
* USBN_USBP_CTL_STATUS[ATE_RESET] = 1
*/
usbn_usbp_ctl_status.u64 =
cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
usbn_usbp_ctl_status.s.ate_reset = 1;
cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
/* 6. Wait 10 cycles */
__delay(10);
/*
* 7. Clear ATE_RESET field in the USBN clock-control register:
* USBN_USBP_CTL_STATUS[ATE_RESET] = 0
*/
usbn_usbp_ctl_status.s.ate_reset = 0;
cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
/*
* 8. Program the PHY reset field in the USBN clock-control register:
* USBN_CLK_CTL[PRST] = 1
*/
usbn_clk_ctl.s.prst = 1;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/*
* 9. Program the USBP control and status register to select host or
* device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
* device
*/
usbn_usbp_ctl_status.s.hst_mode = 0;
cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
/* 10. Wait 1 us */
udelay(1);
/*
* 11. Program the hreset_n field in the USBN clock-control register:
* USBN_CLK_CTL[HRST] = 1
*/
usbn_clk_ctl.s.hrst = 1;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/* 12. Proceed to USB core initialization */
usbn_clk_ctl.s.enable = 1;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
udelay(1);
/*
* USB Core Initialization
*
* 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
* determine USB core configuration parameters.
*
* Nothing needed
*
* 2. Program the following fields in the global AHB configuration
* register (USBC_GAHBCFG)
* DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
* Burst length, USBC_GAHBCFG[HBSTLEN] = 0
* Nonperiodic TxFIFO empty level (slave mode only),
* USBC_GAHBCFG[NPTXFEMPLVL]
* Periodic TxFIFO empty level (slave mode only),
* USBC_GAHBCFG[PTXFEMPLVL]
* Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
*/
usbcx_gahbcfg.u32 = 0;
usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
usbcx_gahbcfg.s.hbstlen = 0;
usbcx_gahbcfg.s.nptxfemplvl = 1;
usbcx_gahbcfg.s.ptxfemplvl = 1;
usbcx_gahbcfg.s.glblintrmsk = 1;
cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
usbcx_gahbcfg.u32);
/*
* 3. Program the following fields in USBC_GUSBCFG register.
* HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
* ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
* USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
* PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
*/
usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GUSBCFG(usb->index));
usbcx_gusbcfg.s.toutcal = 0;
usbcx_gusbcfg.s.ddrsel = 0;
usbcx_gusbcfg.s.usbtrdtim = 0x5;
usbcx_gusbcfg.s.phylpwrclksel = 0;
cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
usbcx_gusbcfg.u32);
/*
* 4. The software must unmask the following bits in the USBC_GINTMSK
* register.
* OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
* Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
*/
usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GINTMSK(usb->index));
usbcx_gintmsk.s.otgintmsk = 1;
usbcx_gintmsk.s.modemismsk = 1;
usbcx_gintmsk.s.hchintmsk = 1;
usbcx_gintmsk.s.sofmsk = 0;
/* We need RX FIFO interrupts if we don't have DMA */
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
usbcx_gintmsk.s.rxflvlmsk = 1;
cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
usbcx_gintmsk.u32);
/*
* Disable all channel interrupts. We'll enable them per channel later.
*/
for (channel = 0; channel < 8; channel++)
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCINTMSKX(channel, usb->index),
0);
/*
* Host Port Initialization
*
* 1. Program the host-port interrupt-mask field to unmask,
* USBC_GINTMSK[PRTINT] = 1
*/
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, prtintmsk, 1);
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, disconnintmsk, 1);
/*
* 2. Program the USBC_HCFG register to select full-speed host
* or high-speed host.
*/
usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
usbcx_hcfg.s.fslssupp = 0;
usbcx_hcfg.s.fslspclksel = 0;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
cvmx_fifo_setup(usb);
/*
* If the controller is getting port events right after the reset, it
* means the initialization failed. Try resetting the controller again
 * in such a case. This is seen to happen after cold boot on DSR-1000N.
*/
usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GINTSTS(usb->index));
cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
usbc_gintsts.u32);
dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
return 0;
if (retries++ >= 5)
return -EAGAIN;
dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
(int)usbc_gintsts.u32);
msleep(50);
cvmx_usb_shutdown(usb);
msleep(50);
goto retry;
}
/**
* Reset a USB port. After this call succeeds, the USB port is
* online and servicing requests.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
static void cvmx_usb_reset_port(struct octeon_hcd *usb)
{
usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HPRT(usb->index));
/* Program the port reset bit to start the reset process */
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
prtrst, 1);
/*
* Wait at least 50ms (high speed), or 10ms (full speed) for the reset
* process to complete.
*/
mdelay(50);
/* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
prtrst, 0);
/*
* Read the port speed field to get the enumerated speed,
* USBC_HPRT[PRTSPD].
*/
usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HPRT(usb->index));
}
/**
* Disable a USB port. After this call the USB port will not
* generate data transfers and will not generate events.
 * Transactions in progress will fail and call their
* associated callbacks.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_disable(struct octeon_hcd *usb)
{
	/* Disable the port (the enable bit is write-1-to-clear) */
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
prtena, 1);
return 0;
}
/**
* Get the current state of the USB port. Use this call to
* determine if the usb port has anything connected, is enabled,
* or has some sort of error condition. The return value of this
* call has "changed" bits to signal of the value of some fields
* have changed between calls.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*
* Returns: Port status information
*/
static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
{
union cvmx_usbcx_hprt usbc_hprt;
struct cvmx_usb_port_status result;
memset(&result, 0, sizeof(result));
usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
result.port_enabled = usbc_hprt.s.prtena;
result.port_over_current = usbc_hprt.s.prtovrcurract;
result.port_powered = usbc_hprt.s.prtpwr;
result.port_speed = usbc_hprt.s.prtspd;
result.connected = usbc_hprt.s.prtconnsts;
result.connect_change =
result.connected != usb->port_status.connected;
return result;
}
/**
* Open a virtual pipe between the host and a USB device. A pipe
* must be opened before data can be transferred between a device
* and Octeon.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @device_addr:
* USB device address to open the pipe to
* (0-127).
* @endpoint_num:
* USB endpoint number to open the pipe to
* (0-15).
* @device_speed:
* The speed of the device the pipe is going
* to. This must match the device's speed,
* which may be different than the port speed.
* @max_packet: The maximum packet length the device can
* transmit/receive (low speed=0-8, full
* speed=0-1023, high speed=0-1024). This value
* comes from the standard endpoint descriptor
* field wMaxPacketSize bits <10:0>.
* @transfer_type:
* The type of transfer this pipe is for.
* @transfer_dir:
* The direction the pipe is in. This is not
* used for control pipes.
* @interval: For ISOCHRONOUS and INTERRUPT transfers,
* this is how often the transfer is scheduled
* for. All other transfers should specify
* zero. The units are in frames (8000/sec at
* high speed, 1000/sec for full speed).
* @multi_count:
* For high speed devices, this is the maximum
 *                 allowed number of packets per microframe.
* Specify zero for non high speed devices. This
* value comes from the standard endpoint descriptor
* field wMaxPacketSize bits <12:11>.
* @hub_device_addr:
* Hub device address this device is connected
* to. Devices connected directly to Octeon
* use zero. This is only used when the device
* is full/low speed behind a high speed hub.
* The address will be of the high speed hub,
 *                 not any full speed hubs after it.
* @hub_port: Which port on the hub the device is
* connected. Use zero for devices connected
* directly to Octeon. Like hub_device_addr,
* this is only used for full/low speed
* devices behind a high speed hub.
*
* Returns: A non-NULL value is a pipe. NULL means an error.
*/
static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
int device_addr,
int endpoint_num,
enum cvmx_usb_speed
device_speed,
int max_packet,
enum cvmx_usb_transfer
transfer_type,
enum cvmx_usb_direction
transfer_dir,
int interval, int multi_count,
int hub_device_addr,
int hub_port)
{
struct cvmx_usb_pipe *pipe;
pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
if (!pipe)
return NULL;
if ((device_speed == CVMX_USB_SPEED_HIGH) &&
(transfer_dir == CVMX_USB_DIRECTION_OUT) &&
(transfer_type == CVMX_USB_TRANSFER_BULK))
pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
pipe->device_addr = device_addr;
pipe->endpoint_num = endpoint_num;
pipe->device_speed = device_speed;
pipe->max_packet = max_packet;
pipe->transfer_type = transfer_type;
pipe->transfer_dir = transfer_dir;
INIT_LIST_HEAD(&pipe->transactions);
/*
* All pipes use interval to rate limit NAK processing. Force an
* interval if one wasn't supplied
*/
if (!interval)
interval = 1;
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
pipe->interval = interval * 8;
		/*
		 * Force start splits to be scheduled on uFrame 0; rounding
		 * the frame number up to a multiple of 8 below does that.
		 */
pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
pipe->interval;
} else {
pipe->interval = interval;
pipe->next_tx_frame = usb->frame_number + pipe->interval;
}
pipe->multi_count = multi_count;
pipe->hub_device_addr = hub_device_addr;
pipe->hub_port = hub_port;
pipe->pid_toggle = 0;
pipe->split_sc_frame = -1;
list_add_tail(&pipe->node, &usb->idle_pipes);
/*
* We don't need to tell the hardware about this pipe yet since
* it doesn't have any submitted requests
*/
return pipe;
}
/**
* Poll the RX FIFOs and remove data as needed. This function is only used
* in non DMA mode. It is very important that this function be called quickly
* enough to prevent FIFO overflow.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
{
union cvmx_usbcx_grxstsph rx_status;
int channel;
int bytes;
u64 address;
u32 *ptr;
rx_status.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GRXSTSPH(usb->index));
/* Only read data if IN data is there */
if (rx_status.s.pktsts != 2)
return;
/* Check if no data is available */
if (!rx_status.s.bcnt)
return;
channel = rx_status.s.chnum;
bytes = rx_status.s.bcnt;
if (!bytes)
return;
/* Get where the DMA engine would have written this data */
address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
channel * 8);
ptr = cvmx_phys_to_ptr(address);
cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
address + bytes);
/* Loop writing the FIFO data for this packet into memory */
while (bytes > 0) {
*ptr++ = cvmx_usb_read_csr32(usb,
USB_FIFO_ADDRESS(channel, usb->index));
bytes -= 4;
}
CVMX_SYNCW;
}
/**
* Fill the TX hardware fifo with data out of the software
* fifos
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @fifo: Software fifo to use
* @available: Amount of space in the hardware fifo
*
* Returns: Non zero if the hardware fifo was too small and needs
* to be serviced again.
*/
static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
struct cvmx_usb_tx_fifo *fifo, int available)
{
/*
	 * We're done either when there isn't any more space or the software
	 * FIFO is empty
*/
while (available && (fifo->head != fifo->tail)) {
int i = fifo->tail;
const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
usb->index) ^ 4;
int words = available;
/* Limit the amount of data to what the SW fifo has */
if (fifo->entry[i].size <= available) {
words = fifo->entry[i].size;
fifo->tail++;
if (fifo->tail > MAX_CHANNELS)
fifo->tail = 0;
}
/* Update the next locations and counts */
available -= words;
fifo->entry[i].address += words * 4;
fifo->entry[i].size -= words;
/*
		 * Write the HW fifo data. The read after every three writes
		 * is due to an erratum on CN3XXX chips
*/
while (words > 3) {
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
words -= 3;
}
cvmx_write64_uint32(csr_address, *ptr++);
if (--words) {
cvmx_write64_uint32(csr_address, *ptr++);
if (--words)
cvmx_write64_uint32(csr_address, *ptr++);
}
cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
}
return fifo->head != fifo->tail;
}
/**
* Check the hardware FIFOs and fill them as needed
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
{
if (usb->periodic.head != usb->periodic.tail) {
union cvmx_usbcx_hptxsts tx_status;
tx_status.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HPTXSTS(usb->index));
if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
tx_status.s.ptxfspcavail))
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, ptxfempmsk, 1);
else
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, ptxfempmsk, 0);
}
if (usb->nonperiodic.head != usb->nonperiodic.tail) {
union cvmx_usbcx_gnptxsts tx_status;
tx_status.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GNPTXSTS(usb->index));
if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
tx_status.s.nptxfspcavail))
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, nptxfempmsk, 1);
else
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, nptxfempmsk, 0);
}
}
/**
* Fill the TX FIFO with an outgoing packet
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @channel: Channel number to get packet from
*/
static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
{
union cvmx_usbcx_hccharx hcchar;
union cvmx_usbcx_hcspltx usbc_hcsplt;
union cvmx_usbcx_hctsizx usbc_hctsiz;
struct cvmx_usb_tx_fifo *fifo;
/* We only need to fill data on outbound channels */
hcchar.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCCHARX(channel, usb->index));
if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
return;
/* OUT Splits only have data on the start and not the complete */
usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCSPLTX(channel, usb->index));
if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
return;
/*
* Find out how many bytes we need to fill and convert it into 32bit
* words.
*/
usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCTSIZX(channel, usb->index));
if (!usbc_hctsiz.s.xfersize)
return;
if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
(hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
fifo = &usb->periodic;
else
fifo = &usb->nonperiodic;
fifo->entry[fifo->head].channel = channel;
fifo->entry[fifo->head].address =
cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
channel * 8);
fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
fifo->head++;
if (fifo->head > MAX_CHANNELS)
fifo->head = 0;
cvmx_usb_poll_tx_fifo(usb);
}
/**
* Perform channel specific setup for Control transactions. All
* the generic stuff will already have been done in cvmx_usb_start_channel().
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @channel: Channel to setup
* @pipe: Pipe for control transaction
*/
static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
int channel,
struct cvmx_usb_pipe *pipe)
{
struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction =
list_first_entry(&pipe->transactions, typeof(*transaction),
node);
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
int bytes_to_transfer = transaction->buffer_length -
transaction->actual_bytes;
int packets_to_transfer;
union cvmx_usbcx_hctsizx usbc_hctsiz;
usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCTSIZX(channel, usb->index));
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
break;
case CVMX_USB_STAGE_SETUP:
usbc_hctsiz.s.pid = 3; /* Setup */
bytes_to_transfer = sizeof(*header);
/* All Control operations start with a setup going OUT */
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
CVMX_USB_DIRECTION_OUT);
/*
		 * The setup stage sends the control header instead of the
		 * buffer data. The buffer data will be used in the next stage.
*/
cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
channel * 8,
transaction->control_header);
break;
case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
usbc_hctsiz.s.pid = 3; /* Setup */
bytes_to_transfer = 0;
/* All Control operations start with a setup going OUT */
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
CVMX_USB_DIRECTION_OUT);
USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
cvmx_usbcx_hcspltx, compsplt, 1);
break;
case CVMX_USB_STAGE_DATA:
usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
if (header->bRequestType & USB_DIR_IN)
bytes_to_transfer = 0;
else if (bytes_to_transfer > pipe->max_packet)
bytes_to_transfer = pipe->max_packet;
}
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
((header->bRequestType & USB_DIR_IN) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT));
break;
case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
if (!(header->bRequestType & USB_DIR_IN))
bytes_to_transfer = 0;
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
((header->bRequestType & USB_DIR_IN) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT));
USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
cvmx_usbcx_hcspltx, compsplt, 1);
break;
case CVMX_USB_STAGE_STATUS:
usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
((header->bRequestType & USB_DIR_IN) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
break;
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, epdir,
((header->bRequestType & USB_DIR_IN) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
cvmx_usbcx_hcspltx, compsplt, 1);
break;
}
/*
* Make sure the transfer never exceeds the byte limit of the hardware.
* Further bytes will be sent as continued transactions
*/
if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
		/* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
bytes_to_transfer *= pipe->max_packet;
}
/*
* Calculate the number of packets to transfer. If the length is zero
* we still need to transfer one packet
*/
packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
pipe->max_packet);
if (packets_to_transfer == 0) {
packets_to_transfer = 1;
} else if ((packets_to_transfer > 1) &&
(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must be
* restarted between every packet for IN transactions, so there
* is no reason to do multiple packets in a row
*/
packets_to_transfer = 1;
bytes_to_transfer = packets_to_transfer * pipe->max_packet;
} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
/*
		 * Limit the number of packets and data transferred to what the
* hardware can handle
*/
packets_to_transfer = MAX_TRANSFER_PACKETS;
bytes_to_transfer = packets_to_transfer * pipe->max_packet;
}
usbc_hctsiz.s.xfersize = bytes_to_transfer;
usbc_hctsiz.s.pktcnt = packets_to_transfer;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
usbc_hctsiz.u32);
}
/**
* Start a channel to perform the pipe's head transaction
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @channel: Channel to setup
* @pipe: Pipe to start
*/
static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
struct cvmx_usb_pipe *pipe)
{
struct cvmx_usb_transaction *transaction =
list_first_entry(&pipe->transactions, typeof(*transaction),
node);
/* Make sure all writes to the DMA region get flushed */
CVMX_SYNCW;
/* Attach the channel to the pipe */
usb->pipe_for_channel[channel] = pipe;
pipe->channel = channel;
pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
/* Mark this channel as in use */
usb->idle_hardware_channels &= ~(1 << channel);
/* Enable the channel interrupt bits */
{
union cvmx_usbcx_hcintx usbc_hcint;
union cvmx_usbcx_hcintmskx usbc_hcintmsk;
union cvmx_usbcx_haintmsk usbc_haintmsk;
/* Clear all channel status bits */
usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCINTX(channel, usb->index));
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCINTX(channel, usb->index),
usbc_hcint.u32);
usbc_hcintmsk.u32 = 0;
usbc_hcintmsk.s.chhltdmsk = 1;
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
/*
* Channels need these extra interrupts when we aren't
* in DMA mode.
*/
usbc_hcintmsk.s.datatglerrmsk = 1;
usbc_hcintmsk.s.frmovrunmsk = 1;
usbc_hcintmsk.s.bblerrmsk = 1;
usbc_hcintmsk.s.xacterrmsk = 1;
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
/*
* Splits don't generate xfercompl, so we need
* ACK and NYET.
*/
usbc_hcintmsk.s.nyetmsk = 1;
usbc_hcintmsk.s.ackmsk = 1;
}
usbc_hcintmsk.s.nakmsk = 1;
usbc_hcintmsk.s.stallmsk = 1;
usbc_hcintmsk.s.xfercomplmsk = 1;
}
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCINTMSKX(channel, usb->index),
usbc_hcintmsk.u32);
/* Enable the channel interrupt to propagate */
usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HAINTMSK(usb->index));
usbc_haintmsk.s.haintmsk |= 1 << channel;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
usbc_haintmsk.u32);
}
/* Setup the location the DMA engine uses. */
{
u64 reg;
u64 dma_address = transaction->buffer +
transaction->actual_bytes;
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
dma_address = transaction->buffer +
transaction->iso_packets[0].offset +
transaction->actual_bytes;
if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
else
reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
cvmx_write64_uint64(reg + channel * 8, dma_address);
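		/*
		 * The per-channel DMA address registers are spaced 8 bytes
		 * apart, so channel N's register is the channel 0 address
		 * plus N * 8.
		 */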
}
/* Setup both the size of the transfer and the SPLIT characteristics */
{
union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
int packets_to_transfer;
int bytes_to_transfer = transaction->buffer_length -
transaction->actual_bytes;
/*
* ISOCHRONOUS transactions store each individual transfer size
* in the packet structure, not the global buffer_length
*/
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
bytes_to_transfer =
transaction->iso_packets[0].length -
transaction->actual_bytes;
/*
		 * We need to do split transactions when we are talking to
		 * non-high-speed devices that are behind a high speed hub
*/
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
/*
* On the start split phase (stage is even) record the
* frame number we will need to send the split complete.
			 * We only store the lower seven bits since the frame
			 * comparison is done modulo 128 frames
*/
if ((transaction->stage & 1) == 0) {
if (transaction->type == CVMX_USB_TRANSFER_BULK)
pipe->split_sc_frame =
(usb->frame_number + 1) & 0x7f;
else
pipe->split_sc_frame =
(usb->frame_number + 2) & 0x7f;
} else {
pipe->split_sc_frame = -1;
}
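		/*
		 * For example, an interrupt start split issued in frame
		 * 0x123 stores (0x123 + 2) & 0x7f = 0x25; the scheduler in
		 * cvmx_usb_find_ready_pipe() then only issues the complete
		 * split while the current frame is within 0x40 frames of
		 * that value, modulo 128.
		 */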
usbc_hcsplt.s.spltena = 1;
usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
usbc_hcsplt.s.prtaddr = pipe->hub_port;
usbc_hcsplt.s.compsplt = (transaction->stage ==
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
/*
* SPLIT transactions can only ever transmit one data
* packet so limit the transfer size to the max packet
* size
*/
if (bytes_to_transfer > pipe->max_packet)
bytes_to_transfer = pipe->max_packet;
/*
			 * ISOCHRONOUS OUT splits are unique in that they limit
			 * data transfers to 188-byte chunks representing the
			 * beginning, middle, or end of the data, or the entire
			 * payload at once
*/
if (!usbc_hcsplt.s.compsplt &&
(pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
(pipe->transfer_type ==
CVMX_USB_TRANSFER_ISOCHRONOUS)) {
/*
* Clear the split complete frame number as
* there isn't going to be a split complete
*/
pipe->split_sc_frame = -1;
/*
* See if we've started this transfer and sent
* data
*/
if (transaction->actual_bytes == 0) {
/*
* Nothing sent yet, this is either a
* begin or the entire payload
*/
if (bytes_to_transfer <= 188)
/* Entire payload in one go */
usbc_hcsplt.s.xactpos = 3;
else
/* First part of payload */
usbc_hcsplt.s.xactpos = 2;
} else {
/*
* Continuing the previous data, we must
* either be in the middle or at the end
*/
if (bytes_to_transfer <= 188)
/* End of payload */
usbc_hcsplt.s.xactpos = 1;
else
/* Middle of payload */
usbc_hcsplt.s.xactpos = 0;
}
/*
* Again, the transfer size is limited to 188
* bytes
*/
if (bytes_to_transfer > 188)
bytes_to_transfer = 188;
}
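			/*
			 * For example, a 500-byte ISO OUT payload goes out as
			 * a begin (xactpos = 2, 188 bytes), a middle
			 * (xactpos = 0, 188 bytes) and an end (xactpos = 1,
			 * 124 bytes) transaction.
			 */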
}
/*
* Make sure the transfer never exceeds the byte limit of the
* hardware. Further bytes will be sent as continued
* transactions
*/
if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
/*
			 * Round MAX_TRANSFER_BYTES down to a multiple of the
			 * packet size
*/
bytes_to_transfer = MAX_TRANSFER_BYTES /
pipe->max_packet;
bytes_to_transfer *= pipe->max_packet;
}
/*
* Calculate the number of packets to transfer. If the length is
* zero we still need to transfer one packet
*/
packets_to_transfer =
DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
if (packets_to_transfer == 0) {
packets_to_transfer = 1;
} else if ((packets_to_transfer > 1) &&
(usb->init_flags &
CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must
* be restarted between every packet for IN
* transactions, so there is no reason to do multiple
* packets in a row
*/
packets_to_transfer = 1;
bytes_to_transfer = packets_to_transfer *
pipe->max_packet;
} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
/*
			 * Limit the number of packets and the amount of data
			 * transferred to what the hardware can handle
*/
packets_to_transfer = MAX_TRANSFER_PACKETS;
bytes_to_transfer = packets_to_transfer *
pipe->max_packet;
}
usbc_hctsiz.s.xfersize = bytes_to_transfer;
usbc_hctsiz.s.pktcnt = packets_to_transfer;
/* Update the DATA0/DATA1 toggle */
usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
/*
* High speed pipes may need a hardware ping before they start
*/
if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
usbc_hctsiz.s.dopng = 1;
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCSPLTX(channel, usb->index),
usbc_hcsplt.u32);
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCTSIZX(channel, usb->index),
usbc_hctsiz.u32);
}
/* Setup the Host Channel Characteristics Register */
{
union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
/*
		 * Set the startframe odd/even properly. This is only used for
		 * periodic endpoints
*/
usbc_hcchar.s.oddfrm = usb->frame_number & 1;
/*
* Set the number of back to back packets allowed by this
* endpoint. Split transactions interpret "ec" as the number of
		 * immediate retries on failure. These retries happen too
* quickly, so we disable these entirely for splits
*/
if (cvmx_usb_pipe_needs_split(usb, pipe))
usbc_hcchar.s.ec = 1;
else if (pipe->multi_count < 1)
usbc_hcchar.s.ec = 1;
else if (pipe->multi_count > 3)
usbc_hcchar.s.ec = 3;
else
usbc_hcchar.s.ec = pipe->multi_count;
/* Set the rest of the endpoint specific settings */
usbc_hcchar.s.devaddr = pipe->device_addr;
usbc_hcchar.s.eptype = transaction->type;
usbc_hcchar.s.lspddev =
(pipe->device_speed == CVMX_USB_SPEED_LOW);
usbc_hcchar.s.epdir = pipe->transfer_dir;
usbc_hcchar.s.epnum = pipe->endpoint_num;
usbc_hcchar.s.mps = pipe->max_packet;
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCCHARX(channel, usb->index),
usbc_hcchar.u32);
}
/* Do transaction type specific fixups as needed */
switch (transaction->type) {
case CVMX_USB_TRANSFER_CONTROL:
cvmx_usb_start_channel_control(usb, channel, pipe);
break;
case CVMX_USB_TRANSFER_BULK:
case CVMX_USB_TRANSFER_INTERRUPT:
break;
case CVMX_USB_TRANSFER_ISOCHRONOUS:
if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
/*
* ISO transactions require different PIDs depending on
* direction and how many packets are needed
*/
if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
if (pipe->multi_count < 2) /* Need DATA0 */
USB_SET_FIELD32(
CVMX_USBCX_HCTSIZX(channel,
usb->index),
cvmx_usbcx_hctsizx, pid, 0);
else /* Need MDATA */
USB_SET_FIELD32(
CVMX_USBCX_HCTSIZX(channel,
usb->index),
cvmx_usbcx_hctsizx, pid, 3);
}
}
break;
}
{
union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCTSIZX(channel,
usb->index))
};
transaction->xfersize = usbc_hctsiz.s.xfersize;
transaction->pktcnt = usbc_hctsiz.s.pktcnt;
}
/* Remember when we start a split transaction */
if (cvmx_usb_pipe_needs_split(usb, pipe))
usb->active_split = transaction;
USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
cvmx_usbcx_hccharx, chena, 1);
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
cvmx_usb_fill_tx_fifo(usb, channel);
}
/**
 * cvmx_usb_find_ready_pipe - find a pipe ready to be scheduled to hardware.
* @usb: USB device state populated by cvmx_usb_initialize().
* @xfer_type: Transfer type
*
* Returns: Pipe or NULL if none are ready
*/
static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
enum cvmx_usb_transfer xfer_type)
{
struct list_head *list = usb->active_pipes + xfer_type;
u64 current_frame = usb->frame_number;
struct cvmx_usb_pipe *pipe;
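	/*
	 * A pipe is ready when it is not already scheduled on a channel, its
	 * head transaction is due in this frame, any pending complete split
	 * falls inside the 64-frame window (frame numbers wrap modulo 128),
	 * and it doesn't conflict with another split in progress.
	 */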
list_for_each_entry(pipe, list, node) {
struct cvmx_usb_transaction *t =
list_first_entry(&pipe->transactions, typeof(*t),
node);
if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
(pipe->next_tx_frame <= current_frame) &&
((pipe->split_sc_frame == -1) ||
((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
0x40)) &&
(!usb->active_split || (usb->active_split == t))) {
prefetch(t);
return pipe;
}
}
return NULL;
}
static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
int is_sof)
{
struct cvmx_usb_pipe *pipe;
/* Find a pipe needing service. */
if (is_sof) {
/*
* Only process periodic pipes on SOF interrupts. This way we
* are sure that the periodic data is sent in the beginning of
* the frame.
*/
pipe = cvmx_usb_find_ready_pipe(usb,
CVMX_USB_TRANSFER_ISOCHRONOUS);
if (pipe)
return pipe;
pipe = cvmx_usb_find_ready_pipe(usb,
CVMX_USB_TRANSFER_INTERRUPT);
if (pipe)
return pipe;
}
pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
if (pipe)
return pipe;
return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
}
/**
 * cvmx_usb_schedule - called whenever a pipe might need to be scheduled
 * to the hardware.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @is_sof: True if this schedule was called on a SOF interrupt.
*/
static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
{
int channel;
struct cvmx_usb_pipe *pipe;
int need_sof;
enum cvmx_usb_transfer ttype;
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
/*
* Without DMA we need to be careful to not schedule something
* at the end of a frame and cause an overrun.
*/
union cvmx_usbcx_hfnum hfnum = {
.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HFNUM(usb->index))
};
union cvmx_usbcx_hfir hfir = {
.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HFIR(usb->index))
};
if (hfnum.s.frrem < hfir.s.frint / 4)
goto done;
}
while (usb->idle_hardware_channels) {
/* Find an idle channel */
channel = __fls(usb->idle_hardware_channels);
if (unlikely(channel > 7))
break;
pipe = cvmx_usb_next_pipe(usb, is_sof);
if (!pipe)
break;
cvmx_usb_start_channel(usb, channel, pipe);
}
done:
/*
* Only enable SOF interrupts when we have transactions pending in the
* future that might need to be scheduled
*/
need_sof = 0;
for (ttype = CVMX_USB_TRANSFER_CONTROL;
ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
if (pipe->next_tx_frame > usb->frame_number) {
need_sof = 1;
break;
}
}
}
USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
cvmx_usbcx_gintmsk, sofmsk, need_sof);
}
static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
enum cvmx_usb_status status,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction
*transaction,
int bytes_transferred,
struct urb *urb)
{
struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
if (likely(status == CVMX_USB_STATUS_OK))
urb->actual_length = bytes_transferred;
else
urb->actual_length = 0;
urb->hcpriv = NULL;
/* For Isochronous transactions we need to update the URB packet status
* list from data in our private copy
*/
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
int i;
/*
* The pointer to the private list is stored in the setup_packet
* field.
*/
struct cvmx_usb_iso_packet *iso_packet =
(struct cvmx_usb_iso_packet *)urb->setup_packet;
/* Recalculate the transfer size by adding up each packet */
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length =
iso_packet[i].length;
urb->actual_length +=
urb->iso_frame_desc[i].actual_length;
} else {
dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
i, urb->number_of_packets,
iso_packet[i].status, pipe,
transaction, iso_packet[i].length);
urb->iso_frame_desc[i].status = -EREMOTEIO;
}
}
/* Free the private list now that we don't need it anymore */
kfree(iso_packet);
urb->setup_packet = NULL;
}
switch (status) {
case CVMX_USB_STATUS_OK:
urb->status = 0;
break;
case CVMX_USB_STATUS_CANCEL:
if (urb->status == 0)
urb->status = -ENOENT;
break;
case CVMX_USB_STATUS_STALL:
dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
case CVMX_USB_STATUS_BABBLEERR:
dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
case CVMX_USB_STATUS_SHORT:
dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EREMOTEIO;
break;
case CVMX_USB_STATUS_ERROR:
case CVMX_USB_STATUS_XACTERR:
case CVMX_USB_STATUS_DATATGLERR:
case CVMX_USB_STATUS_FRAMEERR:
dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
status, pipe, transaction, bytes_transferred);
urb->status = -EPROTO;
break;
}
usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
spin_unlock(&usb->lock);
usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
spin_lock(&usb->lock);
}
/**
 * cvmx_usb_complete - signal the completion of a transaction and free it.
 * The transaction will be removed from the pipe transaction list.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Pipe the transaction is on
* @transaction:
* Transaction that completed
* @complete_code:
* Completion code
*/
static void cvmx_usb_complete(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction,
enum cvmx_usb_status complete_code)
{
/* If this was a split then clear our split in progress marker */
if (usb->active_split == transaction)
usb->active_split = NULL;
/*
* Isochronous transactions need extra processing as they might not be
* done after a single data transfer
*/
if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
/* Update the number of bytes transferred in this ISO packet */
transaction->iso_packets[0].length = transaction->actual_bytes;
transaction->iso_packets[0].status = complete_code;
/*
* If there are more ISOs pending and we succeeded, schedule the
* next one
*/
if ((transaction->iso_number_packets > 1) &&
(complete_code == CVMX_USB_STATUS_OK)) {
/* No bytes transferred for this packet as of yet */
transaction->actual_bytes = 0;
/* One less ISO waiting to transfer */
transaction->iso_number_packets--;
/* Increment to the next location in our packet array */
transaction->iso_packets++;
transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
return;
}
}
/* Remove the transaction from the pipe list */
list_del(&transaction->node);
if (list_empty(&pipe->transactions))
list_move_tail(&pipe->node, &usb->idle_pipes);
octeon_usb_urb_complete_callback(usb, complete_code, pipe,
transaction,
transaction->actual_bytes,
transaction->urb);
kfree(transaction);
}
/**
 * cvmx_usb_submit_transaction - submit a USB transaction to a pipe.
 * Called for all types of transactions.
*
 * @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Which pipe to submit to.
* @type: Transaction type
* @buffer: User buffer for the transaction
* @buffer_length:
* User buffer's length in bytes
* @control_header:
* For control transactions, the 8 byte standard header
* @iso_start_frame:
* For ISO transactions, the start frame
 * @iso_number_packets:
 *	For ISO, the number of packets in the transaction.
* @iso_packets:
* A description of each ISO packet
* @urb: URB for the callback
*
* Returns: Transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
enum cvmx_usb_transfer type,
u64 buffer,
int buffer_length,
u64 control_header,
int iso_start_frame,
int iso_number_packets,
struct cvmx_usb_iso_packet *iso_packets,
struct urb *urb)
{
struct cvmx_usb_transaction *transaction;
if (unlikely(pipe->transfer_type != type))
return NULL;
transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
if (unlikely(!transaction))
return NULL;
transaction->type = type;
transaction->buffer = buffer;
transaction->buffer_length = buffer_length;
transaction->control_header = control_header;
/* FIXME: This is not used, implement it. */
transaction->iso_start_frame = iso_start_frame;
transaction->iso_number_packets = iso_number_packets;
transaction->iso_packets = iso_packets;
transaction->urb = urb;
if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
transaction->stage = CVMX_USB_STAGE_SETUP;
else
transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
if (!list_empty(&pipe->transactions)) {
list_add_tail(&transaction->node, &pipe->transactions);
} else {
list_add_tail(&transaction->node, &pipe->transactions);
list_move_tail(&pipe->node,
&usb->active_pipes[pipe->transfer_type]);
/*
* We may need to schedule the pipe if this was the head of the
* pipe.
*/
cvmx_usb_schedule(usb, 0);
}
return transaction;
}
/**
 * cvmx_usb_submit_bulk - submit a USB bulk transfer to a pipe.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Handle to the pipe for the transfer.
* @urb: URB.
*
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
urb->transfer_dma,
urb->transfer_buffer_length,
0, /* control_header */
0, /* iso_start_frame */
0, /* iso_number_packets */
NULL, /* iso_packets */
urb);
}
/**
 * cvmx_usb_submit_interrupt - submit a USB interrupt transfer to a pipe.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Handle to the pipe for the transfer.
* @urb: URB returned when the callback is called.
*
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
return cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_INTERRUPT,
urb->transfer_dma,
urb->transfer_buffer_length,
0, /* control_header */
0, /* iso_start_frame */
0, /* iso_number_packets */
NULL, /* iso_packets */
urb);
}
/**
 * cvmx_usb_submit_control - submit a USB control transfer to a pipe.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Handle to the pipe for the transfer.
* @urb: URB.
*
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_control(
struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
int buffer_length = urb->transfer_buffer_length;
u64 control_header = urb->setup_dma;
struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
if ((header->bRequestType & USB_DIR_IN) == 0)
buffer_length = le16_to_cpu(header->wLength);
return cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_CONTROL,
urb->transfer_dma, buffer_length,
control_header,
0, /* iso_start_frame */
0, /* iso_number_packets */
NULL, /* iso_packets */
urb);
}
/**
 * cvmx_usb_submit_isochronous - submit a USB isochronous transfer to a pipe.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Handle to the pipe for the transfer.
* @urb: URB returned when the callback is called.
*
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
struct cvmx_usb_iso_packet *packets;
packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
return cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_ISOCHRONOUS,
urb->transfer_dma,
urb->transfer_buffer_length,
0, /* control_header */
urb->start_frame,
urb->number_of_packets,
packets, urb);
}
/**
 * cvmx_usb_cancel - cancel one outstanding request in a pipe.
 *
 * Canceling a request
* can fail if the transaction has already completed before cancel
* is called. Even after a successful cancel call, it may take
* a frame or two for the cvmx_usb_poll() function to call the
* associated callback.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Pipe to cancel requests in.
* @transaction: Transaction to cancel, returned by the submit function.
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_cancel(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction)
{
/*
	 * If the transaction is at the head of the queue and has been
	 * scheduled to hardware, we need to treat it specially
*/
if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
union cvmx_usbcx_hccharx usbc_hcchar;
usb->pipe_for_channel[pipe->channel] = NULL;
pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
CVMX_SYNCW;
usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCCHARX(pipe->channel,
usb->index));
/*
* If the channel isn't enabled then the transaction already
* completed.
*/
if (usbc_hcchar.s.chena) {
usbc_hcchar.s.chdis = 1;
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCCHARX(pipe->channel,
usb->index),
usbc_hcchar.u32);
}
}
cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
return 0;
}
/**
 * cvmx_usb_cancel_all - cancel all outstanding requests in a pipe.
 * Logically all this does is call cvmx_usb_cancel() in a loop.
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Pipe to cancel requests in.
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
struct cvmx_usb_transaction *transaction, *next;
/* Simply loop through and attempt to cancel each transaction */
list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
int result = cvmx_usb_cancel(usb, pipe, transaction);
if (unlikely(result != 0))
return result;
}
return 0;
}
/**
 * cvmx_usb_close_pipe - close a pipe created with cvmx_usb_open_pipe().
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @pipe: Pipe to close.
*
* Returns: 0 or a negative error code. EBUSY is returned if the pipe has
* outstanding transfers.
*/
static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
/* Fail if the pipe has pending transactions */
if (!list_empty(&pipe->transactions))
return -EBUSY;
list_del(&pipe->node);
kfree(pipe);
return 0;
}
/**
 * cvmx_usb_get_frame_number - get the current USB protocol level frame
 * number. The frame number is always in the range of 0-0x7ff.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*
* Returns: USB frame number
*/
static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
{
union cvmx_usbcx_hfnum usbc_hfnum;
usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
return usbc_hfnum.s.frnum;
}
static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction,
union cvmx_usbcx_hccharx usbc_hcchar,
int buffer_space_left,
int bytes_in_last_packet)
{
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
/* This should be impossible */
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_ERROR);
break;
case CVMX_USB_STAGE_SETUP:
pipe->pid_toggle = 1;
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
transaction->stage =
CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
} else {
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
transaction->stage = CVMX_USB_STAGE_DATA;
else
transaction->stage = CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
{
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
transaction->stage = CVMX_USB_STAGE_DATA;
else
transaction->stage = CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA:
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
/*
			 * For control OUT data stages that use
			 * splits, the hardware doesn't appear to
			 * count transferred data. Here we manually
			 * update the data transferred
*/
if (!usbc_hcchar.s.epdir) {
if (buffer_space_left < pipe->max_packet)
transaction->actual_bytes +=
buffer_space_left;
else
transaction->actual_bytes +=
pipe->max_packet;
}
} else if ((buffer_space_left == 0) ||
(bytes_in_last_packet < pipe->max_packet)) {
pipe->pid_toggle = 1;
transaction->stage = CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
if ((buffer_space_left == 0) ||
(bytes_in_last_packet < pipe->max_packet)) {
pipe->pid_toggle = 1;
transaction->stage = CVMX_USB_STAGE_STATUS;
} else {
transaction->stage = CVMX_USB_STAGE_DATA;
}
break;
case CVMX_USB_STAGE_STATUS:
if (cvmx_usb_pipe_needs_split(usb, pipe))
transaction->stage =
CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
else
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
break;
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
break;
}
}
static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction,
union cvmx_usbcx_hcintx usbc_hcint,
int buffer_space_left,
int bytes_in_last_packet)
{
/*
* The only time a bulk transfer isn't complete when it finishes with
* an ACK is during a split transaction. For splits we need to continue
* the transfer if more data is needed.
*/
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
transaction->stage =
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
else if (buffer_space_left &&
(bytes_in_last_packet == pipe->max_packet))
transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
else
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
} else {
if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
(pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
(usbc_hcint.s.nak))
pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
if (!buffer_space_left ||
(bytes_in_last_packet < pipe->max_packet))
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
}
}
static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction,
int buffer_space_left,
int bytes_in_last_packet)
{
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
transaction->stage =
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
} else if (buffer_space_left &&
(bytes_in_last_packet == pipe->max_packet)) {
transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
} else {
pipe->next_tx_frame += pipe->interval;
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
}
} else if (!buffer_space_left ||
(bytes_in_last_packet < pipe->max_packet)) {
pipe->next_tx_frame += pipe->interval;
cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
}
}
static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction,
int buffer_space_left,
int bytes_in_last_packet,
int bytes_this_transfer)
{
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
/*
* ISOCHRONOUS OUT splits don't require a complete split stage.
* Instead they use a sequence of begin OUT splits to transfer
* the data 188 bytes at a time. Once the transfer is complete,
* the pipe sleeps until the next schedule interval.
*/
if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
/*
			 * If no space is left or this wasn't a full 188-byte
			 * chunk then this transfer is complete. Otherwise
			 * start it again to send the next 188 bytes
*/
if (!buffer_space_left || (bytes_this_transfer < 188)) {
pipe->next_tx_frame += pipe->interval;
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
}
return;
}
if (transaction->stage ==
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
/*
* We are in the incoming data phase. Keep getting data
* until we run out of space or get a small packet
*/
if ((buffer_space_left == 0) ||
(bytes_in_last_packet < pipe->max_packet)) {
pipe->next_tx_frame += pipe->interval;
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_OK);
}
} else {
transaction->stage =
CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
}
} else {
pipe->next_tx_frame += pipe->interval;
cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
}
}
/**
 * cvmx_usb_poll_channel - poll a channel for status
*
* @usb: USB device
* @channel: Channel to poll
*
* Returns: Zero on success
*/
static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
{
struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
union cvmx_usbcx_hcintx usbc_hcint;
union cvmx_usbcx_hctsizx usbc_hctsiz;
union cvmx_usbcx_hccharx usbc_hcchar;
struct cvmx_usb_pipe *pipe;
struct cvmx_usb_transaction *transaction;
int bytes_this_transfer;
int bytes_in_last_packet;
int packets_processed;
int buffer_space_left;
/* Read the interrupt status bits for the channel */
usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCINTX(channel, usb->index));
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCCHARX(channel,
usb->index));
if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
/*
* There seems to be a bug in CN31XX which can cause
* interrupt IN transfers to get stuck until we do a
* write of HCCHARX without changing things
*/
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCCHARX(channel,
usb->index),
usbc_hcchar.u32);
return 0;
}
/*
* In non DMA mode the channels don't halt themselves. We need
* to manually disable channels that are left running
*/
if (!usbc_hcint.s.chhltd) {
if (usbc_hcchar.s.chena) {
union cvmx_usbcx_hcintmskx hcintmsk;
/* Disable all interrupts except CHHLTD */
hcintmsk.u32 = 0;
hcintmsk.s.chhltdmsk = 1;
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCINTMSKX(channel, usb->index),
hcintmsk.u32);
usbc_hcchar.s.chdis = 1;
cvmx_usb_write_csr32(usb,
CVMX_USBCX_HCCHARX(channel, usb->index),
usbc_hcchar.u32);
return 0;
} else if (usbc_hcint.s.xfercompl) {
/*
* Successful IN/OUT with transfer complete.
* Channel halt isn't needed.
*/
} else {
dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
usb->index, channel);
return 0;
}
}
} else {
/*
		 * There are no interrupts that we need to process while the
* channel is still running
*/
if (!usbc_hcint.s.chhltd)
return 0;
}
/* Disable the channel interrupts now that it is done */
cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
usb->idle_hardware_channels |= (1 << channel);
/* Make sure this channel is tied to a valid pipe */
pipe = usb->pipe_for_channel[channel];
prefetch(pipe);
if (!pipe)
return 0;
transaction = list_first_entry(&pipe->transactions,
typeof(*transaction),
node);
prefetch(transaction);
/*
* Disconnect this pipe from the HW channel. Later the schedule
* function will figure out which pipe needs to go
*/
usb->pipe_for_channel[channel] = NULL;
pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
/*
* Read the channel config info so we can figure out how much data
* transferred
*/
usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCCHARX(channel, usb->index));
usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HCTSIZX(channel, usb->index));
/*
	 * Calculating the number of bytes successfully transferred depends on
	 * the transfer direction
*/
packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
if (usbc_hcchar.s.epdir) {
/*
* IN transactions are easy. For every byte received the
* hardware decrements xfersize. All we need to do is subtract
* the current value of xfersize from its starting value and we
* know how many bytes were written to the buffer
*/
bytes_this_transfer = transaction->xfersize -
usbc_hctsiz.s.xfersize;
} else {
/*
	 * OUT transactions don't decrement xfersize. Instead pktcnt is
	 * decremented on every successful packet send. The hardware
	 * does this when it receives an ACK or NYET. If it doesn't
* receive one of these responses pktcnt doesn't change
*/
bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
/*
* The last packet may not be a full transfer if we didn't have
* enough data
*/
if (bytes_this_transfer > transaction->xfersize)
bytes_this_transfer = transaction->xfersize;
}
/* Figure out how many bytes were in the last packet of the transfer */
if (packets_processed)
bytes_in_last_packet = bytes_this_transfer -
(packets_processed - 1) * usbc_hcchar.s.mps;
else
bytes_in_last_packet = bytes_this_transfer;
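	/*
	 * For example, with mps = 64 and 150 bytes moved across three
	 * packets, the final packet carried 150 - 2 * 64 = 22 bytes.
	 */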
/*
* As a special case, setup transactions output the setup header, not
* the user's data. For this reason we don't count setup data as bytes
* transferred
*/
if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
(transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
bytes_this_transfer = 0;
/*
* Add the bytes transferred to the running total. It is important that
* bytes_this_transfer doesn't count any data that needs to be
* retransmitted
*/
transaction->actual_bytes += bytes_this_transfer;
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
buffer_space_left = transaction->iso_packets[0].length -
transaction->actual_bytes;
else
buffer_space_left = transaction->buffer_length -
transaction->actual_bytes;
/*
* We need to remember the PID toggle state for the next transaction.
* The hardware already updated it for the next transaction
*/
pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
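	/*
	 * In the HCTSIZ PID encoding a value of 0 means DATA0, so any other
	 * value leaves the toggle set for DATA1 on the next transaction.
	 */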
/*
* For high speed bulk out, assume the next transaction will need to do
* a ping before proceeding. If this isn't true the ACK processing below
* will clear this flag
*/
if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
(pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
(pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
/*
* In some rare cases the DMA engine seems to get stuck and
		 * keeps subtracting the same byte count over and over again.
		 * In such a case we just need to fail every transaction.
*/
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_ERROR);
return 0;
}
if (usbc_hcint.s.stall) {
/*
* STALL as a response means this transaction cannot be
* completed because the device can't process transactions. Tell
* the user. Any data that was transferred will be counted on
* the actual bytes transferred
*/
pipe->pid_toggle = 0;
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_STALL);
} else if (usbc_hcint.s.xacterr) {
/*
* XactErr as a response means the device signaled
* something wrong with the transfer. For example, PID
* toggle errors cause these.
*/
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_XACTERR);
} else if (usbc_hcint.s.bblerr) {
/* Babble Error (BblErr) */
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_BABBLEERR);
} else if (usbc_hcint.s.datatglerr) {
/* Data toggle error */
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_DATATGLERR);
} else if (usbc_hcint.s.nyet) {
/*
* NYET as a response is only allowed in three cases: as a
* response to a ping, as a response to a split transaction, and
* as a response to a bulk out. The ping case is handled by
* hardware, so we only have splits and bulk out
*/
if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
transaction->retries = 0;
/*
* If there is more data to go then we need to try
* again. Otherwise this transaction is complete
*/
if ((buffer_space_left == 0) ||
(bytes_in_last_packet < pipe->max_packet))
cvmx_usb_complete(usb, pipe,
transaction,
CVMX_USB_STATUS_OK);
} else {
/*
* Split transactions retry the split complete 4 times
* then rewind to the start split and do the entire
* transactions again
*/
transaction->retries++;
if ((transaction->retries & 0x3) == 0) {
/*
* Rewind to the beginning of the transaction by
* anding off the split complete bit
*/
transaction->stage &= ~1;
pipe->split_sc_frame = -1;
}
}
} else if (usbc_hcint.s.ack) {
transaction->retries = 0;
/*
* The ACK bit can only be checked after the other error bits.
* This is because a multi packet transfer may succeed in a
* number of packets and then get a different response on the
* last packet. In this case both ACK and the last response bit
* will be set. If none of the other response bits is set, then
* the last packet must have been an ACK
*
* Since we got an ACK, we know we don't need to do a ping on
* this pipe
*/
pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
switch (transaction->type) {
case CVMX_USB_TRANSFER_CONTROL:
cvmx_usb_transfer_control(usb, pipe, transaction,
usbc_hcchar,
buffer_space_left,
bytes_in_last_packet);
break;
case CVMX_USB_TRANSFER_BULK:
cvmx_usb_transfer_bulk(usb, pipe, transaction,
usbc_hcint, buffer_space_left,
bytes_in_last_packet);
break;
case CVMX_USB_TRANSFER_INTERRUPT:
cvmx_usb_transfer_intr(usb, pipe, transaction,
buffer_space_left,
bytes_in_last_packet);
break;
case CVMX_USB_TRANSFER_ISOCHRONOUS:
cvmx_usb_transfer_isoc(usb, pipe, transaction,
buffer_space_left,
bytes_in_last_packet,
bytes_this_transfer);
break;
}
} else if (usbc_hcint.s.nak) {
/*
* If this was a split then clear our split in progress marker.
*/
if (usb->active_split == transaction)
usb->active_split = NULL;
/*
* NAK as a response means the device couldn't accept the
* transaction, but it should be retried in the future. Rewind
* to the beginning of the transaction by anding off the split
* complete bit. Retry in the next interval
*/
transaction->retries = 0;
transaction->stage &= ~1;
pipe->next_tx_frame += pipe->interval;
if (pipe->next_tx_frame < usb->frame_number)
pipe->next_tx_frame = usb->frame_number +
pipe->interval -
(usb->frame_number - pipe->next_tx_frame) %
pipe->interval;
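		/*
		 * For example, with frame_number = 1000, next_tx_frame = 990
		 * and interval = 8 this yields 1000 + 8 - (10 % 8) = 1006,
		 * the first frame after 1000 that keeps the pipe's original
		 * schedule phase.
		 */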
} else {
struct cvmx_usb_port_status port;
port = cvmx_usb_get_status(usb);
if (port.port_enabled) {
/* We'll retry the exact same transaction again */
transaction->retries++;
} else {
/*
			 * We get channel halted interrupts with no result bits
			 * set when the cable is unplugged
*/
cvmx_usb_complete(usb, pipe, transaction,
CVMX_USB_STATUS_ERROR);
}
}
return 0;
}
static void octeon_usb_port_callback(struct octeon_hcd *usb)
{
spin_unlock(&usb->lock);
usb_hcd_poll_rh_status(octeon_to_hcd(usb));
spin_lock(&usb->lock);
}
/**
 * cvmx_usb_poll - poll the USB block for status and call all needed callback
* handlers. This function is meant to be called in the interrupt
* handler for the USB controller. It can also be called
* periodically in a loop for non-interrupt based operation.
*
* @usb: USB device state populated by cvmx_usb_initialize().
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_poll(struct octeon_hcd *usb)
{
union cvmx_usbcx_hfnum usbc_hfnum;
union cvmx_usbcx_gintsts usbc_gintsts;
prefetch_range(usb, sizeof(*usb));
/* Update the frame counter */
usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
usb->frame_number += 0x4000;
usb->frame_number &= ~0x3fffull;
usb->frame_number |= usbc_hfnum.s.frnum;
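	/*
	 * For example, if the extended count was 0x4123 and the hardware now
	 * reports frnum 0x00a, the 14-bit counter must have wrapped, so the
	 * extended count becomes 0x800a.
	 */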
/* Read the pending interrupts */
usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GINTSTS(usb->index));
/* Clear the interrupts now that we know about them */
cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
usbc_gintsts.u32);
if (usbc_gintsts.s.rxflvl) {
/*
* RxFIFO Non-Empty (RxFLvl)
* Indicates that there is at least one packet pending to be
* read from the RxFIFO.
*
* In DMA mode this is handled by hardware
*/
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
cvmx_usb_poll_rx_fifo(usb);
}
if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
/* Fill the Tx FIFOs when not in DMA mode */
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
cvmx_usb_poll_tx_fifo(usb);
}
if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
union cvmx_usbcx_hprt usbc_hprt;
/*
* Disconnect Detected Interrupt (DisconnInt)
* Asserted when a device disconnect is detected.
*
* Host Port Interrupt (PrtInt)
* The core sets this bit to indicate a change in port status of
* one of the O2P USB core ports in Host mode. The application
* must read the Host Port Control and Status (HPRT) register to
* determine the exact event that caused this interrupt. The
* application must clear the appropriate status bit in the Host
* Port Control and Status register to clear this bit.
*
* Call the user's port callback
*/
octeon_usb_port_callback(usb);
/* Clear the port change bits */
usbc_hprt.u32 =
cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
usbc_hprt.s.prtena = 0;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
usbc_hprt.u32);
}
if (usbc_gintsts.s.hchint) {
/*
* Host Channels Interrupt (HChInt)
* The core sets this bit to indicate that an interrupt is
* pending on one of the channels of the core (in Host mode).
* The application must read the Host All Channels Interrupt
* (HAINT) register to determine the exact number of the channel
* on which the interrupt occurred, and then read the
* corresponding Host Channel-n Interrupt (HCINTn) register to
* determine the exact cause of the interrupt. The application
* must clear the appropriate status bit in the HCINTn register
* to clear this bit.
*/
union cvmx_usbcx_haint usbc_haint;
usbc_haint.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HAINT(usb->index));
while (usbc_haint.u32) {
int channel;
channel = __fls(usbc_haint.u32);
cvmx_usb_poll_channel(usb, channel);
usbc_haint.u32 ^= 1 << channel;
}
}
cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
return 0;
}
/* convert between an HCD pointer and the corresponding struct octeon_hcd */
static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
{
return (struct octeon_hcd *)(hcd->hcd_priv);
}
static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
spin_lock_irqsave(&usb->lock, flags);
cvmx_usb_poll(usb);
spin_unlock_irqrestore(&usb->lock, flags);
return IRQ_HANDLED;
}
static int octeon_usb_start(struct usb_hcd *hcd)
{
hcd->state = HC_STATE_RUNNING;
return 0;
}
static void octeon_usb_stop(struct usb_hcd *hcd)
{
hcd->state = HC_STATE_HALT;
}
static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
return cvmx_usb_get_frame_number(usb);
}
static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction = NULL;
struct cvmx_usb_pipe *pipe;
unsigned long flags;
struct cvmx_usb_iso_packet *iso_packet;
struct usb_host_endpoint *ep = urb->ep;
int rc;
urb->status = 0;
spin_lock_irqsave(&usb->lock, flags);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc) {
spin_unlock_irqrestore(&usb->lock, flags);
return rc;
}
if (!ep->hcpriv) {
enum cvmx_usb_transfer transfer_type;
enum cvmx_usb_speed speed;
int split_device = 0;
int split_port = 0;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
break;
case PIPE_INTERRUPT:
transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
break;
case PIPE_CONTROL:
transfer_type = CVMX_USB_TRANSFER_CONTROL;
break;
default:
transfer_type = CVMX_USB_TRANSFER_BULK;
break;
}
switch (urb->dev->speed) {
case USB_SPEED_LOW:
speed = CVMX_USB_SPEED_LOW;
break;
case USB_SPEED_FULL:
speed = CVMX_USB_SPEED_FULL;
break;
default:
speed = CVMX_USB_SPEED_HIGH;
break;
}
/*
		 * For low and full speed devices on high speed ports we need
		 * to find the hub that does the speed translation so we know
		 * where to send the split transactions.
*/
if (speed != CVMX_USB_SPEED_HIGH) {
/*
* Start at this device and work our way up the usb
* tree.
*/
struct usb_device *dev = urb->dev;
while (dev->parent) {
/*
				 * If the parent is high speed then it will
				 * receive the splits.
*/
if (dev->parent->speed == USB_SPEED_HIGH) {
split_device = dev->parent->devnum;
split_port = dev->portnum;
break;
}
/*
* Move up the tree one level. If we make it all
* the way up the tree, then the port must not
* be in high speed mode and we don't need a
* split.
*/
dev = dev->parent;
}
}
pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), speed,
le16_to_cpu(ep->desc.wMaxPacketSize)
& 0x7ff,
transfer_type,
usb_pipein(urb->pipe) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT,
urb->interval,
(le16_to_cpu(ep->desc.wMaxPacketSize)
>> 11) & 0x3,
split_device, split_port);
if (!pipe) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&usb->lock, flags);
dev_dbg(dev, "Failed to create pipe\n");
return -ENOMEM;
}
ep->hcpriv = pipe;
} else {
pipe = ep->hcpriv;
}
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
dev_dbg(dev, "Submit isochronous to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
/*
* Allocate a structure to use for our private list of
* isochronous packets.
*/
iso_packet = kmalloc_array(urb->number_of_packets,
sizeof(struct cvmx_usb_iso_packet),
GFP_ATOMIC);
if (iso_packet) {
int i;
/* Fill the list with the data from the URB */
for (i = 0; i < urb->number_of_packets; i++) {
iso_packet[i].offset =
urb->iso_frame_desc[i].offset;
iso_packet[i].length =
urb->iso_frame_desc[i].length;
iso_packet[i].status = CVMX_USB_STATUS_ERROR;
}
/*
* Store a pointer to the list in the URB setup_packet
* field. We know this currently isn't being used and
* this saves us a bunch of logic.
*/
urb->setup_packet = (char *)iso_packet;
transaction = cvmx_usb_submit_isochronous(usb,
pipe, urb);
/*
* If submit failed we need to free our private packet
* list.
*/
if (!transaction) {
urb->setup_packet = NULL;
kfree(iso_packet);
}
}
break;
case PIPE_INTERRUPT:
dev_dbg(dev, "Submit interrupt to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
break;
case PIPE_CONTROL:
dev_dbg(dev, "Submit control to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_control(usb, pipe, urb);
break;
case PIPE_BULK:
dev_dbg(dev, "Submit bulk to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
break;
}
if (!transaction) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&usb->lock, flags);
dev_dbg(dev, "Failed to submit\n");
return -ENOMEM;
}
urb->hcpriv = transaction;
spin_unlock_irqrestore(&usb->lock, flags);
return 0;
}
static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
struct urb *urb,
int status)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
int rc;
if (!urb->dev)
return -EINVAL;
spin_lock_irqsave(&usb->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto out;
urb->status = status;
cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
out:
spin_unlock_irqrestore(&usb->lock, flags);
return rc;
}
static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct device *dev = hcd->self.controller;
if (ep->hcpriv) {
struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct cvmx_usb_pipe *pipe = ep->hcpriv;
unsigned long flags;
spin_lock_irqsave(&usb->lock, flags);
cvmx_usb_cancel_all(usb, pipe);
if (cvmx_usb_close_pipe(usb, pipe))
dev_dbg(dev, "Closing pipe %p failed\n", pipe);
spin_unlock_irqrestore(&usb->lock, flags);
ep->hcpriv = NULL;
}
}
static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct cvmx_usb_port_status port_status;
unsigned long flags;
spin_lock_irqsave(&usb->lock, flags);
port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
buf[0] = port_status.connect_change << 1;
return buf[0] != 0;
}
static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
struct cvmx_usb_port_status usb_port_status;
int port_status;
struct usb_hub_descriptor *desc;
unsigned long flags;
switch (typeReq) {
case ClearHubFeature:
dev_dbg(dev, "ClearHubFeature\n");
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* Nothing required here */
break;
default:
return -EINVAL;
}
break;
case ClearPortFeature:
dev_dbg(dev, "ClearPortFeature\n");
if (wIndex != 1) {
dev_dbg(dev, " INVALID\n");
return -EINVAL;
}
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
dev_dbg(dev, " ENABLE\n");
spin_lock_irqsave(&usb->lock, flags);
cvmx_usb_disable(usb);
spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(dev, " SUSPEND\n");
/* Not supported on Octeon */
break;
case USB_PORT_FEAT_POWER:
dev_dbg(dev, " POWER\n");
/* Not supported on Octeon */
break;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(dev, " INDICATOR\n");
			/* Port indicator not supported */
break;
case USB_PORT_FEAT_C_CONNECTION:
dev_dbg(dev, " C_CONNECTION\n");
			/* Clears the driver's internal connect status change flag */
spin_lock_irqsave(&usb->lock, flags);
usb->port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_RESET:
dev_dbg(dev, " C_RESET\n");
/*
* Clears the driver's internal Port Reset Change flag.
*/
spin_lock_irqsave(&usb->lock, flags);
usb->port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_ENABLE:
dev_dbg(dev, " C_ENABLE\n");
/*
* Clears the driver's internal Port Enable/Disable
* Change flag.
*/
spin_lock_irqsave(&usb->lock, flags);
usb->port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_SUSPEND:
dev_dbg(dev, " C_SUSPEND\n");
/*
* Clears the driver's internal Port Suspend Change
* flag, which is set when resume signaling on the host
* port is complete.
*/
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(dev, " C_OVER_CURRENT\n");
			/* Clears the driver's internal over-current change flag */
spin_lock_irqsave(&usb->lock, flags);
usb->port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
break;
default:
dev_dbg(dev, " UNKNOWN\n");
return -EINVAL;
}
break;
case GetHubDescriptor:
dev_dbg(dev, "GetHubDescriptor\n");
desc = (struct usb_hub_descriptor *)buf;
desc->bDescLength = 9;
desc->bDescriptorType = 0x29;
desc->bNbrPorts = 1;
desc->wHubCharacteristics = cpu_to_le16(0x08);
desc->bPwrOn2PwrGood = 1;
desc->bHubContrCurrent = 0;
desc->u.hs.DeviceRemovable[0] = 0;
desc->u.hs.DeviceRemovable[1] = 0xff;
break;
case GetHubStatus:
dev_dbg(dev, "GetHubStatus\n");
*(__le32 *)buf = 0;
break;
case GetPortStatus:
dev_dbg(dev, "GetPortStatus\n");
if (wIndex != 1) {
dev_dbg(dev, " INVALID\n");
return -EINVAL;
}
spin_lock_irqsave(&usb->lock, flags);
usb_port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
port_status = 0;
if (usb_port_status.connect_change) {
port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
dev_dbg(dev, " C_CONNECTION\n");
}
if (usb_port_status.port_enabled) {
port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
dev_dbg(dev, " C_ENABLE\n");
}
if (usb_port_status.connected) {
port_status |= (1 << USB_PORT_FEAT_CONNECTION);
dev_dbg(dev, " CONNECTION\n");
}
if (usb_port_status.port_enabled) {
port_status |= (1 << USB_PORT_FEAT_ENABLE);
dev_dbg(dev, " ENABLE\n");
}
if (usb_port_status.port_over_current) {
port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
dev_dbg(dev, " OVER_CURRENT\n");
}
if (usb_port_status.port_powered) {
port_status |= (1 << USB_PORT_FEAT_POWER);
dev_dbg(dev, " POWER\n");
}
if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
port_status |= USB_PORT_STAT_HIGH_SPEED;
dev_dbg(dev, " HIGHSPEED\n");
} else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
dev_dbg(dev, " LOWSPEED\n");
}
*((__le32 *)buf) = cpu_to_le32(port_status);
break;
case SetHubFeature:
dev_dbg(dev, "SetHubFeature\n");
/* No HUB features supported */
break;
case SetPortFeature:
dev_dbg(dev, "SetPortFeature\n");
if (wIndex != 1) {
dev_dbg(dev, " INVALID\n");
return -EINVAL;
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
dev_dbg(dev, " SUSPEND\n");
return -EINVAL;
case USB_PORT_FEAT_POWER:
dev_dbg(dev, " POWER\n");
/*
* Program the port power bit to drive VBUS on the USB.
*/
spin_lock_irqsave(&usb->lock, flags);
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
cvmx_usbcx_hprt, prtpwr, 1);
spin_unlock_irqrestore(&usb->lock, flags);
return 0;
case USB_PORT_FEAT_RESET:
dev_dbg(dev, " RESET\n");
spin_lock_irqsave(&usb->lock, flags);
cvmx_usb_reset_port(usb);
spin_unlock_irqrestore(&usb->lock, flags);
return 0;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(dev, " INDICATOR\n");
/* Not supported */
break;
default:
dev_dbg(dev, " UNKNOWN\n");
return -EINVAL;
}
break;
default:
dev_dbg(dev, "Unknown root hub request\n");
return -EINVAL;
}
return 0;
}
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
.hcd_priv_size = sizeof(struct octeon_hcd),
.irq = octeon_usb_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
.start = octeon_usb_start,
.stop = octeon_usb_stop,
.urb_enqueue = octeon_usb_urb_enqueue,
.urb_dequeue = octeon_usb_urb_dequeue,
.endpoint_disable = octeon_usb_endpoint_disable,
.get_frame_number = octeon_usb_get_frame_number,
.hub_status_data = octeon_usb_hub_status_data,
.hub_control = octeon_usb_hub_control,
.map_urb_for_dma = octeon_map_urb_for_dma,
.unmap_urb_for_dma = octeon_unmap_urb_for_dma,
};
static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
int initialize_flags;
int usb_num;
struct resource *res_mem;
struct device_node *usbn_node;
int irq = platform_get_irq(pdev, 0);
struct device *dev = &pdev->dev;
struct octeon_hcd *usb;
struct usb_hcd *hcd;
u32 clock_rate = 48000000;
bool is_crystal_clock = false;
const char *clock_type;
int i;
if (!dev->of_node) {
dev_err(dev, "Error: empty of_node\n");
return -ENXIO;
}
usbn_node = dev->of_node->parent;
i = of_property_read_u32(usbn_node,
"clock-frequency", &clock_rate);
if (i)
i = of_property_read_u32(usbn_node,
"refclk-frequency", &clock_rate);
if (i) {
dev_err(dev, "No USBN \"clock-frequency\"\n");
return -ENXIO;
}
switch (clock_rate) {
case 12000000:
initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
break;
case 24000000:
initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
break;
case 48000000:
initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
break;
default:
dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
clock_rate);
return -ENXIO;
}
i = of_property_read_string(usbn_node,
"cavium,refclk-type", &clock_type);
if (i)
i = of_property_read_string(usbn_node,
"refclk-type", &clock_type);
if (!i && strcmp("crystal", clock_type) == 0)
is_crystal_clock = true;
if (is_crystal_clock)
initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
else
initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem) {
dev_err(dev, "found no memory resource\n");
return -ENXIO;
}
usb_num = (res_mem->start >> 44) & 1;
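	/*
	 * The register blocks of the two USB ports appear to differ only in
	 * physical address bit 44, so that bit identifies the port number.
	 */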
if (irq < 0) {
/* Defective device tree, but we know how to fix it. */
irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
irq = irq_create_mapping(NULL, hwirq);
}
/*
* Set the DMA mask to 64bits so we get buffers already translated for
* DMA.
*/
i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (i)
return i;
/*
* Only cn52XX and cn56XX have DWC_OTG USB hardware and the
* IOB priority registers. Under heavy network load USB
* hardware can be starved by the IOB causing a crash. Give
* it a priority boost if it has been waiting more than 400
* cycles to avoid this situation.
*
* Testing indicates that a cnt_val of 8192 is not sufficient,
* but no failures are seen with 4096. We choose a value of
* 400 to give a safety factor of 10.
*/
if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
pri_cnt.u64 = 0;
pri_cnt.s.cnt_enb = 1;
pri_cnt.s.cnt_val = 400;
cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
}
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
		return -ENOMEM;
}
hcd->uses_new_polling = 1;
usb = (struct octeon_hcd *)hcd->hcd_priv;
spin_lock_init(&usb->lock);
usb->init_flags = initialize_flags;
/* Initialize the USB state structure */
usb->index = usb_num;
INIT_LIST_HEAD(&usb->idle_pipes);
for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
INIT_LIST_HEAD(&usb->active_pipes[i]);
	/* Due to an erratum, CN31XX doesn't support DMA */
if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
/* Only use one channel with non DMA */
usb->idle_hardware_channels = 0x1;
} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		/* CN5XXX has an erratum affecting channel 3 */
usb->idle_hardware_channels = 0xf7;
} else {
usb->idle_hardware_channels = 0xff;
}
status = cvmx_usb_initialize(dev, usb);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
usb_put_hcd(hcd);
		return status;
}
status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
usb_put_hcd(hcd);
		return status;
}
device_wakeup_enable(hcd->self.controller);
dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
static void octeon_usb_remove(struct platform_device *pdev)
{
int status;
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
usb_remove_hcd(hcd);
spin_lock_irqsave(&usb->lock, flags);
status = cvmx_usb_shutdown(usb);
spin_unlock_irqrestore(&usb->lock, flags);
if (status)
dev_dbg(dev, "USB shutdown failed with %d\n", status);
usb_put_hcd(hcd);
}
static const struct of_device_id octeon_usb_match[] = {
{
.compatible = "cavium,octeon-5750-usbc",
},
{},
};
MODULE_DEVICE_TABLE(of, octeon_usb_match);
static struct platform_driver octeon_usb_driver = {
.driver = {
.name = "octeon-hcd",
.of_match_table = octeon_usb_match,
},
.probe = octeon_usb_probe,
.remove_new = octeon_usb_remove,
};
static int __init octeon_usb_driver_init(void)
{
if (usb_disabled())
return 0;
return platform_driver_register(&octeon_usb_driver);
}
module_init(octeon_usb_driver_init);
static void __exit octeon_usb_driver_exit(void)
{
if (usb_disabled())
return;
platform_driver_unregister(&octeon_usb_driver);
}
module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium, Inc. <[email protected]>");
MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
| linux-master | drivers/usb/host/octeon-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Nuvoton NPCM7xx driver for EHCI HCD
*
* Copyright (C) 2018 Nuvoton Technologies,
* Avi Fishman <[email protected]> <[email protected]>
* Tomer Maimon <[email protected]> <[email protected]>
*
 * Based on the ehci-spear.c driver
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ehci.h"
#define DRIVER_DESC "EHCI npcm7xx driver"
static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
static int __maybe_unused ehci_npcm7xx_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
bool do_wakeup = device_may_wakeup(dev);
return ehci_suspend(hcd, do_wakeup);
}
static int __maybe_unused ehci_npcm7xx_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
ehci_resume(hcd, false);
return 0;
}
static SIMPLE_DEV_PM_OPS(ehci_npcm7xx_pm_ops, ehci_npcm7xx_drv_suspend,
ehci_npcm7xx_drv_resume);
static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
const struct hc_driver *driver = &ehci_npcm7xx_hc_driver;
int irq;
int retval;
dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
if (usb_disabled())
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
goto fail;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
/* registers start at offset 0x0 */
hcd_to_ehci(hcd)->caps = hcd->regs;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err_put_hcd;
device_wakeup_enable(hcd->self.controller);
return retval;
err_put_hcd:
usb_put_hcd(hcd);
fail:
dev_err(&pdev->dev, "init fail, %d\n", retval);
return retval;
}
static void npcm7xx_ehci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
static const struct of_device_id npcm7xx_ehci_id_table[] = {
{ .compatible = "nuvoton,npcm750-ehci" },
{ },
};
MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table);
static struct platform_driver npcm7xx_ehci_hcd_driver = {
.probe = npcm7xx_ehci_hcd_drv_probe,
.remove_new = npcm7xx_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "npcm7xx-ehci",
.bus = &platform_bus_type,
.pm = pm_ptr(&ehci_npcm7xx_pm_ops),
.of_match_table = npcm7xx_ehci_id_table,
}
};
static int __init ehci_npcm7xx_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_npcm7xx_hc_driver, NULL);
return platform_driver_register(&npcm7xx_ehci_hcd_driver);
}
module_init(ehci_npcm7xx_init);
static void __exit ehci_npcm7xx_cleanup(void)
{
platform_driver_unregister(&npcm7xx_ehci_hcd_driver);
}
module_exit(ehci_npcm7xx_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:npcm7xx-ehci");
MODULE_AUTHOR("Avi Fishman");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/ehci-npcm7xx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001 by David Brownell
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
 * There are basically three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... single shot DMA mapped
*
* There's also "register" data (e.g. PCI or SOC), which is memory mapped.
* No memory seen by this driver is pageable.
*/
/*-------------------------------------------------------------------------*/
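/*
 * Illustrative sketch (compiled out): the constraint behind the dma_pool
 * parameters used below -- 32-byte aligned descriptors that must not
 * cross a 4 KiB boundary. Userspace stand-in with example addresses only.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool crosses_4k(uint64_t addr, uint64_t size)
{
	return (addr >> 12) != ((addr + size - 1) >> 12);
}

int main(void)
{
	/* A 64-byte descriptor at 4064 straddles the 4096 boundary... */
	printf("at 4064: %s\n", crosses_4k(4064, 64) ? "crosses" : "ok");
	/* ...while one at 4032 fits entirely inside the first page. */
	printf("at 4032: %s\n", crosses_4k(4032, 64) ? "crosses" : "ok");
	return 0;
}
#endif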
/* Allocate the key transfer structures from the previously allocated pool */
static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
dma_addr_t dma)
{
memset (qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END(ehci);
qtd->hw_alt_next = EHCI_LIST_END(ehci);
INIT_LIST_HEAD (&qtd->qtd_list);
}
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qtd *qtd;
dma_addr_t dma;
qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
if (qtd != NULL) {
ehci_qtd_init(ehci, qtd, dma);
}
return qtd;
}
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
BUG ();
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
kfree(qh);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qh *qh;
dma_addr_t dma;
qh = kzalloc(sizeof *qh, GFP_ATOMIC);
if (!qh)
goto done;
qh->hw = (struct ehci_qh_hw *)
dma_pool_zalloc(ehci->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
INIT_LIST_HEAD(&qh->unlink_node);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
if (qh->dummy == NULL) {
ehci_dbg (ehci, "no dummy td\n");
goto fail1;
}
done:
return qh;
fail1:
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
kfree(qh);
return NULL;
}
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
if (ehci->async)
qh_destroy(ehci, ehci->async);
ehci->async = NULL;
if (ehci->dummy)
qh_destroy(ehci, ehci->dummy);
ehci->dummy = NULL;
/* DMA consistent memory and pools */
dma_pool_destroy(ehci->qtd_pool);
ehci->qtd_pool = NULL;
dma_pool_destroy(ehci->qh_pool);
ehci->qh_pool = NULL;
dma_pool_destroy(ehci->itd_pool);
ehci->itd_pool = NULL;
dma_pool_destroy(ehci->sitd_pool);
ehci->sitd_pool = NULL;
if (ehci->periodic)
dma_free_coherent(ehci_to_hcd(ehci)->self.sysdev,
ehci->periodic_size * sizeof (u32),
ehci->periodic, ehci->periodic_dma);
ehci->periodic = NULL;
/* shadow periodic table */
kfree(ehci->pshadow);
ehci->pshadow = NULL;
}
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
int i;
/* QTDs for control/bulk/intr transfers */
ehci->qtd_pool = dma_pool_create ("ehci_qtd",
ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_qtd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qtd_pool) {
goto fail;
}
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
ehci_to_hcd(ehci)->self.sysdev,
sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
goto fail;
}
ehci->async = ehci_qh_alloc (ehci, flags);
if (!ehci->async) {
goto fail;
}
/* ITD for high speed ISO transfers */
ehci->itd_pool = dma_pool_create ("ehci_itd",
ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_itd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->itd_pool) {
goto fail;
}
/* SITD for full/low speed split ISO transfers */
ehci->sitd_pool = dma_pool_create ("ehci_sitd",
ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_sitd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->sitd_pool) {
goto fail;
}
/* Hardware periodic table */
ehci->periodic = (__le32 *)
dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
ehci->periodic_size * sizeof(__le32),
&ehci->periodic_dma, flags);
if (ehci->periodic == NULL) {
goto fail;
}
if (ehci->use_dummy_qh) {
struct ehci_qh_hw *hw;
ehci->dummy = ehci_qh_alloc(ehci, flags);
if (!ehci->dummy)
goto fail;
hw = ehci->dummy->hw;
hw->hw_next = EHCI_LIST_END(ehci);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
hw->hw_alt_next = EHCI_LIST_END(ehci);
ehci->dummy->hw = hw;
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic[i] = cpu_to_hc32(ehci,
ehci->dummy->qh_dma);
} else {
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic[i] = EHCI_LIST_END(ehci);
}
/* software shadow of hardware table */
ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
if (ehci->pshadow != NULL)
return 0;
fail:
ehci_dbg (ehci, "couldn't init memory\n");
ehci_mem_cleanup (ehci);
return -ENOMEM;
}
| linux-master | drivers/usb/host/ehci-mem.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
* (C) Copyright 2002 Hewlett-Packard Company
*
* Bus Glue for pxa27x
*
* Written by Christopher Hoover <[email protected]>
* Based on fragments of previous driver by Russell King et al.
*
* Modified for LH7A404 from ohci-sa1111.c
* by Durgesh Pattamatta <[email protected]>
*
* Modified for pxa27x from ohci-lh7a404.c
* by Nick Bane <[email protected]> 26-8-2004
*
* This file is licenced under the GPL.
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include <linux/soc/pxa/cpu.h>
#include "ohci.h"
#define DRIVER_DESC "OHCI PXA27x/PXA3x driver"
/*
* UHC: USB Host Controller (OHCI-like) register definitions
*/
#define UHCREV (0x0000) /* UHC HCI Spec Revision */
#define UHCHCON (0x0004) /* UHC Host Control Register */
#define UHCCOMS (0x0008) /* UHC Command Status Register */
#define UHCINTS (0x000C) /* UHC Interrupt Status Register */
#define UHCINTE (0x0010) /* UHC Interrupt Enable */
#define UHCINTD (0x0014) /* UHC Interrupt Disable */
#define UHCHCCA (0x0018) /* UHC Host Controller Comm. Area */
#define UHCPCED (0x001C) /* UHC Period Current Endpt Descr */
#define UHCCHED (0x0020) /* UHC Control Head Endpt Descr */
#define UHCCCED (0x0024) /* UHC Control Current Endpt Descr */
#define UHCBHED (0x0028) /* UHC Bulk Head Endpt Descr */
#define UHCBCED (0x002C) /* UHC Bulk Current Endpt Descr */
#define UHCDHEAD (0x0030) /* UHC Done Head */
#define UHCFMI (0x0034) /* UHC Frame Interval */
#define UHCFMR (0x0038) /* UHC Frame Remaining */
#define UHCFMN (0x003C) /* UHC Frame Number */
#define UHCPERS (0x0040) /* UHC Periodic Start */
#define UHCLS (0x0044) /* UHC Low Speed Threshold */
#define UHCRHDA (0x0048) /* UHC Root Hub Descriptor A */
#define UHCRHDA_NOCP (1 << 12) /* No over current protection */
#define UHCRHDA_OCPM (1 << 11) /* Over Current Protection Mode */
#define UHCRHDA_POTPGT(x) \
(((x) & 0xff) << 24) /* Power On To Power Good Time */
#define UHCRHDB (0x004C) /* UHC Root Hub Descriptor B */
#define UHCRHS (0x0050) /* UHC Root Hub Status */
#define UHCRHPS1 (0x0054) /* UHC Root Hub Port 1 Status */
#define UHCRHPS2 (0x0058) /* UHC Root Hub Port 2 Status */
#define UHCRHPS3 (0x005C) /* UHC Root Hub Port 3 Status */
#define UHCSTAT (0x0060) /* UHC Status Register */
#define UHCSTAT_UPS3 (1 << 16) /* USB Power Sense Port3 */
#define UHCSTAT_SBMAI (1 << 15) /* System Bus Master Abort Interrupt*/
#define UHCSTAT_SBTAI (1 << 14) /* System Bus Target Abort Interrupt*/
#define UHCSTAT_UPRI (1 << 13) /* USB Port Resume Interrupt */
#define UHCSTAT_UPS2 (1 << 12) /* USB Power Sense Port 2 */
#define UHCSTAT_UPS1 (1 << 11) /* USB Power Sense Port 1 */
#define UHCSTAT_HTA (1 << 10) /* HCI Target Abort */
#define UHCSTAT_HBA (1 << 8) /* HCI Buffer Active */
#define UHCSTAT_RWUE (1 << 7) /* HCI Remote Wake Up Event */
#define UHCHR (0x0064) /* UHC Reset Register */
#define UHCHR_SSEP3 (1 << 11) /* Sleep Standby Enable for Port3 */
#define UHCHR_SSEP2 (1 << 10) /* Sleep Standby Enable for Port2 */
#define UHCHR_SSEP1 (1 << 9) /* Sleep Standby Enable for Port1 */
#define UHCHR_PCPL (1 << 7) /* Power control polarity low */
#define UHCHR_PSPL (1 << 6) /* Power sense polarity low */
#define UHCHR_SSE (1 << 5) /* Sleep Standby Enable */
#define UHCHR_UIT (1 << 4) /* USB Interrupt Test */
#define UHCHR_SSDC (1 << 3) /* Simulation Scale Down Clock */
#define UHCHR_CGR (1 << 2) /* Clock Generation Reset */
#define UHCHR_FHR (1 << 1) /* Force Host Controller Reset */
#define UHCHR_FSBIR (1 << 0) /* Force System Bus Iface Reset */
#define UHCHIE (0x0068) /* UHC Interrupt Enable Register*/
#define UHCHIE_UPS3IE (1 << 14) /* Power Sense Port3 IntEn */
#define UHCHIE_UPRIE (1 << 13) /* Port Resume IntEn */
#define UHCHIE_UPS2IE (1 << 12) /* Power Sense Port2 IntEn */
#define UHCHIE_UPS1IE (1 << 11) /* Power Sense Port1 IntEn */
#define UHCHIE_TAIE (1 << 10) /* HCI Interface Transfer Abort
Interrupt Enable*/
#define UHCHIE_HBAIE (1 << 8) /* HCI Buffer Active IntEn */
#define UHCHIE_RWIE (1 << 7) /* Remote Wake-up IntEn */
#define UHCHIT (0x006C) /* UHC Interrupt Test register */
#define PXA_UHC_MAX_PORTNUM 3
static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
struct pxa27x_ohci {
struct clk *clk;
void __iomem *mmio_base;
struct regulator *vbus[3];
bool vbus_enabled[3];
};
#define to_pxa27x_ohci(hcd)	((struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv))
/*
PMM_NPS_MODE -- PMM Non-power switching mode
Ports are powered continuously.
PMM_GLOBAL_MODE -- PMM global switching mode
All ports are powered at the same time.
PMM_PERPORT_MODE -- PMM per port switching mode
Ports are powered individually.
*/
static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
{
uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);
switch (mode) {
case PMM_NPS_MODE:
uhcrhda |= RH_A_NPS;
break;
case PMM_GLOBAL_MODE:
uhcrhda &= ~(RH_A_NPS | RH_A_PSM);
break;
case PMM_PERPORT_MODE:
uhcrhda &= ~(RH_A_NPS);
uhcrhda |= RH_A_PSM;
/* Set port power control mask bits, only 3 ports. */
uhcrhdb |= (0x7<<17);
break;
default:
		printk(KERN_ERR
		       "Invalid mode %d, set to non-power switch mode.\n",
		       mode);
uhcrhda |= RH_A_NPS;
}
__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
__raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
return 0;
}
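/*
 * Illustrative sketch (compiled out): the read-modify-write shape of the
 * mode selection above, with the memory-mapped register replaced by a
 * plain variable. The bit positions are hypothetical stand-ins for
 * RH_A_NPS and RH_A_PSM.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_NPS	(1u << 9)	/* hypothetical bit positions */
#define SK_PSM	(1u << 8)

int main(void)
{
	uint32_t rhda = SK_NPS | 0x24;	/* pretend current register value */

	/* Global power switching: clear both NPS and PSM. */
	rhda &= ~(SK_NPS | SK_PSM);
	printf("rhda = 0x%08x\n", rhda);
	return 0;
}
#endif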
static int pxa27x_ohci_set_vbus_power(struct pxa27x_ohci *pxa_ohci,
unsigned int port, bool enable)
{
struct regulator *vbus = pxa_ohci->vbus[port];
int ret = 0;
if (IS_ERR_OR_NULL(vbus))
return 0;
if (enable && !pxa_ohci->vbus_enabled[port])
ret = regulator_enable(vbus);
else if (!enable && pxa_ohci->vbus_enabled[port])
ret = regulator_disable(vbus);
if (ret < 0)
return ret;
pxa_ohci->vbus_enabled[port] = enable;
return 0;
}
static int pxa27x_ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
int ret;
switch (typeReq) {
case SetPortFeature:
case ClearPortFeature:
if (!wIndex || wIndex > 3)
return -EPIPE;
if (wValue != USB_PORT_FEAT_POWER)
break;
ret = pxa27x_ohci_set_vbus_power(pxa_ohci, wIndex - 1,
typeReq == SetPortFeature);
if (ret)
return ret;
break;
}
return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
}
/*-------------------------------------------------------------------------*/
static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
struct pxaohci_platform_data *inf)
{
uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
if (inf->flags & ENABLE_PORT1)
uhchr &= ~UHCHR_SSEP1;
if (inf->flags & ENABLE_PORT2)
uhchr &= ~UHCHR_SSEP2;
if (inf->flags & ENABLE_PORT3)
uhchr &= ~UHCHR_SSEP3;
if (inf->flags & POWER_CONTROL_LOW)
uhchr |= UHCHR_PCPL;
if (inf->flags & POWER_SENSE_LOW)
uhchr |= UHCHR_PSPL;
if (inf->flags & NO_OC_PROTECTION)
uhcrhda |= UHCRHDA_NOCP;
else
uhcrhda &= ~UHCRHDA_NOCP;
if (inf->flags & OC_MODE_PERPORT)
uhcrhda |= UHCRHDA_OCPM;
else
uhcrhda &= ~UHCRHDA_OCPM;
if (inf->power_on_delay) {
uhcrhda &= ~UHCRHDA_POTPGT(0xff);
uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
}
__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
}
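/*
 * Illustrative sketch (compiled out): POTPGT in the OHCI root hub
 * descriptor counts in 2 ms units, which is why power_on_delay is halved
 * above before being shifted into bits 31:24 via UHCRHDA_POTPGT().
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t potpgt_field(unsigned int delay_ms)
{
	/* mirrors UHCRHDA_POTPGT() */
	return ((uint32_t)((delay_ms / 2) & 0xff)) << 24;
}

int main(void)
{
	/* 100 ms of power-on delay encodes as 50 (0x32) in the field. */
	printf("100 ms -> 0x%08x\n", potpgt_field(100));
	return 0;
}
#endif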
static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
{
uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
__raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
udelay(11);
__raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
}
static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
int retval;
struct pxaohci_platform_data *inf;
uint32_t uhchr;
inf = dev_get_platdata(dev);
retval = clk_prepare_enable(pxa_ohci->clk);
if (retval)
return retval;
pxa27x_reset_hc(pxa_ohci);
uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
cpu_relax();
pxa27x_setup_hc(pxa_ohci, inf);
if (inf->init)
retval = inf->init(dev);
if (retval < 0) {
clk_disable_unprepare(pxa_ohci->clk);
return retval;
}
uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
__raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
/* Clear any OTG Pin Hold */
pxa27x_clear_otgph();
return 0;
}
static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
struct pxaohci_platform_data *inf;
uint32_t uhccoms;
inf = dev_get_platdata(dev);
if (inf->exit)
inf->exit(dev);
pxa27x_reset_hc(pxa_ohci);
/* Host Controller Reset */
uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
__raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
udelay(10);
clk_disable_unprepare(pxa_ohci->clk);
}
#ifdef CONFIG_OF
static const struct of_device_id pxa_ohci_dt_ids[] = {
{ .compatible = "marvell,pxa-ohci" },
{ }
};
MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
static int ohci_pxa_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct pxaohci_platform_data *pdata;
u32 tmp;
int ret;
if (!np)
return 0;
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
if (of_property_read_bool(np, "marvell,enable-port1"))
pdata->flags |= ENABLE_PORT1;
if (of_property_read_bool(np, "marvell,enable-port2"))
pdata->flags |= ENABLE_PORT2;
if (of_property_read_bool(np, "marvell,enable-port3"))
pdata->flags |= ENABLE_PORT3;
if (of_property_read_bool(np, "marvell,port-sense-low"))
pdata->flags |= POWER_SENSE_LOW;
if (of_property_read_bool(np, "marvell,power-control-low"))
pdata->flags |= POWER_CONTROL_LOW;
if (of_property_read_bool(np, "marvell,no-oc-protection"))
pdata->flags |= NO_OC_PROTECTION;
if (of_property_read_bool(np, "marvell,oc-mode-perport"))
pdata->flags |= OC_MODE_PERPORT;
if (!of_property_read_u32(np, "marvell,power-on-delay", &tmp))
pdata->power_on_delay = tmp;
if (!of_property_read_u32(np, "marvell,port-mode", &tmp))
pdata->port_mode = tmp;
if (!of_property_read_u32(np, "marvell,power-budget", &tmp))
pdata->power_budget = tmp;
pdev->dev.platform_data = pdata;
return 0;
}
#else
static int ohci_pxa_of_init(struct platform_device *pdev)
{
return 0;
}
#endif
/*-------------------------------------------------------------------------*/
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/**
* ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs
* @pdev: USB Host controller to probe
*
* Context: task context, might sleep
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
{
int retval, irq;
struct usb_hcd *hcd;
struct pxaohci_platform_data *inf;
struct pxa27x_ohci *pxa_ohci;
struct ohci_hcd *ohci;
struct resource *r;
struct clk *usb_clk;
unsigned int i;
retval = ohci_pxa_of_init(pdev);
if (retval)
return retval;
inf = dev_get_platdata(&pdev->dev);
if (!inf)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
		pr_err("no resource of IORESOURCE_IRQ\n");
return irq;
}
usb_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usb_clk))
return PTR_ERR(usb_clk);
hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x");
if (!hcd)
return -ENOMEM;
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
}
hcd->rsrc_start = r->start;
hcd->rsrc_len = resource_size(r);
/* initialize "struct pxa27x_ohci" */
pxa_ohci = to_pxa27x_ohci(hcd);
pxa_ohci->clk = usb_clk;
pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
for (i = 0; i < 3; ++i) {
char name[6];
if (!(inf->flags & (ENABLE_PORT1 << i)))
continue;
sprintf(name, "vbus%u", i + 1);
pxa_ohci->vbus[i] = devm_regulator_get(&pdev->dev, name);
}
retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
if (retval < 0) {
		pr_debug("pxa27x_start_hc failed\n");
goto err;
}
/* Select Power Management Mode */
pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
if (inf->power_budget)
hcd->power_budget = inf->power_budget;
/* The value of NDP in roothub_a is incorrect on this hardware */
ohci = hcd_to_ohci(hcd);
ohci->num_ports = 3;
retval = usb_add_hcd(hcd, irq, 0);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
return retval;
}
pxa27x_stop_hc(pxa_ohci, &pdev->dev);
err:
usb_put_hcd(hcd);
return retval;
}
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */
/**
* ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
* @pdev: USB Host Controller being removed
*
* Context: task context, might sleep
*
* Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
static void ohci_hcd_pxa27x_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
unsigned int i;
usb_remove_hcd(hcd);
pxa27x_stop_hc(pxa_ohci, &pdev->dev);
for (i = 0; i < 3; ++i)
pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
usb_put_hcd(hcd);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
pxa27x_stop_hc(pxa_ohci, dev);
return ret;
}
static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
struct pxaohci_platform_data *inf = dev_get_platdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int status;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
status = pxa27x_start_hc(pxa_ohci, dev);
if (status < 0)
return status;
/* Select Power Management Mode */
pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
ohci_resume(hcd, false);
return 0;
}
static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
.suspend = ohci_hcd_pxa27x_drv_suspend,
.resume = ohci_hcd_pxa27x_drv_resume,
};
#endif
static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_probe,
.remove_new = ohci_hcd_pxa27x_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
.of_match_table = of_match_ptr(pxa_ohci_dt_ids),
#ifdef CONFIG_PM
.pm = &ohci_hcd_pxa27x_pm_ops,
#endif
},
};
static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
.extra_priv_size = sizeof(struct pxa27x_ohci),
};
static int __init ohci_pxa27x_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
return platform_driver_register(&ohci_hcd_pxa27x_driver);
}
module_init(ohci_pxa27x_init);
static void __exit ohci_pxa27x_cleanup(void)
{
platform_driver_unregister(&ohci_hcd_pxa27x_driver);
}
module_exit(ohci_pxa27x_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa27x-ohci");
| linux-master | drivers/usb/host/ohci-pxa27x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI HCD on SPEAr SOC
*
* Copyright (C) 2010 ST Micro Electronics,
* Deepak Sikri <[email protected]>
*
* Based on various ehci-*.c drivers
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ehci.h"
#define DRIVER_DESC "EHCI SPEAr driver"
struct spear_ehci {
struct clk *clk;
};
#define to_spear_ehci(hcd)	((struct spear_ehci *)(hcd_to_ehci(hcd)->priv))
static struct hc_driver __read_mostly ehci_spear_hc_driver;
static int __maybe_unused ehci_spear_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
bool do_wakeup = device_may_wakeup(dev);
return ehci_suspend(hcd, do_wakeup);
}
static int __maybe_unused ehci_spear_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
ehci_resume(hcd, false);
return 0;
}
static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
ehci_spear_drv_resume);
static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
struct spear_ehci *sehci;
struct resource *res;
struct clk *usbh_clk;
const struct hc_driver *driver = &ehci_spear_hc_driver;
int irq, retval;
if (usb_disabled())
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = PTR_ERR(usbh_clk);
goto fail;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
sehci = to_spear_ehci(hcd);
sehci->clk = usbh_clk;
/* registers start at offset 0x0 */
hcd_to_ehci(hcd)->caps = hcd->regs;
clk_prepare_enable(sehci->clk);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err_stop_ehci;
device_wakeup_enable(hcd->self.controller);
return retval;
err_stop_ehci:
clk_disable_unprepare(sehci->clk);
err_put_hcd:
usb_put_hcd(hcd);
fail:
dev_err(&pdev->dev, "init fail, %d\n", retval);
	return retval;
}
static void spear_ehci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct spear_ehci *sehci = to_spear_ehci(hcd);
usb_remove_hcd(hcd);
if (sehci->clk)
clk_disable_unprepare(sehci->clk);
usb_put_hcd(hcd);
}
static const struct of_device_id spear_ehci_id_table[] = {
{ .compatible = "st,spear600-ehci", },
{ },
};
MODULE_DEVICE_TABLE(of, spear_ehci_id_table);
static struct platform_driver spear_ehci_hcd_driver = {
.probe = spear_ehci_hcd_drv_probe,
.remove_new = spear_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "spear-ehci",
.bus = &platform_bus_type,
.pm = pm_ptr(&ehci_spear_pm_ops),
.of_match_table = spear_ehci_id_table,
}
};
static const struct ehci_driver_overrides spear_overrides __initconst = {
.extra_priv_size = sizeof(struct spear_ehci),
};
static int __init ehci_spear_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ehci_hcd_driver);
}
module_init(ehci_spear_init);
static void __exit ehci_spear_cleanup(void)
{
platform_driver_unregister(&spear_ehci_hcd_driver);
}
module_exit(ehci_spear_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:spear-ehci");
MODULE_AUTHOR("Deepak Sikri");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-spear.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Broadcom */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/iopoll.h>
#include "ehci.h"
#define hcd_to_ehci_priv(h) ((struct brcm_priv *)hcd_to_ehci(h)->priv)
struct brcm_priv {
struct clk *clk;
};
/*
* ehci_brcm_wait_for_sof
* Wait for start of next microframe, then wait extra delay microseconds
*/
static inline void ehci_brcm_wait_for_sof(struct ehci_hcd *ehci, u32 delay)
{
u32 frame_idx = ehci_readl(ehci, &ehci->regs->frame_index);
u32 val;
int res;
/* Wait for next microframe (every 125 usecs) */
res = readl_relaxed_poll_timeout(&ehci->regs->frame_index, val,
val != frame_idx, 1, 130);
if (res)
ehci_err(ehci, "Error waiting for SOF\n");
udelay(delay);
}
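/*
 * Illustrative sketch (compiled out): the poll-until-changed pattern used
 * above, with the frame counter simulated in software. On real hardware
 * the loop body would re-read the register, and the bound (130 polls at
 * 1 us each) covers one full 125 us microframe plus slack.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_frame_index;

static uint32_t read_frame_index(void)
{
	return ++fake_frame_index;	/* stand-in: hardware ticks forward */
}

static int wait_for_new_frame(uint32_t old, unsigned int max_polls)
{
	while (max_polls--) {
		if (read_frame_index() != old)
			return 0;	/* a new microframe has started */
	}
	return -1;	/* timed out, as readl_..._poll_timeout() would */
}

int main(void)
{
	printf("%s\n", wait_for_new_frame(fake_frame_index, 130) ?
	       "timeout" : "saw new SOF");
	return 0;
}
#endif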
/*
* ehci_brcm_hub_control
 * The EHCI controller has a bug where it can violate the SOF
 * interval between the first two SOFs transmitted after resume
 * if the resume occurs near the end of the microframe. This causes
 * the controller to detect babble on the suspended port, which
 * eventually makes the controller reset the port.
 * The fix is to intercept the ehci-hcd request to complete RESUME and
 * align it to the start of the next microframe.
 * See SWLINUX-1909 for more details.
*/
static int ehci_brcm_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int ports = HCS_N_PORTS(ehci->hcs_params);
u32 __iomem *status_reg;
unsigned long flags;
int retval, irq_disabled = 0;
u32 temp;
temp = (wIndex & 0xff) - 1;
if (temp >= HCS_N_PORTS_MAX) /* Avoid index-out-of-bounds warning */
temp = 0;
status_reg = &ehci->regs->port_status[temp];
/*
* RESUME is cleared when GetPortStatus() is called 20ms after start
* of RESUME
*/
if ((typeReq == GetPortStatus) &&
(wIndex && wIndex <= ports) &&
ehci->reset_done[wIndex-1] &&
time_after_eq(jiffies, ehci->reset_done[wIndex-1]) &&
(ehci_readl(ehci, status_reg) & PORT_RESUME)) {
/*
* to make sure we are not interrupted until RESUME bit
* is cleared, disable interrupts on current CPU
*/
ehci_dbg(ehci, "SOF alignment workaround\n");
irq_disabled = 1;
local_irq_save(flags);
ehci_brcm_wait_for_sof(ehci, 5);
}
retval = ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
if (irq_disabled)
local_irq_restore(flags);
return retval;
}
static int ehci_brcm_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int len;
ehci->big_endian_mmio = 1;
ehci->caps = (void __iomem *)hcd->regs;
len = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->regs = (void __iomem *)(hcd->regs + len);
/* This fixes the lockup during reboot due to prior interrupts */
ehci_writel(ehci, CMD_RESET, &ehci->regs->command);
mdelay(10);
/*
* SWLINUX-1705: Avoid OUT packet underflows during high memory
* bus usage
*/
ehci_writel(ehci, 0x00800040, &ehci->regs->brcm_insnreg[1]);
ehci_writel(ehci, 0x00000001, &ehci->regs->brcm_insnreg[3]);
return ehci_setup(hcd);
}
static struct hc_driver __read_mostly ehci_brcm_hc_driver;
static const struct ehci_driver_overrides brcm_overrides __initconst = {
.reset = ehci_brcm_reset,
.extra_priv_size = sizeof(struct brcm_priv),
};
static int ehci_brcm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res_mem;
struct brcm_priv *priv;
struct usb_hcd *hcd;
int irq;
int err;
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err)
return err;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Hook the hub control routine to work around a bug */
ehci_brcm_hc_driver.hub_control = ehci_brcm_hub_control;
/* initialize hcd */
hcd = usb_create_hcd(&ehci_brcm_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(pdev, hcd);
priv = hcd_to_ehci_priv(hcd);
priv->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
goto err_hcd;
}
err = clk_prepare_enable(priv->clk);
if (err)
goto err_hcd;
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_clk;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_clk;
device_wakeup_enable(hcd->self.controller);
device_enable_async_suspend(hcd->self.controller);
return 0;
err_clk:
clk_disable_unprepare(priv->clk);
err_hcd:
usb_put_hcd(hcd);
return err;
}
static void ehci_brcm_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
usb_remove_hcd(hcd);
clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
}
static int __maybe_unused ehci_brcm_suspend(struct device *dev)
{
int ret;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
bool do_wakeup = device_may_wakeup(dev);
ret = ehci_suspend(hcd, do_wakeup);
if (ret)
return ret;
clk_disable_unprepare(priv->clk);
return 0;
}
static int __maybe_unused ehci_brcm_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
int err;
err = clk_prepare_enable(priv->clk);
if (err)
return err;
/*
* SWLINUX-1705: Avoid OUT packet underflows during high memory
* bus usage
*/
ehci_writel(ehci, 0x00800040, &ehci->regs->brcm_insnreg[1]);
ehci_writel(ehci, 0x00000001, &ehci->regs->brcm_insnreg[3]);
ehci_resume(hcd, false);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static SIMPLE_DEV_PM_OPS(ehci_brcm_pm_ops, ehci_brcm_suspend,
ehci_brcm_resume);
static const struct of_device_id brcm_ehci_of_match[] = {
{ .compatible = "brcm,ehci-brcm-v2", },
{ .compatible = "brcm,bcm7445-ehci", },
{}
};
static struct platform_driver ehci_brcm_driver = {
.probe = ehci_brcm_probe,
.remove_new = ehci_brcm_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-brcm",
.pm = &ehci_brcm_pm_ops,
.of_match_table = brcm_ehci_of_match,
}
};
static int __init ehci_brcm_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_brcm_hc_driver, &brcm_overrides);
return platform_driver_register(&ehci_brcm_driver);
}
module_init(ehci_brcm_init);
static void __exit ehci_brcm_exit(void)
{
platform_driver_unregister(&ehci_brcm_driver);
}
module_exit(ehci_brcm_exit);
MODULE_ALIAS("platform:ehci-brcm");
MODULE_DESCRIPTION("EHCI Broadcom STB driver");
MODULE_AUTHOR("Al Cooper");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-brcm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Alan Stern <[email protected]>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, [email protected]
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, [email protected]
* (C) Copyright 1999 Deti Fliegl, [email protected]
* (C) Copyright 1999 Thomas Sailer, [email protected]
* (C) Copyright 1999 Roman Weissgaerber, [email protected]
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, [email protected]).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
* (C) Copyright 2004-2007 Alan Stern, [email protected]
*/
/*
* Technically, updating td->status here is a race, but it's not really a
* problem. The worst that can happen is that we set the IOC bit again
* generating a spurious interrupt. We could fix this by creating another
* QH and leaving the IOC bit always set, but then we would have to play
* games with the FSBR code to make sure we get the correct order in all
* the cases. I don't think it's worth the effort
*/
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
if (uhci->is_stopped)
mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}
/*
* Full-Speed Bandwidth Reclamation (FSBR).
* We turn on FSBR whenever a queue that wants it is advancing,
* and leave it on for a short time thereafter.
*/
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
struct uhci_qh *lqh;
/* The terminating skeleton QH always points back to the first
* FSBR QH. Make the last async QH point to the terminating
* skeleton QH. */
uhci->fsbr_is_on = 1;
lqh = list_entry(uhci->skel_async_qh->node.prev,
struct uhci_qh, node);
lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
struct uhci_qh *lqh;
/* Remove the link from the last async QH to the terminating
* skeleton QH. */
uhci->fsbr_is_on = 0;
lqh = list_entry(uhci->skel_async_qh->node.prev,
struct uhci_qh, node);
lqh->link = UHCI_PTR_TERM(uhci);
}
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
urbp->fsbr = 1;
}
static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
if (urbp->fsbr) {
uhci->fsbr_is_wanted = 1;
if (!uhci->fsbr_is_on)
uhci_fsbr_on(uhci);
else if (uhci->fsbr_expiring) {
uhci->fsbr_expiring = 0;
del_timer(&uhci->fsbr_timer);
}
}
}
static void uhci_fsbr_timeout(struct timer_list *t)
{
struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
if (uhci->fsbr_expiring) {
uhci->fsbr_expiring = 0;
uhci_fsbr_off(uhci);
}
spin_unlock_irqrestore(&uhci->lock, flags);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
dma_addr_t dma_handle;
struct uhci_td *td;
td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
if (!td)
return NULL;
td->dma_handle = dma_handle;
td->frame = -1;
INIT_LIST_HEAD(&td->list);
INIT_LIST_HEAD(&td->fl_list);
return td;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
if (!list_empty(&td->list))
dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
if (!list_empty(&td->fl_list))
dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
u32 status, u32 token, u32 buffer)
{
td->status = cpu_to_hc32(uhci, status);
td->token = cpu_to_hc32(uhci, token);
td->buffer = cpu_to_hc32(uhci, buffer);
}
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
list_add_tail(&td->list, &urbp->td_list);
}
static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
list_del_init(&td->list);
}
/*
* We insert Isochronous URBs directly into the frame list at the beginning
*/
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
struct uhci_td *td, unsigned framenum)
{
framenum &= (UHCI_NUMFRAMES - 1);
td->frame = framenum;
/* Is there a TD already mapped there? */
if (uhci->frame_cpu[framenum]) {
struct uhci_td *ftd, *ltd;
ftd = uhci->frame_cpu[framenum];
ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
list_add_tail(&td->fl_list, &ftd->fl_list);
td->link = ltd->link;
wmb();
ltd->link = LINK_TO_TD(uhci, td);
} else {
td->link = uhci->frame[framenum];
wmb();
uhci->frame[framenum] = LINK_TO_TD(uhci, td);
uhci->frame_cpu[framenum] = td;
}
}
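/*
 * Illustrative sketch (compiled out): the two-step publish order used
 * above -- point the new TD onward first, then (after wmb() on real
 * hardware) patch the previous link, so the controller never follows a
 * stale pointer. Plain single-threaded C stand-in.
 */
#if 0
#include <stdio.h>

struct fake_td {
	struct fake_td *link;
	int id;
};

static void publish(struct fake_td **prev_link, struct fake_td *new_td)
{
	new_td->link = *prev_link;	/* step 1: new element points onward */
	/* wmb() would go here, before the element becomes reachable */
	*prev_link = new_td;		/* step 2: publish it */
}

int main(void)
{
	struct fake_td a = { NULL, 1 }, b = { NULL, 2 };
	struct fake_td *head = NULL;

	publish(&head, &a);
	publish(&head, &b);
	for (struct fake_td *td = head; td; td = td->link)
		printf("td %d\n", td->id);
	return 0;
}
#endif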
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
struct uhci_td *td)
{
/* If it's not inserted, don't remove it */
if (td->frame == -1) {
WARN_ON(!list_empty(&td->fl_list));
return;
}
if (uhci->frame_cpu[td->frame] == td) {
if (list_empty(&td->fl_list)) {
uhci->frame[td->frame] = td->link;
uhci->frame_cpu[td->frame] = NULL;
} else {
struct uhci_td *ntd;
ntd = list_entry(td->fl_list.next,
struct uhci_td,
fl_list);
uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
uhci->frame_cpu[td->frame] = ntd;
}
} else {
struct uhci_td *ptd;
ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
ptd->link = td->link;
}
list_del_init(&td->fl_list);
td->frame = -1;
}
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
unsigned int framenum)
{
struct uhci_td *ftd, *ltd;
framenum &= (UHCI_NUMFRAMES - 1);
ftd = uhci->frame_cpu[framenum];
if (ftd) {
ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
uhci->frame[framenum] = ltd->link;
uhci->frame_cpu[framenum] = NULL;
while (!list_empty(&ftd->fl_list))
list_del_init(ftd->fl_list.prev);
}
}
/*
* Remove all the TDs for an Isochronous URB from the frame list
*/
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
struct uhci_td *td;
list_for_each_entry(td, &urbp->td_list, list)
uhci_remove_td_from_frame_list(uhci, td);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
struct usb_device *udev, struct usb_host_endpoint *hep)
{
dma_addr_t dma_handle;
struct uhci_qh *qh;
qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
if (!qh)
return NULL;
qh->dma_handle = dma_handle;
qh->element = UHCI_PTR_TERM(uhci);
qh->link = UHCI_PTR_TERM(uhci);
INIT_LIST_HEAD(&qh->queue);
INIT_LIST_HEAD(&qh->node);
if (udev) { /* Normal QH */
qh->type = usb_endpoint_type(&hep->desc);
if (qh->type != USB_ENDPOINT_XFER_ISOC) {
qh->dummy_td = uhci_alloc_td(uhci);
if (!qh->dummy_td) {
dma_pool_free(uhci->qh_pool, qh, dma_handle);
return NULL;
}
}
qh->state = QH_STATE_IDLE;
qh->hep = hep;
qh->udev = udev;
hep->hcpriv = qh;
if (qh->type == USB_ENDPOINT_XFER_INT ||
qh->type == USB_ENDPOINT_XFER_ISOC)
qh->load = usb_calc_bus_time(udev->speed,
usb_endpoint_dir_in(&hep->desc),
qh->type == USB_ENDPOINT_XFER_ISOC,
usb_endpoint_maxp(&hep->desc))
/ 1000 + 1;
} else { /* Skeleton QH */
qh->state = QH_STATE_ACTIVE;
qh->type = -1;
}
return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
if (!list_empty(&qh->queue))
dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
list_del(&qh->node);
if (qh->udev) {
qh->hep->hcpriv = NULL;
if (qh->dummy_td)
uhci_free_td(uhci, qh->dummy_td);
}
dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
* When a queue is stopped and a dequeued URB is given back, adjust
* the previous TD link (if the URB isn't first on the queue) or
* save its toggle value (if it is first and is currently executing).
*
* Returns 0 if the URB should not yet be given back, 1 otherwise.
*/
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
struct uhci_td *td;
int ret = 1;
/* Isochronous pipes don't use toggles and their TD link pointers
* get adjusted during uhci_urb_dequeue(). But since their queues
* cannot truly be stopped, we have to watch out for dequeues
* occurring after the nominal unlink frame. */
if (qh->type == USB_ENDPOINT_XFER_ISOC) {
ret = (uhci->frame_number + uhci->is_stopped !=
qh->unlink_frame);
goto done;
}
/* If the URB isn't first on its queue, adjust the link pointer
* of the last TD in the previous URB. The toggle doesn't need
* to be saved since this URB can't be executing yet. */
if (qh->queue.next != &urbp->node) {
struct urb_priv *purbp;
struct uhci_td *ptd;
purbp = list_entry(urbp->node.prev, struct urb_priv, node);
WARN_ON(list_empty(&purbp->td_list));
ptd = list_entry(purbp->td_list.prev, struct uhci_td,
list);
td = list_entry(urbp->td_list.prev, struct uhci_td,
list);
ptd->link = td->link;
goto done;
}
	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
if (qh_element(qh) == UHCI_PTR_TERM(uhci))
goto done;
qh->element = UHCI_PTR_TERM(uhci);
/* Control pipes don't have to worry about toggles */
if (qh->type == USB_ENDPOINT_XFER_CONTROL)
goto done;
/* Save the next toggle value */
WARN_ON(list_empty(&urbp->td_list));
td = list_entry(urbp->td_list.next, struct uhci_td, list);
qh->needs_fixup = 1;
qh->initial_toggle = uhci_toggle(td_token(uhci, td));
done:
return ret;
}
/*
* Fix up the data toggles for URBs in a queue, when one of them
* terminates early (short transfer, error, or dequeued).
*/
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
int skip_first)
{
struct urb_priv *urbp = NULL;
struct uhci_td *td;
unsigned int toggle = qh->initial_toggle;
unsigned int pipe;
/* Fixups for a short transfer start with the second URB in the
* queue (the short URB is the first). */
if (skip_first)
urbp = list_entry(qh->queue.next, struct urb_priv, node);
/* When starting with the first URB, if the QH element pointer is
* still valid then we know the URB's toggles are okay. */
else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
toggle = 2;
/* Fix up the toggle for the URBs in the queue. Normally this
* loop won't run more than once: When an error or short transfer
* occurs, the queue usually gets emptied. */
urbp = list_prepare_entry(urbp, &qh->queue, node);
list_for_each_entry_continue(urbp, &qh->queue, node) {
/* If the first TD has the right toggle value, we don't
* need to change any toggles in this URB */
td = list_entry(urbp->td_list.next, struct uhci_td, list);
if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
td = list_entry(urbp->td_list.prev, struct uhci_td,
list);
toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
/* Otherwise all the toggles in the URB have to be switched */
} else {
list_for_each_entry(td, &urbp->td_list, list) {
td->token ^= cpu_to_hc32(uhci,
TD_TOKEN_TOGGLE);
toggle ^= 1;
}
}
}
wmb();
pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
usb_pipeout(pipe), toggle);
qh->needs_fixup = 0;
}
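/*
 * Illustrative sketch (compiled out): why a queue needs fixing up at all.
 * Each TD consumes one DATA0/DATA1 toggle, so an URB with an odd TD count
 * flips the toggle the next URB must start with; a short transfer or
 * dequeue breaks that chain. The TD counts below are made up.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int tds_per_urb[] = { 3, 2, 5 };	/* hypothetical queue */
	unsigned int toggle = 0;			/* DATA0 first */

	for (unsigned int i = 0; i < 3; i++) {
		printf("urb %u starts with DATA%u\n", i, toggle);
		toggle ^= tds_per_urb[i] & 1;		/* odd count flips it */
	}
	return 0;
}
#endif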
/*
* Link an Isochronous QH into its skeleton's list
*/
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
/* Isochronous QHs aren't linked by the hardware */
}
/*
* Link a high-period interrupt QH into the schedule at the end of its
* skeleton's list
*/
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
qh->link = pqh->link;
wmb();
pqh->link = LINK_TO_QH(uhci, qh);
}
/*
* Link a period-1 interrupt or async QH into the schedule at the
* correct spot in the async skeleton's list, and update the FSBR link
*/
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
__hc32 link_to_new_qh;
/* Find the predecessor QH for our new one and insert it in the list.
* The list of QHs is expected to be short, so linear search won't
* take too long. */
list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
if (pqh->skel <= qh->skel)
break;
}
list_add(&qh->node, &pqh->node);
/* Link it into the schedule */
qh->link = pqh->link;
wmb();
link_to_new_qh = LINK_TO_QH(uhci, qh);
pqh->link = link_to_new_qh;
/* If this is now the first FSBR QH, link the terminating skeleton
* QH to it. */
if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
uhci->skel_term_qh->link = link_to_new_qh;
}
/*
* Put a QH on the schedule in both hardware and software
*/
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
WARN_ON(list_empty(&qh->queue));
/* Set the element pointer if it isn't set already.
* This isn't needed for Isochronous queues, but it doesn't hurt. */
if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
qh->element = LINK_TO_TD(uhci, td);
}
/* Treat the queue as if it has just advanced */
qh->wait_expired = 0;
qh->advance_jiffies = jiffies;
if (qh->state == QH_STATE_ACTIVE)
return;
qh->state = QH_STATE_ACTIVE;
/* Move the QH from its old list to the correct spot in the appropriate
* skeleton's list */
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_del(&qh->node);
if (qh->skel == SKEL_ISO)
link_iso(uhci, qh);
else if (qh->skel < SKEL_ASYNC)
link_interrupt(uhci, qh);
else
link_async(uhci, qh);
}
/*
* Unlink a high-period interrupt QH from the schedule
*/
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
pqh->link = qh->link;
mb();
}
/*
* Unlink a period-1 interrupt or async QH from the schedule
*/
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
__hc32 link_to_next_qh = qh->link;
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
pqh->link = link_to_next_qh;
/* If this was the old first FSBR QH, link the terminating skeleton
* QH to the next (new first FSBR) QH. */
if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
uhci->skel_term_qh->link = link_to_next_qh;
mb();
}
/*
* Take a QH off the hardware schedule
*/
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
if (qh->state == QH_STATE_UNLINKING)
return;
WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
qh->state = QH_STATE_UNLINKING;
/* Unlink the QH from the schedule and record when we did it */
if (qh->skel == SKEL_ISO)
;
else if (qh->skel < SKEL_ASYNC)
unlink_interrupt(uhci, qh);
else
unlink_async(uhci, qh);
uhci_get_current_frame_number(uhci);
qh->unlink_frame = uhci->frame_number;
/* Force an interrupt so we know when the QH is fully unlinked */
if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
uhci_set_next_interrupt(uhci);
/* Move the QH from its old list to the end of the unlinking list */
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
/*
* When we and the controller are through with a QH, it becomes IDLE.
* This happens when a QH has been off the schedule (on the unlinking
* list) for more than one frame, or when an error occurs while adding
* the first URB onto a new QH.
*/
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
WARN_ON(qh->state == QH_STATE_ACTIVE);
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_move(&qh->node, &uhci->idle_qh_list);
qh->state = QH_STATE_IDLE;
/* Now that the QH is idle, its post_td isn't being used */
if (qh->post_td) {
uhci_free_td(uhci, qh->post_td);
qh->post_td = NULL;
}
/* If anyone is waiting for a QH to become idle, wake them up */
if (uhci->num_waiting)
wake_up_all(&uhci->waitqh);
}
/*
* Find the highest existing bandwidth load for a given phase and period.
*/
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
int highest_load = uhci->load[phase];
for (phase += period; phase < MAX_PHASE; phase += period)
highest_load = max_t(int, highest_load, uhci->load[phase]);
return highest_load;
}
/*
* Set qh->phase to the optimal phase for a periodic transfer and
* check whether the bandwidth requirement is acceptable.
*/
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
int minimax_load;
/* Find the optimal phase (unless it is already set) and get
* its load value. */
if (qh->phase >= 0)
minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
else {
int phase, load;
int max_phase = min_t(int, MAX_PHASE, qh->period);
qh->phase = 0;
minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
for (phase = 1; phase < max_phase; ++phase) {
load = uhci_highest_load(uhci, phase, qh->period);
if (load < minimax_load) {
minimax_load = load;
qh->phase = phase;
}
}
}
/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
if (minimax_load + qh->load > 900) {
dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
"period %d, phase %d, %d + %d us\n",
qh->period, qh->phase, minimax_load, qh->load);
return -ENOSPC;
}
return 0;
}
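/*
 * Illustrative sketch (compiled out): the minimax search above in
 * isolation -- for each candidate phase, take the worst-case load across
 * the period, then keep the phase whose worst case is smallest. The
 * 32-slot table size and the load numbers are illustrative assumptions.
 */
#if 0
#include <stdio.h>

#define SKETCH_MAX_PHASE 32

static int highest_load(const int *load, int phase, int period)
{
	int hi = load[phase];

	for (phase += period; phase < SKETCH_MAX_PHASE; phase += period)
		if (load[phase] > hi)
			hi = load[phase];
	return hi;
}

int main(void)
{
	int load[SKETCH_MAX_PHASE] = { 500, 100, 300, 100 };
	int period = 4, best_phase = 0;
	int best = highest_load(load, 0, period);

	for (int phase = 1; phase < period; phase++) {
		int l = highest_load(load, phase, period);

		if (l < best) {
			best = l;
			best_phase = phase;
		}
	}
	printf("phase %d, worst-case load %d us\n", best_phase, best);
	return 0;
}
#endif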
/*
* Reserve a periodic QH's bandwidth in the schedule
*/
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
int i;
int load = qh->load;
char *p = "??";
for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
uhci->load[i] += load;
uhci->total_load += load;
}
uhci_to_hcd(uhci)->self.bandwidth_allocated =
uhci->total_load / MAX_PHASE;
switch (qh->type) {
case USB_ENDPOINT_XFER_INT:
++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
p = "INT";
break;
case USB_ENDPOINT_XFER_ISOC:
++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
p = "ISO";
break;
}
qh->bandwidth_reserved = 1;
dev_dbg(uhci_dev(uhci),
"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
"reserve", qh->udev->devnum,
qh->hep->desc.bEndpointAddress, p,
qh->period, qh->phase, load);
}
/*
* Release a periodic QH's bandwidth reservation
*/
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
int i;
int load = qh->load;
char *p = "??";
for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
uhci->load[i] -= load;
uhci->total_load -= load;
}
uhci_to_hcd(uhci)->self.bandwidth_allocated =
uhci->total_load / MAX_PHASE;
switch (qh->type) {
case USB_ENDPOINT_XFER_INT:
--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
p = "INT";
break;
case USB_ENDPOINT_XFER_ISOC:
--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
p = "ISO";
break;
}
qh->bandwidth_reserved = 0;
dev_dbg(uhci_dev(uhci),
"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
"release", qh->udev->devnum,
qh->hep->desc.bEndpointAddress, p,
qh->period, qh->phase, load);
}
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
struct urb *urb)
{
struct urb_priv *urbp;
urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
if (!urbp)
return NULL;
urbp->urb = urb;
urb->hcpriv = urbp;
INIT_LIST_HEAD(&urbp->node);
INIT_LIST_HEAD(&urbp->td_list);
return urbp;
}
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
struct urb_priv *urbp)
{
struct uhci_td *td, *tmp;
if (!list_empty(&urbp->node))
dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
urbp->urb);
list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
uhci_remove_td_from_urbp(td);
uhci_free_td(uhci, td);
}
kmem_cache_free(uhci_up_cachep, urbp);
}
/*
* Map status to standard result codes
*
* <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
* uhci_status_bits(td_status(uhci, td)).
* Note: <status> does not include the TD_CTRL_NAK bit.
* <dir_out> is True for output TDs and False for input TDs.
*/
static int uhci_map_status(int status, int dir_out)
{
if (!status)
return 0;
if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
return -EPROTO;
if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
if (dir_out)
return -EPROTO;
else
return -EILSEQ;
}
if (status & TD_CTRL_BABBLE) /* Babble */
return -EOVERFLOW;
if (status & TD_CTRL_DBUFERR) /* Buffer error */
return -ENOSR;
if (status & TD_CTRL_STALLED) /* Stalled */
return -EPIPE;
return 0;
}
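/*
 * For example, a CRC/timeout error on an IN TD (dir_out false) maps
 * to -EILSEQ because the data we received was corrupt, while the same
 * error on an OUT TD maps to -EPROTO because the device most likely
 * never acknowledged the packet at all.
 */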
/*
* Control transfers
*/
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td;
unsigned long destination, status;
int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
dma_addr_t data = urb->transfer_dma;
__hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
int skel;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
/* 3 errors, dummy TD remains inactive */
status = uhci_maxerr(3);
if (urb->dev->speed == USB_SPEED_LOW)
status |= TD_CTRL_LS;
/*
* Build the TD for the control request setup packet
*/
td = qh->dummy_td;
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
urb->setup_dma);
plink = &td->link;
status |= TD_CTRL_ACTIVE;
/*
* If direction is "send", change the packet ID from SETUP (0x2D)
* to OUT (0xE1). Else change it from SETUP to IN (0x69) and
* set Short Packet Detect (SPD) for all data packets.
*
* 0-length transfers always get treated as "send".
*/
if (usb_pipeout(urb->pipe) || len == 0)
destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
else {
destination ^= (USB_PID_SETUP ^ USB_PID_IN);
status |= TD_CTRL_SPD;
}
/*
* Build the DATA TDs
*/
while (len > 0) {
int pktsze = maxsze;
if (len <= pktsze) { /* The last data packet */
pktsze = len;
status &= ~TD_CTRL_SPD;
}
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
/* Alternate Data0/1 (start with Data1) */
destination ^= TD_TOKEN_TOGGLE;
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status,
destination | uhci_explen(pktsze), data);
plink = &td->link;
data += pktsze;
len -= pktsze;
}
/*
* Build the final TD for control status
*/
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
/* Change direction for the status transaction */
destination ^= (USB_PID_IN ^ USB_PID_OUT);
destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
destination | uhci_explen(0), 0);
plink = &td->link;
/*
* Build the new dummy TD and activate the old one
*/
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
/* Low-speed transfers get a different queue, and won't hog the bus.
* Also, some devices enumerate better without FSBR; the easiest way
* to do that is to put URBs on the low-speed queue while the device
* isn't in the CONFIGURED state. */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->state != USB_STATE_CONFIGURED)
skel = SKEL_LS_CONTROL;
else {
skel = SKEL_FS_CONTROL;
uhci_add_fsbr(uhci, urb);
}
if (qh->state != QH_STATE_ACTIVE)
qh->skel = skel;
return 0;
nomem:
/* Remove the dummy TD from the td_list so it doesn't get freed */
uhci_remove_td_from_urbp(qh->dummy_td);
return -ENOMEM;
}
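/*
 * The TD chain built above, sketched for a control transfer with N
 * data packets:
 *
 * SETUP (DATA0) -> DATA1 -> DATA0 -> ... -> STATUS (DATA1) -> dummy
 *
 * The old dummy TD becomes the SETUP TD and is activated last, after
 * the wmb(), so the controller never sees a half-built queue; the
 * freshly allocated inactive TD becomes the new dummy.
 */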
/*
* Common submit for bulk and interrupt
*/
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td;
unsigned long destination, status;
int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
int this_sg_len;
dma_addr_t data;
__hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
unsigned int toggle;
struct scatterlist *sg;
int i;
if (len < 0)
return -EINVAL;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
/* 3 errors, dummy TD remains inactive */
status = uhci_maxerr(3);
if (urb->dev->speed == USB_SPEED_LOW)
status |= TD_CTRL_LS;
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
/* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
this_sg_len = min_t(int, sg_dma_len(sg), len);
} else {
sg = NULL;
data = urb->transfer_dma;
this_sg_len = len;
}
/*
* Build the DATA TDs
*/
plink = NULL;
td = qh->dummy_td;
for (;;) { /* Allow zero length packets */
int pktsze = maxsze;
if (len <= pktsze) { /* The last packet */
pktsze = len;
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
status &= ~TD_CTRL_SPD;
}
if (plink) {
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
}
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status,
destination | uhci_explen(pktsze) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
plink = &td->link;
status |= TD_CTRL_ACTIVE;
toggle ^= 1;
data += pktsze;
this_sg_len -= pktsze;
len -= maxsze;
if (this_sg_len <= 0) {
if (--i <= 0 || len <= 0)
break;
sg = sg_next(sg);
data = sg_dma_address(sg);
this_sg_len = min_t(int, sg_dma_len(sg), len);
}
}
/*
* URB_ZERO_PACKET means adding a 0-length packet if the direction
* is OUT and the transfer length was an exact multiple of maxsze,
* hence (len = transfer_length - N * maxsze) == 0.  However, if
* transfer_length == 0, the zero packet was already prepared above.
*/
if ((urb->transfer_flags & URB_ZERO_PACKET) &&
usb_pipeout(urb->pipe) && len == 0 &&
urb->transfer_buffer_length > 0) {
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status,
destination | uhci_explen(0) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
plink = &td->link;
toggle ^= 1;
}
/* Set the interrupt-on-completion flag on the last packet.
* A more-or-less typical 4 KB URB (= size of one memory page)
* will require about 3 ms to transfer; that's a little on the
* fast side but not enough to justify delaying an interrupt
* more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
* flag setting. */
td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/*
* Build the new dummy TD and activate the old one
*/
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
*plink = LINK_TO_TD(uhci, td);
uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), toggle);
return 0;
nomem:
/* Remove the dummy TD from the td_list so it doesn't get freed */
uhci_remove_td_from_urbp(qh->dummy_td);
return -ENOMEM;
}
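/*
 * Example of the URB_ZERO_PACKET arithmetic above: with maxsze = 64
 * and transfer_buffer_length = 128, the loop builds two full-size TDs
 * and exits with len == 0, so an extra 0-length OUT TD is appended to
 * tell the device that the transfer is complete.
 */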
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
int ret;
/* Can't have low-speed bulk transfers */
if (urb->dev->speed == USB_SPEED_LOW)
return -EINVAL;
if (qh->state != QH_STATE_ACTIVE)
qh->skel = SKEL_BULK;
ret = uhci_submit_common(uhci, urb, qh);
if (ret == 0)
uhci_add_fsbr(uhci, urb);
return ret;
}
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
int ret;
/* USB 1.1 interrupt transfers only involve one packet per interval.
* Drivers can submit URBs of any length, but longer ones will need
* multiple intervals to complete.
*/
if (!qh->bandwidth_reserved) {
int exponent;
/* Figure out which power-of-two queue to use */
for (exponent = 7; exponent >= 0; --exponent) {
if ((1 << exponent) <= urb->interval)
break;
}
if (exponent < 0)
return -EINVAL;
/* If the slot is full, try a lower period */
do {
qh->period = 1 << exponent;
qh->skel = SKEL_INDEX(exponent);
/* For now, interrupt phase is fixed by the layout
* of the QH lists.
*/
qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
ret = uhci_check_bandwidth(uhci, qh);
} while (ret != 0 && --exponent >= 0);
if (ret)
return ret;
} else if (qh->period > urb->interval)
return -EINVAL; /* Can't decrease the period */
ret = uhci_submit_common(uhci, urb, qh);
if (ret == 0) {
urb->interval = qh->period;
if (!qh->bandwidth_reserved)
uhci_reserve_bandwidth(uhci, qh);
}
return ret;
}
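/*
 * Example of the period selection above: an URB with interval 10
 * picks exponent 3 (8 <= 10 < 16), i.e. period 8 and, assuming
 * MAX_PHASE is 32, phase (8 / 2) & 31 = 4.  If that slot is over
 * budget the loop retries with period 4, then 2, then 1 before
 * giving up.
 */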
/*
* Fix up the data structures following a short transfer
*/
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
struct uhci_qh *qh, struct urb_priv *urbp)
{
struct uhci_td *td;
struct list_head *tmp;
int ret;
td = list_entry(urbp->td_list.prev, struct uhci_td, list);
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
/* When a control transfer is short, we have to restart
* the queue at the status stage transaction, which is
* the last TD. */
WARN_ON(list_empty(&urbp->td_list));
qh->element = LINK_TO_TD(uhci, td);
tmp = td->list.prev;
ret = -EINPROGRESS;
} else {
/* When a bulk/interrupt transfer is short, we have to
* fix up the toggles of the following URBs on the queue
* before restarting the queue at the next URB. */
qh->initial_toggle =
uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
uhci_fixup_toggles(uhci, qh, 1);
if (list_empty(&urbp->td_list))
td = qh->post_td;
qh->element = td->link;
tmp = urbp->td_list.prev;
ret = 0;
}
/* Remove all the TDs we skipped over, from tmp back to the start */
while (tmp != &urbp->td_list) {
td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->prev;
uhci_remove_td_from_urbp(td);
uhci_free_td(uhci, td);
}
return ret;
}
/*
* Common result for control, bulk, and interrupt
*/
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
struct uhci_qh *qh = urbp->qh;
struct uhci_td *td, *tmp;
unsigned status;
int ret = 0;
list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
unsigned int ctrlstat;
int len;
ctrlstat = td_status(uhci, td);
status = uhci_status_bits(ctrlstat);
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
len = uhci_actual_length(ctrlstat);
urb->actual_length += len;
if (status) {
ret = uhci_map_status(status,
uhci_packetout(td_token(uhci, td)));
if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dev_dbg(&urb->dev->dev,
"%s: failed with status %x\n",
__func__, status);
if (debug > 1 && errbuf) {
/* Print the chain for debugging */
uhci_show_qh(uhci, urbp->qh, errbuf,
ERRBUF_LEN - EXTRA_SPACE, 0);
lprintk(errbuf);
}
}
/* Did we receive a short packet? */
} else if (len < uhci_expected_length(td_token(uhci, td))) {
/* For control transfers, go to the status TD if
* this isn't already the last data TD */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
if (td->list.next != urbp->td_list.prev)
ret = 1;
}
/* For bulk and interrupt, this may be an error */
else if (urb->transfer_flags & URB_SHORT_NOT_OK)
ret = -EREMOTEIO;
/* Fixup needed only if this isn't the URB's last TD */
else if (&td->list != urbp->td_list.prev)
ret = 1;
}
uhci_remove_td_from_urbp(td);
if (qh->post_td)
uhci_free_td(uhci, qh->post_td);
qh->post_td = td;
if (ret != 0)
goto err;
}
return ret;
err:
if (ret < 0) {
/* Note that the queue has stopped and save
* the next toggle value */
qh->element = UHCI_PTR_TERM(uhci);
qh->is_stopped = 1;
qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
(ret == -EREMOTEIO);
} else /* Short packet received */
ret = uhci_fixup_short_transfer(uhci, qh, urbp);
return ret;
}
/*
* Isochronous transfers
*/
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
int i;
unsigned frame, next;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
/* Values must not be too big (could overflow below) */
if (urb->interval >= UHCI_NUMFRAMES ||
urb->number_of_packets >= UHCI_NUMFRAMES)
return -EFBIG;
uhci_get_current_frame_number(uhci);
/* Check the period and figure out the starting frame number */
if (!qh->bandwidth_reserved) {
qh->period = urb->interval;
qh->phase = -1; /* Find the best phase */
i = uhci_check_bandwidth(uhci, qh);
if (i)
return i;
/* Allow a little time to allocate the TDs */
next = uhci->frame_number + 10;
frame = qh->phase;
/* Round up to the first available slot */
frame += (next - frame + qh->period - 1) & -qh->period;
} else if (qh->period != urb->interval) {
return -EINVAL; /* Can't change the period */
} else {
next = uhci->frame_number + 1;
/* Find the next unused frame */
if (list_empty(&qh->queue)) {
frame = qh->iso_frame;
} else {
struct urb *lurb;
lurb = list_entry(qh->queue.prev,
struct urb_priv, node)->urb;
frame = lurb->start_frame +
lurb->number_of_packets *
lurb->interval;
}
/* Fell behind? */
if (!uhci_frame_before_eq(next, frame)) {
/* USB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP)
frame += (next - frame + qh->period - 1) &
-qh->period;
/*
* Not ASAP: Use the next slot in the stream,
* no matter what.
*/
else if (!uhci_frame_before_eq(next,
frame + (urb->number_of_packets - 1) *
qh->period))
dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
urb, frame,
(urb->number_of_packets - 1) *
qh->period,
next);
}
}
/* Make sure we won't have to go too far into the future */
if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
frame + urb->number_of_packets * urb->interval))
return -EFBIG;
urb->start_frame = frame;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
for (i = 0; i < urb->number_of_packets; i++) {
td = uhci_alloc_td(uhci);
if (!td)
return -ENOMEM;
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(uhci, td, status, destination |
uhci_explen(urb->iso_frame_desc[i].length),
urb->transfer_dma +
urb->iso_frame_desc[i].offset);
}
/* Set the interrupt-on-completion flag on the last packet. */
td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/* Add the TDs to the frame list */
frame = urb->start_frame;
list_for_each_entry(td, &urbp->td_list, list) {
uhci_insert_td_in_frame_list(uhci, td, frame);
frame += qh->period;
}
if (list_empty(&qh->queue)) {
qh->iso_packet_desc = &urb->iso_frame_desc[0];
qh->iso_frame = urb->start_frame;
}
qh->skel = SKEL_ISO;
if (!qh->bandwidth_reserved)
uhci_reserve_bandwidth(uhci, qh);
return 0;
}
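/*
 * The rounding expression used twice above,
 * "frame += (next - frame + qh->period - 1) & -qh->period", relies on
 * the period being a power of two: ANDing with -period clears the low
 * bits, rounding the sum up to a multiple of the period.  A minimal
 * stand-alone sketch of the trick (illustration only, not used by the
 * driver):
 */
static unsigned int __maybe_unused uhci_round_up_to_phase(
		unsigned int frame, unsigned int next, unsigned int period)
{
	/* E.g. frame = 5, next = 103, period = 8: (98 + 7) & ~7 = 104,
	 * so the result is 109, the first frame at or after 103 that
	 * keeps the QH's phase (109 % 8 == 5). */
	return frame + ((next - frame + period - 1) & -period);
}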
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
struct uhci_td *td, *tmp;
struct urb_priv *urbp = urb->hcpriv;
struct uhci_qh *qh = urbp->qh;
list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
unsigned int ctrlstat;
int status;
int actlength;
if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
return -EINPROGRESS;
uhci_remove_tds_from_frame(uhci, qh->iso_frame);
ctrlstat = td_status(uhci, td);
if (ctrlstat & TD_CTRL_ACTIVE) {
status = -EXDEV; /* TD was added too late? */
} else {
status = uhci_map_status(uhci_status_bits(ctrlstat),
usb_pipeout(urb->pipe));
actlength = uhci_actual_length(ctrlstat);
urb->actual_length += actlength;
qh->iso_packet_desc->actual_length = actlength;
qh->iso_packet_desc->status = status;
}
if (status)
urb->error_count++;
uhci_remove_td_from_urbp(td);
uhci_free_td(uhci, td);
qh->iso_frame += qh->period;
++qh->iso_packet_desc;
}
return 0;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb, gfp_t mem_flags)
{
int ret;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb_priv *urbp;
struct uhci_qh *qh;
spin_lock_irqsave(&uhci->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto done_not_linked;
ret = -ENOMEM;
urbp = uhci_alloc_urb_priv(uhci, urb);
if (!urbp)
goto done;
if (urb->ep->hcpriv)
qh = urb->ep->hcpriv;
else {
qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
if (!qh)
goto err_no_qh;
}
urbp->qh = qh;
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
ret = uhci_submit_control(uhci, urb, qh);
break;
case USB_ENDPOINT_XFER_BULK:
ret = uhci_submit_bulk(uhci, urb, qh);
break;
case USB_ENDPOINT_XFER_INT:
ret = uhci_submit_interrupt(uhci, urb, qh);
break;
case USB_ENDPOINT_XFER_ISOC:
urb->error_count = 0;
ret = uhci_submit_isochronous(uhci, urb, qh);
break;
}
if (ret != 0)
goto err_submit_failed;
/* Add this URB to the QH */
list_add_tail(&urbp->node, &qh->queue);
/* If the new URB is the first and only one on this QH then either
* the QH is new and idle or else it's unlinked and waiting to
* become idle, so we can activate it right away. But only if the
* queue isn't stopped. */
if (qh->queue.next == &urbp->node && !qh->is_stopped) {
uhci_activate_qh(uhci, qh);
uhci_urbp_wants_fsbr(uhci, urbp);
}
goto done;
err_submit_failed:
if (qh->state == QH_STATE_IDLE)
uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
err_no_qh:
uhci_free_urb_priv(uhci, urbp);
done:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
spin_unlock_irqrestore(&uhci->lock, flags);
return ret;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct uhci_qh *qh;
int rc;
spin_lock_irqsave(&uhci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
qh = ((struct urb_priv *) urb->hcpriv)->qh;
/* Remove Isochronous TDs from the frame list ASAP */
if (qh->type == USB_ENDPOINT_XFER_ISOC) {
uhci_unlink_isochronous_tds(uhci, urb);
mb();
/* If the URB has already started, update the QH unlink time */
uhci_get_current_frame_number(uhci);
if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
qh->unlink_frame = uhci->frame_number;
}
uhci_unlink_qh(uhci, qh);
done:
spin_unlock_irqrestore(&uhci->lock, flags);
return rc;
}
/*
* Finish unlinking an URB and give it back
*/
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
/* Subtract off the length of the SETUP packet from
* urb->actual_length.
*/
urb->actual_length -= min_t(u32, 8, urb->actual_length);
}
/* When giving back the first URB in an Isochronous queue,
* reinitialize the QH's iso-related members for the next URB. */
else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
urbp->node.prev == &qh->queue &&
urbp->node.next != &qh->queue) {
struct urb *nurb = list_entry(urbp->node.next,
struct urb_priv, node)->urb;
qh->iso_packet_desc = &nurb->iso_frame_desc[0];
qh->iso_frame = nurb->start_frame;
}
/* Take the URB off the QH's queue. If the queue is now empty,
* this is a perfect time for a toggle fixup. */
list_del_init(&urbp->node);
if (list_empty(&qh->queue) && qh->needs_fixup) {
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), qh->initial_toggle);
qh->needs_fixup = 0;
}
uhci_free_urb_priv(uhci, urbp);
usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
spin_unlock(&uhci->lock);
usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
spin_lock(&uhci->lock);
/* If the queue is now empty, we can unlink the QH and give up its
* reserved bandwidth. */
if (list_empty(&qh->queue)) {
uhci_unlink_qh(uhci, qh);
if (qh->bandwidth_reserved)
uhci_release_bandwidth(uhci, qh);
}
}
/*
* Scan the URBs in a QH's queue
*/
#define QH_FINISHED_UNLINKING(qh) \
(qh->state == QH_STATE_UNLINKING && \
uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
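/*
 * Reading the macro above: a QH unlinked during frame N may still be
 * cached by the controller until the next frame begins, so it has
 * "finished unlinking" only once frame_number has moved past
 * unlink_frame.  While the controller is stopped no new frame will
 * ever begin, so is_stopped (nonzero then, presumably UHCI_IS_STOPPED)
 * is added to make the inequality hold immediately.
 */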
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct urb_priv *urbp;
struct urb *urb;
int status;
while (!list_empty(&qh->queue)) {
urbp = list_entry(qh->queue.next, struct urb_priv, node);
urb = urbp->urb;
if (qh->type == USB_ENDPOINT_XFER_ISOC)
status = uhci_result_isochronous(uhci, urb);
else
status = uhci_result_common(uhci, urb);
if (status == -EINPROGRESS)
break;
/* Dequeued but completed URBs can't be given back unless
* the QH is stopped or has finished unlinking. */
if (urb->unlinked) {
if (QH_FINISHED_UNLINKING(qh))
qh->is_stopped = 1;
else if (!qh->is_stopped)
return;
}
uhci_giveback_urb(uhci, qh, urb, status);
if (status < 0)
break;
}
/* If the QH is neither stopped nor finished unlinking (normal case),
* our work here is done. */
if (QH_FINISHED_UNLINKING(qh))
qh->is_stopped = 1;
else if (!qh->is_stopped)
return;
/* Otherwise give back each of the dequeued URBs */
restart:
list_for_each_entry(urbp, &qh->queue, node) {
urb = urbp->urb;
if (urb->unlinked) {
/* Fix up the TD links and save the toggles for
* non-Isochronous queues. For Isochronous queues,
* test for too-recent dequeues. */
if (!uhci_cleanup_queue(uhci, qh, urb)) {
qh->is_stopped = 0;
return;
}
uhci_giveback_urb(uhci, qh, urb, 0);
goto restart;
}
}
qh->is_stopped = 0;
/* There are no more dequeued URBs. If there are still URBs on the
* queue, the QH can now be re-activated. */
if (!list_empty(&qh->queue)) {
if (qh->needs_fixup)
uhci_fixup_toggles(uhci, qh, 0);
/* If the first URB on the queue wants FSBR but its time
* limit has expired, set the next TD to interrupt on
* completion before reactivating the QH. */
urbp = list_entry(qh->queue.next, struct urb_priv, node);
if (urbp->fsbr && qh->wait_expired) {
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
uhci_activate_qh(uhci, qh);
}
/* The queue is empty. The QH can become idle if it is fully
* unlinked. */
else if (QH_FINISHED_UNLINKING(qh))
uhci_make_qh_idle(uhci, qh);
}
/*
* Check for queues that have made some forward progress.
* Returns 0 if the queue is not Isochronous, is ACTIVE, and
* has not advanced since last examined; 1 otherwise.
*
* Early Intel controllers have a bug which causes qh->element sometimes
* not to advance when a TD completes successfully. The queue remains
* stuck on the inactive completed TD. We detect such cases and advance
* the element pointer by hand.
*/
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct urb_priv *urbp = NULL;
struct uhci_td *td;
int ret = 1;
unsigned status;
if (qh->type == USB_ENDPOINT_XFER_ISOC)
goto done;
/* Treat an UNLINKING queue as though it hasn't advanced.
* This is okay because reactivation will treat it as though
* it has advanced, and if it is going to become IDLE then
* this doesn't matter anyway. Furthermore it's possible
* for an UNLINKING queue not to have any URBs at all, or
* for its first URB not to have any TDs (if it was dequeued
* just as it completed). So it's not easy in any case to
* test whether such queues have advanced. */
if (qh->state != QH_STATE_ACTIVE) {
urbp = NULL;
status = 0;
} else {
urbp = list_entry(qh->queue.next, struct urb_priv, node);
td = list_entry(urbp->td_list.next, struct uhci_td, list);
status = td_status(uhci, td);
if (!(status & TD_CTRL_ACTIVE)) {
/* We're okay, the queue has advanced */
qh->wait_expired = 0;
qh->advance_jiffies = jiffies;
goto done;
}
ret = uhci->is_stopped;
}
/* The queue hasn't advanced; check for timeout */
if (qh->wait_expired)
goto done;
if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
/* Detect the Intel bug and work around it */
if (qh->post_td && qh_element(qh) ==
LINK_TO_TD(uhci, qh->post_td)) {
qh->element = qh->post_td->link;
qh->advance_jiffies = jiffies;
ret = 1;
goto done;
}
qh->wait_expired = 1;
/* If the current URB wants FSBR, unlink it temporarily
* so that we can safely set the next TD to interrupt on
* completion. That way we'll know as soon as the queue
* starts moving again. */
if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
uhci_unlink_qh(uhci, qh);
} else {
/* Unmoving but not-yet-expired queues keep FSBR alive */
if (urbp)
uhci_urbp_wants_fsbr(uhci, urbp);
}
done:
return ret;
}
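/*
 * A note on the workaround above: post_td is the TD most recently
 * completed and retired by uhci_result_common(), so if the queue's
 * element pointer still refers to it, the controller failed to move
 * past an inactive TD.  Setting element to post_td->link performs by
 * hand exactly the advance the hardware should have made.
 */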
/*
* Process events in the schedule, but only in one thread at a time
*/
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
int i;
struct uhci_qh *qh;
/* Don't allow re-entrant calls */
if (uhci->scan_in_progress) {
uhci->need_rescan = 1;
return;
}
uhci->scan_in_progress = 1;
rescan:
uhci->need_rescan = 0;
uhci->fsbr_is_wanted = 0;
uhci_clear_next_interrupt(uhci);
uhci_get_current_frame_number(uhci);
uhci->cur_iso_frame = uhci->frame_number;
/* Go through all the QH queues and process the URBs in each one */
for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
struct uhci_qh, node);
while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
uhci->next_qh = list_entry(qh->node.next,
struct uhci_qh, node);
if (uhci_advance_check(uhci, qh)) {
uhci_scan_qh(uhci, qh);
if (qh->state == QH_STATE_ACTIVE) {
uhci_urbp_wants_fsbr(uhci,
list_entry(qh->queue.next, struct urb_priv, node));
}
}
}
}
uhci->last_iso_frame = uhci->cur_iso_frame;
if (uhci->need_rescan)
goto rescan;
uhci->scan_in_progress = 0;
if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
!uhci->fsbr_expiring) {
uhci->fsbr_expiring = 1;
mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
}
if (list_empty(&uhci->skel_unlink_qh->node))
uhci_clear_next_interrupt(uhci);
else
uhci_set_next_interrupt(uhci);
}
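/*
 * The scan_in_progress/need_rescan pair above is a small "single
 * scanner, never miss a request" pattern: a caller that finds a scan
 * already running merely sets need_rescan, and the running scanner
 * keeps looping until it finishes a pass during which no new rescan
 * was requested.  Both flags are assumed to be protected by
 * uhci->lock, held by the callers, so no atomics are needed.
 */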
| linux-master | drivers/usb/host/uhci-q.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
*/
/* this file is part of ehci-hcd.c */
#ifdef CONFIG_DYNAMIC_DEBUG
/*
* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
* see EHCI spec, Table 2-4 for each value
*/
static void dbg_hcs_params(struct ehci_hcd *ehci, char *label)
{
u32 params = ehci_readl(ehci, &ehci->caps->hcs_params);
ehci_dbg(ehci,
"%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
label, params,
HCS_DEBUG_PORT(params),
HCS_INDICATOR(params) ? " ind" : "",
HCS_N_CC(params),
HCS_N_PCC(params),
HCS_PORTROUTED(params) ? "" : " ordered",
HCS_PPC(params) ? "" : " !ppc",
HCS_N_PORTS(params));
/* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */
if (HCS_PORTROUTED(params)) {
int i;
char buf[46], tmp[7], byte;
buf[0] = 0;
for (i = 0; i < HCS_N_PORTS(params); i++) {
/* FIXME MIPS won't readb() ... */
byte = readb(&ehci->caps->portroute[(i >> 1)]);
sprintf(tmp, "%d ",
(i & 0x1) ? byte & 0xf : (byte >> 4) & 0xf);
strcat(buf, tmp);
}
ehci_dbg(ehci, "%s portroute %s\n", label, buf);
}
}
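/*
 * Example of the nibble decode above: each portroute byte carries two
 * 4-bit companion-controller numbers, with even-indexed ports in the
 * high nibble and odd-indexed ports in the low one.  A byte of 0x21
 * therefore maps port i = 0 to companion 2 and port i = 1 to
 * companion 1.
 */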
/*
* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
* see EHCI Spec, Table 2-5 for each value
*/
static void dbg_hcc_params(struct ehci_hcd *ehci, char *label)
{
u32 params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(params)) {
ehci_dbg(ehci,
"%s hcc_params %04x caching frame %s%s%s\n",
label, params,
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
ehci_dbg(ehci,
"%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
label,
params,
HCC_ISOC_THRES(params),
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
HCC_LPM(params) ? " LPM" : "",
HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
HCC_32FRAME_PERIODIC_LIST(params) ?
" 32 periodic list" : "");
}
}
static void __maybe_unused
dbg_qtd(const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
hc32_to_cpup(ehci, &qtd->hw_next),
hc32_to_cpup(ehci, &qtd->hw_alt_next),
hc32_to_cpup(ehci, &qtd->hw_token),
hc32_to_cpup(ehci, &qtd->hw_buf[0]));
if (qtd->hw_buf[1])
ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
hc32_to_cpup(ehci, &qtd->hw_buf[1]),
hc32_to_cpup(ehci, &qtd->hw_buf[2]),
hc32_to_cpup(ehci, &qtd->hw_buf[3]),
hc32_to_cpup(ehci, &qtd->hw_buf[4]));
}
static void __maybe_unused
dbg_qh(const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
ehci_dbg(ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
dbg_itd(const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
{
ehci_dbg(ehci, "%s [%d] itd %p, next %08x, urb %p\n",
label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
itd->urb);
ehci_dbg(ehci,
" trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
hc32_to_cpu(ehci, itd->hw_transaction[0]),
hc32_to_cpu(ehci, itd->hw_transaction[1]),
hc32_to_cpu(ehci, itd->hw_transaction[2]),
hc32_to_cpu(ehci, itd->hw_transaction[3]),
hc32_to_cpu(ehci, itd->hw_transaction[4]),
hc32_to_cpu(ehci, itd->hw_transaction[5]),
hc32_to_cpu(ehci, itd->hw_transaction[6]),
hc32_to_cpu(ehci, itd->hw_transaction[7]));
ehci_dbg(ehci,
" buf: %08x %08x %08x %08x %08x %08x %08x\n",
hc32_to_cpu(ehci, itd->hw_bufp[0]),
hc32_to_cpu(ehci, itd->hw_bufp[1]),
hc32_to_cpu(ehci, itd->hw_bufp[2]),
hc32_to_cpu(ehci, itd->hw_bufp[3]),
hc32_to_cpu(ehci, itd->hw_bufp[4]),
hc32_to_cpu(ehci, itd->hw_bufp[5]),
hc32_to_cpu(ehci, itd->hw_bufp[6]));
ehci_dbg(ehci, " index: %d %d %d %d %d %d %d %d\n",
itd->index[0], itd->index[1], itd->index[2],
itd->index[3], itd->index[4], itd->index[5],
itd->index[6], itd->index[7]);
}
static void __maybe_unused
dbg_sitd(const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
ehci_dbg(ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
sitd->urb);
ehci_dbg(ehci,
" addr %08x sched %04x result %08x buf %08x %08x\n",
hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
hc32_to_cpu(ehci, sitd->hw_uframe),
hc32_to_cpu(ehci, sitd->hw_results),
hc32_to_cpu(ehci, sitd->hw_buf[0]),
hc32_to_cpu(ehci, sitd->hw_buf[1]));
}
static int __maybe_unused
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf(buf, len,
"%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", status,
(status & STS_PPCE_MASK) ? " PPCE" : "",
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
(status & STS_HALT) ? " Halt" : "",
(status & STS_IAA) ? " IAA" : "",
(status & STS_FATAL) ? " FATAL" : "",
(status & STS_FLR) ? " FLR" : "",
(status & STS_PCD) ? " PCD" : "",
(status & STS_ERR) ? " ERR" : "",
(status & STS_INT) ? " INT" : "");
}
static int __maybe_unused
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf(buf, len,
"%s%sintrenable %02x%s%s%s%s%s%s%s",
label, label[0] ? " " : "", enable,
(enable & STS_PPCE_MASK) ? " PPCE" : "",
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
(enable & STS_PCD) ? " PCD" : "",
(enable & STS_ERR) ? " ERR" : "",
(enable & STS_INT) ? " INT" : "");
}
static const char *const fls_strings[] = { "1024", "512", "256", "??" };
static int
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{
return scnprintf(buf, len,
"%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
"period=%s%s %s",
label, label[0] ? " " : "", command,
(command & CMD_HIRD) ? " HIRD" : "",
(command & CMD_PPCEE) ? " PPCEE" : "",
(command & CMD_FSP) ? " FSP" : "",
(command & CMD_ASPE) ? " ASPE" : "",
(command & CMD_PSPE) ? " PSPE" : "",
(command & CMD_PARK) ? " park" : "(park)",
CMD_PARK_CNT(command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
(command & CMD_IAAD) ? " IAAD" : "",
(command & CMD_ASE) ? " Async" : "",
(command & CMD_PSE) ? " Periodic" : "",
fls_strings[(command >> 2) & 0x3],
(command & CMD_RESET) ? " Reset" : "",
(command & CMD_RUN) ? "RUN" : "HALT");
}
static int
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{
char *sig;
/* signaling state */
switch (status & (3 << 10)) {
case 0 << 10:
sig = "se0";
break;
case 1 << 10: /* low speed */
sig = "k";
break;
case 2 << 10:
sig = "j";
break;
default:
sig = "?";
break;
}
return scnprintf(buf, len,
"%s%sport:%d status %06x %d %s%s%s%s%s%s "
"sig=%s%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", port, status,
status >> 25, /*device address */
(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ACK ?
" ACK" : "",
(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_NYET ?
" NYET" : "",
(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_STALL ?
" STALL" : "",
(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ERR ?
" ERR" : "",
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
(status & PORT_LPM) ? " LPM" : "",
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
(status & PORT_OCC) ? " OCC" : "",
(status & PORT_OC) ? " OC" : "",
(status & PORT_PEC) ? " PEC" : "",
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
(status & PORT_CONNECT) ? " CONNECT" : "");
}
static inline void
dbg_status(struct ehci_hcd *ehci, const char *label, u32 status)
{
char buf[80];
dbg_status_buf(buf, sizeof(buf), label, status);
ehci_dbg(ehci, "%s\n", buf);
}
static inline void
dbg_cmd(struct ehci_hcd *ehci, const char *label, u32 command)
{
char buf[80];
dbg_command_buf(buf, sizeof(buf), label, command);
ehci_dbg(ehci, "%s\n", buf);
}
static inline void
dbg_port(struct ehci_hcd *ehci, const char *label, int port, u32 status)
{
char buf[80];
dbg_port_buf(buf, sizeof(buf), label, port, status);
ehci_dbg(ehci, "%s\n", buf);
}
/*-------------------------------------------------------------------------*/
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
static int debug_bandwidth_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
static const struct file_operations debug_async_fops = {
.owner = THIS_MODULE,
.open = debug_async_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_bandwidth_fops = {
.owner = THIS_MODULE,
.open = debug_bandwidth_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static struct dentry *ehci_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
struct usb_bus *bus;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *output_buf;
size_t alloc_size;
};
static inline char speed_char(u32 info1)
{
switch (info1 & (3 << 12)) {
case QH_FULL_SPEED:
return 'f';
case QH_LOW_SPEED:
return 'l';
case QH_HIGH_SPEED:
return 'h';
default:
return '?';
}
}
static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
__u32 v = hc32_to_cpu(ehci, token);
if (v & QTD_STS_ACTIVE)
return '*';
if (v & QTD_STS_HALT)
return '-';
if (!IS_SHORT_READ(v))
return ' ';
/* tries to advance through hw_alt_next */
return '/';
}
static void qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh,
char **nextp, unsigned *sizep)
{
u32 scratch;
u32 hw_curr;
struct list_head *entry;
struct ehci_qtd *td;
unsigned temp;
unsigned size = *sizep;
char *next = *nextp;
char mark;
__le32 list_end = EHCI_LIST_END(ehci);
struct ehci_qh_hw *hw = qh->hw;
if (hw->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
mark = token_mark(ehci, hw->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
if ((hw->hw_alt_next & QTD_MASK(ehci))
== ehci->async->hw->hw_alt_next)
mark = '#'; /* blocked */
else if (hw->hw_alt_next == list_end)
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
scratch = hc32_to_cpup(ehci, &hw->hw_info1);
hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
temp = scnprintf(next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)"
" [cur %08x next %08x buf[0] %08x]",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
scratch, hc32_to_cpup(ehci, &hw->hw_info2),
hc32_to_cpup(ehci, &hw->hw_token), mark,
(cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token)
? "data1" : "data0",
(hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f,
hc32_to_cpup(ehci, &hw->hw_current),
hc32_to_cpup(ehci, &hw->hw_qtd_next),
hc32_to_cpup(ehci, &hw->hw_buf[0]));
size -= temp;
next += temp;
/* hc may be modifying the list as we read it ... */
list_for_each(entry, &qh->qtd_list) {
char *type;
td = list_entry(entry, struct ehci_qtd, qtd_list);
scratch = hc32_to_cpup(ehci, &td->hw_token);
mark = ' ';
if (hw_curr == td->qtd_dma) {
mark = '*';
} else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) {
mark = '+';
} else if (QTD_LENGTH(scratch)) {
if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != list_end)
mark = '/';
}
switch ((scratch >> 8) & 0x03) {
case 0:
type = "out";
break;
case 1:
type = "in";
break;
case 2:
type = "setup";
break;
default:
type = "?";
break;
}
temp = scnprintf(next, size,
"\n\t%p%c%s len=%d %08x urb %p"
" [td %08x buf[0] %08x]",
td, mark, type,
(scratch >> 16) & 0x7fff,
scratch,
td->urb,
(u32) td->qtd_dma,
hc32_to_cpup(ehci, &td->hw_buf[0]));
size -= temp;
next += temp;
if (temp == size)
goto done;
}
temp = scnprintf(next, size, "\n");
size -= temp;
next += temp;
done:
*sizep = size;
*nextp = next;
}
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
unsigned long flags;
unsigned temp, size;
char *next;
struct ehci_qh *qh;
hcd = bus_to_hcd(buf->bus);
ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
*next = 0;
/*
* dumps a snapshot of the async schedule.
* usually empty except for long-term bulk reads, or head.
* one QH per line, and TDs we know about
*/
spin_lock_irqsave(&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
qh_lines(ehci, qh, &next, &size);
if (!list_empty(&ehci->async_unlink) && size > 0) {
temp = scnprintf(next, size, "\nunlink =\n");
size -= temp;
next += temp;
list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
if (size <= 0)
break;
qh_lines(ehci, qh, &next, &size);
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
return strlen(buf->output_buf);
}
static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
{
struct ehci_hcd *ehci;
struct ehci_tt *tt;
struct ehci_per_sched *ps;
unsigned temp, size;
char *next;
unsigned i;
u8 *bw;
u16 *bf;
u8 budget[EHCI_BANDWIDTH_SIZE];
ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
next = buf->output_buf;
size = buf->alloc_size;
*next = 0;
spin_lock_irq(&ehci->lock);
/* Dump the HS bandwidth table */
temp = scnprintf(next, size,
"HS bandwidth allocation (us per microframe)\n");
size -= temp;
next += temp;
for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
bw = &ehci->bandwidth[i];
temp = scnprintf(next, size,
"%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
i, bw[0], bw[1], bw[2], bw[3],
bw[4], bw[5], bw[6], bw[7]);
size -= temp;
next += temp;
}
/* Dump all the FS/LS tables */
list_for_each_entry(tt, &ehci->tt_list, tt_list) {
temp = scnprintf(next, size,
"\nTT %s port %d FS/LS bandwidth allocation (us per frame)\n",
dev_name(&tt->usb_tt->hub->dev),
tt->tt_port + !!tt->usb_tt->multi);
size -= temp;
next += temp;
bf = tt->bandwidth;
temp = scnprintf(next, size,
" %5u%5u%5u%5u%5u%5u%5u%5u\n",
bf[0], bf[1], bf[2], bf[3],
bf[4], bf[5], bf[6], bf[7]);
size -= temp;
next += temp;
temp = scnprintf(next, size,
"FS/LS budget (us per microframe)\n");
size -= temp;
next += temp;
compute_tt_budget(budget, tt);
for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
bw = &budget[i];
temp = scnprintf(next, size,
"%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
i, bw[0], bw[1], bw[2], bw[3],
bw[4], bw[5], bw[6], bw[7]);
size -= temp;
next += temp;
}
list_for_each_entry(ps, &tt->ps_list, ps_list) {
temp = scnprintf(next, size,
"%s ep %02x: %4u @ %2u.%u+%u mask %04x\n",
dev_name(&ps->udev->dev),
ps->ep->desc.bEndpointAddress,
ps->tt_usecs,
ps->bw_phase, ps->phase_uf,
ps->bw_period, ps->cs_mask);
size -= temp;
next += temp;
}
}
spin_unlock_irq(&ehci->lock);
return next - buf->output_buf;
}
static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci,
struct ehci_qh_hw *hw, struct ehci_qh *qh, unsigned size)
{
u32 scratch = hc32_to_cpup(ehci, &hw->hw_info1);
struct ehci_qtd *qtd;
char *type = "";
unsigned temp = 0;
/* count tds, get ep direction */
list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
temp++;
switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8) & 0x03) {
case 0:
type = "out";
continue;
case 1:
type = "in";
continue;
}
}
return scnprintf(buf, size, " (%c%d ep%d%s [%d/%d] q%d p%d)",
speed_char(scratch), scratch & 0x007f,
(scratch >> 8) & 0x000f, type, qh->ps.usecs,
qh->ps.c_usecs, temp, 0x7ff & (scratch >> 16));
}
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
unsigned long flags;
union ehci_shadow p, *seen;
unsigned temp, size, seen_count;
char *next;
unsigned i;
__hc32 tag;
seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
if (!seen)
return 0;
seen_count = 0;
hcd = bus_to_hcd(buf->bus);
ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
temp = scnprintf(next, size, "size = %d\n", ehci->periodic_size);
size -= temp;
next += temp;
/*
* dump a snapshot of the periodic schedule.
* iso changes, interrupt usually doesn't.
*/
spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < ehci->periodic_size; i++) {
p = ehci->pshadow[i];
if (likely(!p.ptr))
continue;
tag = Q_NEXT_TYPE(ehci, ehci->periodic[i]);
temp = scnprintf(next, size, "%4d: ", i);
size -= temp;
next += temp;
do {
struct ehci_qh_hw *hw;
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
hw = p.qh->hw;
temp = scnprintf(next, size, " qh%d-%04x/%p",
p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
/* uframe masks */
& (QH_CMASK | QH_SMASK),
p.qh);
size -= temp;
next += temp;
/* don't repeat what follows this qh */
for (temp = 0; temp < seen_count; temp++) {
if (seen[temp].ptr != p.ptr)
continue;
if (p.qh->qh_next.ptr) {
temp = scnprintf(next, size,
" ...");
size -= temp;
next += temp;
}
break;
}
/* show more info the first time around */
if (temp == seen_count) {
temp = output_buf_tds_dir(next, ehci,
hw, p.qh, size);
if (seen_count < DBG_SCHED_LIMIT)
seen[seen_count++].qh = p.qh;
} else {
temp = 0;
}
tag = Q_NEXT_TYPE(ehci, hw->hw_next);
p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
temp = scnprintf(next, size,
" fstn-%8x/%p", p.fstn->hw_prev,
p.fstn);
tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = scnprintf(next, size,
" itd/%p", p.itd);
tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
temp = scnprintf(next, size,
" sitd%d-%04x/%p",
p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next);
p = p.sitd->sitd_next;
break;
}
size -= temp;
next += temp;
} while (p.ptr);
temp = scnprintf(next, size, "\n");
size -= temp;
next += temp;
}
spin_unlock_irqrestore(&ehci->lock, flags);
kfree(seen);
return buf->alloc_size - size;
}
#undef DBG_SCHED_LIMIT
static const char *rh_state_string(struct ehci_hcd *ehci)
{
switch (ehci->rh_state) {
case EHCI_RH_HALTED:
return "halted";
case EHCI_RH_SUSPENDED:
return "suspended";
case EHCI_RH_RUNNING:
return "running";
case EHCI_RH_STOPPING:
return "stopping";
}
return "?";
}
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
unsigned long flags;
unsigned temp, size, i;
char *next, scratch[80];
static char fmt[] = "%*s\n";
static char label[] = "";
hcd = bus_to_hcd(buf->bus);
ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
spin_lock_irqsave(&ehci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd)) {
size = scnprintf(next, size,
"bus %s, device %s\n"
"%s\n"
"SUSPENDED (no register access)\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc);
goto done;
}
/* Capability Registers */
i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
temp = scnprintf(next, size,
"bus %s, device %s\n"
"%s\n"
"EHCI %x.%02x, rh state %s\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc,
i >> 8, i & 0x0ff, rh_state_string(ehci));
size -= temp;
next += temp;
#ifdef CONFIG_USB_PCI
/* EHCI 0.96 and later may have "extended capabilities" */
if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
u32 offset, cap, cap2;
unsigned count = 256 / 4;
pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
offset = HCC_EXT_CAPS(ehci_readl(ehci,
&ehci->caps->hcc_params));
while (offset && count--) {
pci_read_config_dword(pdev, offset, &cap);
switch (cap & 0xff) {
case 1:
temp = scnprintf(next, size,
"ownership %08x%s%s\n", cap,
(cap & (1 << 24)) ? " linux" : "",
(cap & (1 << 16)) ? " firmware" : "");
size -= temp;
next += temp;
offset += 4;
pci_read_config_dword(pdev, offset, &cap2);
temp = scnprintf(next, size,
"SMI sts/enable 0x%08x\n", cap2);
size -= temp;
next += temp;
break;
case 0: /* illegal reserved capability */
cap = 0;
fallthrough;
default: /* unknown */
break;
}
offset = (cap >> 8) & 0xff;
}
}
#endif
/* FIXME interpret both types of params */
i = ehci_readl(ehci, &ehci->caps->hcs_params);
temp = scnprintf(next, size, "structural params 0x%08x\n", i);
size -= temp;
next += temp;
i = ehci_readl(ehci, &ehci->caps->hcc_params);
temp = scnprintf(next, size, "capability params 0x%08x\n", i);
size -= temp;
next += temp;
/* Operational Registers */
temp = dbg_status_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->status));
temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
temp = dbg_command_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->command));
temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
temp = dbg_intr_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->intr_enable));
temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
temp = scnprintf(next, size, "uframe %04x\n",
ehci_read_frame_index(ehci));
size -= temp;
next += temp;
for (i = 1; i <= HCS_N_PORTS(ehci->hcs_params); i++) {
temp = dbg_port_buf(scratch, sizeof(scratch), label, i,
ehci_readl(ehci,
&ehci->regs->port_status[i - 1]));
temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
temp = scnprintf(next, size,
" debug control %08x\n",
ehci_readl(ehci,
&ehci->debug->control));
size -= temp;
next += temp;
}
}
if (!list_empty(&ehci->async_unlink)) {
temp = scnprintf(next, size, "async unlink qh %p\n",
list_first_entry(&ehci->async_unlink,
struct ehci_qh, unlink_node));
size -= temp;
next += temp;
}
#ifdef EHCI_STATS
temp = scnprintf(next, size,
"irq normal %ld err %ld iaa %ld (lost %ld)\n",
ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
size -= temp;
next += temp;
temp = scnprintf(next, size, "complete %ld unlink %ld\n",
ehci->stats.complete, ehci->stats.unlink);
size -= temp;
next += temp;
#endif
done:
spin_unlock_irqrestore(&ehci->lock, flags);
return buf->alloc_size - size;
}
static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (buf) {
buf->bus = bus;
buf->fill_func = fill_func;
mutex_init(&buf->mutex);
buf->alloc_size = PAGE_SIZE;
}
return buf;
}
static int fill_buffer(struct debug_buffer *buf)
{
int ret;
if (!buf->output_buf)
buf->output_buf = vmalloc(buf->alloc_size);
if (!buf->output_buf) {
ret = -ENOMEM;
goto out;
}
ret = buf->fill_func(buf);
if (ret >= 0) {
buf->count = ret;
ret = 0;
}
out:
return ret;
}
static ssize_t debug_output(struct file *file, char __user *user_buf,
size_t len, loff_t *offset)
{
struct debug_buffer *buf = file->private_data;
int ret;
mutex_lock(&buf->mutex);
if (buf->count == 0) {
ret = fill_buffer(buf);
if (ret != 0) {
mutex_unlock(&buf->mutex);
goto out;
}
}
mutex_unlock(&buf->mutex);
ret = simple_read_from_buffer(user_buf, len, offset,
buf->output_buf, buf->count);
out:
return ret;
}
static int debug_close(struct inode *inode, struct file *file)
{
struct debug_buffer *buf = file->private_data;
if (buf) {
vfree(buf->output_buf);
kfree(buf);
}
return 0;
}
static int debug_async_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
return file->private_data ? 0 : -ENOMEM;
}
static int debug_bandwidth_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private,
fill_bandwidth_buffer);
return file->private_data ? 0 : -ENOMEM;
}
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
if (!buf)
return -ENOMEM;
buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
file->private_data = buf;
return 0;
}
static int debug_registers_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private,
fill_registers_buffer);
return file->private_data ? 0 : -ENOMEM;
}
static inline void create_debug_files(struct ehci_hcd *ehci)
{
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
&debug_async_fops);
debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
&debug_bandwidth_fops);
debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
&debug_periodic_fops);
debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
&debug_registers_fops);
}
static inline void remove_debug_files(struct ehci_hcd *ehci)
{
debugfs_remove_recursive(ehci->debug_dir);
}
#else /* CONFIG_DYNAMIC_DEBUG */
static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) { }
static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) { }
static inline void __maybe_unused dbg_qh(const char *label,
struct ehci_hcd *ehci, struct ehci_qh *qh) { }
static inline int __maybe_unused dbg_status_buf(const char *buf,
unsigned int len, const char *label, u32 status)
{ return 0; }
static inline int __maybe_unused dbg_command_buf(const char *buf,
unsigned int len, const char *label, u32 command)
{ return 0; }
static inline int __maybe_unused dbg_intr_buf(const char *buf,
unsigned int len, const char *label, u32 enable)
{ return 0; }
static inline int __maybe_unused dbg_port_buf(char *buf,
unsigned int len, const char *label, int port, u32 status)
{ return 0; }
static inline void dbg_status(struct ehci_hcd *ehci, const char *label,
u32 status) { }
static inline void dbg_cmd(struct ehci_hcd *ehci, const char *label,
u32 command) { }
static inline void dbg_port(struct ehci_hcd *ehci, const char *label,
int port, u32 status) { }
static inline void create_debug_files(struct ehci_hcd *bus) { }
static inline void remove_debug_files(struct ehci_hcd *bus) { }
#endif /* CONFIG_DYNAMIC_DEBUG */
| linux-master | drivers/usb/host/ehci-dbg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbgcap.c - xHCI debug capability support
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
if (!ctx)
return;
dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
kfree(ctx);
}
/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
if (!ring)
return;
if (ring->first_seg && ring->first_seg->trbs) {
dma_free_coherent(dev, TRB_SEGMENT_SIZE,
ring->first_seg->trbs,
ring->first_seg->dma);
kfree(ring->first_seg);
}
kfree(ring);
}
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
struct usb_string_descriptor *s_desc;
u32 string_length;
/* Serial string: */
s_desc = (struct usb_string_descriptor *)strings->serial;
utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
DBC_MAX_STRING_LENGTH);
s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
string_length = s_desc->bLength;
string_length <<= 8;
/* Product string: */
s_desc = (struct usb_string_descriptor *)strings->product;
utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
DBC_MAX_STRING_LENGTH);
s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
string_length += s_desc->bLength;
string_length <<= 8;
/* Manufacturer string: */
s_desc = (struct usb_string_descriptor *)strings->manufacturer;
utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
strlen(DBC_STRING_MANUFACTURER),
UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
DBC_MAX_STRING_LENGTH);
s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
string_length += s_desc->bLength;
string_length <<= 8;
/* String0: */
strings->string0[0] = 4;
strings->string0[1] = USB_DT_STRING;
strings->string0[2] = 0x09;
strings->string0[3] = 0x04;
string_length += 4;
return string_length;
}
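/*
 * The repeated shifts above pack all four descriptor lengths into one
 * u32, one byte per string: (serial << 24) | (product << 16) |
 * (manufacturer << 8) | string0.  For example, a 4-character serial
 * number has bLength (4 + 1) * 2 = 10, so 0x0a lands in the top byte.
 * The packed value is written to the DbC Info Context's length field
 * in xhci_dbc_init_contexts() below.
 */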
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
struct dbc_info_context *info;
struct xhci_ep_ctx *ep_ctx;
u32 dev_info;
dma_addr_t deq, dma;
unsigned int max_burst;
if (!dbc)
return;
/* Populate info Context: */
info = (struct dbc_info_context *)dbc->ctx->bytes;
dma = dbc->string_dma;
info->string0 = cpu_to_le64(dma);
info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
info->length = cpu_to_le32(string_length);
/* Populate bulk out endpoint context: */
ep_ctx = dbc_bulkout_ctx(dbc);
max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
deq = dbc_bulkout_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
/* Populate bulk in endpoint context: */
ep_ctx = dbc_bulkin_ctx(dbc);
deq = dbc_bulkin_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
/* Set DbC context and info registers: */
lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
writel(dev_info, &dbc->regs->devinfo1);
dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
writel(dev_info, &dbc->regs->devinfo2);
}
static void xhci_dbc_giveback(struct dbc_request *req, int status)
__releases(&dbc->lock)
__acquires(&dbc->lock)
{
struct xhci_dbc *dbc = req->dbc;
struct device *dev = dbc->dev;
list_del_init(&req->list_pending);
req->trb_dma = 0;
req->trb = NULL;
if (req->status == -EINPROGRESS)
req->status = status;
trace_xhci_dbc_giveback_request(req);
dma_unmap_single(dev,
req->dma,
req->length,
dbc_ep_dma_direction(req));
/* Give back the transfer request: */
spin_unlock(&dbc->lock);
req->complete(dbc, req);
spin_lock(&dbc->lock);
}
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
union xhci_trb *trb = req->trb;
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
xhci_dbc_giveback(req, -ESHUTDOWN);
}
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
struct dbc_request *req, *tmp;
list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
xhci_dbc_flush_single_request(req);
}
static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
struct dbc_request *req;
if (direction != BULK_IN &&
direction != BULK_OUT)
return NULL;
if (!dbc)
return NULL;
req = kzalloc(sizeof(*req), flags);
if (!req)
return NULL;
req->dbc = dbc;
INIT_LIST_HEAD(&req->list_pending);
INIT_LIST_HEAD(&req->list_pool);
req->direction = direction;
trace_xhci_dbc_alloc_request(req);
return req;
}
void
dbc_free_request(struct dbc_request *req)
{
trace_xhci_dbc_free_request(req);
kfree(req);
}
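/*
 * Typical request lifecycle for the two helpers above, as a minimal
 * sketch (dbc_example_submit() and its completion callback are
 * hypothetical, not part of this driver; they only use fields this
 * file itself touches):
 *
 *	static void dbc_example_complete(struct xhci_dbc *dbc,
 *					 struct dbc_request *req)
 *	{
 *		// req->status holds the result, req->actual the bytes done
 *		dbc_free_request(req);
 *	}
 *
 *	static int dbc_example_submit(struct xhci_dbc *dbc, void *buf,
 *				      unsigned int len)
 *	{
 *		struct dbc_request *req;
 *
 *		req = dbc_alloc_request(dbc, BULK_IN, GFP_KERNEL);
 *		if (!req)
 *			return -ENOMEM;
 *		req->buf = buf;
 *		req->length = len;
 *		req->complete = dbc_example_complete;
 *		return dbc_ep_queue(req);
 *	}
 */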
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
u32 field2, u32 field3, u32 field4)
{
union xhci_trb *trb, *next;
trb = ring->enqueue;
trb->generic.field[0] = cpu_to_le32(field1);
trb->generic.field[1] = cpu_to_le32(field2);
trb->generic.field[2] = cpu_to_le32(field3);
trb->generic.field[3] = cpu_to_le32(field4);
trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
ring->num_trbs_free--;
next = ++(ring->enqueue);
if (TRB_TYPE_LINK_LE32(next->link.control)) {
next->link.control ^= cpu_to_le32(TRB_CYCLE);
ring->enqueue = ring->enq_seg->trbs;
ring->cycle_state ^= 1;
}
}
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
struct dbc_request *req)
{
u64 addr;
union xhci_trb *trb;
unsigned int num_trbs;
struct xhci_dbc *dbc = req->dbc;
struct xhci_ring *ring = dep->ring;
u32 length, control, cycle;
num_trbs = count_trbs(req->dma, req->length);
WARN_ON(num_trbs != 1);
if (ring->num_trbs_free < num_trbs)
return -EBUSY;
addr = req->dma;
trb = ring->enqueue;
cycle = ring->cycle_state;
length = TRB_LEN(req->length);
control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
if (cycle)
control &= cpu_to_le32(~TRB_CYCLE);
else
control |= cpu_to_le32(TRB_CYCLE);
req->trb = ring->enqueue;
req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
xhci_dbc_queue_trb(ring,
lower_32_bits(addr),
upper_32_bits(addr),
length, control);
/*
* Add a barrier between writes of trb fields and flipping
* the cycle bit:
*/
wmb();
if (cycle)
trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
else
trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
return 0;
}
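/*
 * The cycle-bit handling above follows the standard xHC producer
 * protocol: the TRB is first written with the cycle bit in the state
 * the controller considers stale, all other fields are made visible
 * with wmb(), and only then is the cycle bit flipped to hand ownership
 * of the TRB to the hardware.  Schematically (C = ring->cycle_state):
 *
 *	write TRB fields with cycle = !C   // controller ignores the TRB
 *	wmb();                             // order fields before the flip
 *	set cycle = C                      // controller may now consume it
 *	ring the doorbell
 */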
static int
dbc_ep_do_queue(struct dbc_request *req)
{
int ret;
struct xhci_dbc *dbc = req->dbc;
struct device *dev = dbc->dev;
struct dbc_ep *dep = &dbc->eps[req->direction];
if (!req->length || !req->buf)
return -EINVAL;
req->actual = 0;
req->status = -EINPROGRESS;
req->dma = dma_map_single(dev,
req->buf,
req->length,
dbc_ep_dma_direction(dep));
if (dma_mapping_error(dev, req->dma)) {
dev_err(dbc->dev, "failed to map buffer\n");
return -EFAULT;
}
ret = xhci_dbc_queue_bulk_tx(dep, req);
if (ret) {
dev_err(dbc->dev, "failed to queue trbs\n");
dma_unmap_single(dev,
req->dma,
req->length,
dbc_ep_dma_direction(dep));
return -EFAULT;
}
list_add_tail(&req->list_pending, &dep->list_pending);
return 0;
}
int dbc_ep_queue(struct dbc_request *req)
{
unsigned long flags;
struct xhci_dbc *dbc = req->dbc;
int ret = -ESHUTDOWN;
if (!dbc)
return -ENODEV;
if (req->direction != BULK_IN &&
req->direction != BULK_OUT)
return -EINVAL;
spin_lock_irqsave(&dbc->lock, flags);
if (dbc->state == DS_CONFIGURED)
ret = dbc_ep_do_queue(req);
spin_unlock_irqrestore(&dbc->lock, flags);
mod_delayed_work(system_wq, &dbc->event_work, 0);
trace_xhci_dbc_queue_request(req);
return ret;
}
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
struct dbc_ep *dep;
dep = &dbc->eps[direction];
dep->dbc = dbc;
dep->direction = direction;
dep->ring = direction ? dbc->ring_in : dbc->ring_out;
INIT_LIST_HEAD(&dep->list_pending);
}
static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
xhci_dbc_do_eps_init(dbc, BULK_OUT);
xhci_dbc_do_eps_init(dbc, BULK_IN);
}
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
struct xhci_erst *erst, gfp_t flags)
{
erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
&erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
erst->num_entries = 1;
erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
erst->entries[0].rsvd = 0;
return 0;
}
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
if (erst->entries)
dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
erst->entries, erst->erst_dma_addr);
erst->entries = NULL;
}
static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
struct xhci_container_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return NULL;
/* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), each 64 bytes */
ctx->size = 3 * DBC_CONTEXT_SIZE;
ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
if (!ctx->bytes) {
kfree(ctx);
return NULL;
}
return ctx;
}
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
struct xhci_ring *ring;
struct xhci_segment *seg;
dma_addr_t dma;
ring = kzalloc(sizeof(*ring), flags);
if (!ring)
return NULL;
ring->num_segs = 1;
ring->type = type;
seg = kzalloc(sizeof(*seg), flags);
if (!seg)
goto seg_fail;
ring->first_seg = seg;
ring->last_seg = seg;
seg->next = seg;
seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
if (!seg->trbs)
goto dma_fail;
seg->dma = dma;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
trb->link.segment_ptr = cpu_to_le64(dma);
trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
}
INIT_LIST_HEAD(&ring->td_list);
xhci_initialize_ring_info(ring, 1);
return ring;
dma_fail:
kfree(seg);
seg_fail:
kfree(ring);
return NULL;
}
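/*
 * Layout of the single-segment ring built above: the last TRB is a link
 * TRB whose segment pointer refers back to the start of the same
 * segment, so the ring wraps onto itself and LINK_TOGGLE flips the
 * producer cycle state on every pass:
 *
 *	trbs[0] ... trbs[TRBS_PER_SEGMENT - 2]   normal TRBs
 *	trbs[TRBS_PER_SEGMENT - 1]               link TRB -> trbs[0]
 *
 * The event ring needs no link TRB: the controller walks it via the
 * ERST, and inc_evt_deq() wraps the dequeue pointer in software.
 */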
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
int ret;
dma_addr_t deq;
u32 string_length;
struct device *dev = dbc->dev;
/* Allocate various rings for events and transfers: */
dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
if (!dbc->ring_evt)
goto evt_fail;
dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
if (!dbc->ring_in)
goto in_fail;
dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
if (!dbc->ring_out)
goto out_fail;
/* Allocate and populate ERST: */
ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
if (ret)
goto erst_fail;
/* Allocate context data structure: */
dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
if (!dbc->ctx)
goto ctx_fail;
/* Allocate the string table: */
dbc->string_size = sizeof(struct dbc_str_descs);
dbc->string = dma_alloc_coherent(dev, dbc->string_size,
&dbc->string_dma, flags);
if (!dbc->string)
goto string_fail;
/* Setup ERST register: */
writel(dbc->erst.num_entries, &dbc->regs->ersts);
lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
dbc->ring_evt->dequeue);
lo_hi_writeq(deq, &dbc->regs->erdp);
/* Setup strings and contexts: */
string_length = xhci_dbc_populate_strings(dbc->string);
xhci_dbc_init_contexts(dbc, string_length);
xhci_dbc_eps_init(dbc);
dbc->state = DS_INITIALIZED;
return 0;
string_fail:
dbc_free_ctx(dev, dbc->ctx);
dbc->ctx = NULL;
ctx_fail:
dbc_erst_free(dev, &dbc->erst);
erst_fail:
dbc_ring_free(dev, dbc->ring_out);
dbc->ring_out = NULL;
out_fail:
dbc_ring_free(dev, dbc->ring_in);
dbc->ring_in = NULL;
in_fail:
dbc_ring_free(dev, dbc->ring_evt);
dbc->ring_evt = NULL;
evt_fail:
return -ENOMEM;
}
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
if (!dbc)
return;
xhci_dbc_eps_exit(dbc);
if (dbc->string) {
dma_free_coherent(dbc->dev, dbc->string_size,
dbc->string, dbc->string_dma);
dbc->string = NULL;
}
dbc_free_ctx(dbc->dev, dbc->ctx);
dbc->ctx = NULL;
dbc_erst_free(dbc->dev, &dbc->erst);
dbc_ring_free(dbc->dev, dbc->ring_out);
dbc_ring_free(dbc->dev, dbc->ring_in);
dbc_ring_free(dbc->dev, dbc->ring_evt);
dbc->ring_in = NULL;
dbc->ring_out = NULL;
dbc->ring_evt = NULL;
}
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
int ret;
u32 ctrl;
if (dbc->state != DS_DISABLED)
return -EINVAL;
writel(0, &dbc->regs->control);
ret = xhci_handshake(&dbc->regs->control,
DBC_CTRL_DBC_ENABLE,
0, 1000);
if (ret)
return ret;
ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
if (ret)
return ret;
ctrl = readl(&dbc->regs->control);
writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
&dbc->regs->control);
ret = xhci_handshake(&dbc->regs->control,
DBC_CTRL_DBC_ENABLE,
DBC_CTRL_DBC_ENABLE, 1000);
if (ret)
return ret;
dbc->state = DS_ENABLED;
return 0;
}
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
if (dbc->state == DS_DISABLED)
return -1;
writel(0, &dbc->regs->control);
dbc->state = DS_DISABLED;
return 0;
}
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
int ret;
unsigned long flags;
WARN_ON(!dbc);
pm_runtime_get_sync(dbc->dev); /* note this was self.controller */
spin_lock_irqsave(&dbc->lock, flags);
ret = xhci_do_dbc_start(dbc);
spin_unlock_irqrestore(&dbc->lock, flags);
if (ret) {
pm_runtime_put(dbc->dev); /* note this was self.controller */
return ret;
}
return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
int ret;
unsigned long flags;
WARN_ON(!dbc);
switch (dbc->state) {
case DS_DISABLED:
return;
case DS_CONFIGURED:
case DS_STALLED:
if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc);
break;
default:
break;
}
cancel_delayed_work_sync(&dbc->event_work);
spin_lock_irqsave(&dbc->lock, flags);
ret = xhci_do_dbc_stop(dbc);
spin_unlock_irqrestore(&dbc->lock, flags);
if (!ret) {
xhci_dbc_mem_cleanup(dbc);
pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
}
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
u32 portsc;
portsc = readl(&dbc->regs->portsc);
if (portsc & DBC_PORTSC_CONN_CHANGE)
dev_info(dbc->dev, "DbC port connect change\n");
if (portsc & DBC_PORTSC_RESET_CHANGE)
dev_info(dbc->dev, "DbC port reset change\n");
if (portsc & DBC_PORTSC_LINK_CHANGE)
dev_info(dbc->dev, "DbC port link status change\n");
if (portsc & DBC_PORTSC_CONFIG_CHANGE)
dev_info(dbc->dev, "DbC config error change\n");
/* The port reset change bit is cleared elsewhere: */
writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
struct dbc_ep *dep;
struct xhci_ring *ring;
int ep_id;
int status;
u32 comp_code;
size_t remain_length;
struct dbc_request *req = NULL, *r;
comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
dep = (ep_id == EPID_OUT) ?
get_out_ep(dbc) : get_in_ep(dbc);
ring = dep->ring;
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
fallthrough;
case COMP_SHORT_PACKET:
status = 0;
break;
case COMP_TRB_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_USB_TRANSACTION_ERROR:
case COMP_STALL_ERROR:
dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
status = -comp_code;
break;
default:
dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
status = -comp_code;
break;
}
/* Match the pending request: */
list_for_each_entry(r, &dep->list_pending, list_pending) {
if (r->trb_dma == event->trans_event.buffer) {
req = r;
break;
}
}
if (!req) {
dev_warn(dbc->dev, "no matched request\n");
return;
}
trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
ring->num_trbs_free++;
req->actual = req->length - remain_length;
xhci_dbc_giveback(req, status);
}
static void inc_evt_deq(struct xhci_ring *ring)
{
/* If on the last TRB of the segment go back to the beginning */
if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
ring->cycle_state ^= 1;
ring->dequeue = ring->deq_seg->trbs;
return;
}
ring->dequeue++;
}
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
struct dbc_ep *dep;
union xhci_trb *evt;
u32 ctrl, portsc;
bool update_erdp = false;
/* DbC state machine: */
switch (dbc->state) {
case DS_DISABLED:
case DS_INITIALIZED:
return EVT_ERR;
case DS_ENABLED:
portsc = readl(&dbc->regs->portsc);
if (portsc & DBC_PORTSC_CONN_STATUS) {
dbc->state = DS_CONNECTED;
dev_info(dbc->dev, "DbC connected\n");
}
return EVT_DONE;
case DS_CONNECTED:
ctrl = readl(&dbc->regs->control);
if (ctrl & DBC_CTRL_DBC_RUN) {
dbc->state = DS_CONFIGURED;
dev_info(dbc->dev, "DbC configured\n");
portsc = readl(&dbc->regs->portsc);
writel(portsc, &dbc->regs->portsc);
return EVT_GSER;
}
return EVT_DONE;
case DS_CONFIGURED:
/* Handle cable unplug event: */
portsc = readl(&dbc->regs->portsc);
if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
!(portsc & DBC_PORTSC_CONN_STATUS)) {
dev_info(dbc->dev, "DbC cable unplugged\n");
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
return EVT_DISC;
}
/* Handle debug port reset event: */
if (portsc & DBC_PORTSC_RESET_CHANGE) {
dev_info(dbc->dev, "DbC port reset\n");
writel(portsc, &dbc->regs->portsc);
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
return EVT_DISC;
}
/* Handle endpoint stall event: */
ctrl = readl(&dbc->regs->control);
if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
(ctrl & DBC_CTRL_HALT_OUT_TR)) {
dev_info(dbc->dev, "DbC Endpoint stall\n");
dbc->state = DS_STALLED;
if (ctrl & DBC_CTRL_HALT_IN_TR) {
dep = get_in_ep(dbc);
xhci_dbc_flush_endpoint_requests(dep);
}
if (ctrl & DBC_CTRL_HALT_OUT_TR) {
dep = get_out_ep(dbc);
xhci_dbc_flush_endpoint_requests(dep);
}
return EVT_DONE;
}
/* Clear DbC run change bit: */
if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
writel(ctrl, &dbc->regs->control);
ctrl = readl(&dbc->regs->control);
}
break;
case DS_STALLED:
ctrl = readl(&dbc->regs->control);
if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
!(ctrl & DBC_CTRL_HALT_OUT_TR) &&
(ctrl & DBC_CTRL_DBC_RUN)) {
dbc->state = DS_CONFIGURED;
break;
}
return EVT_DONE;
default:
dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
break;
}
/* Handle the events in the event ring: */
evt = dbc->ring_evt->dequeue;
while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
dbc->ring_evt->cycle_state) {
/*
* Add a barrier between reading the cycle flag and any
* reads of the event's flags/data below:
*/
rmb();
trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_PORT_STATUS):
dbc_handle_port_status(dbc, evt);
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
break;
default:
break;
}
inc_evt_deq(dbc->ring_evt);
evt = dbc->ring_evt->dequeue;
update_erdp = true;
}
/* Update event ring dequeue pointer: */
if (update_erdp) {
deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
dbc->ring_evt->dequeue);
lo_hi_writeq(deq, &dbc->regs->erdp);
}
return EVT_DONE;
}
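/*
 * Summary of the DbC state transitions driven above (arrows show the
 * poll results that move the state machine forward):
 *
 *	DS_ENABLED    --(PORTSC connect)-->     DS_CONNECTED
 *	DS_CONNECTED  --(CTRL run bit)-->       DS_CONFIGURED (EVT_GSER)
 *	DS_CONFIGURED --(unplug/port reset)-->  DS_ENABLED    (EVT_DISC)
 *	DS_CONFIGURED --(endpoint halt)-->      DS_STALLED
 *	DS_STALLED    --(halts clear + run)-->  DS_CONFIGURED
 */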
static void xhci_dbc_handle_events(struct work_struct *work)
{
enum evtreturn evtr;
struct xhci_dbc *dbc;
unsigned long flags;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
spin_lock_irqsave(&dbc->lock, flags);
evtr = xhci_dbc_do_handle_events(dbc);
spin_unlock_irqrestore(&dbc->lock, flags);
switch (evtr) {
case EVT_GSER:
if (dbc->driver->configure)
dbc->driver->configure(dbc);
break;
case EVT_DISC:
if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
}
mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static ssize_t dbc_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const char *p;
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
switch (dbc->state) {
case DS_DISABLED:
p = "disabled";
break;
case DS_INITIALIZED:
p = "initialized";
break;
case DS_ENABLED:
p = "enabled";
break;
case DS_CONNECTED:
p = "connected";
break;
case DS_CONFIGURED:
p = "configured";
break;
case DS_STALLED:
p = "stalled";
break;
default:
p = "unknown";
}
return sprintf(buf, "%s\n", p);
}
static ssize_t dbc_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct xhci_hcd *xhci;
struct xhci_dbc *dbc;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
if (!strncmp(buf, "enable", 6))
xhci_dbc_start(dbc);
else if (!strncmp(buf, "disable", 7))
xhci_dbc_stop(dbc);
else
return -EINVAL;
return count;
}
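/*
 * The attribute pair above is what userspace toggles to bring the DbC
 * up or down, e.g. (the exact sysfs path depends on the xHC's PCI
 * address; the one below is illustrative):
 *
 *	echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *	cat /sys/bus/pci/devices/0000:00:14.0/dbc   # "enabled", then
 *						    # "configured" once the
 *						    # debug host connects
 *	echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */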
static ssize_t dbc_idVendor_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
return sprintf(buf, "%04x\n", dbc->idVendor);
}
static ssize_t dbc_idVendor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
void __iomem *ptr;
u16 value;
u32 dev_info;
if (kstrtou16(buf, 0, &value))
return -EINVAL;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
if (dbc->state != DS_DISABLED)
return -EBUSY;
dbc->idVendor = value;
ptr = &dbc->regs->devinfo1;
dev_info = readl(ptr);
dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
writel(dev_info, ptr);
return size;
}
static ssize_t dbc_idProduct_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
return sprintf(buf, "%04x\n", dbc->idProduct);
}
static ssize_t dbc_idProduct_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
void __iomem *ptr;
u32 dev_info;
u16 value;
if (kstrtou16(buf, 0, &value))
return -EINVAL;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
if (dbc->state != DS_DISABLED)
return -EBUSY;
dbc->idProduct = value;
ptr = &dbc->regs->devinfo2;
dev_info = readl(ptr);
dev_info = (dev_info & ~(0xffffu)) | value;
writel(dev_info, ptr);
return size;
}
static ssize_t dbc_bcdDevice_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
return sprintf(buf, "%04x\n", dbc->bcdDevice);
}
static ssize_t dbc_bcdDevice_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
void __iomem *ptr;
u32 dev_info;
u16 value;
if (kstrtou16(buf, 0, &value))
return -EINVAL;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
if (dbc->state != DS_DISABLED)
return -EBUSY;
dbc->bcdDevice = value;
ptr = &dbc->regs->devinfo2;
dev_info = readl(ptr);
dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
writel(dev_info, ptr);
return size;
}
static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
return sprintf(buf, "%02x\n", dbc->bInterfaceProtocol);
}
static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
void __iomem *ptr;
u32 dev_info;
u8 value;
int ret;
/* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
ret = kstrtou8(buf, 0, &value);
if (ret || value > 1)
return -EINVAL;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
if (dbc->state != DS_DISABLED)
return -EBUSY;
dbc->bInterfaceProtocol = value;
ptr = &dbc->regs->devinfo1;
dev_info = readl(ptr);
dev_info = (dev_info & ~(0xffu)) | value;
writel(dev_info, ptr);
return size;
}
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static struct attribute *dbc_dev_attributes[] = {
&dev_attr_dbc.attr,
&dev_attr_dbc_idVendor.attr,
&dev_attr_dbc_idProduct.attr,
&dev_attr_dbc_bcdDevice.attr,
&dev_attr_dbc_bInterfaceProtocol.attr,
NULL
};
static const struct attribute_group dbc_dev_attrib_grp = {
.attrs = dbc_dev_attributes,
};
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
struct xhci_dbc *dbc;
int ret;
dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
if (!dbc)
return NULL;
dbc->regs = base;
dbc->dev = dev;
dbc->driver = driver;
dbc->idProduct = DBC_PRODUCT_ID;
dbc->idVendor = DBC_VENDOR_ID;
dbc->bcdDevice = DBC_DEVICE_REV;
dbc->bInterfaceProtocol = DBC_PROTOCOL;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
goto err;
INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
spin_lock_init(&dbc->lock);
ret = sysfs_create_group(&dev->kobj, &dbc_dev_attrib_grp);
if (ret)
goto err;
return dbc;
err:
kfree(dbc);
return NULL;
}
/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
if (!dbc)
return;
/* stop hw, cancel the event work and call dbc->driver->disconnect() */
xhci_dbc_stop(dbc);
/* remove sysfs files */
sysfs_remove_group(&dbc->dev->kobj, &dbc_dev_attrib_grp);
kfree(dbc);
}
int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
struct device *dev;
void __iomem *base;
int ret;
int dbc_cap_offs;
/* set up everything needed to present a DbC device */
dev = xhci_to_hcd(xhci)->self.controller;
base = &xhci->cap_regs->hc_capbase;
dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
if (!dbc_cap_offs)
return -ENODEV;
/* already allocated and in use */
if (xhci->dbc)
return -EBUSY;
ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
return ret;
}
void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
unsigned long flags;
if (!xhci->dbc)
return;
xhci_dbc_tty_remove(xhci->dbc);
spin_lock_irqsave(&xhci->lock, flags);
xhci->dbc = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
}
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
struct xhci_dbc *dbc = xhci->dbc;
if (!dbc)
return 0;
if (dbc->state == DS_CONFIGURED)
dbc->resume_required = 1;
xhci_dbc_stop(dbc);
return 0;
}
int xhci_dbc_resume(struct xhci_hcd *xhci)
{
int ret = 0;
struct xhci_dbc *dbc = xhci->dbc;
if (!dbc)
return 0;
if (dbc->resume_required) {
dbc->resume_required = 0;
xhci_dbc_start(dbc);
}
return ret;
}
#endif /* CONFIG_PM */
int xhci_dbc_init(void)
{
return dbc_tty_init();
}
void xhci_dbc_exit(void)
{
dbc_tty_exit();
}
| linux-master | drivers/usb/host/xhci-dbgcap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xen-hcd.c
*
* Xen USB Virtual Host Controller driver
*
* Copyright (C) 2009, FUJITSU LABORATORIES LTD.
* Author: Noboru Iwamatsu <[email protected]>
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/list.h>
#include <linux/usb/hcd.h>
#include <linux/io.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/interface/io/usbif.h>
/* Private per-URB data */
struct urb_priv {
struct list_head list;
struct urb *urb;
int req_id; /* RING_REQUEST id for submitting */
int unlink_req_id; /* RING_REQUEST id for unlinking */
int status;
bool unlinked; /* dequeued marker */
};
/* virtual roothub port status */
struct rhport_status {
__u32 status;
bool resuming; /* in resuming */
bool c_connection; /* connection changed */
unsigned long timeout;
};
/* status of attached device */
struct vdevice_status {
int devnum;
enum usb_device_state status;
enum usb_device_speed speed;
};
/* RING request shadow */
struct usb_shadow {
struct xenusb_urb_request req;
struct urb *urb;
bool in_flight;
};
struct xenhcd_info {
/* Virtual Host Controller has 4 urb queues */
struct list_head pending_submit_list;
struct list_head pending_unlink_list;
struct list_head in_progress_list;
struct list_head giveback_waiting_list;
spinlock_t lock;
/* timer that kicks pending and giveback-waiting urbs */
struct timer_list watchdog;
unsigned long actions;
/* virtual root hub */
int rh_numports;
struct rhport_status ports[XENUSB_MAX_PORTNR];
struct vdevice_status devices[XENUSB_MAX_PORTNR];
/* Xen related stuff */
struct xenbus_device *xbdev;
int urb_ring_ref;
int conn_ring_ref;
struct xenusb_urb_front_ring urb_ring;
struct xenusb_conn_front_ring conn_ring;
unsigned int evtchn;
unsigned int irq;
struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
unsigned int shadow_free;
bool error;
};
#define XENHCD_RING_JIFFIES (HZ/200)
#define XENHCD_SCAN_JIFFIES 1
enum xenhcd_timer_action {
TIMER_RING_WATCHDOG,
TIMER_SCAN_PENDING_URBS,
};
static struct kmem_cache *xenhcd_urbp_cachep;
static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
{
return (struct xenhcd_info *)hcd->hcd_priv;
}
static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
{
return container_of((void *)info, struct usb_hcd, hcd_priv);
}
static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
{
info->error = true;
pr_alert("xen-hcd: protocol error: %s!\n", msg);
}
static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
enum xenhcd_timer_action action)
{
clear_bit(action, &info->actions);
}
static void xenhcd_timer_action(struct xenhcd_info *info,
enum xenhcd_timer_action action)
{
if (timer_pending(&info->watchdog) &&
test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
return;
if (!test_and_set_bit(action, &info->actions)) {
unsigned long t;
switch (action) {
case TIMER_RING_WATCHDOG:
t = XENHCD_RING_JIFFIES;
break;
default:
t = XENHCD_SCAN_JIFFIES;
break;
}
mod_timer(&info->watchdog, t + jiffies);
}
}
/*
* set virtual port connection status
*/
static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
if (info->ports[port].status & USB_PORT_STAT_POWER) {
switch (info->devices[port].speed) {
case XENUSB_SPEED_NONE:
info->ports[port].status &=
~(USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_SUSPEND);
break;
case XENUSB_SPEED_LOW:
info->ports[port].status |= USB_PORT_STAT_CONNECTION;
info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
break;
case XENUSB_SPEED_FULL:
info->ports[port].status |= USB_PORT_STAT_CONNECTION;
break;
case XENUSB_SPEED_HIGH:
info->ports[port].status |= USB_PORT_STAT_CONNECTION;
info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
break;
default: /* error */
return;
}
info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
}
}
/*
* set virtual device connection status
*/
static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
__u8 speed)
{
int port;
if (portnum < 1 || portnum > info->rh_numports)
return -EINVAL; /* invalid port number */
port = portnum - 1;
if (info->devices[port].speed != speed) {
switch (speed) {
case XENUSB_SPEED_NONE: /* disconnect */
info->devices[port].status = USB_STATE_NOTATTACHED;
break;
case XENUSB_SPEED_LOW:
case XENUSB_SPEED_FULL:
case XENUSB_SPEED_HIGH:
info->devices[port].status = USB_STATE_ATTACHED;
break;
default: /* error */
return -EINVAL;
}
info->devices[port].speed = speed;
info->ports[port].c_connection = true;
xenhcd_set_connect_state(info, portnum);
}
return 0;
}
/*
* SetPortFeature(PORT_SUSPENDED)
*/
static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
info->ports[port].status |= USB_PORT_STAT_SUSPEND;
info->devices[port].status = USB_STATE_SUSPENDED;
}
/*
* ClearPortFeature(PORT_SUSPENDED)
*/
static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
info->ports[port].resuming = true;
info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
}
}
/*
* SetPortFeature(PORT_POWER)
*/
static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
info->ports[port].status |= USB_PORT_STAT_POWER;
if (info->devices[port].status != USB_STATE_NOTATTACHED)
info->devices[port].status = USB_STATE_POWERED;
if (info->ports[port].c_connection)
xenhcd_set_connect_state(info, portnum);
}
}
/*
* ClearPortFeature(PORT_POWER)
* SetConfiguration(non-zero)
* Power_Source_Off
* Over-current
*/
static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
if (info->ports[port].status & USB_PORT_STAT_POWER) {
info->ports[port].status = 0;
if (info->devices[port].status != USB_STATE_NOTATTACHED)
info->devices[port].status = USB_STATE_ATTACHED;
}
}
/*
* ClearPortFeature(PORT_ENABLE)
*/
static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
info->ports[port].resuming = false;
if (info->devices[port].status != USB_STATE_NOTATTACHED)
info->devices[port].status = USB_STATE_POWERED;
}
/*
* SetPortFeature(PORT_RESET)
*/
static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
{
int port;
port = portnum - 1;
info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED);
info->ports[port].status |= USB_PORT_STAT_RESET;
if (info->devices[port].status != USB_STATE_NOTATTACHED)
info->devices[port].status = USB_STATE_ATTACHED;
/* 10msec reset signaling */
info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
}
#ifdef CONFIG_PM
static int xenhcd_bus_suspend(struct usb_hcd *hcd)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
int ret = 0;
int i, ports;
ports = info->rh_numports;
spin_lock_irq(&info->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
ret = -ESHUTDOWN;
} else {
/* suspend any active ports */
for (i = 1; i <= ports; i++)
xenhcd_rhport_suspend(info, i);
}
spin_unlock_irq(&info->lock);
del_timer_sync(&info->watchdog);
return ret;
}
static int xenhcd_bus_resume(struct usb_hcd *hcd)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
int ret = 0;
int i, ports;
ports = info->rh_numports;
spin_lock_irq(&info->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
ret = -ESHUTDOWN;
} else {
/* resume any suspended ports */
for (i = 1; i <= ports; i++)
xenhcd_rhport_resume(info, i);
}
spin_unlock_irq(&info->lock);
return ret;
}
#endif
static void xenhcd_hub_descriptor(struct xenhcd_info *info,
struct usb_hub_descriptor *desc)
{
__u16 temp;
int ports = info->rh_numports;
desc->bDescriptorType = 0x29;
desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
/* size of DeviceRemovable and PortPwrCtrlMask fields */
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
/* bitmaps for DeviceRemovable and PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port over current reporting and no power switching */
temp = 0x000a;
desc->wHubCharacteristics = cpu_to_le16(temp);
}
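/*
 * Descriptor size math above, worked for a hypothetical 8-port root
 * hub: the DeviceRemovable and PortPwrCtrlMask bitmaps each need
 * 1 + (8 / 8) = 2 bytes, so bDescLength = 7 + 2 * 2 = 11 bytes.
 */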
/* port status change mask for hub_status_data */
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
USB_PORT_STAT_C_ENABLE | \
USB_PORT_STAT_C_SUSPEND | \
USB_PORT_STAT_C_OVERCURRENT | \
USB_PORT_STAT_C_RESET) << 16)
/*
* See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
* If any port status changed, write the bitmap to buf and return
* its length (in bytes).
* If nothing changed, return 0.
*/
static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
int ports;
int i;
unsigned long flags;
int ret;
int changed = 0;
/* initialize the status to no-changes */
ports = info->rh_numports;
ret = 1 + (ports / 8);
memset(buf, 0, ret);
spin_lock_irqsave(&info->lock, flags);
for (i = 0; i < ports; i++) {
/* check status for each port */
if (info->ports[i].status & PORT_C_MASK) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
changed = 1;
}
}
if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
usb_hcd_resume_root_hub(hcd);
spin_unlock_irqrestore(&info->lock, flags);
return changed ? ret : 0;
}
static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
__u16 wIndex, char *buf, __u16 wLength)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
int ports = info->rh_numports;
unsigned long flags;
int ret = 0;
int i;
int changed = 0;
spin_lock_irqsave(&info->lock, flags);
switch (typeReq) {
case ClearHubFeature:
/* ignore this request */
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
xenhcd_rhport_resume(info, wIndex);
break;
case USB_PORT_FEAT_POWER:
xenhcd_rhport_power_off(info, wIndex);
break;
case USB_PORT_FEAT_ENABLE:
xenhcd_rhport_disable(info, wIndex);
break;
case USB_PORT_FEAT_C_CONNECTION:
info->ports[wIndex - 1].c_connection = false;
fallthrough;
default:
info->ports[wIndex - 1].status &= ~(1 << wValue);
break;
}
break;
case GetHubDescriptor:
xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
/* local power supply is always good and no over-current condition exists. */
*(__le32 *)buf = cpu_to_le32(0);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
/* resume completion */
if (info->ports[wIndex].resuming &&
time_after_eq(jiffies, info->ports[wIndex].timeout)) {
info->ports[wIndex].status |=
USB_PORT_STAT_C_SUSPEND << 16;
info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
}
/* reset completion */
if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
time_after_eq(jiffies, info->ports[wIndex].timeout)) {
info->ports[wIndex].status |=
USB_PORT_STAT_C_RESET << 16;
info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
if (info->devices[wIndex].status !=
USB_STATE_NOTATTACHED) {
info->ports[wIndex].status |=
USB_PORT_STAT_ENABLE;
info->devices[wIndex].status =
USB_STATE_DEFAULT;
}
switch (info->devices[wIndex].speed) {
case XENUSB_SPEED_LOW:
info->ports[wIndex].status |=
USB_PORT_STAT_LOW_SPEED;
break;
case XENUSB_SPEED_HIGH:
info->ports[wIndex].status |=
USB_PORT_STAT_HIGH_SPEED;
break;
default:
break;
}
}
*(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
break;
case SetPortFeature:
if (!wIndex || wIndex > ports)
goto error;
switch (wValue) {
case USB_PORT_FEAT_POWER:
xenhcd_rhport_power_on(info, wIndex);
break;
case USB_PORT_FEAT_RESET:
xenhcd_rhport_reset(info, wIndex);
break;
case USB_PORT_FEAT_SUSPEND:
xenhcd_rhport_suspend(info, wIndex);
break;
default:
if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
info->ports[wIndex-1].status |= (1 << wValue);
}
break;
case SetHubFeature:
/* not supported */
default:
error:
ret = -EPIPE;
}
spin_unlock_irqrestore(&info->lock, flags);
/* check status for each port */
for (i = 0; i < ports; i++) {
if (info->ports[i].status & PORT_C_MASK)
changed = 1;
}
if (changed)
usb_hcd_poll_rh_status(hcd);
return ret;
}
static void xenhcd_free_urb_priv(struct urb_priv *urbp)
{
urbp->urb->hcpriv = NULL;
kmem_cache_free(xenhcd_urbp_cachep, urbp);
}
static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
{
unsigned int free;
free = info->shadow_free;
info->shadow_free = info->shadow[free].req.id;
info->shadow[free].req.id = 0x0fff; /* debug */
return free;
}
static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
unsigned int id)
{
info->shadow[id].req.id = info->shadow_free;
info->shadow[id].urb = NULL;
info->shadow_free = id;
}
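/*
 * The free list above is threaded through the otherwise unused req.id
 * fields of the shadow entries, so no extra storage is needed.  A
 * worked example with a four-entry ring, freshly initialized to the
 * chain 0 -> 1 -> 2 -> 3:
 *
 *	shadow_free = 0, shadow[0].req.id = 1, shadow[1].req.id = 2, ...
 *
 *	xenhcd_get_id_from_freelist():  returns 0, shadow_free becomes 1
 *	xenhcd_get_id_from_freelist():  returns 1, shadow_free becomes 2
 *	xenhcd_add_id_to_freelist(0):   shadow[0].req.id = 2, shadow_free = 0
 */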
static inline int xenhcd_count_pages(void *addr, int length)
{
unsigned long vaddr = (unsigned long)addr;
return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
}
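/*
 * Worked example for the page count above: PFN_UP rounds the end of the
 * buffer up to a page boundary and PFN_DOWN rounds the start down, so
 * the difference is the number of pages the buffer touches.  With 4 KiB
 * pages, a 100-byte buffer starting at offset 0xff0 into a page spans
 * two pages:
 *
 *	vaddr = ...0ff0, length = 100
 *	PFN_DOWN(vaddr)         -> page N
 *	PFN_UP(vaddr + length)  -> page N + 2
 *	xenhcd_count_pages()    -> 2
 */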
static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
grant_ref_t *gref_head,
struct xenusb_request_segment *seg,
int nr_pages, int flags)
{
grant_ref_t ref;
unsigned int offset;
unsigned int len = length;
unsigned int bytes;
int i;
for (i = 0; i < nr_pages; i++) {
offset = offset_in_page(addr);
bytes = PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
ref = gnttab_claim_grant_reference(gref_head);
gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
virt_to_gfn(addr), flags);
seg[i].gref = ref;
seg[i].offset = (__u16)offset;
seg[i].length = (__u16)bytes;
addr += bytes;
len -= bytes;
}
}
static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
{
__u32 pipe;
pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
if (usb_pipein(urb_pipe))
pipe |= XENUSB_PIPE_DIR;
switch (usb_pipetype(urb_pipe)) {
case PIPE_ISOCHRONOUS:
pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
break;
case PIPE_INTERRUPT:
pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
break;
case PIPE_CONTROL:
pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
break;
case PIPE_BULK:
pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
break;
}
pipe = xenusb_setportnum_pipe(pipe, port);
return pipe;
}
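/*
 * Example of the pipe translation above (field positions come from the
 * XENUSB_PIPE_* shifts in xen/interface/io/usbif.h; concrete bit values
 * depend on that header): a bulk IN transfer to endpoint 2 of device 3
 * behind root-hub port 1 becomes
 *
 *	pipe = (3 << XENUSB_PIPE_DEV_SHIFT)
 *	     | (2 << XENUSB_PIPE_EP_SHIFT)
 *	     | XENUSB_PIPE_DIR
 *	     | (XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT);
 *	pipe = xenusb_setportnum_pipe(pipe, 1);
 */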
static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
struct xenusb_urb_request *req)
{
grant_ref_t gref_head;
int nr_buff_pages = 0;
int nr_isodesc_pages = 0;
int nr_grants = 0;
if (urb->transfer_buffer_length) {
nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
urb->transfer_buffer_length);
if (usb_pipeisoc(urb->pipe))
nr_isodesc_pages = xenhcd_count_pages(
&urb->iso_frame_desc[0],
sizeof(struct usb_iso_packet_descriptor) *
urb->number_of_packets);
nr_grants = nr_buff_pages + nr_isodesc_pages;
if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
pr_err("xenhcd: error: %d grants\n", nr_grants);
return -E2BIG;
}
if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
return -ENOMEM;
}
xenhcd_gnttab_map(info, urb->transfer_buffer,
urb->transfer_buffer_length, &gref_head,
&req->seg[0], nr_buff_pages,
usb_pipein(urb->pipe) ? 0 : GTF_readonly);
}
req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
req->transfer_flags = 0;
if (urb->transfer_flags & URB_SHORT_NOT_OK)
req->transfer_flags |= XENUSB_SHORT_NOT_OK;
req->buffer_length = urb->transfer_buffer_length;
req->nr_buffer_segs = nr_buff_pages;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
req->u.isoc.interval = urb->interval;
req->u.isoc.start_frame = urb->start_frame;
req->u.isoc.number_of_packets = urb->number_of_packets;
req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
sizeof(struct usb_iso_packet_descriptor) *
urb->number_of_packets,
&gref_head, &req->seg[nr_buff_pages],
nr_isodesc_pages, 0);
break;
case PIPE_INTERRUPT:
req->u.intr.interval = urb->interval;
break;
case PIPE_CONTROL:
if (urb->setup_packet)
memcpy(req->u.ctrl, urb->setup_packet, 8);
break;
case PIPE_BULK:
break;
default:
break;
}
if (nr_grants)
gnttab_free_grant_references(gref_head);
return 0;
}
static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
{
struct usb_shadow *shadow = info->shadow + id;
int nr_segs = 0;
int i;
if (!shadow->in_flight) {
xenhcd_set_error(info, "Illegal request id");
return;
}
shadow->in_flight = false;
nr_segs = shadow->req.nr_buffer_segs;
if (xenusb_pipeisoc(shadow->req.pipe))
nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
for (i = 0; i < nr_segs; i++) {
if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
xenhcd_set_error(info, "backend didn't release grant");
}
shadow->req.nr_buffer_segs = 0;
shadow->req.u.isoc.nr_frame_desc_segs = 0;
}
static int xenhcd_translate_status(int status)
{
switch (status) {
case XENUSB_STATUS_OK:
return 0;
case XENUSB_STATUS_NODEV:
return -ENODEV;
case XENUSB_STATUS_INVAL:
return -EINVAL;
case XENUSB_STATUS_STALL:
return -EPIPE;
case XENUSB_STATUS_IOERROR:
return -EPROTO;
case XENUSB_STATUS_BABBLE:
return -EOVERFLOW;
default:
return -ESHUTDOWN;
}
}
static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
int status)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
int priv_status = urbp->status;
list_del_init(&urbp->list);
xenhcd_free_urb_priv(urbp);
if (urb->status == -EINPROGRESS)
urb->status = xenhcd_translate_status(status);
spin_unlock(&info->lock);
usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
priv_status <= 0 ? priv_status : urb->status);
spin_lock(&info->lock);
}
static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
{
struct xenusb_urb_request *req;
struct urb *urb = urbp->urb;
unsigned int id;
int notify;
int ret;
id = xenhcd_get_id_from_freelist(info);
req = &info->shadow[id].req;
req->id = id;
if (unlikely(urbp->unlinked)) {
req->u.unlink.unlink_id = urbp->req_id;
req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
urb->pipe, urb->dev->portnum));
urbp->unlink_req_id = id;
} else {
ret = xenhcd_map_urb_for_request(info, urb, req);
if (ret) {
xenhcd_add_id_to_freelist(info, id);
return ret;
}
urbp->req_id = id;
}
req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
*req = info->shadow[id].req;
info->urb_ring.req_prod_pvt++;
info->shadow[id].urb = urb;
info->shadow[id].in_flight = true;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
return 0;
}
static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
{
struct urb_priv *urbp;
while (!list_empty(&info->pending_submit_list)) {
if (RING_FULL(&info->urb_ring)) {
xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
return;
}
urbp = list_entry(info->pending_submit_list.next,
struct urb_priv, list);
if (!xenhcd_do_request(info, urbp))
list_move_tail(&urbp->list, &info->in_progress_list);
else
xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
}
xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
}
/*
* caller must lock info->lock
*/
static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
{
struct urb_priv *urbp, *tmp;
int req_id;
list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
req_id = urbp->req_id;
if (!urbp->unlinked) {
xenhcd_gnttab_done(info, req_id);
if (info->error)
return;
if (urbp->urb->status == -EINPROGRESS)
/* not dequeued */
xenhcd_giveback_urb(info, urbp->urb,
-ESHUTDOWN);
else /* dequeued */
xenhcd_giveback_urb(info, urbp->urb,
urbp->urb->status);
}
info->shadow[req_id].urb = NULL;
}
list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
}
/*
* caller must lock info->lock
*/
static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
{
struct urb_priv *urbp, *tmp;
list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
}
static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
{
int ret;
if (RING_FULL(&info->urb_ring)) {
list_add_tail(&urbp->list, &info->pending_submit_list);
xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
return 0;
}
if (!list_empty(&info->pending_submit_list)) {
list_add_tail(&urbp->list, &info->pending_submit_list);
xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
return 0;
}
ret = xenhcd_do_request(info, urbp);
if (ret == 0)
list_add_tail(&urbp->list, &info->in_progress_list);
return ret;
}
static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
{
int ret;
/* already unlinked? */
if (urbp->unlinked)
return -EBUSY;
urbp->unlinked = true;
/* the urb is still in pending_submit queue */
if (urbp->req_id == ~0) {
list_move_tail(&urbp->list, &info->giveback_waiting_list);
xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
return 0;
}
/* send unlink request to backend */
if (RING_FULL(&info->urb_ring)) {
list_move_tail(&urbp->list, &info->pending_unlink_list);
xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
return 0;
}
if (!list_empty(&info->pending_unlink_list)) {
list_move_tail(&urbp->list, &info->pending_unlink_list);
xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
return 0;
}
ret = xenhcd_do_request(info, urbp);
if (ret == 0)
list_move_tail(&urbp->list, &info->in_progress_list);
return ret;
}
static void xenhcd_res_to_urb(struct xenhcd_info *info,
struct xenusb_urb_response *res, struct urb *urb)
{
if (unlikely(!urb))
return;
if (res->actual_length > urb->transfer_buffer_length)
urb->actual_length = urb->transfer_buffer_length;
else if (res->actual_length < 0)
urb->actual_length = 0;
else
urb->actual_length = res->actual_length;
urb->error_count = res->error_count;
urb->start_frame = res->start_frame;
xenhcd_giveback_urb(info, urb, res->status);
}
static int xenhcd_urb_request_done(struct xenhcd_info *info,
unsigned int *eoiflag)
{
struct xenusb_urb_response res;
RING_IDX i, rp;
__u16 id;
int more_to_do = 0;
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
rp = info->urb_ring.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
xenhcd_set_error(info, "Illegal index on urb-ring");
goto err;
}
rmb(); /* ensure we see queued responses up to "rp" */
for (i = info->urb_ring.rsp_cons; i != rp; i++) {
RING_COPY_RESPONSE(&info->urb_ring, i, &res);
id = res.id;
if (id >= XENUSB_URB_RING_SIZE) {
xenhcd_set_error(info, "Illegal data on urb-ring");
goto err;
}
if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
xenhcd_gnttab_done(info, id);
if (info->error)
goto err;
xenhcd_res_to_urb(info, &res, info->shadow[id].urb);
}
xenhcd_add_id_to_freelist(info, id);
*eoiflag = 0;
}
info->urb_ring.rsp_cons = i;
if (i != info->urb_ring.req_prod_pvt)
RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
else
info->urb_ring.sring->rsp_event = i + 1;
spin_unlock_irqrestore(&info->lock, flags);
return more_to_do;
err:
spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int xenhcd_conn_notify(struct xenhcd_info *info, unsigned int *eoiflag)
{
struct xenusb_conn_response res;
struct xenusb_conn_request *req;
RING_IDX rc, rp;
__u16 id;
__u8 portnum, speed;
int more_to_do = 0;
int notify;
int port_changed = 0;
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
rc = info->conn_ring.rsp_cons;
rp = info->conn_ring.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
xenhcd_set_error(info, "Illegal index on conn-ring");
spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
rmb(); /* ensure we see queued responses up to "rp" */
while (rc != rp) {
RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
id = res.id;
portnum = res.portnum;
speed = res.speed;
info->conn_ring.rsp_cons = ++rc;
if (xenhcd_rhport_connect(info, portnum, speed)) {
xenhcd_set_error(info, "Illegal data on conn-ring");
spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
if (info->ports[portnum - 1].c_connection)
port_changed = 1;
barrier();
req = RING_GET_REQUEST(&info->conn_ring,
info->conn_ring.req_prod_pvt);
req->id = id;
info->conn_ring.req_prod_pvt++;
*eoiflag = 0;
}
if (rc != info->conn_ring.req_prod_pvt)
RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
else
info->conn_ring.sring->rsp_event = rc + 1;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
spin_unlock_irqrestore(&info->lock, flags);
if (port_changed)
usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
return more_to_do;
}
static irqreturn_t xenhcd_int(int irq, void *dev_id)
{
struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
if (unlikely(info->error)) {
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
}
while (xenhcd_urb_request_done(info, &eoiflag) |
xenhcd_conn_notify(info, &eoiflag))
/* Yield point for this unbounded loop. */
cond_resched();
xen_irq_lateeoi(irq, eoiflag);
return IRQ_HANDLED;
}
static void xenhcd_destroy_rings(struct xenhcd_info *info)
{
if (info->irq)
unbind_from_irqhandler(info->irq, info);
info->irq = 0;
xenbus_teardown_ring((void **)&info->urb_ring.sring, 1,
&info->urb_ring_ref);
xenbus_teardown_ring((void **)&info->conn_ring.sring, 1,
&info->conn_ring_ref);
}
static int xenhcd_setup_rings(struct xenbus_device *dev,
struct xenhcd_info *info)
{
struct xenusb_urb_sring *urb_sring;
struct xenusb_conn_sring *conn_sring;
int err;
info->conn_ring_ref = INVALID_GRANT_REF;
err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
(void **)&urb_sring, 1, &info->urb_ring_ref);
if (err) {
xenbus_dev_fatal(dev, err, "allocating urb ring");
return err;
}
XEN_FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
(void **)&conn_sring, 1, &info->conn_ring_ref);
if (err) {
xenbus_dev_fatal(dev, err, "allocating conn ring");
goto fail;
}
XEN_FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
goto fail;
}
err = bind_evtchn_to_irq_lateeoi(info->evtchn);
if (err <= 0) {
xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq_lateeoi");
goto fail;
}
info->irq = err;
err = request_threaded_irq(info->irq, NULL, xenhcd_int,
IRQF_ONESHOT, "xenhcd", info);
if (err) {
xenbus_dev_fatal(dev, err, "request_threaded_irq");
goto free_irq;
}
return 0;
free_irq:
unbind_from_irqhandler(info->irq, info);
fail:
xenhcd_destroy_rings(info);
return err;
}
static int xenhcd_talk_to_backend(struct xenbus_device *dev,
struct xenhcd_info *info)
{
const char *message;
struct xenbus_transaction xbt;
int err;
err = xenhcd_setup_rings(dev, info);
if (err)
return err;
again:
err = xenbus_transaction_start(&xbt);
if (err) {
xenbus_dev_fatal(dev, err, "starting transaction");
goto destroy_ring;
}
err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
info->urb_ring_ref);
if (err) {
message = "writing urb-ring-ref";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
info->conn_ring_ref);
if (err) {
message = "writing conn-ring-ref";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
info->evtchn);
if (err) {
message = "writing event-channel";
goto abort_transaction;
}
err = xenbus_transaction_end(xbt, 0);
if (err) {
if (err == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, err, "completing transaction");
goto destroy_ring;
}
return 0;
abort_transaction:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, err, "%s", message);
destroy_ring:
xenhcd_destroy_rings(info);
return err;
}
static int xenhcd_connect(struct xenbus_device *dev)
{
struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
struct xenusb_conn_request *req;
int idx, err;
int notify;
char name[TASK_COMM_LEN];
struct usb_hcd *hcd;
hcd = xenhcd_info_to_hcd(info);
snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
err = xenhcd_talk_to_backend(dev, info);
if (err)
return err;
/* prepare ring for hotplug notification */
for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
req = RING_GET_REQUEST(&info->conn_ring, idx);
req->id = idx;
}
info->conn_ring.req_prod_pvt = idx;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
return 0;
}
static void xenhcd_disconnect(struct xenbus_device *dev)
{
struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
usb_remove_hcd(hcd);
xenbus_frontend_closed(dev);
}
static void xenhcd_watchdog(struct timer_list *timer)
{
struct xenhcd_info *info = from_timer(info, timer, watchdog);
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
xenhcd_giveback_unlinked_urbs(info);
xenhcd_kick_pending_urbs(info);
}
spin_unlock_irqrestore(&info->lock, flags);
}
/*
* one-time HC init
*/
static int xenhcd_setup(struct usb_hcd *hcd)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
spin_lock_init(&info->lock);
INIT_LIST_HEAD(&info->pending_submit_list);
INIT_LIST_HEAD(&info->pending_unlink_list);
INIT_LIST_HEAD(&info->in_progress_list);
INIT_LIST_HEAD(&info->giveback_waiting_list);
timer_setup(&info->watchdog, xenhcd_watchdog, 0);
hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
return 0;
}
/*
* start HC running
*/
static int xenhcd_run(struct usb_hcd *hcd)
{
hcd->uses_new_polling = 1;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->state = HC_STATE_RUNNING;
return 0;
}
/*
* stop running HC
*/
static void xenhcd_stop(struct usb_hcd *hcd)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
del_timer_sync(&info->watchdog);
spin_lock_irq(&info->lock);
/* cancel all urbs */
hcd->state = HC_STATE_HALT;
xenhcd_cancel_all_enqueued_urbs(info);
xenhcd_giveback_unlinked_urbs(info);
spin_unlock_irq(&info->lock);
}
/*
* called as .urb_enqueue()
* a non-error return is a promise to give back the urb later
*/
static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
struct urb_priv *urbp;
unsigned long flags;
int ret;
if (unlikely(info->error))
return -ESHUTDOWN;
urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
if (!urbp)
return -ENOMEM;
spin_lock_irqsave(&info->lock, flags);
urbp->urb = urb;
urb->hcpriv = urbp;
urbp->req_id = ~0;
urbp->unlink_req_id = ~0;
INIT_LIST_HEAD(&urbp->list);
urbp->status = 1;
urb->unlinked = false;
ret = xenhcd_submit_urb(info, urbp);
if (ret)
xenhcd_free_urb_priv(urbp);
spin_unlock_irqrestore(&info->lock, flags);
return ret;
}
/*
* called as .urb_dequeue()
*/
static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
struct urb_priv *urbp;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&info->lock, flags);
urbp = urb->hcpriv;
if (urbp) {
urbp->status = status;
ret = xenhcd_unlink_urb(info, urbp);
}
spin_unlock_irqrestore(&info->lock, flags);
return ret;
}
/*
* called from usb_get_current_frame_number(),
* but almost no drivers use this function.
*/
static int xenhcd_get_frame(struct usb_hcd *hcd)
{
/* returning 0 signals an error, but in practice it causes no problem :-) */
return 0;
}
static struct hc_driver xenhcd_usb20_hc_driver = {
.description = "xen-hcd",
.product_desc = "Xen USB2.0 Virtual Host Controller",
.hcd_priv_size = sizeof(struct xenhcd_info),
.flags = HCD_USB2,
/* basic HC lifecycle operations */
.reset = xenhcd_setup,
.start = xenhcd_run,
.stop = xenhcd_stop,
/* managing urb I/O */
.urb_enqueue = xenhcd_urb_enqueue,
.urb_dequeue = xenhcd_urb_dequeue,
.get_frame_number = xenhcd_get_frame,
/* root hub operations */
.hub_status_data = xenhcd_hub_status_data,
.hub_control = xenhcd_hub_control,
#ifdef CONFIG_PM
.bus_suspend = xenhcd_bus_suspend,
.bus_resume = xenhcd_bus_resume,
#endif
};
static struct hc_driver xenhcd_usb11_hc_driver = {
.description = "xen-hcd",
.product_desc = "Xen USB1.1 Virtual Host Controller",
.hcd_priv_size = sizeof(struct xenhcd_info),
.flags = HCD_USB11,
/* basic HC lifecycle operations */
.reset = xenhcd_setup,
.start = xenhcd_run,
.stop = xenhcd_stop,
/* managing urb I/O */
.urb_enqueue = xenhcd_urb_enqueue,
.urb_dequeue = xenhcd_urb_dequeue,
.get_frame_number = xenhcd_get_frame,
/* root hub operations */
.hub_status_data = xenhcd_hub_status_data,
.hub_control = xenhcd_hub_control,
#ifdef CONFIG_PM
.bus_suspend = xenhcd_bus_suspend,
.bus_resume = xenhcd_bus_resume,
#endif
};
static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
{
int i;
int err = 0;
int num_ports;
int usb_ver;
struct usb_hcd *hcd = NULL;
struct xenhcd_info *info;
err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
&num_ports);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading num-ports");
return ERR_PTR(-EINVAL);
}
if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
xenbus_dev_fatal(dev, err, "invalid num-ports");
return ERR_PTR(-EINVAL);
}
err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading usb-ver");
return ERR_PTR(-EINVAL);
}
switch (usb_ver) {
case XENUSB_VER_USB11:
hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
dev_name(&dev->dev));
break;
case XENUSB_VER_USB20:
hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
dev_name(&dev->dev));
break;
default:
xenbus_dev_fatal(dev, err, "invalid usb-ver");
return ERR_PTR(-EINVAL);
}
if (!hcd) {
xenbus_dev_fatal(dev, err,
"fail to allocate USB host controller");
return ERR_PTR(-ENOMEM);
}
info = xenhcd_hcd_to_info(hcd);
info->xbdev = dev;
info->rh_numports = num_ports;
for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
info->shadow[i].req.id = i + 1;
info->shadow[i].urb = NULL;
info->shadow[i].in_flight = false;
}
info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
return hcd;
}
static void xenhcd_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
break;
case XenbusStateInitWait:
case XenbusStateInitialised:
case XenbusStateConnected:
if (dev->state != XenbusStateInitialising)
break;
if (!xenhcd_connect(dev))
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
fallthrough; /* Missed the backend's Closing state. */
case XenbusStateClosing:
xenhcd_disconnect(dev);
break;
default:
xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
backend_state);
break;
}
}
static void xenhcd_remove(struct xenbus_device *dev)
{
struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
xenhcd_destroy_rings(info);
usb_put_hcd(hcd);
}
static int xenhcd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int err;
struct usb_hcd *hcd;
struct xenhcd_info *info;
if (usb_disabled())
return -ENODEV;
hcd = xenhcd_create_hcd(dev);
if (IS_ERR(hcd)) {
err = PTR_ERR(hcd);
xenbus_dev_fatal(dev, err,
"fail to create usb host controller");
return err;
}
info = xenhcd_hcd_to_info(hcd);
dev_set_drvdata(&dev->dev, info);
err = usb_add_hcd(hcd, 0, 0);
if (err) {
xenbus_dev_fatal(dev, err, "fail to add USB host controller");
usb_put_hcd(hcd);
dev_set_drvdata(&dev->dev, NULL);
}
return err;
}
static const struct xenbus_device_id xenhcd_ids[] = {
{ "vusb" },
{ "" },
};
static struct xenbus_driver xenhcd_driver = {
.ids = xenhcd_ids,
.probe = xenhcd_probe,
.otherend_changed = xenhcd_backend_changed,
.remove = xenhcd_remove,
};
static int __init xenhcd_init(void)
{
if (!xen_domain())
return -ENODEV;
xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL);
if (!xenhcd_urbp_cachep) {
pr_err("xenhcd failed to create kmem cache\n");
return -ENOMEM;
}
return xenbus_register_frontend(&xenhcd_driver);
}
module_init(xenhcd_init);
static void __exit xenhcd_exit(void)
{
kmem_cache_destroy(xenhcd_urbp_cachep);
xenbus_unregister_driver(&xenhcd_driver);
}
module_exit(xenhcd_exit);
MODULE_ALIAS("xen:vusb");
MODULE_AUTHOR("Juergen Gross <[email protected]>");
MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/usb/host/xen-hcd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <[email protected]>
* Neil Zhang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/usb/otg.h>
#include <linux/usb/of.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/io.h>
#include <linux/usb/hcd.h>
#include "ehci.h"
/* registers */
#define U2x_CAPREGS_OFFSET 0x100
#define CAPLENGTH_MASK (0xff)
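/*
* Register layout sketch (as derived in mv_ehci_probe() below): the EHCI
* capability registers sit at a fixed 0x100 offset from the controller
* base, and the operational registers follow CAPLENGTH bytes after that:
*
*	cap_regs = base + U2x_CAPREGS_OFFSET;
*	op_regs  = cap_regs + (readl(cap_regs) & CAPLENGTH_MASK);
*/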
#define hcd_to_ehci_hcd_mv(h) ((struct ehci_hcd_mv *)hcd_to_ehci(h)->priv)
struct ehci_hcd_mv {
/* Which mode is this EHCI running in: OTG or Host? */
int mode;
void __iomem *base;
void __iomem *cap_regs;
void __iomem *op_regs;
struct usb_phy *otg;
struct clk *clk;
struct phy *phy;
int (*set_vbus)(unsigned int vbus);
};
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
{
int retval;
retval = clk_prepare_enable(ehci_mv->clk);
if (retval)
return retval;
retval = phy_init(ehci_mv->phy);
if (retval)
clk_disable_unprepare(ehci_mv->clk);
return retval;
}
static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
{
phy_exit(ehci_mv->phy);
clk_disable_unprepare(ehci_mv->clk);
}
static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 status;
int retval;
if (ehci_mv == NULL) {
dev_err(dev, "Can not find private ehci data\n");
return -ENODEV;
}
hcd->has_tt = 1;
retval = ehci_setup(hcd);
if (retval)
dev_err(dev, "ehci_setup failed %d\n", retval);
if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
status = ehci_readl(ehci, &ehci->regs->port_status[0]);
status |= PORT_TEST_FORCE;
ehci_writel(ehci, status, &ehci->regs->port_status[0]);
status &= ~PORT_TEST_FORCE;
ehci_writel(ehci, status, &ehci->regs->port_status[0]);
}
return retval;
}
static struct hc_driver __read_mostly ehci_platform_hc_driver;
static const struct ehci_driver_overrides platform_overrides __initconst = {
.reset = mv_ehci_reset,
.extra_priv_size = sizeof(struct ehci_hcd_mv),
};
static int mv_ehci_probe(struct platform_device *pdev)
{
struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct ehci_hcd_mv *ehci_mv;
struct resource *r;
int retval;
u32 offset;
u32 status;
if (usb_disabled())
return -ENODEV;
hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(pdev, hcd);
ehci_mv = hcd_to_ehci_hcd_mv(hcd);
ehci_mv->mode = MV_USB_MODE_HOST;
if (pdata) {
ehci_mv->mode = pdata->mode;
ehci_mv->set_vbus = pdata->set_vbus;
}
ehci_mv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(ehci_mv->phy)) {
retval = PTR_ERR(ehci_mv->phy);
if (retval != -EPROBE_DEFER)
dev_err(&pdev->dev, "Failed to get phy.\n");
goto err_put_hcd;
}
ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ehci_mv->clk)) {
dev_err(&pdev->dev, "error getting clock\n");
retval = PTR_ERR(ehci_mv->clk);
goto err_put_hcd;
}
ehci_mv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(ehci_mv->base)) {
retval = PTR_ERR(ehci_mv->base);
goto err_put_hcd;
}
retval = mv_ehci_enable(ehci_mv);
if (retval) {
dev_err(&pdev->dev, "init phy error %d\n", retval);
goto err_put_hcd;
}
ehci_mv->cap_regs =
(void __iomem *) ((unsigned long) ehci_mv->base + U2x_CAPREGS_OFFSET);
offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
ehci_mv->op_regs =
(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
hcd->rsrc_start = r->start;
hcd->rsrc_len = resource_size(r);
hcd->regs = ehci_mv->op_regs;
retval = platform_get_irq(pdev, 0);
if (retval < 0)
goto err_disable_clk;
hcd->irq = retval;
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(ehci_mv->otg)) {
retval = PTR_ERR(ehci_mv->otg);
if (retval == -ENXIO)
dev_info(&pdev->dev, "MV_USB_MODE_OTG "
"must have CONFIG_USB_PHY enabled\n");
else
dev_err(&pdev->dev,
"unable to find transceiver\n");
goto err_disable_clk;
}
retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
if (retval < 0) {
dev_err(&pdev->dev,
"unable to register with transceiver\n");
retval = -ENODEV;
goto err_disable_clk;
}
/* otg will enable clock before use as host */
mv_ehci_disable(ehci_mv);
} else {
if (ehci_mv->set_vbus)
ehci_mv->set_vbus(1);
retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (retval) {
dev_err(&pdev->dev,
"failed to add hcd with err %d\n", retval);
goto err_set_vbus;
}
device_wakeup_enable(hcd->self.controller);
}
if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC) {
status = ehci_readl(ehci, &ehci->regs->port_status[0]);
/* These "reserved" bits actually enable HSIC mode. */
status |= BIT(25);
status &= ~GENMASK(31, 30);
ehci_writel(ehci, status, &ehci->regs->port_status[0]);
}
dev_info(&pdev->dev,
"successfully found EHCI device with regs 0x%p irq %d working in %s mode\n",
hcd->regs, hcd->irq,
ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
return 0;
err_set_vbus:
if (ehci_mv->set_vbus)
ehci_mv->set_vbus(0);
err_disable_clk:
mv_ehci_disable(ehci_mv);
err_put_hcd:
usb_put_hcd(hcd);
return retval;
}
static void mv_ehci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
if (hcd->rh_registered)
usb_remove_hcd(hcd);
if (!IS_ERR_OR_NULL(ehci_mv->otg))
otg_set_host(ehci_mv->otg->otg, NULL);
if (ehci_mv->mode == MV_USB_MODE_HOST) {
if (ehci_mv->set_vbus)
ehci_mv->set_vbus(0);
mv_ehci_disable(ehci_mv);
}
usb_put_hcd(hcd);
}
static const struct platform_device_id ehci_id_table[] = {
{"pxa-u2oehci", 0},
{"pxa-sph", 0},
{},
};
static void mv_ehci_shutdown(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (!hcd->rh_registered)
return;
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
static const struct of_device_id ehci_mv_dt_ids[] = {
{ .compatible = "marvell,pxau2o-ehci", },
{},
};
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
.remove_new = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
.name = "mv-ehci",
.bus = &platform_bus_type,
.of_match_table = ehci_mv_dt_ids,
},
.id_table = ehci_id_table,
};
static int __init ehci_platform_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_mv_driver);
}
module_init(ehci_platform_init);
static void __exit ehci_platform_cleanup(void)
{
platform_driver_unregister(&ehci_mv_driver);
}
module_exit(ehci_platform_cleanup);
MODULE_DESCRIPTION("Marvell EHCI driver");
MODULE_AUTHOR("Chao Xie <[email protected]>");
MODULE_AUTHOR("Neil Zhang <[email protected]>");
MODULE_ALIAS("mv-ehci");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
| linux-master | drivers/usb/host/ehci-mv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
/*
* Ring initialization rules:
* 1. Each segment is initialized to zero, except for link TRBs.
* 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
* Consumer Cycle State (CCS), depending on ring function.
* 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
*
* Ring behavior rules:
* 1. A ring is empty if enqueue == dequeue. This means there will always be at
* least one free TRB in the ring. This is useful if you want to turn that
* into a link TRB and expand the ring.
* 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
* link TRB, then load the pointer with the address in the link TRB. If the
* link TRB had its toggle bit set, you may need to update the ring cycle
* state (see cycle bit rules). You may have to do this multiple times
* until you reach a non-link TRB.
* 3. A ring is full if enqueue++ (for the definition of increment above)
* equals the dequeue pointer.
*
* Cycle bit rules:
* 1. When a consumer increments a dequeue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
* 2. When a producer increments an enqueue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
*
* Producer rules:
* 1. Check if ring is full before you enqueue.
* 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
* Update enqueue pointer between each write (which may update the ring
* cycle state).
* 3. Notify consumer. If SW is producer, it rings the doorbell for command
* and endpoint rings. If the HC is the producer for the event ring,
* it generates an interrupt according to interrupt moderation rules.
*
* Consumer rules:
* 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
* the TRB is owned by the consumer.
* 2. Update dequeue pointer (which may update the ring cycle state) and
* continue processing TRBs until you reach a TRB which is not owned by you.
* 3. Notify the producer. SW is the consumer for the event ring, and it
* updates event ring dequeue pointer. HC is the consumer for the command and
* endpoint rings; it generates events on the event ring for these.
*/
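/*
* A minimal sketch of the consumer-side ownership test implied by the
* cycle bit rules above (hypothetical helper, not part of this file):
* a TRB belongs to the consumer when its cycle bit matches the ring's
* cycle state.
*
*	static bool trb_owned_by_consumer(struct xhci_ring *ring,
*					  union xhci_trb *trb)
*	{
*		return (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
*		       ring->cycle_state;
*	}
*/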
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2,
u32 field3, u32 field4, bool command_must_succeed);
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
*/
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
union xhci_trb *trb)
{
unsigned long segment_offset;
if (!seg || !trb || trb < seg->trbs)
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
static bool trb_is_noop(union xhci_trb *trb)
{
return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}
static bool trb_is_link(union xhci_trb *trb)
{
return TRB_TYPE_LINK_LE32(trb->link.control);
}
static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}
static bool last_trb_on_ring(struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}
static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
static bool last_td_in_urb(struct xhci_td *td)
{
struct urb_priv *urb_priv = td->urb->hcpriv;
return urb_priv->num_tds_done == urb_priv->num_tds;
}
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
urb_priv->num_tds_done++;
}
static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
if (trb_is_link(trb)) {
/* unchain chained link TRBs */
trb->link.control &= cpu_to_le32(~TRB_CHAIN);
} else {
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
}
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
*/
static void next_trb(struct xhci_hcd *xhci,
struct xhci_ring *ring,
struct xhci_segment **seg,
union xhci_trb **trb)
{
if (trb_is_link(*trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
(*trb)++;
}
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
*/
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
unsigned int link_trb_count = 0;
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
goto out;
}
if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
goto out;
}
/* All other rings have link trbs */
if (!trb_is_link(ring->dequeue)) {
if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
xhci_warn(xhci, "Missing link TRB at end of segment\n");
else
ring->dequeue++;
}
while (trb_is_link(ring->dequeue)) {
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "Ring is an endless link TRB loop\n");
break;
}
}
out:
trace_xhci_inc_deq(ring);
return;
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
* chain bit is set), then set the chain bit in all the following link TRBs.
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
* Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
* set, but other sections talk about dealing with the chain bit set. This was
* fixed in the 0.96 specification errata, but we have to assume that all 0.95
* xHCI hardware can't handle the chain bit being cleared on a link TRB.
*
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
union xhci_trb *next;
unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
xhci_err(xhci, "Tried to move enqueue past ring segment\n");
return;
}
next = ++(ring->enqueue);
/* Update the dequeue pointer further if that was a link TRB */
while (trb_is_link(next)) {
/*
* If the caller doesn't plan on enqueueing more TDs before
* ringing the doorbell, then we don't want to give the link TRB
* to the hardware just yet. We'll give the link TRB back in
* prepare_ring() just before we enqueue the TD at the top of
* the ring.
*/
if (!chain && !more_trbs_coming)
break;
/* If we're not dealing with 0.95 hardware or isoc rings on
* AMD 0.96 host, carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
if (!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)) &&
!xhci_link_trb_quirk(xhci)) {
next->link.control &= cpu_to_le32(~TRB_CHAIN);
next->link.control |= cpu_to_le32(chain);
}
/* Give this link TRB to the hardware */
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (link_trb_toggles_cycle(next))
ring->cycle_state ^= 1;
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
break;
}
}
trace_xhci_inc_enq(ring);
}
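/*
* Producer-side usage in outline (a hypothetical sketch per the rules
* above, with error handling omitted): write the TRB with the current
* cycle state, advance the enqueue pointer, then notify the consumer.
*
*	trb->generic.field[3] = cpu_to_le32(TRB_TYPE(type) | ring->cycle_state);
*	inc_enq(xhci, ring, more_trbs_coming);
*	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
*/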
/*
* Return number of free normal TRBs from enqueue to dequeue pointer on ring.
* Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment.
* Only for transfer and command rings where driver is the producer, not for
* event rings.
*/
static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_segment *enq_seg = ring->enq_seg;
union xhci_trb *enq = ring->enqueue;
union xhci_trb *last_on_seg;
unsigned int free = 0;
int i = 0;
/* Ring might be empty even if enq != deq if enq is left on a link trb */
if (trb_is_link(enq)) {
enq_seg = enq_seg->next;
enq = enq_seg->trbs;
}
/* Empty ring, common case, don't walk the segments */
if (enq == ring->dequeue)
return ring->num_segs * (TRBS_PER_SEGMENT - 1);
do {
if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
return free + (ring->dequeue - enq);
last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
free += last_on_seg - enq;
enq_seg = enq_seg->next;
enq = enq_seg->trbs;
} while (i++ <= ring->num_segs);
return free;
}
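/*
* Worked example (hypothetical numbers, assuming TRBS_PER_SEGMENT == 256):
* with two segments, enqueue at TRB 10 of segment 0 and dequeue at TRB 5
* of segment 1, the walk counts 255 - 10 = 245 free TRBs on segment 0,
* then returns 245 + 5 = 250 once it reaches the dequeue segment.
*/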
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment. See rules above.
* return number of new segments needed to ensure this.
*/
static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
struct xhci_segment *seg;
int trbs_past_seg;
int enq_used;
int new_segs;
enq_used = ring->enqueue - ring->enq_seg->trbs;
/* how many trbs will be queued past the enqueue segment? */
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
if (trbs_past_seg <= 0)
return 0;
/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
return 0;
new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
seg = ring->enq_seg;
while (new_segs > 0) {
seg = seg->next;
if (seg == ring->deq_seg) {
xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
new_segs);
xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
num_trbs, trbs_past_seg % TRBS_PER_SEGMENT);
return new_segs;
}
new_segs--;
}
return 0;
}
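/*
* Worked example (hypothetical numbers, assuming TRBS_PER_SEGMENT == 256,
* i.e. 255 usable TRBs per segment): with the enqueue pointer 250 TRBs
* into its segment and a 10-TRB request, trbs_past_seg = 250 + 10 - 255
* = 5, so new_segs = 1 + 5 / 255 = 1 extra segment is needed if the walk
* runs into the dequeue segment.
*/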
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
return;
xhci_dbg(xhci, "// Ding dong!\n");
trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
}
static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}
static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
cmd_list);
}
/*
* Turn all commands on command ring with status set to "aborted" to no-op trbs.
* If there are other commands waiting then restart the ring and kick the timer.
* This must be called with command ring stopped and xhci->lock held.
*/
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
struct xhci_command *cur_cmd)
{
struct xhci_command *i_cmd;
/* Turn all aborted commands in list to no-ops, then restart */
list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
if (i_cmd->status != COMP_COMMAND_ABORTED)
continue;
i_cmd->status = COMP_COMMAND_RING_STOPPED;
xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
i_cmd->command_trb);
trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
/*
* caller waiting for completion is called when command
* completion event is received for these no-op commands
*/
}
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
/* ring command ring doorbell to restart the command ring */
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
xhci_ring_cmd_db(xhci);
}
}
/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
u64 crcr;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
reinit_completion(&xhci->cmd_ring_stop_completion);
/*
* The control bits like command stop, abort are located in lower
* dword of the command ring control register.
* Some controllers require all 64 bits to be written to abort the ring.
* Make sure the upper dword is valid, pointing to the next command,
* avoiding corrupting the command ring pointer in case the command ring
* is stopped by the time the upper dword is written.
*/
next_trb(xhci, NULL, &new_seg, &new_deq);
if (trb_is_link(new_deq))
next_trb(xhci, NULL, &new_seg, &new_deq);
crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5
* seconds then driver handles it as if host died (-ENODEV).
* In the future we should distinguish between -ENODEV and -ETIMEDOUT
* and try to recover a -ETIMEDOUT with a host controller reset.
*/
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
xhci_halt(xhci);
xhci_hc_died(xhci);
return ret;
}
/*
* Writing the CMD_RING_ABORT bit should cause a cmd completion event,
* however on some host hw the CMD_RING_RUNNING bit is correctly cleared
* but the completion event is never sent. Wait 2 secs (arbitrary
* number) to handle those cases after negation of CMD_RING_RUNNING.
*/
spin_unlock_irqrestore(&xhci->lock, flags);
ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
msecs_to_jiffies(2000));
spin_lock_irqsave(&xhci->lock, flags);
if (!ret) {
xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
} else {
xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
}
return 0;
}
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index,
unsigned int stream_id)
{
__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
unsigned int ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
* We don't want to restart any stream rings if there's a set dequeue
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
*/
if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
return;
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
writel(DB_VALUE(ep_index, stream_id), db_addr);
/* flush the write */
readl(db_addr);
}
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
unsigned int stream_id;
struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
struct xhci_stream_info *stream_info = ep->stream_info;
if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
stream_id);
}
}
void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
return NULL;
}
if (ep_index >= EP_CTX_PER_DEV) {
xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
return NULL;
}
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
return NULL;
}
return &xhci->devs[slot_id]->eps[ep_index];
}
static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep,
unsigned int stream_id)
{
/* common case, no streams */
if (!(ep->ep_state & EP_HAS_STREAMS))
return ep->ring;
if (!ep->stream_info)
return NULL;
if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
stream_id, ep->vdev->slot_id, ep->ep_index);
return NULL;
}
return ep->stream_info->stream_rings[stream_id];
}
/* Get the right ring for the given slot_id, ep_index and stream_id.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
*/
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id)
{
struct xhci_virt_ep *ep;
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return NULL;
return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}
/*
* Get the hw dequeue pointer xHC stopped on, either directly from the
* endpoint context, or if streams are in use from the stream context.
* The returned hw_dequeue contains the lowest four bits with cycle state
* and possbile stream context type.
*/
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
unsigned int ep_index, unsigned int stream_id)
{
struct xhci_ep_ctx *ep_ctx;
struct xhci_stream_ctx *st_ctx;
struct xhci_virt_ep *ep;
ep = &vdev->eps[ep_index];
if (ep->ep_state & EP_HAS_STREAMS) {
st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
return le64_to_cpu(st_ctx->stream_ring);
}
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
return le64_to_cpu(ep_ctx->deq);
}
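/*
* Callers typically split the returned value like this (see
* xhci_move_dequeue_past_td() below): the DMA pointer lives in the upper
* bits, the cycle state in bit 0.
*
*	hw_dequeue = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
*	new_cycle = hw_dequeue & 0x1;
*	deq_dma = hw_dequeue & ~0xf;
*/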
static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *td)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_command *cmd;
struct xhci_segment *new_seg;
union xhci_trb *new_deq;
int new_cycle;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
bool td_last_trb_found = false;
u32 trb_sct = 0;
int ret;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
stream_id);
return -ENODEV;
}
/*
* A cancelled TD can complete with a stall if HW cached the trb.
* In this case driver can't find td, but if the ring is empty we
* can move the dequeue pointer to the current enqueue position.
* We shouldn't hit this anymore as cached cancelled TRBs are given back
* after clearing the cache, but be on the safe side and keep it anyway
*/
if (!td) {
if (list_empty(&ep_ring->td_list)) {
new_seg = ep_ring->enq_seg;
new_deq = ep_ring->enqueue;
new_cycle = ep_ring->cycle_state;
xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
goto deq_found;
} else {
xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
return -EINVAL;
}
}
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
new_cycle = hw_dequeue & 0x1;
/*
* We want to find the pointer, segment and cycle state of the new trb
* (the one after current TD's last_trb). We know the cycle state at
* hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
* found.
*/
do {
if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
cycle_found = true;
if (td_last_trb_found)
break;
}
if (new_deq == td->last_trb)
td_last_trb_found = true;
if (cycle_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
next_trb(xhci, ep_ring, &new_seg, &new_deq);
/* Search wrapped around, bail out */
if (new_deq == ep->ring->dequeue) {
xhci_err(xhci, "Error: Failed finding new dequeue state\n");
return -EINVAL;
}
} while (!cycle_found || !td_last_trb_found);
deq_found:
/* Don't update the ring cycle state for the producer (us). */
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
if (addr == 0) {
xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
return -EINVAL;
}
if ((ep->ep_state & SET_DEQ_PENDING)) {
xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
&addr);
return -EBUSY;
}
/* This function gets called from contexts where it cannot sleep */
cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!cmd) {
xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
return -ENOMEM;
}
if (stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
ret = queue_command(xhci, cmd,
lower_32_bits(addr) | trb_sct | new_cycle,
upper_32_bits(addr),
STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
if (ret < 0) {
xhci_free_command(xhci, cmd);
return ret;
}
ep->queued_deq_seg = new_seg;
ep->queued_deq_ptr = new_deq;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
/* Stop the TD queueing code from ringing the doorbell until
* this command completes. The HC won't set the dequeue pointer
* if the ring is running, and ringing the doorbell starts the
* ring running.
*/
ep->ep_state |= SET_DEQ_PENDING;
xhci_ring_cmd_db(xhci);
return 0;
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_td *td, bool flip_cycle)
{
struct xhci_segment *seg = td->start_seg;
union xhci_trb *trb = td->first_trb;
while (1) {
trb_to_noop(trb, TRB_TR_NOOP);
/* flip cycle if asked to */
if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
if (trb == td->last_trb)
break;
next_trb(xhci, ep_ring, &seg, &trb);
}
}
/*
* Must be called with xhci->lock held in interrupt context,
* releases and re-acquires xhci->lock
*/
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status)
{
struct urb *urb = cur_td->urb;
struct urb_priv *urb_priv = urb->hcpriv;
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
}
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
trace_xhci_urb_giveback(urb);
usb_hcd_giveback_urb(hcd, urb, status);
}
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
struct xhci_ring *ring, struct xhci_td *td)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
size_t len;
if (!ring || !seg || !urb)
return;
if (usb_urb_dir_out(urb)) {
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_TO_DEVICE);
return;
}
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for IN transfers we need to copy the data from the bounce buffer to the sg list */
if (urb->num_sgs) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
seg->bounce_len, seg->bounce_offs);
if (len != seg->bounce_len)
xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
len, seg->bounce_len);
} else {
memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
seg->bounce_len);
}
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ring *ep_ring, int status)
{
struct urb *urb = NULL;
/* Clean up the endpoint's TD list */
urb = td->urb;
/* if a bounce buffer was used to align this td then unmap it */
xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than the buffer
* length, urb->actual_length will be a very big number (since it's
* unsigned). Play it safe and say we didn't transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
urb->transfer_buffer_length, urb->actual_length);
urb->actual_length = 0;
status = 0;
}
/* TD might be removed from td_list if we are giving back a cancelled URB */
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
/* Giving back a cancelled URB, or a TD slated for cancellation that completed anyway */
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
inc_td_cnt(urb);
/* Giveback the urb when all the tds are completed */
if (last_td_in_urb(td)) {
if ((urb->actual_length != urb->transfer_buffer_length &&
(urb->transfer_flags & URB_SHORT_NOT_OK)) ||
(status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
urb, urb->actual_length,
urb->transfer_buffer_length, status);
/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
status = 0;
xhci_giveback_urb_in_irq(xhci, td, status);
}
return 0;
}
/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
struct xhci_ring *ring;
struct xhci_td *td, *tmp_td;
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
cancelled_td_list) {
ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
if (td->cancel_status == TD_CLEARED) {
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ring, td->status);
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
}
if (ep->xhci->xhc_state & XHCI_STATE_DYING)
return;
}
}
static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
struct xhci_command *command;
int ret = 0;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ret = -ENOMEM;
goto done;
}
xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
(reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
ep_index, slot_id);
ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
if (ret)
xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
slot_id, ep_index, ret);
return ret;
}
static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep,
struct xhci_td *td,
enum xhci_ep_reset_type reset_type)
{
unsigned int slot_id = ep->vdev->slot_id;
int err;
/*
* Avoid resetting the endpoint if the link is inactive, as that can
* hang the host. The device will be reset soon to recover the link,
* so don't do anything here.
*/
if (ep->vdev->flags & VDEV_PORT_ERROR)
return -ENODEV;
/* add td to cancelled list and let reset ep handler take care of it */
if (reset_type == EP_HARD_RESET) {
ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
if (td && list_empty(&td->cancelled_td_list)) {
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
td->cancel_status = TD_HALTED;
}
}
if (ep->ep_state & EP_HALTED) {
xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
ep->ep_index);
return 0;
}
err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
if (err)
return err;
ep->ep_state |= EP_HALTED;
xhci_ring_cmd_db(xhci);
return 0;
}
/*
* Fix up the ep ring first, so HW stops executing cancelled TDs.
* We have the xHCI lock, so nothing can modify this list until we drop it.
* We're also in the event handler, so we can't get re-interrupted if another
* Stop Endpoint command completes.
*
* only call this when ring is not in a running state
*/
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
struct xhci_hcd *xhci;
struct xhci_td *td = NULL;
struct xhci_td *tmp_td = NULL;
struct xhci_td *cached_td = NULL;
struct xhci_ring *ring;
u64 hw_deq;
unsigned int slot_id = ep->vdev->slot_id;
int err;
xhci = ep->xhci;
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
(unsigned long long)xhci_trb_virt_to_dma(
td->start_seg, td->first_trb),
td->urb->stream_id, td->urb);
list_del_init(&td->td_list);
ring = xhci_urb_to_transfer_ring(xhci, td->urb);
if (!ring) {
xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
td->urb, td->urb->stream_id);
continue;
}
/*
* If a ring stopped on the TD we need to cancel then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
* Rings halted due to STALL may show hw_deq is past the stalled
* TD, but still require a set TR Deq command to flush xHC cache.
*/
hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
td->urb->stream_id);
hw_deq &= ~0xf;
if (td->cancel_status == TD_HALTED ||
trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
case TD_CLEARING_CACHE: /* set TR deq command already queued */
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
td->cancel_status = TD_CLEARING_CACHE;
if (cached_td)
/* FIXME stream case, several stopped rings */
xhci_dbg(xhci,
"Move dq past stream %u URB %p instead of stream %u URB %p\n",
td->urb->stream_id, td->urb,
cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
} else {
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
}
/* If there's no need to move the dequeue pointer then we're done */
if (!cached_td)
return 0;
err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
cached_td->urb->stream_id,
cached_td);
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
if (td->cancel_status != TD_CLEARING_CACHE)
continue;
xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
}
return 0;
}
/*
* Returns the TD the endpoint ring halted on.
* Only call for non-running rings without streams.
*/
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
struct xhci_td *td;
u64 hw_deq;
if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
hw_deq &= ~0xf;
td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
td->last_trb, hw_deq, false))
return td;
}
return NULL;
}
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
*
* 1. If the HW was in the middle of processing the TD that needs to be
* cancelled, then we must move the ring's dequeue pointer past the last TRB
* in the TD with a Set Dequeue Pointer Command.
* 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
* bit cleared) so that the HW will skip over them.
*/
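/*
* In outline (a sketch of xhci_invalidate_cancelled_tds() above):
*
*	for each cancelled TD:
*		if the hw dequeue pointer is inside the TD:
*			queue a Set TR Dequeue Pointer command past it
*		else:
*			turn the TD's TRBs into no-op TRBs
*/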
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 comp_code)
{
unsigned int ep_index;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_td *td = NULL;
enum xhci_ep_reset_type reset_type;
struct xhci_command *command;
int err;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
if (!xhci->devs[slot_id])
xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
slot_id);
return;
}
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_stop_ep(ep_ctx);
if (comp_code == COMP_CONTEXT_STATE_ERROR) {
/*
* If stop endpoint command raced with a halting endpoint we need to
* reset the host side endpoint first.
* If the TD we halted on isn't cancelled the TD should be given back
* with a proper error code, and the ring dequeue moved past the TD.
* In the streams case we can't find hw_deq, or the TD we halted on,
* so do a soft reset.
*
* The proper error code is unknown here: it would be -EPIPE if the
* device side of the endpoint halted (aka STALL), and -EPROTO if not
* (transaction error). We use -EPROTO; if the device is stalled it
* should return a stall error on the next transfer, which then will
* return -EPIPE, and the device-side stall is noted and cleared by
* the class driver.
*/
switch (GET_EP_CTX_STATE(ep_ctx)) {
case EP_STATE_HALTED:
xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
reset_type = EP_HARD_RESET;
td = find_halted_td(ep);
if (td)
td->status = -EPROTO;
}
/* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
if (err)
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
}
xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
xhci_ring_cmd_db(xhci);
return;
default:
break;
}
}
/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
xhci_invalidate_cancelled_tds(ep);
ep->ep_state &= ~EP_STOP_CMD_PENDING;
/* Otherwise ring the doorbell(s) to restart queued transfers */
xhci_giveback_invalidated_tds(ep);
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_td *cur_td;
struct xhci_td *tmp;
list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
inc_td_cnt(cur_td->urb);
if (last_td_in_urb(cur_td))
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
int slot_id, int ep_index)
{
struct xhci_td *cur_td;
struct xhci_td *tmp;
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
ring = ep->stream_info->stream_rings[stream_id];
if (!ring)
continue;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
slot_id, ep_index, stream_id);
xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
if (!ring)
return;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u",
slot_id, ep_index);
xhci_kill_ring_urbs(xhci, ring);
}
list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
cancelled_td_list) {
list_del_init(&cur_td->cancelled_td_list);
inc_td_cnt(cur_td->urb);
if (last_td_in_urb(cur_td))
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
/*
* The host controller died; register reads return 0xffffffff.
* Complete pending commands, mark them ABORTED.
* URBs need to be given back as usb core might be waiting with device locks
* held for the URBs to finish during device disconnect, blocking host remove.
*
* Call with xhci->lock held.
* The lock is released and re-acquired while giving back URBs.
*/
void xhci_hc_died(struct xhci_hcd *xhci)
{
int i, j;
if (xhci->xhc_state & XHCI_STATE_DYING)
return;
xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
xhci->xhc_state |= XHCI_STATE_DYING;
xhci_cleanup_command_queue(xhci);
/* return any pending urbs, remove may be waiting for them */
for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; j++)
xhci_kill_endpoint_urbs(xhci, i, j);
}
/* inform usb core hc died if PCI remove isn't already handling it */
if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
usb_hc_died(xhci_to_hcd(xhci));
}
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
unsigned int ep_index)
{
union xhci_trb *dequeue_temp;
dequeue_temp = ep_ring->dequeue;
/* If we get two back-to-back stalls, and the first stalled transfer
* ends just before a link TRB, the dequeue pointer will be left on
* the link TRB by the code in the while loop. So we have to update
* the dequeue pointer one segment further, or we'll jump off
* the segment into la-la-land.
*/
if (trb_is_link(ep_ring->dequeue)) {
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->dequeue++;
if (trb_is_link(ep_ring->dequeue)) {
if (ep_ring->dequeue ==
dev->eps[ep_index].queued_deq_ptr)
break;
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
if (ep_ring->dequeue == dequeue_temp) {
xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
break;
}
}
}
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
* the TD queueing code can ring the doorbell again. We also need to ring the
* endpoint doorbell to restart the ring, but only if there aren't more
* cancellations pending.
*/
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
goto cleanup;
}
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
trace_xhci_handle_cmd_set_deq(slot_ctx);
trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
switch (cmd_comp_code) {
case COMP_TRB_ERROR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CONTEXT_STATE_ERROR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = GET_EP_CTX_STATE(ep_ctx);
slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Slot state = %u, EP state = %u",
slot_state, ep_state);
break;
case COMP_SLOT_NOT_ENABLED_ERROR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
slot_id);
break;
default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
* should never get to this point if the synchronization between
* queueing and endpoint state is correct. This might happen
* if the device gets disconnected after we've finished
* cancelling URBs, which might not be an error...
*/
} else {
u64 deq;
/* 4.6.10 deq ptr is written to the stream ctx for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, ep->vdev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
/* HW cached TDs cleared from cache, give them back */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
cancelled_td_list) {
ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
if (td->cancel_status == TD_CLEARING_CACHE) {
td->cancel_status = TD_CLEARED;
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
}
}
cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_reset_ep(ep_ctx);
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Ignoring reset ep completion code of %u", cmd_comp_code);
/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
xhci_invalidate_cancelled_tds(ep);
/* Clear our internal halted state */
ep->ep_state &= ~EP_HALTED;
xhci_giveback_invalidated_tds(ep);
/* if this was a soft reset, then restart */
if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
struct xhci_command *command, u32 cmd_comp_code)
{
if (cmd_comp_code == COMP_SUCCESS)
command->slot_id = slot_id;
else
command->slot_id = 0;
}
static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_handle_cmd_disable_slot(slot_ctx);
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
u32 cmd_comp_code)
{
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
u32 add_flags;
/*
* Configure endpoint commands can come from the USB core configuration
* or alt setting changes, or when streams were being configured.
*/
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
return;
}
add_flags = le32_to_cpu(ctrl_ctx->add_flags);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(add_flags) - 1;
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
trace_xhci_handle_cmd_config_ep(ep_ctx);
return;
}
static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
if (!vdev)
return;
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_addr_dev(slot_ctx);
}
static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
if (!vdev) {
xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
slot_id);
return;
}
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_reset_dev(slot_ctx);
xhci_dbg(xhci, "Completed reset device command.\n");
}
static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
if (!(xhci->quirks & XHCI_NEC_HOST)) {
xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
return;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"NEC firmware version %2x.%02x",
NEC_FW_MAJOR(le32_to_cpu(event->status)),
NEC_FW_MINOR(le32_to_cpu(event->status)));
}
static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
list_del(&cmd->cmd_list);
if (cmd->completion) {
cmd->status = status;
complete(cmd->completion);
} else {
kfree(cmd);
}
}
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
struct xhci_command *cur_cmd, *tmp_cmd;
xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}
void xhci_handle_command_timeout(struct work_struct *work)
{
struct xhci_hcd *xhci;
unsigned long flags;
char str[XHCI_MSG_MAX];
u64 hw_ring_state;
u32 cmd_field3;
u32 usbsts;
xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
spin_lock_irqsave(&xhci->lock, flags);
/*
* If timeout work is pending, or current_cmd is NULL, it means we
* raced with command completion. Command is handled so just return.
*/
if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
usbsts = readl(&xhci->op_regs->status);
xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
/* Bail out and tear down xhci if a stop endpoint command failed */
if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
struct xhci_virt_ep *ep;
xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
TRB_TO_EP_INDEX(cmd_field3));
if (ep)
ep->ep_state &= ~EP_STOP_CMD_PENDING;
xhci_halt(xhci);
xhci_hc_died(xhci);
goto time_out_completed;
}
/* mark this command to be cancelled */
xhci->current_cmd->status = COMP_COMMAND_ABORTED;
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (hw_ring_state == ~(u64)0) {
xhci_hc_died(xhci);
goto time_out_completed;
}
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
/* Prevent new doorbell, and start command abort */
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_dbg(xhci, "Command timeout\n");
xhci_abort_cmd_ring(xhci, flags);
goto time_out_completed;
}
/* host removed. Bail out */
if (xhci->xhc_state & XHCI_STATE_REMOVING) {
xhci_dbg(xhci, "host removed, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
goto time_out_completed;
}
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
time_out_completed:
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
union xhci_trb *cmd_trb;
struct xhci_command *cmd;
u32 cmd_type;
if (slot_id >= MAX_HC_SLOTS) {
xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
return;
}
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_trb = xhci->cmd_ring->dequeue;
trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb);
/*
* Check whether the completion event is for our internally kept
* command.
*/
if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
xhci_warn(xhci,
"ERROR mismatched command completion event\n");
return;
}
cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
cancel_delayed_work(&xhci->cmd_timer);
cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
complete_all(&xhci->cmd_ring_stop_completion);
return;
}
if (cmd->command_trb != xhci->cmd_ring->dequeue) {
xhci_err(xhci,
"Command completion event does not match command\n");
return;
}
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
* The command ring is stopped now, but the xHC will issue a Command
* Ring Stopped event which will cause us to restart it.
*/
if (cmd_comp_code == COMP_COMMAND_ABORTED) {
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
if (cmd->status == COMP_COMMAND_ABORTED) {
if (xhci->current_cmd == cmd)
xhci->current_cmd = NULL;
goto event_handled;
}
}
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
switch (cmd_type) {
case TRB_ENABLE_SLOT:
xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
break;
case TRB_DISABLE_SLOT:
xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
case TRB_CONFIG_EP:
if (!cmd->completion)
xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
break;
case TRB_EVAL_CONTEXT:
break;
case TRB_ADDR_DEV:
xhci_handle_cmd_addr_dev(xhci, slot_id);
break;
case TRB_STOP_RING:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
if (!cmd->completion)
xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
cmd_comp_code);
break;
case TRB_SET_DEQ:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
case TRB_CMD_NOOP:
/* Is this an aborted command turned to NO-OP? */
if (cmd->status == COMP_COMMAND_RING_STOPPED)
cmd_comp_code = COMP_COMMAND_RING_STOPPED;
break;
case TRB_RESET_EP:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
case TRB_RESET_DEV:
/* SLOT_ID field in reset device cmd completion event TRB is 0.
* Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
*/
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3]));
xhci_handle_cmd_reset_dev(xhci, slot_id);
break;
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
break;
default:
/* Skip over unknown commands on the event ring */
xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
break;
}
/* restart timer if this wasn't the last command */
if (!list_is_singular(&xhci->cmd_list)) {
xhci->current_cmd = list_first_entry(&cmd->cmd_list,
struct xhci_command, cmd_list);
xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
} else if (xhci->current_cmd == cmd) {
xhci->current_cmd = NULL;
}
event_handled:
xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
union xhci_trb *event, u32 trb_type)
{
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd);
}
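/*
* Device Notification events are typically Function Wake notifications
* from a USB 3.x device requesting resume; pass the wakeup up to the hub
* above the device.
*/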
static void handle_device_notification(struct xhci_hcd *xhci,
union xhci_trb *event)
{
u32 slot_id;
struct usb_device *udev;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "Device Notification event for "
"unused slot %u\n", slot_id);
return;
}
xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
slot_id);
udev = xhci->devs[slot_id]->udev;
if (udev && udev->parent)
usb_wakeup_notification(udev->parent, udev->portnum);
}
/*
* Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
* controller.
* As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
* if a connection to a USB 1 device is followed by another connection
* to a USB 2 device.
*
* Reset the PHY after the USB device is disconnected if the device speed
* is less than HCD_USB3.
* Retry the reset sequence a maximum of 4 times, checking the PLL lock
* status each time.
*/
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 pll_lock_check;
u32 retry_count = 4;
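/* 0x1048 and 0x1070 are vendor-specific PHY reset control and PLL status
* registers on ThunderX2, not part of the standard xHCI register map.
*/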
do {
/* Assert PHY reset */
writel(0x6F, hcd->regs + 0x1048);
udelay(10);
/* De-assert the PHY reset */
writel(0x7F, hcd->regs + 0x1048);
udelay(200);
pll_lock_check = readl(hcd->regs + 0x1070);
} while (!(pll_lock_check & 0x1) && --retry_count);
}
static void handle_port_status(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
union xhci_trb *event)
{
struct usb_hcd *hcd;
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
struct xhci_port *port;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
xhci_warn(xhci,
"WARN: xHC returned failed port status event\n");
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id);
inc_deq(xhci, ir->event_ring);
return;
}
port = &xhci->hw_ports[port_id - 1];
if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Port change event, no port for port ID %u\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
/* We might get interrupts after shared_hcd is removed */
if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
bogus_port_status = true;
goto cleanup;
}
hcd = port->rhub->hcd;
bus_state = &port->rhub->bus_state;
hcd_portnum = port->hcd_portnum;
portsc = readl(port->addr);
xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
trace_xhci_handle_port_status(hcd_portnum, portsc);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
}
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
if (slot_id && xhci->devs[slot_id])
xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
cmd_reg = readl(&xhci->op_regs->command);
if (!(cmd_reg & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n");
goto cleanup;
}
if (DEV_SUPERSPEED_ANY(portsc)) {
xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
/* Set a flag to say the port signaled remote wakeup,
* so we can tell the difference between the end of
* device and host initiated resume.
*/
bus_state->port_remote_wakeup |= 1 << hcd_portnum;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
xhci_set_link_state(xhci, port, XDEV_U0);
/* Need to wait until the next link state change
* indicates the device is actually in U0.
*/
bogus_port_status = true;
goto cleanup;
} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
port->resume_timestamp = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(hcd_portnum, &bus_state->resuming_ports);
/* Do the rest in GetPortStatus after the resume time delay.
* Avoid polling the roothub status before then, so that a
* USB device's auto-resume latency stays around ~40 ms.
*/
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
port->resume_timestamp);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
}
if ((portsc & PORT_PLC) &&
DEV_SUPERSPEED_ANY(portsc) &&
((portsc & PORT_PLS_MASK) == XDEV_U0 ||
(portsc & PORT_PLS_MASK) == XDEV_U1 ||
(portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
complete(&port->u3exit_done);
/* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
hcd_portnum + 1);
bogus_port_status = true;
goto cleanup;
}
}
/*
* Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
* RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
if (hcd->speed < HCD_USB3 && port->rexit_active) {
complete(&port->rexit_done);
port->rexit_active = false;
bogus_port_status = true;
goto cleanup;
}
if (hcd->speed < HCD_USB3) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
(portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
xhci_cavium_reset_phy_quirk(xhci);
}
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, ir->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
* (USB 2.0 or USB 3.0) to kick.
*/
if (bogus_port_status)
return;
/*
* xHCI port-status-change events occur when the "or" of all the
* status-change bits in the portsc register changes from 0 to 1.
* New status changes won't cause an event if any other change
* bits are still set. When an event occurs, switch over to
* polling to avoid losing status changes.
*/
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
__func__, hcd->self.busnum);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
spin_lock(&xhci->lock);
}
/*
* This TD is defined by the TRBs starting at start_trb in start_seg and ending
* at end_trb, which may be in another segment. If the suspect DMA address is a
* TRB in this TD, this function returns that TRB's segment. Otherwise it
* returns NULL.
*/
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
struct xhci_segment *start_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t suspect_dma,
bool debug)
{
dma_addr_t start_dma;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma;
struct xhci_segment *cur_seg;
start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
cur_seg = start_seg;
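/* Walk each segment at most once; stop when we wrap back to start_seg */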
do {
if (start_dma == 0)
return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
if (debug)
xhci_warn(xhci,
"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
(unsigned long long)suspect_dma,
(unsigned long long)start_dma,
(unsigned long long)end_trb_dma,
(unsigned long long)cur_seg->dma,
(unsigned long long)end_seg_dma);
if (end_trb_dma > 0) {
/* The end TRB is in this segment, so suspect should be here */
if (start_dma <= end_trb_dma) {
if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
return cur_seg;
} else {
/* Case for one segment with
* a TD wrapped around to the top
*/
if ((suspect_dma >= start_dma &&
suspect_dma <= end_seg_dma) ||
(suspect_dma >= cur_seg->dma &&
suspect_dma <= end_trb_dma))
return cur_seg;
}
return NULL;
} else {
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
return cur_seg;
}
cur_seg = cur_seg->next;
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
return NULL;
}
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep)
{
/*
* As part of low/full-speed endpoint-halt processing
* we must clear the TT buffer (USB 2.0 specification 11.17.5).
*/
if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
(td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
!(ep->ep_state & EP_CLEARING_TT)) {
ep->ep_state |= EP_CLEARING_TT;
td->urb->ep->hcpriv = td->urb->dev;
if (usb_hub_clear_tt_buffer(td->urb))
ep->ep_state &= ~EP_CLEARING_TT;
}
}
/* Check if an error has halted the endpoint ring. The class driver will
* cleanup the halt for a non-default control endpoint if we indicate a stall.
* However, a babble and other errors also halt the endpoint ring, and the class
* driver won't clear the halt in that case, so we need to issue a Set Transfer
* Ring Dequeue Pointer command manually.
*/
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
unsigned int trb_comp_code)
{
/* TRB completion codes that may require a manual halt cleanup */
if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
/* The 0.95 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
return 1;
return 0;
}
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
if (trb_comp_code >= 224 && trb_comp_code <= 255) {
/* Vendor defined "informational" completion code,
* treat as not-an-error.
*/
xhci_dbg(xhci, "Vendor defined info completion code %u\n",
trb_comp_code);
xhci_dbg(xhci, "Treating code as success.\n");
return 1;
}
return 0;
}
static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
u32 trb_comp_code)
{
struct xhci_ep_ctx *ep_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
switch (trb_comp_code) {
case COMP_STOPPED_LENGTH_INVALID:
case COMP_STOPPED_SHORT_PACKET:
case COMP_STOPPED:
/*
* The "Stop Endpoint" completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
return 0;
case COMP_USB_TRANSACTION_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_SPLIT_TRANSACTION_ERROR:
/*
* If the endpoint context state is not halted, we might be
* racing with a reset endpoint command issued by an unsuccessful
* stop endpoint completion (context error). In that case the
* TD should be on the cancelled list, and the EP_HALTED flag set.
*
* Alternatively, it's not halted because the 0.95 spec states
* that a babbling control endpoint should not halt. The 0.96 spec
* says it should. Some HW claims to be 0.95 compliant,
* but it halts the control endpoint anyway.
*/
if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
/*
* If EP_HALTED is set and TD is on the cancelled list
* the TD and dequeue pointer will be handled by reset
* ep command completion
*/
if ((ep->ep_state & EP_HALTED) &&
!list_empty(&td->cancelled_td_list)) {
xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
(unsigned long long)xhci_trb_virt_to_dma(
td->start_seg, td->first_trb));
return 0;
}
/* endpoint not halted, don't reset it */
break;
}
/* Almost same procedure as for STALL_ERROR below */
xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
return 0;
case COMP_STALL_ERROR:
/*
* xhci internal endpoint state will go to a "halt" state for
* any stall, including default control pipe protocol stall.
* To clear the host side halt we need to issue a reset endpoint
* command, followed by a set dequeue command to move past the
* TD.
* Class drivers clear the device side halt from a functional
* stall later. Hub TT buffer should only be cleared for FS/LS
* devices behind HS hubs for functional stalls.
*/
if (ep->ep_index != 0)
xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
return 0; /* xhci_handle_halted_endpoint marked td cancelled */
default:
break;
}
/* Update ring dequeue pointer */
ep_ring->dequeue = td->last_trb;
ep_ring->deq_seg = td->last_trb_seg;
inc_deq(xhci, ep_ring);
return xhci_td_cleanup(xhci, td, ep_ring, td->status);
}
/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
union xhci_trb *stop_trb)
{
u32 sum;
union xhci_trb *trb = ring->dequeue;
struct xhci_segment *seg = ring->deq_seg;
for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
if (!trb_is_noop(trb) && !trb_is_link(trb))
sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
}
return sum;
}
/*
* Process control tds, update urb status and actual_length.
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
u32 remaining, requested;
u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
requested = td->urb->transfer_buffer_length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
switch (trb_comp_code) {
case COMP_SUCCESS:
if (trb_type != TRB_STATUS) {
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
(trb_type == TRB_DATA) ? "data" : "setup");
td->status = -ESHUTDOWN;
break;
}
td->status = 0;
break;
case COMP_SHORT_PACKET:
td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = remaining;
else
xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
goto finish_td;
case COMP_STOPPED:
switch (trb_type) {
case TRB_SETUP:
td->urb->actual_length = 0;
goto finish_td;
case TRB_DATA:
case TRB_NORMAL:
td->urb->actual_length = requested - remaining;
goto finish_td;
case TRB_STATUS:
td->urb->actual_length = requested;
goto finish_td;
default:
xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
trb_type);
goto finish_td;
}
case COMP_STOPPED_LENGTH_INVALID:
goto finish_td;
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
trb_comp_code, ep->ep_index);
fallthrough;
case COMP_STALL_ERROR:
/* Did we transfer part of the data (middle) phase? */
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = requested - remaining;
else if (!td->urb_length_set)
td->urb->actual_length = 0;
goto finish_td;
}
/* stopped at setup stage, no data transferred */
if (trb_type == TRB_SETUP)
goto finish_td;
/*
* If we're on the data stage, update the URB's actual_length and flag it
* as set, so it won't be overwritten by the event for the last TRB.
*/
if (trb_type == TRB_DATA ||
trb_type == TRB_NORMAL) {
td->urb_length_set = true;
td->urb->actual_length = requested - remaining;
xhci_dbg(xhci, "Waiting for status stage event\n");
return 0;
}
/* at status stage */
if (!td->urb_length_set)
td->urb->actual_length = requested;
finish_td:
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct urb_priv *urb_priv;
int idx;
struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
bool sum_trbs_for_length = false;
u32 remaining, requested, ep_trb_len;
int short_framestatus;
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
frame = &td->urb->iso_frame_desc[idx];
requested = frame->length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
-EREMOTEIO : 0;
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
if (remaining) {
frame->status = short_framestatus;
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
sum_trbs_for_length = true;
break;
}
frame->status = 0;
break;
case COMP_SHORT_PACKET:
frame->status = short_framestatus;
sum_trbs_for_length = true;
break;
case COMP_BANDWIDTH_OVERRUN_ERROR:
frame->status = -ECOMM;
break;
case COMP_ISOCH_BUFFER_OVERRUN:
case COMP_BABBLE_DETECTED_ERROR:
frame->status = -EOVERFLOW;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
frame->status = -EPROTO;
break;
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
if (ep_trb != td->last_trb)
return 0;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
break;
case COMP_STOPPED_SHORT_PACKET:
/* the field that normally contains the residue now contains the transferred length */
frame->status = short_framestatus;
requested = remaining;
break;
case COMP_STOPPED_LENGTH_INVALID:
requested = 0;
remaining = 0;
break;
default:
sum_trbs_for_length = true;
frame->status = -1;
break;
}
if (sum_trbs_for_length)
frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
else
frame->actual_length = requested;
td->urb->actual_length += frame->actual_length;
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep, int status)
{
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
int idx;
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
frame = &td->urb->iso_frame_desc[idx];
/* The transfer is partly done. */
frame->status = -EXDEV;
/* a skipped TD transferred nothing */
frame->actual_length = 0;
/* Update ring dequeue pointer */
ep->ring->dequeue = td->last_trb;
ep->ring->deq_seg = td->last_trb_seg;
inc_deq(xhci, ep->ring);
return xhci_td_cleanup(xhci, td, ep->ring, status);
}
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_slot_ctx *slot_ctx;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
requested = td->urb->transfer_buffer_length;
switch (trb_comp_code) {
case COMP_SUCCESS:
ep->err_count = 0;
/* handle success with untransferred data as short packet */
if (ep_trb != td->last_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
}
td->status = 0;
break;
case COMP_SHORT_PACKET:
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
td->urb->actual_length = remaining;
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
ep_trb_len = 0;
remaining = 0;
break;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
break;
td->status = 0;
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
return 0;
default:
/* do nothing */
break;
}
if (ep_trb == td->last_trb)
td->urb->actual_length = requested - remaining;
else
td->urb->actual_length =
sum_trb_lengths(xhci, ep_ring, ep_trb) +
ep_trb_len - remaining;
finish_td:
if (remaining > requested) {
xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
remaining);
td->urb->actual_length = 0;
}
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
* At this point, the host controller is probably hosed and should be reset.
*/
static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
struct xhci_transfer_event *event)
{
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct xhci_td *td = NULL;
dma_addr_t ep_trb_dma;
struct xhci_segment *ep_seg;
union xhci_trb *ep_trb;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
int td_num = 0;
bool handling_skipped_tds = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep) {
xhci_err(xhci, "ERROR Invalid Transfer event\n");
goto err_out;
}
ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci,
"ERROR Transfer event for disabled endpoint slot %u ep %u\n",
slot_id, ep_index);
goto err_out;
}
/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
if (!ep_ring) {
switch (trb_comp_code) {
case COMP_STALL_ERROR:
case COMP_USB_TRANSACTION_ERROR:
case COMP_INVALID_STREAM_TYPE_ERROR:
case COMP_INVALID_STREAM_ID_ERROR:
xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
ep_index);
if (ep->err_count++ > MAX_SOFT_RETRY)
xhci_handle_halted_endpoint(xhci, ep, NULL,
EP_HARD_RESET);
else
xhci_handle_halted_endpoint(xhci, ep, NULL,
EP_SOFT_RESET);
goto cleanup;
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
case COMP_STOPPED_LENGTH_INVALID:
goto cleanup;
default:
xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
slot_id, ep_index);
goto err_out;
}
}
/* Count current td numbers if ep->skip is set */
if (ep->skip)
td_num += list_count_nodes(&ep_ring->td_list);
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
*/
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
break;
if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
ep_ring->last_td_was_short)
trb_comp_code = COMP_SHORT_PACKET;
else
xhci_warn_ratelimited(xhci,
"WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
slot_id, ep_index);
break;
case COMP_SHORT_PACKET:
break;
/* Completion codes for endpoint stopped state */
case COMP_STOPPED:
xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
slot_id, ep_index);
break;
case COMP_STOPPED_LENGTH_INVALID:
xhci_dbg(xhci,
"Stopped on No-op or Link TRB for slot %u ep %u\n",
slot_id, ep_index);
break;
case COMP_STOPPED_SHORT_PACKET:
xhci_dbg(xhci,
"Stopped with short packet transfer detected for slot %u ep %u\n",
slot_id, ep_index);
break;
/* Completion codes for endpoint halted state */
case COMP_STALL_ERROR:
xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
ep_index);
status = -EPIPE;
break;
case COMP_SPLIT_TRANSACTION_ERROR:
xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
slot_id, ep_index);
status = -EPROTO;
break;
case COMP_USB_TRANSACTION_ERROR:
xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
slot_id, ep_index);
status = -EPROTO;
break;
case COMP_BABBLE_DETECTED_ERROR:
xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
slot_id, ep_index);
status = -EOVERFLOW;
break;
/* Completion codes for endpoint error state */
case COMP_TRB_ERROR:
xhci_warn(xhci,
"WARN: TRB error for slot %u ep %u on endpoint\n",
slot_id, ep_index);
status = -EILSEQ;
break;
/* completion codes not indicating endpoint state change */
case COMP_DATA_BUFFER_ERROR:
xhci_warn(xhci,
"WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
slot_id, ep_index);
status = -ENOSR;
break;
case COMP_BANDWIDTH_OVERRUN_ERROR:
xhci_warn(xhci,
"WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
slot_id, ep_index);
break;
case COMP_ISOCH_BUFFER_OVERRUN:
xhci_warn(xhci,
"WARN: buffer overrun event for slot %u ep %u on endpoint",
slot_id, ep_index);
break;
case COMP_RING_UNDERRUN:
/*
* When the Isoch ring is empty, the xHC will generate
* a Ring Overrun Event for an IN Isoch endpoint or a Ring
* Underrun Event for an OUT Isoch endpoint.
*/
xhci_dbg(xhci, "underrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
"still with TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "overrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
"still with TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
case COMP_MISSED_SERVICE_ERROR:
/*
* When we encounter a missed service error, one or more isoc
* TDs may have been missed by the xHC.
* Set the skip flag of the endpoint; complete the missed TDs
* as short transfers the next time we process the ring.
*/
ep->skip = true;
xhci_dbg(xhci,
"Miss service interval error for slot %u ep %u, set skip flag\n",
slot_id, ep_index);
goto cleanup;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
"No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
slot_id, ep_index);
goto cleanup;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
/* needs disable slot command to recover */
xhci_warn(xhci,
"WARN: detect an incompatible device for slot %u ep %u",
slot_id, ep_index);
status = -EPROTO;
break;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
xhci_warn(xhci,
"ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
trb_comp_code, slot_id, ep_index);
goto cleanup;
}
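/*
* Map the event back to a TD. This is a loop because when ep->skip is
* set we may have to give back several missed isoc TDs before reaching
* the TD the event actually points at.
*/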
do {
/* This TRB should be in the TD at the head of this ring's
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
/*
* Don't print warnings if this is due to a stopped endpoint
* generating an extra completion event while the device
* was suspended, or an event for the last TRB of a
* short TD we already got a short event for.
* The short TD is already removed from the TD list.
*/
if (!(trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
ep_ring->last_td_was_short)) {
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
}
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
slot_id, ep_index);
}
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code)) {
xhci_handle_halted_endpoint(xhci, ep, NULL,
EP_HARD_RESET);
}
goto cleanup;
}
/* We've skipped all the TDs on the ep ring when ep->skip set */
if (ep->skip && td_num == 0) {
ep->skip = false;
xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
slot_id, ep_index);
goto cleanup;
}
td = list_first_entry(&ep_ring->td_list, struct xhci_td,
td_list);
if (ep->skip)
td_num--;
/* Is this a TRB in the currently executing TD? */
ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, ep_trb_dma, false);
/*
* Skip the Force Stopped Event. The event_trb(event_dma) of an FSE
* is not in the current TD pointed to by ep_ring->dequeue because
* the hardware dequeue pointer is still at the previous TRB
* of the current TD. The previous TRB may be a Link TRB or the
* last TRB of the previous TD. The command completion handler
* will take care of the rest.
*/
if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
goto cleanup;
}
if (!ep_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* Some host controllers give a spurious
* successful event after a short transfer.
* Ignore it.
*/
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
goto cleanup;
}
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
trb_in_td(xhci, ep_ring->deq_seg,
ep_ring->dequeue, td->last_trb,
ep_trb_dma, true);
return -ESHUTDOWN;
}
skip_isoc_td(xhci, td, ep, status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
ep_ring->last_td_was_short = true;
else
ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci,
"Found td. Clear skip flag for slot %u ep %u.\n",
slot_id, ep_index);
ep->skip = false;
}
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring,
(struct xhci_generic_trb *) ep_trb);
/*
* No-op TRB could trigger interrupts in a case where
* a URB was killed and a STALL_ERROR happens right
* after the endpoint ring stopped. Reset the halted
* endpoint. Otherwise, the endpoint remains stalled
* indefinitely.
*/
if (trb_is_noop(ep_trb)) {
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code))
xhci_handle_halted_endpoint(xhci, ep, td,
EP_HARD_RESET);
goto cleanup;
}
td->status = status;
/* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
else
process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
cleanup:
handling_skipped_tds = ep->skip &&
trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
/*
* Do not update event ring dequeue pointer if we're in a loop
* processing missed tds.
*/
if (!handling_skipped_tds)
inc_deq(xhci, ir->event_ring);
/*
* If ep->skip is set, there are missed TDs on the
* endpoint ring that we need to take care of.
* Process them as short transfers until we reach the TD
* pointed to by the event.
*/
} while (handling_skipped_tds);
return 0;
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
ir->event_ring->deq_seg,
ir->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
return -ENODEV;
}
/*
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
union xhci_trb *event;
int update_ptrs = 1;
u32 trb_type;
int ret;
/* Event ring hasn't been allocated yet. */
if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
xhci_err(xhci, "ERROR interrupter not ready\n");
return -ENOMEM;
}
event = ir->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
ir->event_ring->cycle_state)
return 0;
trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
* speculative reads of the event's flags/data below.
*/
rmb();
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
/* FIXME: Handle more event types. */
switch (trb_type) {
case TRB_COMPLETION:
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_PORT_STATUS:
handle_port_status(xhci, ir, event);
update_ptrs = 0;
break;
case TRB_TRANSFER:
ret = handle_tx_event(xhci, ir, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
case TRB_DEV_NOTE:
handle_device_notification(xhci, event);
break;
default:
if (trb_type >= TRB_VENDOR_DEFINED_LOW)
handle_vendor_event(xhci, event, trb_type);
else
xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI host dying, returning from "
"event handler.\n");
return 0;
}
if (update_ptrs)
/* Update SW event ring dequeue pointer */
inc_deq(xhci, ir->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
*/
return 1;
}
/*
* Update Event Ring Dequeue Pointer:
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
union xhci_trb *event_ring_deq)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != ir->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
ir->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
/*
* Per 4.9.4, Software writes to the ERDP register shall
* always advance the Event Ring Dequeue Pointer value.
*/
if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
((u64) deq & (u64) ~ERST_PTR_MASK))
return;
/* Update HC event ring dequeue pointer */
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C) */
temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
*/
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
union xhci_trb *event_ring_deq;
struct xhci_interrupter *ir;
irqreturn_t ret = IRQ_NONE;
u64 temp_64;
u32 status;
int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
ret = IRQ_HANDLED;
goto out;
}
if (!(status & STS_EINT))
goto out;
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
goto out;
}
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
ret = IRQ_HANDLED;
goto out;
}
/*
* Clear the op reg interrupt status first,
* so we can receive interrupts from other MSI-X interrupters.
* Write 1 to clear the interrupt status.
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
ir = xhci->interrupter;
if (!hcd->msi_enabled) {
u32 irq_pending;
irq_pending = readl(&ir->ir_set->irq_pending);
irq_pending |= IMAN_IP;
writel(irq_pending, &ir->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING ||
xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
"Shouldn't IRQs be disabled?\n");
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
xhci_write_64(xhci, temp_64 | ERST_EHB,
&ir->ir_set->erst_dequeue);
ret = IRQ_HANDLED;
goto out;
}
event_ring_deq = ir->event_ring->dequeue;
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
while (xhci_handle_event(xhci, ir) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
event_ring_deq = ir->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
event_loop = 0;
}
xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
ret = IRQ_HANDLED;
out:
spin_unlock(&xhci->lock);
return ret;
}
irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
return xhci_irq(hcd);
}
EXPORT_SYMBOL_GPL(xhci_msi_irq);
/**** Endpoint Ring Operations ****/
/*
* Generic function for queueing a TRB on a ring.
* The caller must have checked to make sure there's room on the ring.
*
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
trb = &ring->enqueue->generic;
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
/* make sure TRB is fully written before giving it to the controller */
wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
inc_enq(xhci, ring, more_trbs_coming);
}
/*
* Does various checks on the endpoint ring and makes it ready to queue num_trbs;
* expands the ring if it starts to become full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int link_trb_count = 0;
unsigned int new_segs = 0;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
case EP_STATE_DISABLED:
/*
* USB core changed config/interfaces without notifying us,
* or hardware is reporting the wrong state.
*/
xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
return -ENOENT;
case EP_STATE_ERROR:
xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
/* FIXME event handling code for error needs to clear it */
/* XXX not sure if this should be -ENOENT or not */
return -EINVAL;
case EP_STATE_HALTED:
xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
break;
case EP_STATE_STOPPED:
case EP_STATE_RUNNING:
break;
default:
xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
/*
* FIXME issue Configure Endpoint command to try to get the HC
* back into a known state.
*/
return -EINVAL;
}
if (ep_ring != xhci->cmd_ring) {
new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
} else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
xhci_err(xhci, "Do not support expand command ring\n");
return -ENOMEM;
}
if (new_segs) {
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ERROR no room on ep ring, try ring expansion");
if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
xhci_err(xhci, "Ring expansion failed\n");
return -ENOMEM;
}
}
while (trb_is_link(ep_ring->enqueue)) {
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
if (!xhci_link_trb_quirk(xhci) &&
!(ep_ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
ep_ring->enqueue->link.control &=
cpu_to_le32(~TRB_CHAIN);
else
ep_ring->enqueue->link.control |=
cpu_to_le32(TRB_CHAIN);
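/* Finish the chain-bit update before flipping the cycle bit,
* which hands this link TRB over to the controller.
*/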
wmb();
ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (link_trb_toggles_cycle(ep_ring->enqueue))
ep_ring->cycle_state ^= 1;
ep_ring->enq_seg = ep_ring->enq_seg->next;
ep_ring->enqueue = ep_ring->enq_seg->trbs;
/* prevent infinite loop if all first trbs are link trbs */
if (link_trb_count++ > ep_ring->num_segs) {
xhci_warn(xhci, "Ring is an endless link TRB loop\n");
return -EINVAL;
}
}
if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
return -EINVAL;
}
return 0;
}
static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_virt_device *xdev,
unsigned int ep_index,
unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
gfp_t mem_flags)
{
int ret;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
stream_id);
if (!ep_ring) {
xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
stream_id);
return -EINVAL;
}
ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
urb_priv = urb->hcpriv;
td = &urb_priv->td[td_index];
INIT_LIST_HEAD(&td->td_list);
INIT_LIST_HEAD(&td->cancelled_td_list);
if (td_index == 0) {
ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
if (unlikely(ret))
return ret;
}
td->urb = urb;
/* Add this TD to the tail of the endpoint ring's TD list */
list_add_tail(&td->td_list, &ep_ring->td_list);
td->start_seg = ep_ring->enq_seg;
td->first_trb = ep_ring->enqueue;
return 0;
}
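/*
* A single TRB buffer must not cross a TRB_MAX_BUFF_SIZE (64 KB) boundary,
* so the TRB count depends on the buffer's offset within its 64 KB block:
* for example, 4 bytes starting 2 bytes before a boundary need 2 TRBs.
* A zero-length transfer still needs one TRB.
*/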
unsigned int count_trbs(u64 addr, u64 len)
{
unsigned int num_trbs;
num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
TRB_MAX_BUFF_SIZE);
if (num_trbs == 0)
num_trbs++;
return num_trbs;
}
static inline unsigned int count_trbs_needed(struct urb *urb)
{
return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}
static unsigned int count_sg_trbs_needed(struct urb *urb)
{
struct scatterlist *sg;
unsigned int i, len, full_len, num_trbs = 0;
full_len = urb->transfer_buffer_length;
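/* The mapped sg list may cover more than the URB transfer length;
* stop counting once full_len bytes are accounted for.
*/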
for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
len = sg_dma_len(sg);
num_trbs += count_trbs(sg_dma_address(sg), len);
len = min_t(unsigned int, len, full_len);
full_len -= len;
if (full_len == 0)
break;
}
return num_trbs;
}
static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
u64 addr, len;
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
len = urb->iso_frame_desc[i].length;
return count_trbs(addr, len);
}
static void check_trb_math(struct urb *urb, int running_total)
{
if (unlikely(running_total != urb->transfer_buffer_length))
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
running_total, running_total,
urb->transfer_buffer_length,
urb->transfer_buffer_length);
}
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id, int start_cycle,
struct xhci_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
if (start_cycle)
start_trb->field[3] |= cpu_to_le32(start_cycle);
else
start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
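/*
* Detect a mismatch between the URB's polling interval and the interval
* programmed into the xHC endpoint context, log it (ratelimited), and fix
* up urb->interval to match the hardware.
*/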
static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
struct xhci_ep_ctx *ep_ctx)
{
int xhci_interval;
int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
dev_dbg_ratelimited(&urb->dev->dev,
"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
ep_interval, ep_interval == 1 ? "" : "s",
xhci_interval, xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
}
/*
* xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
* endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
* (comprised of sg list entries) can take several service intervals to
* transmit.
*/
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ep_ctx *ep_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
check_interval(xhci, urb, ep_ctx);
return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
* For xHCI 1.0 host controllers, TD size is the number of max packet sized
* packets remaining in the TD (*not* including this TRB).
*
* Total TD packet count = total_packet_count =
* DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
*
* Packets transferred up to and including this TRB = packets_transferred =
* rounddown(total bytes transferred including this TRB / wMaxPacketSize)
*
* TD size = total_packet_count - packets_transferred
*
* For xHCI 0.96 and older, TD size field should be the remaining bytes
* including this TRB, right shifted by 10
*
* For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
* This is taken care of in the TRB_TD_SIZE() macro
*
* The last TRB in a TD must have the TD size set to zero.
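*
* Example: an 8192-byte TD on an endpoint with wMaxPacketSize = 512 has
* total_packet_count = 16. The packets transferred up to and including
* the first 1024-byte TRB are 2, so that TRB's TD size field is 14.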
*/
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
int trb_buff_len, unsigned int td_total_len,
struct urb *urb, bool more_trbs_coming)
{
u32 maxp, total_packet_count;
/* MTK xHCI 0.96 contains some features from 1.0 */
if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
return ((td_total_len - transferred) >> 10);
/* One TRB with a zero-length data packet. */
if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len)
return 0;
/* for MTK xHCI 0.96, the TD size includes this TRB, but not in 1.x */
if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
trb_buff_len = 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
/* Queueing functions don't count the current TRB into transferred */
return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
u32 *trb_buff_len, struct xhci_segment *seg)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
size_t len;
max_pkt = usb_endpoint_maxp(&urb->ep->desc);
unalign = (enqd_len + *trb_buff_len) % max_pkt;
/* we got lucky, last normal TRB data on segment is packet aligned */
if (unalign == 0)
return 0;
xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
unalign, *trb_buff_len);
/* can the last normal TRB be aligned by splitting it? */
if (*trb_buff_len > unalign) {
*trb_buff_len -= unalign;
xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
return 0;
}
/*
* We want enqd_len + trb_buff_len to sum to a number that is
* divisible by the endpoint's wMaxPacketSize. IOW:
* (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
*/
new_buff_len = max_pkt - (enqd_len % max_pkt);
if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
new_buff_len = (urb->transfer_buffer_length - enqd_len);
/* copy the tail of the TD into a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
if (usb_urb_dir_out(urb)) {
if (urb->num_sgs) {
len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
seg->bounce_buf, new_buff_len, enqd_len);
if (len != new_buff_len)
xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
len, new_buff_len);
} else {
memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
}
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_FROM_DEVICE);
}
if (dma_mapping_error(dev, seg->bounce_dma)) {
/* try without aligning. Some host controllers survive */
xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
return 0;
}
*trb_buff_len = new_buff_len;
seg->bounce_len = new_buff_len;
seg->bounce_offs = enqd_len;
xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
return 1;
}
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct xhci_generic_trb *start_trb;
struct scatterlist *sg = NULL;
bool more_trbs_coming = true;
bool need_zero_pkt = false;
bool first_trb = true;
unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
unsigned int enqd_len, block_len, trb_buff_len, full_len;
int sent_len, ret;
u32 field, length_field, remainder;
u64 addr, send_addr;
ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ring)
return -EINVAL;
full_len = urb->transfer_buffer_length;
/* If we have scatter/gather list, we use it. */
if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
num_sgs = urb->num_mapped_sgs;
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
block_len = sg_dma_len(sg);
num_trbs = count_sg_trbs_needed(urb);
} else {
num_trbs = count_trbs_needed(urb);
addr = (u64) urb->transfer_dma;
block_len = full_len;
}
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (unlikely(ret < 0))
return ret;
urb_priv = urb->hcpriv;
/* Deal with URB_ZERO_PACKET - need one more td/trb */
if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
need_zero_pkt = true;
td = &urb_priv->td[0];
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ring->enqueue->generic;
start_cycle = ring->cycle_state;
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
for (enqd_len = 0; first_trb || enqd_len < full_len;
enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
/* TRB buffer should not cross 64KB boundaries */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb) {
first_trb = false;
if (start_cycle == 0)
field |= TRB_CYCLE;
} else
field |= ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
if (enqd_len + trb_buff_len < full_len) {
field |= TRB_CHAIN;
if (trb_is_link(ring->enqueue + 1)) {
if (xhci_align_td(xhci, urb, enqd_len,
&trb_buff_len,
ring->enq_seg)) {
send_addr = ring->enq_seg->bounce_dma;
/* assuming TD won't span 2 segs */
td->bounce_seg = ring->enq_seg;
}
}
}
if (enqd_len + trb_buff_len >= full_len) {
field &= ~TRB_CHAIN;
field |= TRB_IOC;
more_trbs_coming = false;
td->last_trb = ring->enqueue;
td->last_trb_seg = ring->enq_seg;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
le64_to_cpus(&send_addr);
field |= TRB_IDT;
}
}
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
full_len, urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
lower_32_bits(send_addr),
upper_32_bits(send_addr),
length_field,
field);
td->num_trbs++;
addr += trb_buff_len;
sent_len = trb_buff_len;
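/* Move to the next sg entry once this TRB has consumed the rest of
* the current one.
*/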
while (sg && sent_len >= block_len) {
/* New sg entry */
--num_sgs;
sent_len -= block_len;
sg = sg_next(sg);
if (num_sgs != 0 && sg) {
block_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
addr += sent_len;
}
}
block_len -= sent_len;
send_addr = addr;
}
if (need_zero_pkt) {
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
urb_priv->td[1].last_trb = ring->enqueue;
urb_priv->td[1].last_trb_seg = ring->enq_seg;
field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
urb_priv->td[1].num_trbs++;
}
check_trb_math(urb, enqd_len);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
}
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
int num_trbs;
int ret;
struct usb_ctrlrequest *setup;
struct xhci_generic_trb *start_trb;
int start_cycle;
u32 field;
struct urb_priv *urb_priv;
struct xhci_td *td;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep_ring)
return -EINVAL;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
* DMA address.
*/
if (!urb->setup_packet)
return -EINVAL;
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
* Don't need to check if we need additional event data and normal TRBs,
* since data in control transfers will never get bigger than 16MB
* XXX: can we get a buffer that crosses 64KB boundaries?
*/
if (urb->transfer_buffer_length > 0)
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
urb_priv = urb->hcpriv;
td = &urb_priv->td[0];
td->num_trbs = num_trbs;
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
/* Queue setup TRB - see section 6.4.1.2.1 */
/* FIXME better way to translate setup_packet into two u32 fields? */
setup = (struct usb_ctrlrequest *) urb->setup_packet;
field = 0;
field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
if (start_cycle == 0)
field |= 0x1;
/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
else
field |= TRB_TX_TYPE(TRB_DATA_OUT);
}
}
queue_trb(xhci, ep_ring, true,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
/* Immediate data in pointer */
field);
/* If there's data, queue data TRBs */
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field = TRB_ISP | TRB_TYPE(TRB_DATA);
else
field = TRB_TYPE(TRB_DATA);
if (urb->transfer_buffer_length > 0) {
u32 length_field, remainder;
u64 addr;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&addr, urb->transfer_buffer,
urb->transfer_buffer_length);
le64_to_cpus(&addr);
field |= TRB_IDT;
} else {
addr = (u64) urb->transfer_dma;
}
remainder = xhci_td_remainder(xhci, 0,
urb->transfer_buffer_length,
urb->transfer_buffer_length,
urb, 1);
length_field = TRB_LEN(urb->transfer_buffer_length) |
TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
queue_trb(xhci, ep_ring, true,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
field | ep_ring->cycle_state);
}
/* Save the DMA address of the last TRB in the TD */
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
field = 0;
else
field = TRB_DIR_IN;
queue_trb(xhci, ep_ring, false,
0,
0,
TRB_INTR_TARGET(0),
/* Event on completion */
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
giveback_first_trb(xhci, slot_id, ep_index, 0,
start_cycle, start_trb);
return 0;
}
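/*
 * Worked example (editor's illustration): for a standard GET_DESCRIPTOR
 * (Device) request, bRequestType = 0x80, bRequest = 0x06, wValue = 0x0100,
 * wIndex = 0x0000 and wLength = 0x0012, so the setup TRB above packs
 * field1 = 0x80 | 0x06 << 8 | 0x0100 << 16 = 0x01000680 and
 * field2 = 0x0000 | 0x0012 << 16 = 0x00120000, with TRB_IDT marking the
 * 8 setup bytes as immediate data.
 */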
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
* devices can burst up to bMaxBurst number of packets per service interval.
* This field is zero based, meaning a value of zero in the field means one
* burst. Basically, for everything but SuperSpeed devices, this field will be
* zero. Only xHCI 1.0 host controllers support this field.
*/
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
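/*
 * Worked example (editor's illustration): a SuperSpeed endpoint with
 * bMaxBurst = 3 moves up to 4 packets per burst. For a TD with
 * total_packet_count = 10, DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three bursts
 * expressed in the zero-based TBC field.
 */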
/*
* Returns the number of packets in the last "burst" of packets. This field is
* valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
* the last burst packet count is equal to the total number of packets in the
* TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
* must contain (bMaxBurst + 1) number of packets, but the last burst can
* contain 1 to (bMaxBurst + 1) packets.
*/
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
unsigned int residue;
if (xhci->hci_version < 0x100)
return 0;
if (urb->dev->speed >= USB_SPEED_SUPER) {
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
residue = total_packet_count % (max_burst + 1);
/* If residue is zero, the last burst contains (max_burst + 1)
* number of packets, but the TLBPC field is zero-based.
*/
if (residue == 0)
return max_burst;
return residue - 1;
}
if (total_packet_count == 0)
return 0;
return total_packet_count - 1;
}
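/*
 * Worked example (editor's illustration): with bMaxBurst = 3 and
 * total_packet_count = 10, residue = 10 % 4 = 2, so TLBPC = 1 (two packets
 * in the last burst, zero-based). With total_packet_count = 8 the residue
 * is 0 and TLBPC = max_burst = 3, i.e. a full last burst of four packets.
 */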
/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies
 * the target frame on which the Interval associated with this Isochronous
 * Transfer Descriptor will start. Refer to 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame ID on success, negative value on error.
*/
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
struct urb *urb, int index)
{
int start_frame, ist, ret = 0;
int start_frame_id, end_frame_id, current_frame_id;
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
start_frame = urb->start_frame + index * urb->interval;
else
start_frame = (urb->start_frame + index * urb->interval) >> 3;
/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
*
* If bit [3] of IST is cleared to '0', software can add a TRB no
* later than IST[2:0] Microframes before that TRB is scheduled to
* be executed.
* If bit [3] of IST is set to '1', software can add a TRB no later
* than IST[2:0] Frames before that TRB is scheduled to be executed.
*/
ist = HCS_IST(xhci->hcs_params2) & 0x7;
if (HCS_IST(xhci->hcs_params2) & (1 << 3))
ist <<= 3;
/* Software shall not schedule an Isoch TD with a Frame ID value that
* is less than the Start Frame ID or greater than the End Frame ID,
* where:
*
* End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
* Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
*
* Both the End Frame ID and Start Frame ID values are calculated
* in microframes. When software determines the valid Frame ID value;
* The End Frame ID value should be rounded down to the nearest Frame
* boundary, and the Start Frame ID value should be rounded up to the
* nearest Frame boundary.
*/
current_frame_id = readl(&xhci->run_regs->microframe_index);
start_frame_id = roundup(current_frame_id + ist + 1, 8);
end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
start_frame &= 0x7ff;
start_frame_id = (start_frame_id >> 3) & 0x7ff;
end_frame_id = (end_frame_id >> 3) & 0x7ff;
xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
__func__, index, readl(&xhci->run_regs->microframe_index),
start_frame_id, end_frame_id, start_frame);
if (start_frame_id < end_frame_id) {
if (start_frame > end_frame_id ||
start_frame < start_frame_id)
ret = -EINVAL;
} else if (start_frame_id > end_frame_id) {
if ((start_frame > end_frame_id &&
start_frame < start_frame_id))
ret = -EINVAL;
} else {
ret = -EINVAL;
}
if (index == 0) {
if (ret == -EINVAL || start_frame == start_frame_id) {
start_frame = start_frame_id + 1;
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->start_frame = start_frame;
else
urb->start_frame = start_frame << 3;
ret = 0;
}
}
if (ret) {
xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
start_frame, current_frame_id, index,
start_frame_id, end_frame_id);
xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
return ret;
}
return start_frame;
}
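/*
 * Worked example (editor's illustration, hypothetical register value): if
 * HCS_IST(hcs_params2) reads 0b1010, bit [3] is set and IST[2:0] = 2, so
 * ist = 2 << 3 = 16 microframes (2 frames); software must queue an isoc
 * TRB at least that far ahead of its scheduled (micro)frame.
 */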
/* Check if we should generate event interrupt for a TD in an isoc URB */
static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
{
if (xhci->hci_version < 0x100)
return false;
/* always generate an event interrupt for the last TD */
if (i == num_tds - 1)
return false;
/*
 * If AVOID_BEI is set, the host handles full event rings poorly;
 * generate an event at least every 8th TD to clear the event ring.
*/
if (i && xhci->quirks & XHCI_AVOID_BEI)
return !!(i % xhci->isoc_bei_interval);
return true;
}
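/*
 * Worked example (editor's illustration): with XHCI_AVOID_BEI set and
 * isoc_bei_interval = 8, TDs with i = 8, 16, 24, ... return false here
 * (i % 8 == 0), so they do not get TRB_BEI and generate an event that
 * drains the event ring; the other intermediate TDs are blocked.
 */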
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
int num_tds, trbs_per_td;
struct xhci_generic_trb *start_trb;
bool first_trb;
int start_cycle;
u32 field, length_field;
int running_total, trb_buff_len, td_len, td_remain_len, ret;
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
struct xhci_virt_ep *xep;
int frame_id;
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_tds = urb->number_of_packets;
if (num_tds < 1) {
xhci_dbg(xhci, "Isoc URB with zero packets?\n");
return -EINVAL;
}
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
urb_priv = urb->hcpriv;
/* Queue the TRBs for each TD, even if they are zero-length */
for (i = 0; i < num_tds; i++) {
unsigned int total_pkt_count, max_pkt;
unsigned int burst_count, last_burst_pkt_count;
u32 sia_frame_id;
first_trb = true;
running_total = 0;
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
max_pkt = usb_endpoint_maxp(&urb->ep->desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
/* A zero-length transfer still involves at least one packet. */
if (total_pkt_count == 0)
total_pkt_count++;
burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
urb, total_pkt_count);
trbs_per_td = count_isoc_trbs_needed(urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
goto cleanup;
}
td = &urb_priv->td[i];
td->num_trbs = trbs_per_td;
/* use SIA as default, if frame id is used overwrite it */
sia_frame_id = TRB_SIA;
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
HCC_CFC(xhci->hcc_params)) {
frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
if (frame_id >= 0)
sia_frame_id = TRB_FRAME_ID(frame_id);
}
/*
* Set isoc specific data for the first TRB in a TD.
* Prevent HW from getting the TRBs by keeping the cycle state
 * inverted in the first TD's isoc TRB.
*/
field = TRB_TYPE(TRB_ISOC) |
TRB_TLBPC(last_burst_pkt_count) |
sia_frame_id |
(i ? ep_ring->cycle_state : !start_cycle);
/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
if (!xep->use_extended_tbc)
field |= TRB_TBC(burst_count);
/* fill the rest of the TRB fields, and remaining normal TRBs */
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
/* only first TRB is isoc, overwrite otherwise */
if (!first_trb)
field = TRB_TYPE(TRB_NORMAL) |
ep_ring->cycle_state;
/* Only set interrupt on short packet for IN EPs */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the chain bit for all except the last TRB */
if (j < trbs_per_td - 1) {
more_trbs_coming = true;
field |= TRB_CHAIN;
} else {
more_trbs_coming = false;
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
if (trb_block_event_intr(xhci, num_tds, i))
field |= TRB_BEI;
}
/* Calculate TRB length */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
remainder = xhci_td_remainder(xhci, running_total,
trb_buff_len, td_len,
urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_INTR_TARGET(0);
/* xhci 1.1 with ETE uses TD Size field for TBC */
if (first_trb && xep->use_extended_tbc)
length_field |= TRB_TD_SIZE_TBC(burst_count);
else
length_field |= TRB_TD_SIZE(remainder);
first_trb = false;
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
field);
running_total += trb_buff_len;
addr += trb_buff_len;
td_remain_len -= trb_buff_len;
}
/* Check TD length */
if (running_total != td_len) {
xhci_err(xhci, "ISOC TD length unmatch\n");
ret = -EINVAL;
goto cleanup;
}
}
/* store the next frame id */
if (HCC_CFC(xhci->hcc_params))
xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
}
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
cleanup:
/* Clean up a partially enqueued isoc transfer. */
for (i--; i >= 0; i--)
list_del_init(&urb_priv->td[i].td_list);
/* Use the first TD as a temporary variable to turn the TDs we've queued
* into No-ops with a software-owned cycle bit. That way the hardware
* won't accidentally start executing bogus TDs when we partially
* overwrite them. td->first_trb and td->start_seg are already set.
*/
urb_priv->td[0].last_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
ep_ring->enqueue = urb_priv->td[0].first_trb;
ep_ring->enq_seg = urb_priv->td[0].start_seg;
ep_ring->cycle_state = start_cycle;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
return ret;
}
/*
* Check transfer ring to guarantee there is enough room for the urb.
* Update ISO URB start_frame and interval.
* Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
* update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
* Contiguous Frame ID is not supported by HC.
*/
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_virt_device *xdev;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx;
int start_frame;
int num_tds, num_trbs, i;
int ret;
struct xhci_virt_ep *xep;
int ist;
xdev = xhci->devs[slot_id];
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xdev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
num_trbs = 0;
num_tds = urb->number_of_packets;
for (i = 0; i < num_tds; i++)
num_trbs += count_isoc_trbs_needed(urb, i);
/* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed.
*/
ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
/*
* Check interval value. This should be done before we start to
* calculate the start frame value.
*/
check_interval(xhci, urb, ep_ctx);
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
urb->start_frame = xep->next_frame_id;
goto skip_start_over;
}
}
start_frame = readl(&xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
/*
 * Round up to the next frame and account for the time before the TRB
 * really gets scheduled by the hardware.
*/
ist = HCS_IST(xhci->hcs_params2) & 0x7;
if (HCS_IST(xhci->hcs_params2) & (1 << 3))
ist <<= 3;
start_frame += ist + XHCI_CFC_DELAY;
start_frame = roundup(start_frame, 8);
/*
* Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
 * is greater than 8 microframes.
*/
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL) {
start_frame = roundup(start_frame, urb->interval << 3);
urb->start_frame = start_frame >> 3;
} else {
start_frame = roundup(start_frame, urb->interval);
urb->start_frame = start_frame;
}
skip_start_over:
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/**** Command Ring Operations ****/
/* Generic function for queueing a command TRB on the command ring.
* Check to make sure there's room on the command ring for one command TRB.
* Also check that there's room reserved for commands that must not fail.
* If this is a command that must not fail, meaning command_must_succeed = TRUE,
* then only check for the number of reserved spots.
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2,
u32 field3, u32 field4, bool command_must_succeed)
{
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
if ((xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
if (!command_must_succeed)
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
reserved_trbs, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
xhci_err(xhci, "ERR: Reserved TRB counting for "
"unfailable commands failed.\n");
return ret;
}
cmd->command_trb = xhci->cmd_ring->enqueue;
/* if there are no other commands queued we start the timeout timer */
if (list_empty(&xhci->cmd_list)) {
xhci->current_cmd = cmd;
xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
}
list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
field4 | xhci->cmd_ring->cycle_state);
return 0;
}
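/*
 * Worked example (editor's illustration): if cmd_ring_reserved_trbs = 1,
 * an ordinary command asks prepare_ring() for room for 2 TRBs (its own
 * plus the reserved slot), while a command_must_succeed command asks for
 * only 1 and may therefore consume the reserved slot itself.
 */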
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 trb_type, u32 slot_id)
{
return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}
int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4)
{
return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id)
{
return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/*
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index,
enum xhci_ep_reset_type reset_type)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
if (reset_type == EP_SOFT_RESET)
type |= TRB_TSP;
return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type, false);
}
| linux-master | drivers/usb/host/xhci-ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ST OHCI driver
*
* Copyright (C) 2014 STMicroelectronics – All Rights Reserved
*
* Author: Peter Griffin <[email protected]>
*
* Derived from ohci-platform.c
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#define USB_MAX_CLKS 3
struct st_ohci_platform_priv {
struct clk *clks[USB_MAX_CLKS];
struct clk *clk48;
struct reset_control *rst;
struct reset_control *pwr;
struct phy *phy;
};
#define DRIVER_DESC "OHCI STMicroelectronics driver"
#define hcd_to_ohci_priv(h) \
((struct st_ohci_platform_priv *)hcd_to_ohci(h)->priv)
static int st_ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk, ret;
ret = reset_control_deassert(priv->pwr);
if (ret)
return ret;
ret = reset_control_deassert(priv->rst);
if (ret)
goto err_assert_power;
/*
 * Some SoCs don't have a dedicated 48MHz clock, but those that do
 * need the rate to be explicitly set.
 */
if (priv->clk48) {
ret = clk_set_rate(priv->clk48, 48000000);
if (ret)
goto err_assert_reset;
}
for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
if (ret)
goto err_disable_clks;
}
ret = phy_init(priv->phy);
if (ret)
goto err_disable_clks;
ret = phy_power_on(priv->phy);
if (ret)
goto err_exit_phy;
return 0;
err_exit_phy:
phy_exit(priv->phy);
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
err_assert_reset:
reset_control_assert(priv->rst);
err_assert_power:
reset_control_assert(priv->pwr);
return ret;
}
static void st_ohci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk;
reset_control_assert(priv->pwr);
reset_control_assert(priv->rst);
phy_power_off(priv->phy);
phy_exit(priv->phy);
for (clk = USB_MAX_CLKS - 1; clk >= 0; clk--)
if (priv->clks[clk])
clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ohci_platform_hc_driver;
static const struct ohci_driver_overrides platform_overrides __initconst = {
.product_desc = "ST OHCI controller",
.extra_priv_size = sizeof(struct st_ohci_platform_priv),
};
static struct usb_ohci_pdata ohci_platform_defaults = {
.power_on = st_ohci_platform_power_on,
.power_suspend = st_ohci_platform_power_off,
.power_off = st_ohci_platform_power_off,
};
static int st_ohci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ohci_pdata *pdata = &ohci_platform_defaults;
struct st_ohci_platform_priv *priv;
int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(dev, hcd);
dev->dev.platform_data = pdata;
priv = hcd_to_ohci_priv(hcd);
priv->phy = devm_phy_get(&dev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
goto err_put_hcd;
}
for (clk = 0; clk < USB_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
err = PTR_ERR(priv->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->clks[clk] = NULL;
break;
}
}
/*
 * Some SoCs don't have a dedicated 48MHz clock, but those that do
 * need the rate to be explicitly set.
 */
priv->clk48 = devm_clk_get(&dev->dev, "clk48");
if (IS_ERR(priv->clk48)) {
dev_info(&dev->dev, "48MHz clk not found\n");
priv->clk48 = NULL;
}
priv->pwr =
devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
goto err_put_clks;
}
priv->rst =
devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
goto err_put_clks;
}
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
goto err_power;
}
hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
err_put_hcd:
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
usb_put_hcd(hcd);
return err;
}
static void st_ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
usb_put_hcd(hcd);
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
}
#ifdef CONFIG_PM_SLEEP
static int st_ohci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
if (pdata->power_suspend)
pdata->power_suspend(pdev);
return ret;
}
static int st_ohci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
int err;
if (pdata->power_on) {
err = pdata->power_on(pdev);
if (err < 0)
return err;
}
ohci_resume(hcd, false);
return 0;
}
static SIMPLE_DEV_PM_OPS(st_ohci_pm_ops, st_ohci_suspend, st_ohci_resume);
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id st_ohci_platform_ids[] = {
{ .compatible = "st,st-ohci-300x", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_ohci_platform_ids);
static struct platform_driver ohci_platform_driver = {
.probe = st_ohci_platform_probe,
.remove_new = st_ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ohci",
#ifdef CONFIG_PM_SLEEP
.pm = &st_ohci_pm_ops,
#endif
.of_match_table = st_ohci_platform_ids,
}
};
static int __init ohci_platform_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
module_init(ohci_platform_init);
static void __exit ohci_platform_cleanup(void)
{
platform_driver_unregister(&ohci_platform_driver);
}
module_exit(ohci_platform_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Peter Griffin <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ohci-st.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NVIDIA Tegra xHCI host controller driver
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2014 Google, Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#include <linux/usb/role.h>
#include <soc/tegra/pmc.h>
#include "xhci.h"
#define TEGRA_XHCI_SS_HIGH_SPEED 120000000
#define TEGRA_XHCI_SS_LOW_SPEED 12000000
/* FPCI CFG registers */
#define XUSB_CFG_1 0x004
#define XUSB_IO_SPACE_EN BIT(0)
#define XUSB_MEM_SPACE_EN BIT(1)
#define XUSB_BUS_MASTER_EN BIT(2)
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
#define XUSB_CFG_7 0x01c
#define XUSB_BASE2_ADDR_SHIFT 16
#define XUSB_BASE2_ADDR_MASK 0xffff
#define XUSB_CFG_16 0x040
#define XUSB_CFG_24 0x060
#define XUSB_CFG_AXI_CFG 0x0f8
#define XUSB_CFG_ARU_C11_CSBRANGE 0x41c
#define XUSB_CFG_ARU_CONTEXT 0x43c
#define XUSB_CFG_ARU_CONTEXT_HS_PLS 0x478
#define XUSB_CFG_ARU_CONTEXT_FS_PLS 0x47c
#define XUSB_CFG_ARU_CONTEXT_HSFS_SPEED 0x480
#define XUSB_CFG_ARU_CONTEXT_HSFS_PP 0x484
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
/* XUSB_CFG_ARU_MBOX_CMD */
#define MBOX_DEST_FALC BIT(27)
#define MBOX_DEST_PME BIT(28)
#define MBOX_DEST_SMI BIT(29)
#define MBOX_DEST_XHCI BIT(30)
#define MBOX_INT_EN BIT(31)
/* XUSB_CFG_ARU_MBOX_DATA_IN and XUSB_CFG_ARU_MBOX_DATA_OUT */
#define CMD_DATA_SHIFT 0
#define CMD_DATA_MASK 0xffffff
#define CMD_TYPE_SHIFT 24
#define CMD_TYPE_MASK 0xff
/* XUSB_CFG_ARU_MBOX_OWNER */
#define MBOX_OWNER_NONE 0
#define MBOX_OWNER_FW 1
#define MBOX_OWNER_SW 2
#define XUSB_CFG_ARU_SMI_INTR 0x428
#define MBOX_SMI_INTR_FW_HANG BIT(1)
#define MBOX_SMI_INTR_EN BIT(3)
/* BAR2 registers */
#define XUSB_BAR2_ARU_MBOX_CMD 0x004
#define XUSB_BAR2_ARU_MBOX_DATA_IN 0x008
#define XUSB_BAR2_ARU_MBOX_DATA_OUT 0x00c
#define XUSB_BAR2_ARU_MBOX_OWNER 0x010
#define XUSB_BAR2_ARU_SMI_INTR 0x014
#define XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0 0x01c
#define XUSB_BAR2_ARU_IFRDMA_CFG0 0x0e0
#define XUSB_BAR2_ARU_IFRDMA_CFG1 0x0e4
#define XUSB_BAR2_ARU_IFRDMA_STREAMID_FIELD 0x0e8
#define XUSB_BAR2_ARU_C11_CSBRANGE 0x9c
#define XUSB_BAR2_ARU_FW_SCRATCH 0x1000
#define XUSB_BAR2_CSB_BASE_ADDR 0x2000
/* IPFS registers */
#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
#define IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0 0x0c8
#define IPFS_XUSB_HOST_MSI_VEC0_0 0x100
#define IPFS_XUSB_HOST_MSI_EN_VEC0_0 0x140
#define IPFS_XUSB_HOST_CONFIGURATION_0 0x180
#define IPFS_EN_FPCI BIT(0)
#define IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0 0x184
#define IPFS_XUSB_HOST_INTR_MASK_0 0x188
#define IPFS_IP_INT_MASK BIT(16)
#define IPFS_XUSB_HOST_INTR_ENABLE_0 0x198
#define IPFS_XUSB_HOST_UFPCI_CONFIG_0 0x19c
#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0 0x1bc
#define IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0 0x1dc
#define CSB_PAGE_SELECT_MASK 0x7fffff
#define CSB_PAGE_SELECT_SHIFT 9
#define CSB_PAGE_OFFSET_MASK 0x1ff
#define CSB_PAGE_SELECT(addr) ((addr) >> (CSB_PAGE_SELECT_SHIFT) & \
CSB_PAGE_SELECT_MASK)
#define CSB_PAGE_OFFSET(addr) ((addr) & CSB_PAGE_OFFSET_MASK)
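/*
 * Worked example (editor's illustration): for the CSB address 0x101a00
 * (XUSB_CSB_MP_ILOAD_ATTR), CSB_PAGE_SELECT(0x101a00) = 0x101a00 >> 9 =
 * 0x80d and CSB_PAGE_OFFSET(0x101a00) = 0x101a00 & 0x1ff = 0x000; the
 * accessors below first program the page into the CSBRANGE register and
 * then read or write at the page offset.
 */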
/* Falcon CSB registers */
#define XUSB_FALC_CPUCTL 0x100
#define CPUCTL_STARTCPU BIT(1)
#define CPUCTL_STATE_HALTED BIT(4)
#define CPUCTL_STATE_STOPPED BIT(5)
#define XUSB_FALC_BOOTVEC 0x104
#define XUSB_FALC_DMACTL 0x10c
#define XUSB_FALC_IMFILLRNG1 0x154
#define IMFILLRNG1_TAG_MASK 0xffff
#define IMFILLRNG1_TAG_LO_SHIFT 0
#define IMFILLRNG1_TAG_HI_SHIFT 16
#define XUSB_FALC_IMFILLCTL 0x158
/* CSB ARU registers */
#define XUSB_CSB_ARU_SCRATCH0 0x100100
/* MP CSB registers */
#define XUSB_CSB_MP_ILOAD_ATTR 0x101a00
#define XUSB_CSB_MP_ILOAD_BASE_LO 0x101a04
#define XUSB_CSB_MP_ILOAD_BASE_HI 0x101a08
#define XUSB_CSB_MP_L2IMEMOP_SIZE 0x101a10
#define L2IMEMOP_SIZE_SRC_OFFSET_SHIFT 8
#define L2IMEMOP_SIZE_SRC_OFFSET_MASK 0x3ff
#define L2IMEMOP_SIZE_SRC_COUNT_SHIFT 24
#define L2IMEMOP_SIZE_SRC_COUNT_MASK 0xff
#define XUSB_CSB_MP_L2IMEMOP_TRIG 0x101a14
#define L2IMEMOP_ACTION_SHIFT 24
#define L2IMEMOP_INVALIDATE_ALL (0x40 << L2IMEMOP_ACTION_SHIFT)
#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << L2IMEMOP_ACTION_SHIFT)
#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101a18
#define L2IMEMOP_RESULT_VLD BIT(31)
#define XUSB_CSB_MP_APMAP 0x10181c
#define APMAP_BOOTPATH BIT(31)
#define IMEM_BLOCK_SIZE 256
#define FW_IOCTL_TYPE_SHIFT 24
#define FW_IOCTL_CFGTBL_READ 17
struct tegra_xusb_fw_header {
__le32 boot_loadaddr_in_imem;
__le32 boot_codedfi_offset;
__le32 boot_codetag;
__le32 boot_codesize;
__le32 phys_memaddr;
__le16 reqphys_memsize;
__le16 alloc_phys_memsize;
__le32 rodata_img_offset;
__le32 rodata_section_start;
__le32 rodata_section_end;
__le32 main_fnaddr;
__le32 fwimg_cksum;
__le32 fwimg_created_time;
__le32 imem_resident_start;
__le32 imem_resident_end;
__le32 idirect_start;
__le32 idirect_end;
__le32 l2_imem_start;
__le32 l2_imem_end;
__le32 version_id;
u8 init_ddirect;
u8 reserved[3];
__le32 phys_addr_log_buffer;
__le32 total_log_entries;
__le32 dequeue_ptr;
__le32 dummy_var[2];
__le32 fwimg_len;
u8 magic[8];
__le32 ss_low_power_entry_timeout;
u8 num_hsic_port;
u8 padding[139]; /* Pad to 256 bytes */
};
struct tegra_xusb_phy_type {
const char *name;
unsigned int num;
};
struct tegra_xusb_mbox_regs {
u16 cmd;
u16 data_in;
u16 data_out;
u16 owner;
u16 smi_intr;
};
struct tegra_xusb_context_soc {
struct {
const unsigned int *offsets;
unsigned int num_offsets;
} ipfs;
struct {
const unsigned int *offsets;
unsigned int num_offsets;
} fpci;
};
struct tegra_xusb;
struct tegra_xusb_soc_ops {
u32 (*mbox_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
void (*mbox_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
u32 (*csb_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
void (*csb_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
};
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
unsigned int num_supplies;
const struct tegra_xusb_phy_type *phy_types;
unsigned int num_types;
const struct tegra_xusb_context_soc *context;
struct {
struct {
unsigned int offset;
unsigned int count;
} usb2, ulpi, hsic, usb3;
} ports;
struct tegra_xusb_mbox_regs mbox;
const struct tegra_xusb_soc_ops *ops;
bool scale_ss_clock;
bool has_ipfs;
bool lpm_support;
bool otg_reset_sspi;
bool has_bar2;
};
struct tegra_xusb_context {
u32 *ipfs;
u32 *fpci;
};
struct tegra_xusb {
struct device *dev;
void __iomem *regs;
struct usb_hcd *hcd;
struct mutex lock;
int xhci_irq;
int mbox_irq;
int padctl_irq;
void __iomem *ipfs_base;
void __iomem *fpci_base;
void __iomem *bar2_base;
struct resource *bar2;
const struct tegra_xusb_soc *soc;
struct regulator_bulk_data *supplies;
struct tegra_xusb_padctl *padctl;
struct clk *host_clk;
struct clk *falcon_clk;
struct clk *ss_clk;
struct clk *ss_src_clk;
struct clk *hs_src_clk;
struct clk *fs_src_clk;
struct clk *pll_u_480m;
struct clk *clk_m;
struct clk *pll_e;
struct reset_control *host_rst;
struct reset_control *ss_rst;
struct device *genpd_dev_host;
struct device *genpd_dev_ss;
bool use_genpd;
struct phy **phys;
unsigned int num_phys;
struct usb_phy **usbphy;
unsigned int num_usb_phys;
int otg_usb2_port;
int otg_usb3_port;
bool host_mode;
struct notifier_block id_nb;
struct work_struct id_work;
/* Firmware loading related */
struct {
size_t size;
void *virt;
dma_addr_t phys;
} fw;
bool suspended;
struct tegra_xusb_context context;
u8 lp0_utmi_pad_mask;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
static inline u32 fpci_readl(struct tegra_xusb *tegra, unsigned int offset)
{
return readl(tegra->fpci_base + offset);
}
static inline void fpci_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
writel(value, tegra->fpci_base + offset);
}
static inline u32 ipfs_readl(struct tegra_xusb *tegra, unsigned int offset)
{
return readl(tegra->ipfs_base + offset);
}
static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
writel(value, tegra->ipfs_base + offset);
}
static inline u32 bar2_readl(struct tegra_xusb *tegra, unsigned int offset)
{
return readl(tegra->bar2_base + offset);
}
static inline void bar2_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
writel(value, tegra->bar2_base + offset);
}
static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
{
const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
return ops->csb_reg_readl(tegra, offset);
}
static void csb_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
ops->csb_reg_writel(tegra, value, offset);
}
static u32 fpci_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
static void fpci_csb_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
static u32 bar2_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
return bar2_readl(tegra, XUSB_BAR2_CSB_BASE_ADDR + ofs);
}
static void bar2_csb_writel(struct tegra_xusb *tegra, u32 value,
unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
bar2_writel(tegra, value, XUSB_BAR2_CSB_BASE_ADDR + ofs);
}
static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
unsigned long rate)
{
unsigned long new_parent_rate, old_parent_rate;
struct clk *clk = tegra->ss_src_clk;
unsigned int div;
int err;
if (clk_get_rate(clk) == rate)
return 0;
switch (rate) {
case TEGRA_XHCI_SS_HIGH_SPEED:
/*
* Reparent to PLLU_480M. Set divider first to avoid
* overclocking.
*/
old_parent_rate = clk_get_rate(clk_get_parent(clk));
new_parent_rate = clk_get_rate(tegra->pll_u_480m);
div = new_parent_rate / rate;
err = clk_set_rate(clk, old_parent_rate / div);
if (err)
return err;
err = clk_set_parent(clk, tegra->pll_u_480m);
if (err)
return err;
/*
* The rate should already be correct, but set it again just
* to be sure.
*/
err = clk_set_rate(clk, rate);
if (err)
return err;
break;
case TEGRA_XHCI_SS_LOW_SPEED:
/* Reparent to CLK_M */
err = clk_set_parent(clk, tegra->clk_m);
if (err)
return err;
err = clk_set_rate(clk, rate);
if (err)
return err;
break;
default:
dev_err(tegra->dev, "Invalid SS rate: %lu Hz\n", rate);
return -EINVAL;
}
if (clk_get_rate(clk) != rate) {
dev_err(tegra->dev, "SS clock doesn't match requested rate\n");
return -EINVAL;
}
return 0;
}
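/*
 * Worked example (editor's illustration, hypothetical rates): reparenting
 * to PLLU_480M at 480 MHz for rate = TEGRA_XHCI_SS_HIGH_SPEED (120 MHz)
 * gives div = 480000000 / 120000000 = 4; the divider is applied against
 * the old parent rate first so the child never transiently runs faster
 * than its target while the parent switch happens.
 */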
static unsigned long extract_field(u32 value, unsigned int start,
unsigned int count)
{
return (value >> start) & ((1 << count) - 1);
}
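/*
 * Editor's illustration: extract_field(0x00f0, 4, 4) masks off 4 bits
 * starting at bit 4 and returns 0xf.
 */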
/* Command requests from the firmware */
enum tegra_xusb_mbox_cmd {
MBOX_CMD_MSG_ENABLED = 1,
MBOX_CMD_INC_FALC_CLOCK,
MBOX_CMD_DEC_FALC_CLOCK,
MBOX_CMD_INC_SSPI_CLOCK,
MBOX_CMD_DEC_SSPI_CLOCK,
MBOX_CMD_SET_BW, /* no ACK/NAK required */
MBOX_CMD_SET_SS_PWR_GATING,
MBOX_CMD_SET_SS_PWR_UNGATING,
MBOX_CMD_SAVE_DFE_CTLE_CTX,
MBOX_CMD_AIRPLANE_MODE_ENABLED, /* unused */
MBOX_CMD_AIRPLANE_MODE_DISABLED, /* unused */
MBOX_CMD_START_HSIC_IDLE,
MBOX_CMD_STOP_HSIC_IDLE,
MBOX_CMD_DBC_WAKE_STACK, /* unused */
MBOX_CMD_HSIC_PRETEND_CONNECT,
MBOX_CMD_RESET_SSPI,
MBOX_CMD_DISABLE_SS_LFPS_DETECTION,
MBOX_CMD_ENABLE_SS_LFPS_DETECTION,
MBOX_CMD_MAX,
/* Response message to above commands */
MBOX_CMD_ACK = 128,
MBOX_CMD_NAK
};
struct tegra_xusb_mbox_msg {
u32 cmd;
u32 data;
};
static inline u32 tegra_xusb_mbox_pack(const struct tegra_xusb_mbox_msg *msg)
{
return (msg->cmd & CMD_TYPE_MASK) << CMD_TYPE_SHIFT |
(msg->data & CMD_DATA_MASK) << CMD_DATA_SHIFT;
}
static inline void tegra_xusb_mbox_unpack(struct tegra_xusb_mbox_msg *msg,
u32 value)
{
msg->cmd = (value >> CMD_TYPE_SHIFT) & CMD_TYPE_MASK;
msg->data = (value >> CMD_DATA_SHIFT) & CMD_DATA_MASK;
}
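/*
 * Worked example (editor's illustration, hypothetical values): packing
 * msg.cmd = MBOX_CMD_INC_FALC_CLOCK (2) with msg.data = 204000 (kHz)
 * yields (2 << 24) | 0x31ce0 = 0x02031ce0; tegra_xusb_mbox_unpack()
 * recovers the same cmd/data pair from that register value.
 */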
static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
{
switch (cmd) {
case MBOX_CMD_SET_BW:
case MBOX_CMD_ACK:
case MBOX_CMD_NAK:
return false;
default:
return true;
}
}
static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
const struct tegra_xusb_mbox_msg *msg)
{
const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
bool wait_for_idle = false;
u32 value;
/*
* Acquire the mailbox. The firmware still owns the mailbox for
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
ops->mbox_reg_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
}
wait_for_idle = true;
}
value = tegra_xusb_mbox_pack(msg);
ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.data_in);
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
usleep_range(10, 20);
}
if (time_after(jiffies, timeout))
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
}
return 0;
}
static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
{
struct tegra_xusb *tegra = data;
const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
u32 value;
/* clear mailbox interrupts */
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.smi_intr);
ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.smi_intr);
if (value & MBOX_SMI_INTR_FW_HANG)
dev_err(tegra->dev, "controller firmware hang\n");
return IRQ_WAKE_THREAD;
}
static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
const struct tegra_xusb_mbox_msg *msg)
{
struct tegra_xusb_padctl *padctl = tegra->padctl;
const struct tegra_xusb_soc *soc = tegra->soc;
struct device *dev = tegra->dev;
struct tegra_xusb_mbox_msg rsp;
unsigned long mask;
unsigned int port;
bool idle, enable;
int err = 0;
memset(&rsp, 0, sizeof(rsp));
switch (msg->cmd) {
case MBOX_CMD_INC_FALC_CLOCK:
case MBOX_CMD_DEC_FALC_CLOCK:
rsp.data = clk_get_rate(tegra->falcon_clk) / 1000;
if (rsp.data != msg->data)
rsp.cmd = MBOX_CMD_NAK;
else
rsp.cmd = MBOX_CMD_ACK;
break;
case MBOX_CMD_INC_SSPI_CLOCK:
case MBOX_CMD_DEC_SSPI_CLOCK:
if (tegra->soc->scale_ss_clock) {
err = tegra_xusb_set_ss_clk(tegra, msg->data * 1000);
if (err < 0)
rsp.cmd = MBOX_CMD_NAK;
else
rsp.cmd = MBOX_CMD_ACK;
rsp.data = clk_get_rate(tegra->ss_src_clk) / 1000;
} else {
rsp.cmd = MBOX_CMD_ACK;
rsp.data = msg->data;
}
break;
case MBOX_CMD_SET_BW:
/*
* TODO: Request bandwidth once EMC scaling is supported.
* Ignore for now since ACK/NAK is not required for SET_BW
* messages.
*/
break;
case MBOX_CMD_SAVE_DFE_CTLE_CTX:
err = tegra_xusb_padctl_usb3_save_context(padctl, msg->data);
if (err < 0) {
dev_err(dev, "failed to save context for USB3#%u: %d\n",
msg->data, err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
}
rsp.data = msg->data;
break;
case MBOX_CMD_START_HSIC_IDLE:
case MBOX_CMD_STOP_HSIC_IDLE:
if (msg->cmd == MBOX_CMD_STOP_HSIC_IDLE)
idle = false;
else
idle = true;
mask = extract_field(msg->data, 1 + soc->ports.hsic.offset,
soc->ports.hsic.count);
for_each_set_bit(port, &mask, 32) {
err = tegra_xusb_padctl_hsic_set_idle(padctl, port,
idle);
if (err < 0)
break;
}
if (err < 0) {
dev_err(dev, "failed to set HSIC#%u %s: %d\n", port,
idle ? "idle" : "busy", err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
}
rsp.data = msg->data;
break;
case MBOX_CMD_DISABLE_SS_LFPS_DETECTION:
case MBOX_CMD_ENABLE_SS_LFPS_DETECTION:
if (msg->cmd == MBOX_CMD_DISABLE_SS_LFPS_DETECTION)
enable = false;
else
enable = true;
mask = extract_field(msg->data, 1 + soc->ports.usb3.offset,
soc->ports.usb3.count);
for_each_set_bit(port, &mask, soc->ports.usb3.count) {
err = tegra_xusb_padctl_usb3_set_lfps_detect(padctl,
port,
enable);
if (err < 0)
break;
/*
* wait 500us for LFPS detector to be disabled before
* sending ACK
*/
if (!enable)
usleep_range(500, 1000);
}
if (err < 0) {
dev_err(dev,
"failed to %s LFPS detection on USB3#%u: %d\n",
enable ? "enable" : "disable", port, err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
}
rsp.data = msg->data;
break;
default:
dev_warn(dev, "unknown message: %#x\n", msg->cmd);
break;
}
if (rsp.cmd) {
const char *cmd = (rsp.cmd == MBOX_CMD_ACK) ? "ACK" : "NAK";
err = tegra_xusb_mbox_send(tegra, &rsp);
if (err < 0)
dev_err(dev, "failed to send %s: %d\n", cmd, err);
}
}
static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
{
struct tegra_xusb *tegra = data;
const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
struct tegra_xusb_mbox_msg msg;
u32 value;
mutex_lock(&tegra->lock);
if (pm_runtime_suspended(tegra->dev) || tegra->suspended)
goto out;
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
ops->mbox_reg_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
out:
mutex_unlock(&tegra->lock);
return IRQ_HANDLED;
}
static void tegra_xusb_config(struct tegra_xusb *tegra)
{
u32 regs = tegra->hcd->rsrc_start;
u32 value;
if (tegra->soc->has_ipfs) {
value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
value |= IPFS_EN_FPCI;
ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
usleep_range(10, 20);
}
/* Program BAR0 space */
value = fpci_readl(tegra, XUSB_CFG_4);
value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
/* Program BAR2 space */
if (tegra->bar2) {
value = fpci_readl(tegra, XUSB_CFG_7);
value &= ~(XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
value |= tegra->bar2->start &
(XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_7);
}
usleep_range(100, 200);
/* Enable bus master */
value = fpci_readl(tegra, XUSB_CFG_1);
value |= XUSB_IO_SPACE_EN | XUSB_MEM_SPACE_EN | XUSB_BUS_MASTER_EN;
fpci_writel(tegra, value, XUSB_CFG_1);
if (tegra->soc->has_ipfs) {
/* Enable interrupt assertion */
value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
value |= IPFS_IP_INT_MASK;
ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
/* Set hysteresis */
ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
}
}
static int tegra_xusb_clk_enable(struct tegra_xusb *tegra)
{
int err;
err = clk_prepare_enable(tegra->pll_e);
if (err < 0)
return err;
err = clk_prepare_enable(tegra->host_clk);
if (err < 0)
goto disable_plle;
err = clk_prepare_enable(tegra->ss_clk);
if (err < 0)
goto disable_host;
err = clk_prepare_enable(tegra->falcon_clk);
if (err < 0)
goto disable_ss;
err = clk_prepare_enable(tegra->fs_src_clk);
if (err < 0)
goto disable_falc;
err = clk_prepare_enable(tegra->hs_src_clk);
if (err < 0)
goto disable_fs_src;
if (tegra->soc->scale_ss_clock) {
err = tegra_xusb_set_ss_clk(tegra, TEGRA_XHCI_SS_HIGH_SPEED);
if (err < 0)
goto disable_hs_src;
}
return 0;
disable_hs_src:
clk_disable_unprepare(tegra->hs_src_clk);
disable_fs_src:
clk_disable_unprepare(tegra->fs_src_clk);
disable_falc:
clk_disable_unprepare(tegra->falcon_clk);
disable_ss:
clk_disable_unprepare(tegra->ss_clk);
disable_host:
clk_disable_unprepare(tegra->host_clk);
disable_plle:
clk_disable_unprepare(tegra->pll_e);
return err;
}
static void tegra_xusb_clk_disable(struct tegra_xusb *tegra)
{
clk_disable_unprepare(tegra->pll_e);
clk_disable_unprepare(tegra->host_clk);
clk_disable_unprepare(tegra->ss_clk);
clk_disable_unprepare(tegra->falcon_clk);
clk_disable_unprepare(tegra->fs_src_clk);
clk_disable_unprepare(tegra->hs_src_clk);
}
static int tegra_xusb_phy_enable(struct tegra_xusb *tegra)
{
unsigned int i;
int err;
for (i = 0; i < tegra->num_phys; i++) {
err = phy_init(tegra->phys[i]);
if (err)
goto disable_phy;
err = phy_power_on(tegra->phys[i]);
if (err) {
phy_exit(tegra->phys[i]);
goto disable_phy;
}
}
return 0;
disable_phy:
while (i--) {
phy_power_off(tegra->phys[i]);
phy_exit(tegra->phys[i]);
}
return err;
}
static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
{
unsigned int i;
for (i = 0; i < tegra->num_phys; i++) {
phy_power_off(tegra->phys[i]);
phy_exit(tegra->phys[i]);
}
}
#ifdef CONFIG_PM_SLEEP
static int tegra_xusb_init_context(struct tegra_xusb *tegra)
{
const struct tegra_xusb_context_soc *soc = tegra->soc->context;
tegra->context.ipfs = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
sizeof(u32), GFP_KERNEL);
if (!tegra->context.ipfs)
return -ENOMEM;
tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets,
sizeof(u32), GFP_KERNEL);
if (!tegra->context.fpci)
return -ENOMEM;
return 0;
}
#else
static inline int tegra_xusb_init_context(struct tegra_xusb *tegra)
{
return 0;
}
#endif
static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
{
struct tegra_xusb_fw_header *header;
const struct firmware *fw;
int err;
err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
if (err < 0) {
dev_err(tegra->dev, "failed to request firmware: %d\n", err);
return err;
}
/* Load Falcon controller with its firmware. */
header = (struct tegra_xusb_fw_header *)fw->data;
tegra->fw.size = le32_to_cpu(header->fwimg_len);
tegra->fw.virt = dma_alloc_coherent(tegra->dev, tegra->fw.size,
&tegra->fw.phys, GFP_KERNEL);
if (!tegra->fw.virt) {
dev_err(tegra->dev, "failed to allocate memory for firmware\n");
release_firmware(fw);
return -ENOMEM;
}
header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
release_firmware(fw);
return 0;
}
static int tegra_xusb_wait_for_falcon(struct tegra_xusb *tegra)
{
struct xhci_cap_regs __iomem *cap_regs;
struct xhci_op_regs __iomem *op_regs;
int ret;
u32 value;
cap_regs = tegra->regs;
op_regs = tegra->regs + HC_LENGTH(readl(&cap_regs->hc_capbase));
ret = readl_poll_timeout(&op_regs->status, value, !(value & STS_CNR), 1000, 200000);
if (ret)
dev_err(tegra->dev, "XHCI Controller not ready. Falcon state: 0x%x\n",
csb_readl(tegra, XUSB_FALC_CPUCTL));
return ret;
}
static int tegra_xusb_load_firmware_rom(struct tegra_xusb *tegra)
{
unsigned int code_tag_blocks, code_size_blocks, code_blocks;
struct tegra_xusb_fw_header *header;
struct device *dev = tegra->dev;
time64_t timestamp;
u64 address;
u32 value;
int err;
header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
csb_readl(tegra, XUSB_FALC_CPUCTL));
return 0;
}
/* Program the size of DFI into ILOAD_ATTR. */
csb_writel(tegra, tegra->fw.size, XUSB_CSB_MP_ILOAD_ATTR);
/*
* Boot code of the firmware reads the ILOAD_BASE registers
* to get to the start of the DFI in system memory.
*/
address = tegra->fw.phys + sizeof(*header);
csb_writel(tegra, address >> 32, XUSB_CSB_MP_ILOAD_BASE_HI);
csb_writel(tegra, address, XUSB_CSB_MP_ILOAD_BASE_LO);
/* Set BOOTPATH to 1 in APMAP. */
csb_writel(tegra, APMAP_BOOTPATH, XUSB_CSB_MP_APMAP);
/* Invalidate L2IMEM. */
csb_writel(tegra, L2IMEMOP_INVALIDATE_ALL, XUSB_CSB_MP_L2IMEMOP_TRIG);
/*
* Initiate fetch of bootcode from system memory into L2IMEM.
* Program bootcode location and size in system memory.
*/
code_tag_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codetag),
IMEM_BLOCK_SIZE);
code_size_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codesize),
IMEM_BLOCK_SIZE);
code_blocks = code_tag_blocks + code_size_blocks;
value = ((code_tag_blocks & L2IMEMOP_SIZE_SRC_OFFSET_MASK) <<
L2IMEMOP_SIZE_SRC_OFFSET_SHIFT) |
((code_size_blocks & L2IMEMOP_SIZE_SRC_COUNT_MASK) <<
L2IMEMOP_SIZE_SRC_COUNT_SHIFT);
csb_writel(tegra, value, XUSB_CSB_MP_L2IMEMOP_SIZE);
/* Trigger L2IMEM load operation. */
csb_writel(tegra, L2IMEMOP_LOAD_LOCKED_RESULT,
XUSB_CSB_MP_L2IMEMOP_TRIG);
/* Setup Falcon auto-fill. */
csb_writel(tegra, code_size_blocks, XUSB_FALC_IMFILLCTL);
value = ((code_tag_blocks & IMFILLRNG1_TAG_MASK) <<
IMFILLRNG1_TAG_LO_SHIFT) |
((code_blocks & IMFILLRNG1_TAG_MASK) <<
IMFILLRNG1_TAG_HI_SHIFT);
csb_writel(tegra, value, XUSB_FALC_IMFILLRNG1);
csb_writel(tegra, 0, XUSB_FALC_DMACTL);
/* wait for RESULT_VLD to get set */
#define tegra_csb_readl(offset) csb_readl(tegra, offset)
err = readx_poll_timeout(tegra_csb_readl,
XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT, value,
value & L2IMEMOP_RESULT_VLD, 100, 10000);
if (err < 0) {
dev_err(dev, "DMA controller not ready %#010x\n", value);
return err;
}
#undef tegra_csb_readl
csb_writel(tegra, le32_to_cpu(header->boot_codetag),
XUSB_FALC_BOOTVEC);
/* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
if (tegra_xusb_wait_for_falcon(tegra))
return -EIO;
timestamp = le32_to_cpu(header->fwimg_created_time);
dev_info(dev, "Firmware timestamp: %ptTs UTC\n", ×tamp);
return 0;
}
static u32 tegra_xusb_read_firmware_header(struct tegra_xusb *tegra, u32 offset)
{
/*
 * We only accept reading the firmware config table.
 * The offset should not exceed the fw header structure.
*/
if (offset >= sizeof(struct tegra_xusb_fw_header))
return 0;
bar2_writel(tegra, (FW_IOCTL_CFGTBL_READ << FW_IOCTL_TYPE_SHIFT) | offset,
XUSB_BAR2_ARU_FW_SCRATCH);
return bar2_readl(tegra, XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0);
}
static int tegra_xusb_init_ifr_firmware(struct tegra_xusb *tegra)
{
time64_t timestamp;
if (tegra_xusb_wait_for_falcon(tegra))
return -EIO;
#define offsetof_32(X, Y) ((u8)(offsetof(X, Y) / sizeof(__le32)))
timestamp = tegra_xusb_read_firmware_header(tegra, offsetof_32(struct tegra_xusb_fw_header,
fwimg_created_time) << 2);
dev_info(tegra->dev, "Firmware timestamp: %ptTs UTC\n", ×tamp);
return 0;
}
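/*
 * Worked example (editor's illustration): assuming the natural packing of
 * struct tegra_xusb_fw_header, fwimg_created_time sits at byte offset 44,
 * so offsetof_32() yields 44 / 4 = 11 and the << 2 above converts the
 * dword index back to the byte offset 44 handed to the firmware IOCTL.
 */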
static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
{
if (!tegra->soc->firmware)
return tegra_xusb_init_ifr_firmware(tegra);
else
return tegra_xusb_load_firmware_rom(tegra);
}
static void tegra_xusb_powerdomain_remove(struct device *dev,
struct tegra_xusb *tegra)
{
if (!tegra->use_genpd)
return;
if (!IS_ERR_OR_NULL(tegra->genpd_dev_ss))
dev_pm_domain_detach(tegra->genpd_dev_ss, true);
if (!IS_ERR_OR_NULL(tegra->genpd_dev_host))
dev_pm_domain_detach(tegra->genpd_dev_host, true);
}
static int tegra_xusb_powerdomain_init(struct device *dev,
struct tegra_xusb *tegra)
{
int err;
tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
if (IS_ERR(tegra->genpd_dev_host)) {
err = PTR_ERR(tegra->genpd_dev_host);
dev_err(dev, "failed to get host pm-domain: %d\n", err);
return err;
}
tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
if (IS_ERR(tegra->genpd_dev_ss)) {
err = PTR_ERR(tegra->genpd_dev_ss);
dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
return err;
}
tegra->use_genpd = true;
return 0;
}
static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra)
{
struct device *dev = tegra->dev;
int rc;
if (tegra->use_genpd) {
rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB SS partition\n");
return rc;
}
rc = pm_runtime_resume_and_get(tegra->genpd_dev_host);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB Host partition\n");
pm_runtime_put_sync(tegra->genpd_dev_ss);
return rc;
}
} else {
rc = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
tegra->ss_clk,
tegra->ss_rst);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB SS partition\n");
return rc;
}
rc = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
tegra->host_clk,
tegra->host_rst);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB Host partition\n");
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
return rc;
}
}
return 0;
}
static int tegra_xusb_powergate_partitions(struct tegra_xusb *tegra)
{
struct device *dev = tegra->dev;
int rc;
if (tegra->use_genpd) {
rc = pm_runtime_put_sync(tegra->genpd_dev_host);
if (rc < 0) {
dev_err(dev, "failed to disable XUSB Host partition\n");
return rc;
}
rc = pm_runtime_put_sync(tegra->genpd_dev_ss);
if (rc < 0) {
dev_err(dev, "failed to disable XUSB SS partition\n");
pm_runtime_get_sync(tegra->genpd_dev_host);
return rc;
}
} else {
rc = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
if (rc < 0) {
dev_err(dev, "failed to disable XUSB Host partition\n");
return rc;
}
rc = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
if (rc < 0) {
dev_err(dev, "failed to disable XUSB SS partition\n");
tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
tegra->host_clk,
tegra->host_rst);
return rc;
}
}
return 0;
}
static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
struct tegra_xusb_mbox_msg msg;
int err;
/* Enable firmware messages from controller. */
msg.cmd = MBOX_CMD_MSG_ENABLED;
msg.data = 0;
err = tegra_xusb_mbox_send(tegra, &msg);
if (err < 0)
dev_err(tegra->dev, "failed to enable messages: %d\n", err);
return err;
}
static irqreturn_t tegra_xusb_padctl_irq(int irq, void *data)
{
struct tegra_xusb *tegra = data;
mutex_lock(&tegra->lock);
if (tegra->suspended) {
mutex_unlock(&tegra->lock);
return IRQ_HANDLED;
}
mutex_unlock(&tegra->lock);
pm_runtime_resume(tegra->dev);
return IRQ_HANDLED;
}
static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
int err;
mutex_lock(&tegra->lock);
err = __tegra_xusb_enable_firmware_messages(tegra);
mutex_unlock(&tegra->lock);
return err;
}
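/*
* Set or clear PORT_POWER on the OTG port of the main (USB2) or shared
* (USB3) root hub, then poll GetPortStatus until the PP bit matches the
* request. Clearing power on the SS port can take considerably longer,
* hence the larger retry budget for that case.
*/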
static void tegra_xhci_set_port_power(struct tegra_xusb *tegra, bool main,
bool set)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct usb_hcd *hcd = main ? xhci->main_hcd : xhci->shared_hcd;
unsigned int wait = (!main && !set) ? 1000 : 10;
u16 typeReq = set ? SetPortFeature : ClearPortFeature;
u16 wIndex = main ? tegra->otg_usb2_port + 1 : tegra->otg_usb3_port + 1;
u32 status;
u32 stat_power = main ? USB_PORT_STAT_POWER : USB_SS_PORT_STAT_POWER;
u32 status_val = set ? stat_power : 0;
dev_dbg(tegra->dev, "%s():%s %s port power\n", __func__,
set ? "set" : "clear", main ? "HS" : "SS");
hcd->driver->hub_control(hcd, typeReq, USB_PORT_FEAT_POWER, wIndex,
NULL, 0);
do {
tegra_xhci_hc_driver.hub_control(hcd, GetPortStatus, 0, wIndex,
(char *) &status, sizeof(status));
if (status_val == (status & stat_power))
break;
if (!main && !set)
usleep_range(600, 700);
else
usleep_range(10, 20);
} while (--wait > 0);
if (status_val != (status & stat_power))
dev_info(tegra->dev, "failed to %s %s PP %d\n",
set ? "set" : "clear",
main ? "HS" : "SS", status);
}
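/*
* tegra->phys[] stores all PHYs flattened in soc->phy_types[] order, so a
* "usb2"/"usb3"/"hsic" PHY is found by skipping over the preceding type
* groups and indexing the port within the matching group.
*/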
static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name,
int port)
{
unsigned int i, phy_count = 0;
for (i = 0; i < tegra->soc->num_types; i++) {
if (!strncmp(tegra->soc->phy_types[i].name, name,
strlen(name)))
return tegra->phys[phy_count+port];
phy_count += tegra->soc->phy_types[i].num;
}
return NULL;
}
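/*
* Deferred OTG role change handler: switch the USB2 pad between host mode
* and none, look up the USB3 companion port, and toggle PORT_POWER on the
* OTG port(s) accordingly. SoCs with otg_reset_sspi additionally send a
* MBOX_CMD_RESET_SSPI for the port before re-enabling it.
*/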
static void tegra_xhci_id_work(struct work_struct *work)
{
struct tegra_xusb *tegra = container_of(work, struct tegra_xusb,
id_work);
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct tegra_xusb_mbox_msg msg;
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2",
tegra->otg_usb2_port);
u32 status;
int ret;
dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? "on" : "off");
mutex_lock(&tegra->lock);
if (tegra->host_mode)
phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_HOST);
else
phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
mutex_unlock(&tegra->lock);
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
if (tegra->soc->otg_reset_sspi) {
/* set PP=0 */
tegra_xhci_hc_driver.hub_control(
xhci->shared_hcd, GetPortStatus,
0, tegra->otg_usb3_port+1,
(char *) &status, sizeof(status));
if (status & USB_SS_PORT_STAT_POWER)
tegra_xhci_set_port_power(tegra, false,
false);
/* reset OTG port SSPI */
msg.cmd = MBOX_CMD_RESET_SSPI;
msg.data = tegra->otg_usb3_port+1;
ret = tegra_xusb_mbox_send(tegra, &msg);
if (ret < 0) {
dev_info(tegra->dev,
"failed to RESET_SSPI %d\n",
ret);
}
}
tegra_xhci_set_port_power(tegra, false, true);
}
tegra_xhci_set_port_power(tegra, true, true);
} else {
if (tegra->otg_usb3_port >= 0)
tegra_xhci_set_port_power(tegra, false, false);
tegra_xhci_set_port_power(tegra, true, false);
}
}
#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
static bool is_usb2_otg_phy(struct tegra_xusb *tegra, unsigned int index)
{
return (tegra->usbphy[index] != NULL);
}
static bool is_usb3_otg_phy(struct tegra_xusb *tegra, unsigned int index)
{
struct tegra_xusb_padctl *padctl = tegra->padctl;
unsigned int i;
int port;
for (i = 0; i < tegra->num_usb_phys; i++) {
if (is_usb2_otg_phy(tegra, i)) {
port = tegra_xusb_padctl_get_usb3_companion(padctl, i);
if ((port >= 0) && (index == (unsigned int)port))
return true;
}
}
return false;
}
static bool is_host_mode_phy(struct tegra_xusb *tegra, unsigned int phy_type, unsigned int index)
{
if (strcmp(tegra->soc->phy_types[phy_type].name, "hsic") == 0)
return true;
if (strcmp(tegra->soc->phy_types[phy_type].name, "usb2") == 0) {
if (is_usb2_otg_phy(tegra, index))
return ((index == tegra->otg_usb2_port) && tegra->host_mode);
else
return true;
}
if (strcmp(tegra->soc->phy_types[phy_type].name, "usb3") == 0) {
if (is_usb3_otg_phy(tegra, index))
return ((index == tegra->otg_usb3_port) && tegra->host_mode);
else
return true;
}
return false;
}
#endif
static int tegra_xusb_get_usb2_port(struct tegra_xusb *tegra,
struct usb_phy *usbphy)
{
unsigned int i;
for (i = 0; i < tegra->num_usb_phys; i++) {
if (tegra->usbphy[i] && usbphy == tegra->usbphy[i])
return i;
}
return -1;
}
static int tegra_xhci_id_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct tegra_xusb *tegra = container_of(nb, struct tegra_xusb,
id_nb);
struct usb_phy *usbphy = (struct usb_phy *)data;
dev_dbg(tegra->dev, "%s(): action is %d", __func__, usbphy->last_event);
if ((tegra->host_mode && usbphy->last_event == USB_EVENT_ID) ||
(!tegra->host_mode && usbphy->last_event != USB_EVENT_ID)) {
dev_dbg(tegra->dev, "Same role(%d) received. Ignore",
tegra->host_mode);
return NOTIFY_OK;
}
tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
schedule_work(&tegra->id_work);
return NOTIFY_OK;
}
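/*
* Look up an optional usb-phy for every USB2 PHY so that OTG ID-pin events
* can be delivered through the notifier; ports without a usb-phy simply
* never take part in role switching.
*/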
static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra)
{
unsigned int i;
tegra->usbphy = devm_kcalloc(tegra->dev, tegra->num_usb_phys,
sizeof(*tegra->usbphy), GFP_KERNEL);
if (!tegra->usbphy)
return -ENOMEM;
INIT_WORK(&tegra->id_work, tegra_xhci_id_work);
tegra->id_nb.notifier_call = tegra_xhci_id_notify;
tegra->otg_usb2_port = -EINVAL;
tegra->otg_usb3_port = -EINVAL;
for (i = 0; i < tegra->num_usb_phys; i++) {
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
if (!phy)
continue;
tegra->usbphy[i] = devm_usb_get_phy_by_node(tegra->dev,
phy->dev.of_node,
&tegra->id_nb);
if (!IS_ERR(tegra->usbphy[i])) {
dev_dbg(tegra->dev, "usbphy-%d registered", i);
otg_set_host(tegra->usbphy[i]->otg, &tegra->hcd->self);
} else {
/*
* usb-phy is optional, continue if it's not available.
*/
tegra->usbphy[i] = NULL;
}
}
return 0;
}
static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
{
unsigned int i;
cancel_work_sync(&tegra->id_work);
for (i = 0; i < tegra->num_usb_phys; i++)
if (tegra->usbphy[i])
otg_set_host(tegra->usbphy[i]->otg, NULL);
}
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb *tegra;
struct device_node *np;
struct resource *regs;
struct xhci_hcd *xhci;
unsigned int i, j, k;
struct phy *phy;
int err;
BUILD_BUG_ON(sizeof(struct tegra_xusb_fw_header) != 256);
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
tegra->soc = of_device_get_match_data(&pdev->dev);
mutex_init(&tegra->lock);
tegra->dev = &pdev->dev;
err = tegra_xusb_init_context(tegra);
if (err < 0)
return err;
tegra->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
tegra->fpci_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
if (tegra->soc->has_ipfs) {
tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
} else if (tegra->soc->has_bar2) {
tegra->bar2_base = devm_platform_get_and_ioremap_resource(pdev, 2, &tegra->bar2);
if (IS_ERR(tegra->bar2_base))
return PTR_ERR(tegra->bar2_base);
}
tegra->xhci_irq = platform_get_irq(pdev, 0);
if (tegra->xhci_irq < 0)
return tegra->xhci_irq;
tegra->mbox_irq = platform_get_irq(pdev, 1);
if (tegra->mbox_irq < 0)
return tegra->mbox_irq;
tegra->padctl = tegra_xusb_padctl_get(&pdev->dev);
if (IS_ERR(tegra->padctl))
return PTR_ERR(tegra->padctl);
np = of_parse_phandle(pdev->dev.of_node, "nvidia,xusb-padctl", 0);
if (!np) {
err = -ENODEV;
goto put_padctl;
}
tegra->padctl_irq = of_irq_get(np, 0);
if (tegra->padctl_irq == -EPROBE_DEFER) {
err = tegra->padctl_irq;
goto put_padctl;
} else if (tegra->padctl_irq <= 0) {
/* Older device trees don't have the padctl interrupt */
tegra->padctl_irq = 0;
dev_dbg(&pdev->dev,
"%pOF is missing an interrupt, disabling PM support\n", np);
}
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
if (IS_ERR(tegra->host_clk)) {
err = PTR_ERR(tegra->host_clk);
dev_err(&pdev->dev, "failed to get xusb_host: %d\n", err);
goto put_padctl;
}
tegra->falcon_clk = devm_clk_get(&pdev->dev, "xusb_falcon_src");
if (IS_ERR(tegra->falcon_clk)) {
err = PTR_ERR(tegra->falcon_clk);
dev_err(&pdev->dev, "failed to get xusb_falcon_src: %d\n", err);
goto put_padctl;
}
tegra->ss_clk = devm_clk_get(&pdev->dev, "xusb_ss");
if (IS_ERR(tegra->ss_clk)) {
err = PTR_ERR(tegra->ss_clk);
dev_err(&pdev->dev, "failed to get xusb_ss: %d\n", err);
goto put_padctl;
}
tegra->ss_src_clk = devm_clk_get(&pdev->dev, "xusb_ss_src");
if (IS_ERR(tegra->ss_src_clk)) {
err = PTR_ERR(tegra->ss_src_clk);
dev_err(&pdev->dev, "failed to get xusb_ss_src: %d\n", err);
goto put_padctl;
}
tegra->hs_src_clk = devm_clk_get(&pdev->dev, "xusb_hs_src");
if (IS_ERR(tegra->hs_src_clk)) {
err = PTR_ERR(tegra->hs_src_clk);
dev_err(&pdev->dev, "failed to get xusb_hs_src: %d\n", err);
goto put_padctl;
}
tegra->fs_src_clk = devm_clk_get(&pdev->dev, "xusb_fs_src");
if (IS_ERR(tegra->fs_src_clk)) {
err = PTR_ERR(tegra->fs_src_clk);
dev_err(&pdev->dev, "failed to get xusb_fs_src: %d\n", err);
goto put_padctl;
}
tegra->pll_u_480m = devm_clk_get(&pdev->dev, "pll_u_480m");
if (IS_ERR(tegra->pll_u_480m)) {
err = PTR_ERR(tegra->pll_u_480m);
dev_err(&pdev->dev, "failed to get pll_u_480m: %d\n", err);
goto put_padctl;
}
tegra->clk_m = devm_clk_get(&pdev->dev, "clk_m");
if (IS_ERR(tegra->clk_m)) {
err = PTR_ERR(tegra->clk_m);
dev_err(&pdev->dev, "failed to get clk_m: %d\n", err);
goto put_padctl;
}
tegra->pll_e = devm_clk_get(&pdev->dev, "pll_e");
if (IS_ERR(tegra->pll_e)) {
err = PTR_ERR(tegra->pll_e);
dev_err(&pdev->dev, "failed to get pll_e: %d\n", err);
goto put_padctl;
}
if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
err = PTR_ERR(tegra->host_rst);
dev_err(&pdev->dev,
"failed to get xusb_host reset: %d\n", err);
goto put_padctl;
}
tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
if (IS_ERR(tegra->ss_rst)) {
err = PTR_ERR(tegra->ss_rst);
dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n",
err);
goto put_padctl;
}
} else {
err = tegra_xusb_powerdomain_init(&pdev->dev, tegra);
if (err)
goto put_powerdomains;
}
tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies) {
err = -ENOMEM;
goto put_powerdomains;
}
regulator_bulk_set_supply_names(tegra->supplies,
tegra->soc->supply_names,
tegra->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_types; i++) {
if (!strncmp(tegra->soc->phy_types[i].name, "usb2", 4))
tegra->num_usb_phys = tegra->soc->phy_types[i].num;
tegra->num_phys += tegra->soc->phy_types[i].num;
}
tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
err = -ENOMEM;
goto put_powerdomains;
}
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
char prop[8];
for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
snprintf(prop, sizeof(prop), "%s-%d",
tegra->soc->phy_types[i].name, j);
phy = devm_phy_optional_get(&pdev->dev, prop);
if (IS_ERR(phy)) {
dev_err(&pdev->dev,
"failed to get PHY %s: %ld\n", prop,
PTR_ERR(phy));
err = PTR_ERR(phy);
goto put_powerdomains;
}
tegra->phys[k++] = phy;
}
}
tegra->hcd = usb_create_hcd(&tegra_xhci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!tegra->hcd) {
err = -ENOMEM;
goto put_powerdomains;
}
tegra->hcd->skip_phy_initialization = 1;
tegra->hcd->regs = tegra->regs;
tegra->hcd->rsrc_start = regs->start;
tegra->hcd->rsrc_len = resource_size(regs);
/*
* This must happen after usb_create_hcd(), because usb_create_hcd()
* will overwrite the drvdata of the device with the hcd it creates.
*/
platform_set_drvdata(pdev, tegra);
err = tegra_xusb_clk_enable(tegra);
if (err) {
dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
goto put_hcd;
}
err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
if (err) {
dev_err(tegra->dev, "failed to enable regulators: %d\n", err);
goto disable_clk;
}
err = tegra_xusb_phy_enable(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
goto disable_regulator;
}
/*
* The XUSB Falcon microcontroller can only address 40 bits, so set
* the DMA mask accordingly.
*/
err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
goto disable_phy;
}
if (tegra->soc->firmware) {
err = tegra_xusb_request_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev,
"failed to request firmware: %d\n", err);
goto disable_phy;
}
}
err = tegra_xusb_unpowergate_partitions(tegra);
if (err)
goto free_firmware;
tegra_xusb_config(tegra);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto powergate;
}
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
goto powergate;
}
device_wakeup_enable(tegra->hcd->self.controller);
xhci = hcd_to_xhci(tegra->hcd);
xhci->shared_hcd = usb_create_shared_hcd(&tegra_xhci_hc_driver,
&pdev->dev,
dev_name(&pdev->dev),
tegra->hcd);
if (!xhci->shared_hcd) {
dev_err(&pdev->dev, "failed to create shared HCD\n");
err = -ENOMEM;
goto remove_usb2;
}
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
err = usb_add_hcd(xhci->shared_hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add shared HCD: %d\n", err);
goto put_usb3;
}
err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
tegra_xusb_mbox_irq,
tegra_xusb_mbox_thread, 0,
dev_name(&pdev->dev), tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto remove_usb3;
}
if (tegra->padctl_irq) {
err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq,
NULL, tegra_xusb_padctl_irq,
IRQF_ONESHOT, dev_name(&pdev->dev),
tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
goto remove_usb3;
}
}
err = tegra_xusb_enable_firmware_messages(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
goto remove_usb3;
}
err = tegra_xusb_init_usb_phy(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to init USB PHY: %d\n", err);
goto remove_usb3;
}
/* Enable wake for both USB 2.0 and USB 3.0 roothubs */
device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
pm_runtime_use_autosuspend(tegra->dev);
pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
pm_runtime_mark_last_busy(tegra->dev);
pm_runtime_set_active(tegra->dev);
if (tegra->padctl_irq) {
device_init_wakeup(tegra->dev, true);
pm_runtime_enable(tegra->dev);
}
return 0;
remove_usb3:
usb_remove_hcd(xhci->shared_hcd);
put_usb3:
usb_put_hcd(xhci->shared_hcd);
remove_usb2:
usb_remove_hcd(tegra->hcd);
powergate:
tegra_xusb_powergate_partitions(tegra);
free_firmware:
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
disable_phy:
tegra_xusb_phy_disable(tegra);
disable_regulator:
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
put_hcd:
usb_put_hcd(tegra->hcd);
put_powerdomains:
tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
put_padctl:
of_node_put(np);
tegra_xusb_padctl_put(tegra->padctl);
return err;
}
static void tegra_xusb_disable(struct tegra_xusb *tegra)
{
tegra_xusb_powergate_partitions(tegra);
tegra_xusb_powerdomain_remove(tegra->dev, tegra);
tegra_xusb_phy_disable(tegra);
tegra_xusb_clk_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
}
static void tegra_xusb_remove(struct platform_device *pdev)
{
struct tegra_xusb *tegra = platform_get_drvdata(pdev);
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
tegra_xusb_deinit_usb_phy(tegra);
pm_runtime_get_sync(&pdev->dev);
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
xhci->shared_hcd = NULL;
usb_remove_hcd(tegra->hcd);
usb_put_hcd(tegra->hcd);
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
if (tegra->padctl_irq)
pm_runtime_disable(&pdev->dev);
pm_runtime_put(&pdev->dev);
tegra_xusb_disable(tegra);
tegra_xusb_padctl_put(tegra->padctl);
}
static void tegra_xusb_shutdown(struct platform_device *pdev)
{
struct tegra_xusb *tegra = platform_get_drvdata(pdev);
pm_runtime_get_sync(&pdev->dev);
disable_irq(tegra->xhci_irq);
xhci_shutdown(tegra->hcd);
tegra_xusb_disable(tegra);
}
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
{
struct device *dev = hub->hcd->self.controller;
bool status = true;
unsigned int i;
u32 value;
for (i = 0; i < hub->num_ports; i++) {
value = readl(hub->ports[i]->addr);
if ((value & PORT_PE) == 0)
continue;
if ((value & PORT_PLS_MASK) != XDEV_U3) {
dev_info(dev, "%u-%u isn't suspended: %#010x\n",
hub->hcd->self.busnum, i + 1, value);
status = false;
}
}
return status;
}
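/*
* ELPG may only be entered once every enabled root port is in U3, so give
* freshly suspended USB2 ports the 3 ms they need to reach U3 and then
* verify both root hubs under the xHCI lock.
*/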
static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct xhci_bus_state *bus_state = &xhci->usb2_rhub.bus_state;
unsigned long flags;
int err = 0;
if (bus_state->bus_suspended) {
/* xusb_hub_suspend() has just directed one or more USB2 port(s)
* to the U3 state; it takes 3 ms to enter U3.
*/
usleep_range(3000, 4000);
}
spin_lock_irqsave(&xhci->lock, flags);
if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
!xhci_hub_ports_suspended(&xhci->usb3_rhub))
err = -EBUSY;
spin_unlock_irqrestore(&xhci->lock, flags);
return err;
}
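/*
* The IPFS and FPCI register blocks are not retained across powergating;
* snapshot the SoC-specified offsets here and write them back in
* tegra_xusb_restore_context() on ELPG exit.
*/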
static void tegra_xusb_save_context(struct tegra_xusb *tegra)
{
const struct tegra_xusb_context_soc *soc = tegra->soc->context;
struct tegra_xusb_context *ctx = &tegra->context;
unsigned int i;
if (soc->ipfs.num_offsets > 0) {
for (i = 0; i < soc->ipfs.num_offsets; i++)
ctx->ipfs[i] = ipfs_readl(tegra, soc->ipfs.offsets[i]);
}
if (soc->fpci.num_offsets > 0) {
for (i = 0; i < soc->fpci.num_offsets; i++)
ctx->fpci[i] = fpci_readl(tegra, soc->fpci.offsets[i]);
}
}
static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
{
const struct tegra_xusb_context_soc *soc = tegra->soc->context;
struct tegra_xusb_context *ctx = &tegra->context;
unsigned int i;
if (soc->fpci.num_offsets > 0) {
for (i = 0; i < soc->fpci.num_offsets; i++)
fpci_writel(tegra, ctx->fpci[i], soc->fpci.offsets[i]);
}
if (soc->ipfs.num_offsets > 0) {
for (i = 0; i < soc->ipfs.num_offsets; i++)
ipfs_writel(tegra, ctx->ipfs[i], soc->ipfs.offsets[i]);
}
}
static enum usb_device_speed tegra_xhci_portsc_to_speed(struct tegra_xusb *tegra, u32 portsc)
{
if (DEV_LOWSPEED(portsc))
return USB_SPEED_LOW;
if (DEV_HIGHSPEED(portsc))
return USB_SPEED_HIGH;
if (DEV_FULLSPEED(portsc))
return USB_SPEED_FULL;
if (DEV_SUPERSPEED_ANY(portsc))
return USB_SPEED_SUPER;
return USB_SPEED_UNKNOWN;
}
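/*
* For every PHY backing a host-mode root port, read PORTSC to determine
* the attached device's speed, then program the padctl sleepwalk logic for
* that speed and arm wake detection so the port can survive powergating.
*/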
static void tegra_xhci_enable_phy_sleepwalk_wake(struct tegra_xusb *tegra)
{
struct tegra_xusb_padctl *padctl = tegra->padctl;
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
enum usb_device_speed speed;
struct phy *phy;
unsigned int index, offset;
unsigned int i, j, k;
struct xhci_hub *rhub;
u32 portsc;
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
if (strcmp(tegra->soc->phy_types[i].name, "usb3") == 0)
rhub = &xhci->usb3_rhub;
else
rhub = &xhci->usb2_rhub;
if (strcmp(tegra->soc->phy_types[i].name, "hsic") == 0)
offset = tegra->soc->ports.usb2.count;
else
offset = 0;
for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
phy = tegra->phys[k++];
if (!phy)
continue;
index = j + offset;
if (index >= rhub->num_ports)
continue;
if (!is_host_mode_phy(tegra, i, j))
continue;
portsc = readl(rhub->ports[index]->addr);
speed = tegra_xhci_portsc_to_speed(tegra, portsc);
tegra_xusb_padctl_enable_phy_sleepwalk(padctl, phy, speed);
tegra_xusb_padctl_enable_phy_wake(padctl, phy);
}
}
}
static void tegra_xhci_disable_phy_wake(struct tegra_xusb *tegra)
{
struct tegra_xusb_padctl *padctl = tegra->padctl;
unsigned int i;
for (i = 0; i < tegra->num_usb_phys; i++) {
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
if (!phy)
continue;
if (tegra_xusb_padctl_remote_wake_detected(padctl, phy))
tegra_phy_xusb_utmi_pad_power_on(phy);
}
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
if (tegra_xusb_padctl_remote_wake_detected(padctl, tegra->phys[i]))
dev_dbg(tegra->dev, "%pOF remote wake detected\n",
tegra->phys[i]->dev.of_node);
tegra_xusb_padctl_disable_phy_wake(padctl, tegra->phys[i]);
}
}
static void tegra_xhci_disable_phy_sleepwalk(struct tegra_xusb *tegra)
{
struct tegra_xusb_padctl *padctl = tegra->padctl;
unsigned int i;
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
tegra_xusb_padctl_disable_phy_sleepwalk(padctl, tegra->phys[i]);
}
}
static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra)
{
unsigned int i, index_to_usb2;
struct phy *phy;
for (i = 0; i < tegra->soc->num_types; i++) {
if (strcmp(tegra->soc->phy_types[i].name, "usb2") == 0)
index_to_usb2 = i;
}
for (i = 0; i < tegra->num_usb_phys; i++) {
if (!is_host_mode_phy(tegra, index_to_usb2, i))
continue;
phy = tegra_xusb_get_phy(tegra, "usb2", i);
if (tegra->lp0_utmi_pad_mask & BIT(i))
tegra_phy_xusb_utmi_pad_power_on(phy);
else
tegra_phy_xusb_utmi_pad_power_down(phy);
}
}
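/*
* Enter ELPG (engine-level power gating): mask event interrupts, verify
* all ports are suspended, note which UTMI pads must be powered on LP0
* exit, suspend the xHCI, save the IPFS/FPCI context, optionally arm
* sleepwalk/wake, and finally powergate the partitions, power off the
* PHYs and stop the clocks.
*/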
static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
bool wakeup = runtime ? true : device_may_wakeup(dev);
unsigned int i;
int err;
u32 usbcmd;
u32 portsc;
dev_dbg(dev, "entering ELPG\n");
usbcmd = readl(&xhci->op_regs->command);
usbcmd &= ~CMD_EIE;
writel(usbcmd, &xhci->op_regs->command);
err = tegra_xusb_check_ports(tegra);
if (err < 0) {
dev_err(tegra->dev, "not all ports suspended: %d\n", err);
goto out;
}
for (i = 0; i < tegra->num_usb_phys; i++) {
if (!xhci->usb2_rhub.ports[i])
continue;
portsc = readl(xhci->usb2_rhub.ports[i]->addr);
tegra->lp0_utmi_pad_mask &= ~BIT(i);
if (((portsc & PORT_PLS_MASK) == XDEV_U3) || ((portsc & DEV_SPEED_MASK) == XDEV_FS))
tegra->lp0_utmi_pad_mask |= BIT(i);
}
err = xhci_suspend(xhci, wakeup);
if (err < 0) {
dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
goto out;
}
tegra_xusb_save_context(tegra);
if (wakeup)
tegra_xhci_enable_phy_sleepwalk_wake(tegra);
tegra_xusb_powergate_partitions(tegra);
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
phy_power_off(tegra->phys[i]);
if (!wakeup)
phy_exit(tegra->phys[i]);
}
tegra_xusb_clk_disable(tegra);
out:
if (!err)
dev_dbg(tegra->dev, "entering ELPG done\n");
else {
usbcmd = readl(&xhci->op_regs->command);
usbcmd |= CMD_EIE;
writel(usbcmd, &xhci->op_regs->command);
dev_dbg(tegra->dev, "entering ELPG failed\n");
pm_runtime_mark_last_busy(tegra->dev);
}
return err;
}
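/*
* Exit ELPG in the reverse order: re-enable clocks, unpowergate the
* partitions, bring the PHYs back up, restore the saved context, reload
* and re-enable the firmware, then resume the xHCI and unmask event
* interrupts.
*/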
static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
bool wakeup = runtime ? true : device_may_wakeup(dev);
unsigned int i;
u32 usbcmd;
int err;
dev_dbg(dev, "exiting ELPG\n");
pm_runtime_mark_last_busy(tegra->dev);
err = tegra_xusb_clk_enable(tegra);
if (err < 0) {
dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
goto out;
}
err = tegra_xusb_unpowergate_partitions(tegra);
if (err)
goto disable_clks;
if (wakeup)
tegra_xhci_disable_phy_wake(tegra);
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
if (!wakeup)
phy_init(tegra->phys[i]);
phy_power_on(tegra->phys[i]);
}
if (tegra->suspended)
tegra_xhci_program_utmi_power_lp0_exit(tegra);
tegra_xusb_config(tegra);
tegra_xusb_restore_context(tegra);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(tegra->dev, "failed to load firmware: %d\n", err);
goto disable_phy;
}
err = __tegra_xusb_enable_firmware_messages(tegra);
if (err < 0) {
dev_err(tegra->dev, "failed to enable messages: %d\n", err);
goto disable_phy;
}
if (wakeup)
tegra_xhci_disable_phy_sleepwalk(tegra);
err = xhci_resume(xhci, runtime ? PMSG_AUTO_RESUME : PMSG_RESUME);
if (err < 0) {
dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
goto disable_phy;
}
usbcmd = readl(&xhci->op_regs->command);
usbcmd |= CMD_EIE;
writel(usbcmd, &xhci->op_regs->command);
goto out;
disable_phy:
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
phy_power_off(tegra->phys[i]);
if (!wakeup)
phy_exit(tegra->phys[i]);
}
tegra_xusb_powergate_partitions(tegra);
disable_clks:
tegra_xusb_clk_disable(tegra);
out:
if (!err)
dev_dbg(dev, "exiting ELPG done\n");
else
dev_dbg(dev, "exiting ELPG failed\n");
return err;
}
static __maybe_unused int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
synchronize_irq(tegra->mbox_irq);
mutex_lock(&tegra->lock);
if (pm_runtime_suspended(dev)) {
err = tegra_xusb_exit_elpg(tegra, true);
if (err < 0)
goto out;
}
err = tegra_xusb_enter_elpg(tegra, false);
if (err < 0) {
if (pm_runtime_suspended(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
goto out;
}
out:
if (!err) {
tegra->suspended = true;
pm_runtime_disable(dev);
if (device_may_wakeup(dev)) {
if (enable_irq_wake(tegra->padctl_irq))
dev_err(dev, "failed to enable padctl wakes\n");
}
}
mutex_unlock(&tegra->lock);
return err;
}
static __maybe_unused int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
mutex_lock(&tegra->lock);
if (!tegra->suspended) {
mutex_unlock(&tegra->lock);
return 0;
}
err = tegra_xusb_exit_elpg(tegra, false);
if (err < 0) {
mutex_unlock(&tegra->lock);
return err;
}
if (device_may_wakeup(dev)) {
if (disable_irq_wake(tegra->padctl_irq))
dev_err(dev, "failed to disable padctl wakes\n");
}
tegra->suspended = false;
mutex_unlock(&tegra->lock);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int ret;
synchronize_irq(tegra->mbox_irq);
mutex_lock(&tegra->lock);
ret = tegra_xusb_enter_elpg(tegra, true);
mutex_unlock(&tegra->lock);
return ret;
}
static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
mutex_lock(&tegra->lock);
err = tegra_xusb_exit_elpg(tegra, true);
mutex_unlock(&tegra->lock);
return err;
}
static const struct dev_pm_ops tegra_xusb_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
tegra_xusb_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_xusb_suspend, tegra_xusb_resume)
};
static const char * const tegra124_supply_names[] = {
"avddio-pex",
"dvddio-pex",
"avdd-usb",
"hvdd-usb-ss",
};
static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
{ .name = "usb3", .num = 2, },
{ .name = "usb2", .num = 3, },
{ .name = "hsic", .num = 2, },
};
static const unsigned int tegra124_xusb_context_ipfs[] = {
IPFS_XUSB_HOST_MSI_BAR_SZ_0,
IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
IPFS_XUSB_HOST_MSI_VEC0_0,
IPFS_XUSB_HOST_MSI_EN_VEC0_0,
IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0,
IPFS_XUSB_HOST_INTR_MASK_0,
IPFS_XUSB_HOST_INTR_ENABLE_0,
IPFS_XUSB_HOST_UFPCI_CONFIG_0,
IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0,
IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0,
};
static const unsigned int tegra124_xusb_context_fpci[] = {
XUSB_CFG_ARU_CONTEXT_HS_PLS,
XUSB_CFG_ARU_CONTEXT_FS_PLS,
XUSB_CFG_ARU_CONTEXT_HSFS_SPEED,
XUSB_CFG_ARU_CONTEXT_HSFS_PP,
XUSB_CFG_ARU_CONTEXT,
XUSB_CFG_AXI_CFG,
XUSB_CFG_24,
XUSB_CFG_16,
};
static const struct tegra_xusb_context_soc tegra124_xusb_context = {
.ipfs = {
.num_offsets = ARRAY_SIZE(tegra124_xusb_context_ipfs),
.offsets = tegra124_xusb_context_ipfs,
},
.fpci = {
.num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
.offsets = tegra124_xusb_context_fpci,
},
};
static const struct tegra_xusb_soc_ops tegra124_ops = {
.mbox_reg_readl = &fpci_readl,
.mbox_reg_writel = &fpci_writel,
.csb_reg_readl = &fpci_csb_readl,
.csb_reg_writel = &fpci_csb_writel,
};
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_supply_names),
.phy_types = tegra124_phy_types,
.num_types = ARRAY_SIZE(tegra124_phy_types),
.context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 6, .count = 2, },
.usb3 = { .offset = 0, .count = 2, },
},
.scale_ss_clock = true,
.has_ipfs = true,
.otg_reset_sspi = false,
.ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
.smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
static const char * const tegra210_supply_names[] = {
"dvddio-pex",
"hvddio-pex",
"avdd-usb",
};
static const struct tegra_xusb_phy_type tegra210_phy_types[] = {
{ .name = "usb3", .num = 4, },
{ .name = "usb2", .num = 4, },
{ .name = "hsic", .num = 1, },
};
static const struct tegra_xusb_soc tegra210_soc = {
.firmware = "nvidia/tegra210/xusb.bin",
.supply_names = tegra210_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_supply_names),
.phy_types = tegra210_phy_types,
.num_types = ARRAY_SIZE(tegra210_phy_types),
.context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 8, .count = 1, },
.usb3 = { .offset = 0, .count = 4, },
},
.scale_ss_clock = false,
.has_ipfs = true,
.otg_reset_sspi = true,
.ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
.smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
static const char * const tegra186_supply_names[] = {
};
MODULE_FIRMWARE("nvidia/tegra186/xusb.bin");
static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "usb3", .num = 3, },
{ .name = "usb2", .num = 3, },
{ .name = "hsic", .num = 1, },
};
static const struct tegra_xusb_context_soc tegra186_xusb_context = {
.fpci = {
.num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
.offsets = tegra124_xusb_context_fpci,
},
};
static const struct tegra_xusb_soc tegra186_soc = {
.firmware = "nvidia/tegra186/xusb.bin",
.supply_names = tegra186_supply_names,
.num_supplies = ARRAY_SIZE(tegra186_supply_names),
.phy_types = tegra186_phy_types,
.num_types = ARRAY_SIZE(tegra186_phy_types),
.context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 3, },
.usb2 = { .offset = 3, .count = 3, },
.hsic = { .offset = 6, .count = 1, },
},
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
.ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
.smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
static const char * const tegra194_supply_names[] = {
};
static const struct tegra_xusb_phy_type tegra194_phy_types[] = {
{ .name = "usb3", .num = 4, },
{ .name = "usb2", .num = 4, },
};
static const struct tegra_xusb_soc tegra194_soc = {
.firmware = "nvidia/tegra194/xusb.bin",
.supply_names = tegra194_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_supply_names),
.phy_types = tegra194_phy_types,
.num_types = ARRAY_SIZE(tegra194_phy_types),
.context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 4, },
.usb2 = { .offset = 4, .count = 4, },
},
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
.ops = &tegra124_ops,
.mbox = {
.cmd = 0x68,
.data_in = 0x6c,
.data_out = 0x70,
.owner = 0x74,
.smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
static const struct tegra_xusb_soc_ops tegra234_ops = {
.mbox_reg_readl = &bar2_readl,
.mbox_reg_writel = &bar2_writel,
.csb_reg_readl = &bar2_csb_readl,
.csb_reg_writel = &bar2_csb_writel,
};
static const struct tegra_xusb_soc tegra234_soc = {
.supply_names = tegra194_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_supply_names),
.phy_types = tegra194_phy_types,
.num_types = ARRAY_SIZE(tegra194_phy_types),
.context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 4, },
.usb2 = { .offset = 4, .count = 4, },
},
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
.ops = &tegra234_ops,
.mbox = {
.cmd = XUSB_BAR2_ARU_MBOX_CMD,
.data_in = XUSB_BAR2_ARU_MBOX_DATA_IN,
.data_out = XUSB_BAR2_ARU_MBOX_DATA_OUT,
.owner = XUSB_BAR2_ARU_MBOX_OWNER,
.smi_intr = XUSB_BAR2_ARU_SMI_INTR,
},
.lpm_support = true,
.has_bar2 = true,
};
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
{ .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
{ .compatible = "nvidia,tegra234-xusb", .data = &tegra234_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
static struct platform_driver tegra_xusb_driver = {
.probe = tegra_xusb_probe,
.remove_new = tegra_xusb_remove,
.shutdown = tegra_xusb_shutdown,
.driver = {
.name = "tegra-xusb",
.pm = &tegra_xusb_pm_ops,
.of_match_table = tegra_xusb_of_match,
},
};
static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
if (tegra && tegra->soc->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
}
static int tegra_xhci_setup(struct usb_hcd *hcd)
{
return xhci_gen_setup(hcd, tegra_xhci_quirks);
}
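/*
* Wrapper around xhci_hub_control() that manages UTMI pad power: the pad
* is powered up before a port resume, a reset on a connected port, or test
* mode, and powered back down after a suspend or disconnect, except while
* an HNP role swap is in progress on the OTG port.
*/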
static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
char *buf, u16 length)
{
struct tegra_xusb *tegra = dev_get_drvdata(hcd->self.controller);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hub *rhub;
struct xhci_bus_state *bus_state;
int port = (index & 0xff) - 1;
unsigned int i;
struct xhci_port **ports;
u32 portsc;
int ret;
struct phy *phy;
rhub = &xhci->usb2_rhub;
bus_state = &rhub->bus_state;
if (bus_state->resuming_ports && hcd->speed == HCD_USB2) {
ports = rhub->ports;
i = rhub->num_ports;
while (i--) {
if (!test_bit(i, &bus_state->resuming_ports))
continue;
portsc = readl(ports[i]->addr);
if ((portsc & PORT_PLS_MASK) == XDEV_RESUME)
tegra_phy_xusb_utmi_pad_power_on(
tegra_xusb_get_phy(tegra, "usb2", (int) i));
}
}
if (hcd->speed == HCD_USB2) {
phy = tegra_xusb_get_phy(tegra, "usb2", port);
if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_SUSPEND)) {
if (!index || index > rhub->num_ports)
return -EPIPE;
tegra_phy_xusb_utmi_pad_power_on(phy);
}
if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_RESET)) {
if (!index || index > rhub->num_ports)
return -EPIPE;
ports = rhub->ports;
portsc = readl(ports[port]->addr);
if (portsc & PORT_CONNECT)
tegra_phy_xusb_utmi_pad_power_on(phy);
}
}
ret = xhci_hub_control(hcd, type_req, value, index, buf, length);
if (ret < 0)
return ret;
if (hcd->speed == HCD_USB2) {
/* Use the phy we looked up before calling xhci_hub_control() */
if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_SUSPEND))
/* We don't suspend the PAD while HNP role swap happens on the OTG port */
if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
tegra_phy_xusb_utmi_pad_power_down(phy);
if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_C_CONNECTION)) {
ports = rhub->ports;
portsc = readl(ports[port]->addr);
if (!(portsc & PORT_CONNECT)) {
/* We don't suspend the PAD while HNP role swap happens on the OTG
* port
*/
if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
tegra_phy_xusb_utmi_pad_power_down(phy);
}
}
if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_TEST))
tegra_phy_xusb_utmi_pad_power_on(phy);
}
return ret;
}
static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
.reset = tegra_xhci_setup,
.hub_control = tegra_xhci_hub_control,
};
static int __init tegra_xusb_init(void)
{
xhci_init_driver(&tegra_xhci_hc_driver, &tegra_xhci_overrides);
return platform_driver_register(&tegra_xusb_driver);
}
module_init(tegra_xusb_init);
static void __exit tegra_xusb_exit(void)
{
platform_driver_unregister(&tegra_xusb_driver);
}
module_exit(tegra_xusb_exit);
MODULE_AUTHOR("Andrew Bresticker <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra XUSB xHCI host-controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/xhci-tegra.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic UHCI HCD (Host Controller Driver) for Platform Devices
*
* Copyright (c) 2011 Tony Prisk <[email protected]>
*
* This file is based on uhci-grlib.c
* (C) Copyright 2004-2007 Alan Stern, [email protected]
*/
#include <linux/of.h>
#include <linux/device.h>
#include <linux/platform_device.h>
static int uhci_platform_init(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
/* Probe number of ports if not already provided by DT */
if (!uhci->rh_numports)
uhci->rh_numports = uhci_count_ports(hcd);
/* Set up pointers to generic functions */
uhci->reset_hc = uhci_generic_reset_hc;
uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
/* No special actions need to be taken for the functions below */
uhci->configure_hc = NULL;
uhci->resume_detect_interrupts_are_broken = NULL;
uhci->global_suspend_mode_is_broken = NULL;
/* Reset if the controller isn't already safely quiescent. */
check_and_reset_hc(uhci);
return 0;
}
static const struct hc_driver uhci_platform_hc_driver = {
.description = hcd_name,
.product_desc = "Generic UHCI Host Controller",
.hcd_priv_size = sizeof(struct uhci_hcd),
/* Generic hardware linkage */
.irq = uhci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_platform_init,
.start = uhci_start,
#ifdef CONFIG_PM
.pci_suspend = NULL,
.pci_resume = NULL,
.bus_suspend = uhci_rh_suspend,
.bus_resume = uhci_rh_resume,
#endif
.stop = uhci_stop,
.urb_enqueue = uhci_urb_enqueue,
.urb_dequeue = uhci_urb_dequeue,
.endpoint_disable = uhci_hcd_endpoint_disable,
.get_frame_number = uhci_hcd_get_frame_number,
.hub_status_data = uhci_hub_status_data,
.hub_control = uhci_hub_control,
};
static int uhci_hcd_platform_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct usb_hcd *hcd;
struct uhci_hcd *uhci;
struct resource *res;
int ret;
if (usb_disabled())
return -ENODEV;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
pdev->name);
if (!hcd)
return -ENOMEM;
uhci = hcd_to_uhci(hcd);
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_rmr;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
uhci->regs = hcd->regs;
/* Grab some things from the device-tree */
if (np) {
u32 num_ports;
if (of_property_read_u32(np, "#ports", &num_ports) == 0) {
uhci->rh_numports = num_ports;
dev_info(&pdev->dev,
"Detected %d ports from device-tree\n",
num_ports);
}
if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
of_device_is_compatible(np, "aspeed,ast2500-uhci") ||
of_device_is_compatible(np, "aspeed,ast2600-uhci")) {
uhci->is_aspeed = 1;
dev_info(&pdev->dev,
"Enabled Aspeed implementation workarounds\n");
}
}
/* Get and enable clock if any specified */
uhci->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(uhci->clk)) {
ret = PTR_ERR(uhci->clk);
goto err_rmr;
}
ret = clk_prepare_enable(uhci->clk);
if (ret) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
goto err_rmr;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_clk;
ret = usb_add_hcd(hcd, ret, IRQF_SHARED);
if (ret)
goto err_clk;
device_wakeup_enable(hcd->self.controller);
return 0;
err_clk:
clk_disable_unprepare(uhci->clk);
err_rmr:
usb_put_hcd(hcd);
return ret;
}
static void uhci_hcd_platform_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
clk_disable_unprepare(uhci->clk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
/* Make sure the controller is quiescent and that we're not using it
* any more. This is mainly for the benefit of programs which, like kexec,
* expect the hardware to be idle: not doing DMA or generating IRQs.
*
* This routine may be called in a damaged or failing kernel. Hence we
* do not acquire the spinlock before shutting down the controller.
*/
static void uhci_hcd_platform_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
uhci_hc_died(hcd_to_uhci(hcd));
}
static const struct of_device_id platform_uhci_ids[] = {
{ .compatible = "generic-uhci", },
{ .compatible = "platform-uhci", },
{}
};
MODULE_DEVICE_TABLE(of, platform_uhci_ids);
static struct platform_driver uhci_platform_driver = {
.probe = uhci_hcd_platform_probe,
.remove_new = uhci_hcd_platform_remove,
.shutdown = uhci_hcd_platform_shutdown,
.driver = {
.name = "platform-uhci",
.of_match_table = platform_uhci_ids,
},
};
| linux-master | drivers/usb/host/uhci-platform.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
*
* This file is licenced under the GPL.
*/
/*-------------------------------------------------------------------------*/
#define edstring(ed_type) ({ char *temp; \
switch (ed_type) { \
case PIPE_CONTROL: temp = "ctrl"; break; \
case PIPE_BULK: temp = "bulk"; break; \
case PIPE_INTERRUPT: temp = "intr"; break; \
default: temp = "isoc"; break; \
} temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
#define ohci_dbg_sw(ohci, next, size, format, arg...) \
do { \
if (next != NULL) { \
unsigned s_len; \
s_len = scnprintf (*next, *size, format, ## arg ); \
*size -= s_len; *next += s_len; \
} else \
ohci_dbg(ohci,format, ## arg ); \
} while (0);
/* Version for use where "next" is the address of a local variable */
#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
do { \
unsigned s_len; \
s_len = scnprintf(*next, *size, format, ## arg); \
*size -= s_len; *next += s_len; \
} while (0);
static void ohci_dump_intr_mask (
struct ohci_hcd *ohci,
char *label,
u32 mask,
char **next,
unsigned *size)
{
ohci_dbg_sw (ohci, next, size, "%s 0x%08x%s%s%s%s%s%s%s%s%s\n",
label,
mask,
(mask & OHCI_INTR_MIE) ? " MIE" : "",
(mask & OHCI_INTR_OC) ? " OC" : "",
(mask & OHCI_INTR_RHSC) ? " RHSC" : "",
(mask & OHCI_INTR_FNO) ? " FNO" : "",
(mask & OHCI_INTR_UE) ? " UE" : "",
(mask & OHCI_INTR_RD) ? " RD" : "",
(mask & OHCI_INTR_SF) ? " SF" : "",
(mask & OHCI_INTR_WDH) ? " WDH" : "",
(mask & OHCI_INTR_SO) ? " SO" : ""
);
}
static void maybe_print_eds (
struct ohci_hcd *ohci,
char *label,
u32 value,
char **next,
unsigned *size)
{
if (value)
ohci_dbg_sw (ohci, next, size, "%s %08x\n", label, value);
}
static char *hcfs2string (int state)
{
switch (state) {
case OHCI_USB_RESET: return "reset";
case OHCI_USB_RESUME: return "resume";
case OHCI_USB_OPER: return "operational";
case OHCI_USB_SUSPEND: return "suspend";
}
return "?";
}
static const char *rh_state_string(struct ohci_hcd *ohci)
{
switch (ohci->rh_state) {
case OHCI_RH_HALTED:
return "halted";
case OHCI_RH_SUSPENDED:
return "suspended";
case OHCI_RH_RUNNING:
return "running";
}
return "?";
}
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
{
struct ohci_regs __iomem *regs = controller->regs;
u32 temp;
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
"OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
(temp & 0x0100) ? "with" : "NO",
rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
"control 0x%03x%s%s%s HCFS=%s%s%s%s%s CBSR=%d\n",
temp,
(temp & OHCI_CTRL_RWE) ? " RWE" : "",
(temp & OHCI_CTRL_RWC) ? " RWC" : "",
(temp & OHCI_CTRL_IR) ? " IR" : "",
hcfs2string (temp & OHCI_CTRL_HCFS),
(temp & OHCI_CTRL_BLE) ? " BLE" : "",
(temp & OHCI_CTRL_CLE) ? " CLE" : "",
(temp & OHCI_CTRL_IE) ? " IE" : "",
(temp & OHCI_CTRL_PLE) ? " PLE" : "",
temp & OHCI_CTRL_CBSR
);
temp = ohci_readl (controller, &regs->cmdstatus);
ohci_dbg_sw (controller, next, size,
"cmdstatus 0x%05x SOC=%d%s%s%s%s\n", temp,
(temp & OHCI_SOC) >> 16,
(temp & OHCI_OCR) ? " OCR" : "",
(temp & OHCI_BLF) ? " BLF" : "",
(temp & OHCI_CLF) ? " CLF" : "",
(temp & OHCI_HCR) ? " HCR" : ""
);
ohci_dump_intr_mask (controller, "intrstatus",
ohci_readl (controller, ®s->intrstatus),
next, size);
ohci_dump_intr_mask (controller, "intrenable",
ohci_readl (controller, ®s->intrenable),
next, size);
// intrdisable always same as intrenable
maybe_print_eds (controller, "ed_periodcurrent",
ohci_readl (controller, ®s->ed_periodcurrent),
next, size);
maybe_print_eds (controller, "ed_controlhead",
ohci_readl (controller, ®s->ed_controlhead),
next, size);
maybe_print_eds (controller, "ed_controlcurrent",
ohci_readl (controller, ®s->ed_controlcurrent),
next, size);
maybe_print_eds (controller, "ed_bulkhead",
ohci_readl (controller, ®s->ed_bulkhead),
next, size);
maybe_print_eds (controller, "ed_bulkcurrent",
ohci_readl (controller, ®s->ed_bulkcurrent),
next, size);
maybe_print_eds (controller, "donehead",
ohci_readl (controller, ®s->donehead), next, size);
}
#define dbg_port_sw(hc,num,value,next,size) \
ohci_dbg_sw (hc, next, size, \
"roothub.portstatus [%d] " \
"0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
num, temp, \
(temp & RH_PS_PRSC) ? " PRSC" : "", \
(temp & RH_PS_OCIC) ? " OCIC" : "", \
(temp & RH_PS_PSSC) ? " PSSC" : "", \
(temp & RH_PS_PESC) ? " PESC" : "", \
(temp & RH_PS_CSC) ? " CSC" : "", \
\
(temp & RH_PS_LSDA) ? " LSDA" : "", \
(temp & RH_PS_PPS) ? " PPS" : "", \
(temp & RH_PS_PRS) ? " PRS" : "", \
(temp & RH_PS_POCI) ? " POCI" : "", \
(temp & RH_PS_PSS) ? " PSS" : "", \
\
(temp & RH_PS_PES) ? " PES" : "", \
(temp & RH_PS_CCS) ? " CCS" : "" \
);
static void
ohci_dump_roothub (
struct ohci_hcd *controller,
int verbose,
char **next,
unsigned *size)
{
u32 temp, i;
temp = roothub_a (controller);
if (temp == ~(u32)0)
return;
if (verbose) {
ohci_dbg_sw (controller, next, size,
"roothub.a %08x POTPGT=%d%s%s%s%s%s NDP=%d(%d)\n", temp,
((temp & RH_A_POTPGT) >> 24) & 0xff,
(temp & RH_A_NOCP) ? " NOCP" : "",
(temp & RH_A_OCPM) ? " OCPM" : "",
(temp & RH_A_DT) ? " DT" : "",
(temp & RH_A_NPS) ? " NPS" : "",
(temp & RH_A_PSM) ? " PSM" : "",
(temp & RH_A_NDP), controller->num_ports
);
temp = roothub_b (controller);
ohci_dbg_sw (controller, next, size,
"roothub.b %08x PPCM=%04x DR=%04x\n",
temp,
(temp & RH_B_PPCM) >> 16,
(temp & RH_B_DR)
);
temp = roothub_status (controller);
ohci_dbg_sw (controller, next, size,
"roothub.status %08x%s%s%s%s%s%s\n",
temp,
(temp & RH_HS_CRWE) ? " CRWE" : "",
(temp & RH_HS_OCIC) ? " OCIC" : "",
(temp & RH_HS_LPSC) ? " LPSC" : "",
(temp & RH_HS_DRWE) ? " DRWE" : "",
(temp & RH_HS_OCI) ? " OCI" : "",
(temp & RH_HS_LPS) ? " LPS" : ""
);
}
for (i = 0; i < controller->num_ports; i++) {
temp = roothub_portstatus (controller, i);
dbg_port_sw (controller, i, temp, next, size);
}
}
static void ohci_dump(struct ohci_hcd *controller)
{
ohci_dbg (controller, "OHCI controller state\n");
// dumps some of the state we know about
ohci_dump_status (controller, NULL, NULL);
if (controller->hcca)
ohci_dbg (controller,
"hcca frame #%04x\n", ohci_frame_no(controller));
ohci_dump_roothub (controller, 1, NULL, NULL);
}
static const char data0 [] = "DATA0";
static const char data1 [] = "DATA1";
static void ohci_dump_td (const struct ohci_hcd *ohci, const char *label,
const struct td *td)
{
u32 tmp = hc32_to_cpup (ohci, &td->hwINFO);
ohci_dbg (ohci, "%s td %p%s; urb %p index %d; hw next td %08x\n",
label, td,
(tmp & TD_DONE) ? " (DONE)" : "",
td->urb, td->index,
hc32_to_cpup (ohci, &td->hwNextTD));
if ((tmp & TD_ISO) == 0) {
const char *toggle, *pid;
u32 cbp, be;
switch (tmp & TD_T) {
case TD_T_DATA0: toggle = data0; break;
case TD_T_DATA1: toggle = data1; break;
case TD_T_TOGGLE: toggle = "(CARRY)"; break;
default: toggle = "(?)"; break;
}
switch (tmp & TD_DP) {
case TD_DP_SETUP: pid = "SETUP"; break;
case TD_DP_IN: pid = "IN"; break;
case TD_DP_OUT: pid = "OUT"; break;
default: pid = "(bad pid)"; break;
}
ohci_dbg (ohci, " info %08x CC=%x %s DI=%d %s %s\n", tmp,
TD_CC_GET(tmp), /* EC, */ toggle,
(tmp & TD_DI) >> 21, pid,
(tmp & TD_R) ? "R" : "");
cbp = hc32_to_cpup (ohci, &td->hwCBP);
be = hc32_to_cpup (ohci, &td->hwBE);
ohci_dbg (ohci, " cbp %08x be %08x (len %d)\n", cbp, be,
cbp ? (be + 1 - cbp) : 0);
} else {
unsigned i;
ohci_dbg (ohci, " info %08x CC=%x FC=%d DI=%d SF=%04x\n", tmp,
TD_CC_GET(tmp),
(tmp >> 24) & 0x07,
(tmp & TD_DI) >> 21,
tmp & 0x0000ffff);
ohci_dbg (ohci, " bp0 %08x be %08x\n",
hc32_to_cpup (ohci, &td->hwCBP) & ~0x0fff,
hc32_to_cpup (ohci, &td->hwBE));
for (i = 0; i < MAXPSW; i++) {
u16 psw = ohci_hwPSW (ohci, td, i);
int cc = (psw >> 12) & 0x0f;
ohci_dbg (ohci, " psw [%d] = %2x, CC=%x %s=%d\n", i,
psw, cc,
(cc >= 0x0e) ? "OFFSET" : "SIZE",
psw & 0x0fff);
}
}
}
/* caller MUST own hcd spinlock if verbose is set! */
static void __maybe_unused
ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
const struct ed *ed, int verbose)
{
u32 tmp = hc32_to_cpu (ohci, ed->hwINFO);
char *type = "";
ohci_dbg (ohci, "%s, ed %p state 0x%x type %s; next ed %08x\n",
label,
ed, ed->state, edstring (ed->type),
hc32_to_cpup (ohci, &ed->hwNextED));
switch (tmp & (ED_IN|ED_OUT)) {
case ED_OUT: type = "-OUT"; break;
case ED_IN: type = "-IN"; break;
/* else from TDs ... control */
}
ohci_dbg (ohci,
" info %08x MAX=%d%s%s%s%s EP=%d%s DEV=%d\n", tmp,
0x03ff & (tmp >> 16),
(tmp & ED_DEQUEUE) ? " DQ" : "",
(tmp & ED_ISO) ? " ISO" : "",
(tmp & ED_SKIP) ? " SKIP" : "",
(tmp & ED_LOWSPEED) ? " LOW" : "",
0x000f & (tmp >> 7),
type,
0x007f & tmp);
tmp = hc32_to_cpup (ohci, &ed->hwHeadP);
ohci_dbg (ohci, " tds: head %08x %s%s tail %08x%s\n",
tmp,
(tmp & ED_C) ? data1 : data0,
(tmp & ED_H) ? " HALT" : "",
hc32_to_cpup (ohci, &ed->hwTailP),
verbose ? "" : " (not listing)");
if (verbose) {
struct list_head *tmp;
/* use ed->td_list because HC concurrently modifies
* hwNextTD as it accumulates ed_donelist.
*/
list_for_each (tmp, &ed->td_list) {
struct td *td;
td = list_entry (tmp, struct td, td_list);
ohci_dump_td (ohci, " ->", td);
}
}
}
/*-------------------------------------------------------------------------*/
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
static const struct file_operations debug_async_fops = {
.owner = THIS_MODULE,
.open = debug_async_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static struct dentry *ohci_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
struct ohci_hcd *ohci;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *page;
};
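/*
* Print a snapshot of one ED schedule (control or bulk) into buf: rewind
* to the head of the list, then dump each ED followed by the TDs queued
* on its td_list.
*/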
static ssize_t
show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
{
unsigned temp, size = count;
if (!ed)
return 0;
/* print first --> last */
while (ed->ed_prev)
ed = ed->ed_prev;
/* dump a snapshot of the bulk or control schedule */
while (ed) {
u32 info = hc32_to_cpu (ohci, ed->hwINFO);
u32 headp = hc32_to_cpu (ohci, ed->hwHeadP);
struct list_head *entry;
struct td *td;
temp = scnprintf (buf, size,
"ed/%p %cs dev%d ep%d%s max %d %08x%s%s %s",
ed,
(info & ED_LOWSPEED) ? 'l' : 'f',
info & 0x7f,
(info >> 7) & 0xf,
(info & ED_IN) ? "in" : "out",
0x03ff & (info >> 16),
info,
(info & ED_SKIP) ? " s" : "",
(headp & ED_H) ? " H" : "",
(headp & ED_C) ? data1 : data0);
size -= temp;
buf += temp;
list_for_each (entry, &ed->td_list) {
u32 cbp, be;
td = list_entry (entry, struct td, td_list);
info = hc32_to_cpup (ohci, &td->hwINFO);
cbp = hc32_to_cpup (ohci, &td->hwCBP);
be = hc32_to_cpup (ohci, &td->hwBE);
temp = scnprintf (buf, size,
"\n\ttd %p %s %d cc=%x urb %p (%08x)",
td,
({ char *pid;
switch (info & TD_DP) {
case TD_DP_SETUP: pid = "setup"; break;
case TD_DP_IN: pid = "in"; break;
case TD_DP_OUT: pid = "out"; break;
default: pid = "(?)"; break;
} pid;}),
cbp ? (be + 1 - cbp) : 0,
TD_CC_GET (info), td->urb, info);
size -= temp;
buf += temp;
}
temp = scnprintf (buf, size, "\n");
size -= temp;
buf += temp;
ed = ed->ed_next;
}
return count - size;
}
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
struct ohci_hcd *ohci;
size_t temp, size;
unsigned long flags;
ohci = buf->ohci;
size = PAGE_SIZE;
/* display control and bulk lists together, for simplicity */
spin_lock_irqsave (&ohci->lock, flags);
temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
temp += show_list(ohci, buf->page + temp, size - temp,
ohci->ed_bulktail);
spin_unlock_irqrestore (&ohci->lock, flags);
return temp;
}
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
struct ohci_hcd *ohci;
struct ed **seen, *ed;
unsigned long flags;
unsigned temp, size, seen_count;
char *next;
unsigned i;
seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
if (!seen)
return 0;
seen_count = 0;
ohci = buf->ohci;
next = buf->page;
size = PAGE_SIZE;
temp = scnprintf (next, size, "size = %d\n", NUM_INTS);
size -= temp;
next += temp;
/* dump a snapshot of the periodic schedule (and load) */
spin_lock_irqsave (&ohci->lock, flags);
for (i = 0; i < NUM_INTS; i++) {
ed = ohci->periodic[i];
if (!ed)
continue;
temp = scnprintf (next, size, "%2d [%3d]:", i, ohci->load [i]);
size -= temp;
next += temp;
do {
temp = scnprintf (next, size, " ed%d/%p",
ed->interval, ed);
size -= temp;
next += temp;
for (temp = 0; temp < seen_count; temp++) {
if (seen [temp] == ed)
break;
}
/* show more info the first time around */
if (temp == seen_count) {
u32 info = hc32_to_cpu (ohci, ed->hwINFO);
struct list_head *entry;
unsigned qlen = 0;
/* qlen measured here in TDs, not urbs */
list_for_each (entry, &ed->td_list)
qlen++;
temp = scnprintf (next, size,
" (%cs dev%d ep%d%s-%s qlen %u"
" max %d %08x%s%s)",
(info & ED_LOWSPEED) ? 'l' : 'f',
info & 0x7f,
(info >> 7) & 0xf,
(info & ED_IN) ? "in" : "out",
(info & ED_ISO) ? "iso" : "int",
qlen,
0x03ff & (info >> 16),
info,
(info & ED_SKIP) ? " K" : "",
(ed->hwHeadP &
cpu_to_hc32(ohci, ED_H)) ?
" H" : "");
size -= temp;
next += temp;
if (seen_count < DBG_SCHED_LIMIT)
seen [seen_count++] = ed;
ed = ed->ed_next;
} else {
/* we've seen it and what's after */
temp = 0;
ed = NULL;
}
} while (ed);
temp = scnprintf (next, size, "\n");
size -= temp;
next += temp;
}
spin_unlock_irqrestore (&ohci->lock, flags);
kfree (seen);
return PAGE_SIZE - size;
}
#undef DBG_SCHED_LIMIT
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ohci_regs __iomem *regs;
unsigned long flags;
unsigned temp, size;
char *next;
u32 rdata;
ohci = buf->ohci;
hcd = ohci_to_hcd(ohci);
regs = ohci->regs;
next = buf->page;
size = PAGE_SIZE;
spin_lock_irqsave (&ohci->lock, flags);
/* dump driver info, then registers in spec order */
ohci_dbg_nosw(ohci, &next, &size,
"bus %s, device %s\n"
"%s\n"
"%s\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc,
hcd_name);
if (!HCD_HW_ACCESSIBLE(hcd)) {
size -= scnprintf (next, size,
"SUSPENDED (no register access)\n");
goto done;
}
ohci_dump_status(ohci, &next, &size);
/* hcca */
if (ohci->hcca)
ohci_dbg_nosw(ohci, &next, &size,
"hcca frame 0x%04x\n", ohci_frame_no(ohci));
/* other registers mostly affect frame timings */
rdata = ohci_readl (ohci, ®s->fminterval);
temp = scnprintf (next, size,
"fmintvl 0x%08x %sFSMPS=0x%04x FI=0x%04x\n",
rdata, (rdata >> 31) ? "FIT " : "",
(rdata >> 16) & 0xefff, rdata & 0xffff);
size -= temp;
next += temp;
rdata = ohci_readl (ohci, ®s->fmremaining);
temp = scnprintf (next, size, "fmremaining 0x%08x %sFR=0x%04x\n",
rdata, (rdata >> 31) ? "FRT " : "",
rdata & 0x3fff);
size -= temp;
next += temp;
rdata = ohci_readl (ohci, ®s->periodicstart);
temp = scnprintf (next, size, "periodicstart 0x%04x\n",
rdata & 0x3fff);
size -= temp;
next += temp;
rdata = ohci_readl (ohci, ®s->lsthresh);
temp = scnprintf (next, size, "lsthresh 0x%04x\n",
rdata & 0x3fff);
size -= temp;
next += temp;
temp = scnprintf (next, size, "hub poll timer %s\n",
HCD_POLL_RH(ohci_to_hcd(ohci)) ? "ON" : "off");
size -= temp;
next += temp;
/* roothub */
ohci_dump_roothub (ohci, 1, &next, &size);
done:
spin_unlock_irqrestore (&ohci->lock, flags);
return PAGE_SIZE - size;
}
static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
if (buf) {
buf->ohci = ohci;
buf->fill_func = fill_func;
mutex_init(&buf->mutex);
}
return buf;
}
static int fill_buffer(struct debug_buffer *buf)
{
int ret;
if (!buf->page)
buf->page = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf->page) {
ret = -ENOMEM;
goto out;
}
ret = buf->fill_func(buf);
if (ret >= 0) {
buf->count = ret;
ret = 0;
}
out:
return ret;
}
static ssize_t debug_output(struct file *file, char __user *user_buf,
size_t len, loff_t *offset)
{
struct debug_buffer *buf = file->private_data;
int ret;
mutex_lock(&buf->mutex);
if (buf->count == 0) {
ret = fill_buffer(buf);
if (ret != 0) {
mutex_unlock(&buf->mutex);
goto out;
}
}
mutex_unlock(&buf->mutex);
ret = simple_read_from_buffer(user_buf, len, offset,
buf->page, buf->count);
out:
return ret;
}
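/*
 * Note: the page is filled only while buf->count is still 0, i.e. on the
 * first read() after open; subsequent reads and seeks are served from the
 * cached snapshot until the file is closed and reopened.
 */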
static int debug_close(struct inode *inode, struct file *file)
{
struct debug_buffer *buf = file->private_data;
if (buf) {
if (buf->page)
free_page((unsigned long)buf->page);
kfree(buf);
}
return 0;
}
static int debug_async_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
return file->private_data ? 0 : -ENOMEM;
}
static int debug_periodic_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private,
fill_periodic_buffer);
return file->private_data ? 0 : -ENOMEM;
}
static int debug_registers_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private,
fill_registers_buffer);
return file->private_data ? 0 : -ENOMEM;
}
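/*
 * Illustrative sketch, not part of this excerpt: the three open() methods
 * above are wired into debugfs through file_operations tables such as the
 * one below (the name debug_async_fops matches the one referenced by
 * create_debug_files(); the periodic and registers tables follow the same
 * pattern).
 *
 *	static const struct file_operations debug_async_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= debug_async_open,
 *		.read		= debug_output,
 *		.release	= debug_close,
 *		.llseek		= default_llseek,
 *	};
 */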
static inline void create_debug_files (struct ohci_hcd *ohci)
{
struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
struct dentry *root;
root = debugfs_create_dir(bus->bus_name, ohci_debug_root);
ohci->debug_dir = root;
debugfs_create_file("async", S_IRUGO, root, ohci, &debug_async_fops);
debugfs_create_file("periodic", S_IRUGO, root, ohci,
&debug_periodic_fops);
debugfs_create_file("registers", S_IRUGO, root, ohci,
&debug_registers_fops);
ohci_dbg (ohci, "created debug files\n");
}
static inline void remove_debug_files (struct ohci_hcd *ohci)
{
debugfs_remove_recursive(ohci->debug_dir);
}
/*-------------------------------------------------------------------------*/
| linux-master | drivers/usb/host/ohci-dbg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* driver for NXP USB Host devices
*
* Currently supported OHCI host devices:
* - NXP LPC32xx
*
* Authors: Dmitry Chigirev <[email protected]>
* Vitaly Wool <[email protected]>
*
* register initialization is based on code examples provided by Philips
* Copyright (c) 2005 Koninklijke Philips Electronics N.V.
*
 * NOTE: This driver does not have suspend/resume functionality.
 * It is intended for engineering development purposes only.
*
* 2005-2006 (c) MontaVista Software, Inc.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1301.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#define USB_CONFIG_BASE 0x31020000
/* USB_OTG_STAT_CONTROL bit defines */
#define TRANSPARENT_I2C_EN (1 << 7)
#define HOST_EN (1 << 0)
/* On LPC32xx, those are undefined */
#ifndef start_int_set_falling_edge
#define start_int_set_falling_edge(irq)
#define start_int_set_rising_edge(irq)
#define start_int_ack(irq)
#define start_int_mask(irq)
#define start_int_umask(irq)
#endif
#define DRIVER_DESC "OHCI NXP driver"
static const char hcd_name[] = "ohci-nxp";
static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
static struct clk *usb_host_clk;
static void isp1301_configure_lpc32xx(void)
{
/* LPC32XX only supports DAT_SE0 USB mode */
/* This sequence is important */
/* Disable transparent UART mode first */
i2c_smbus_write_byte_data(isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
MC1_UART_EN);
i2c_smbus_write_byte_data(isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
~MC1_SPEED_REG);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_MODE_CONTROL_1, MC1_SPEED_REG);
i2c_smbus_write_byte_data(isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR),
~0);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_MODE_CONTROL_2,
(MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL));
i2c_smbus_write_byte_data(isp1301_i2c_client,
(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_MODE_CONTROL_1, MC1_DAT_SE0);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_OTG_CONTROL_1,
(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
i2c_smbus_write_byte_data(isp1301_i2c_client,
(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
(OTG1_DM_PULLUP | OTG1_DP_PULLUP));
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR,
~0);
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
printk(KERN_INFO "ISP1301 Vendor ID : 0x%04x\n",
i2c_smbus_read_word_data(isp1301_i2c_client, 0x00));
printk(KERN_INFO "ISP1301 Product ID : 0x%04x\n",
i2c_smbus_read_word_data(isp1301_i2c_client, 0x02));
printk(KERN_INFO "ISP1301 Version ID : 0x%04x\n",
i2c_smbus_read_word_data(isp1301_i2c_client, 0x14));
}
static void isp1301_configure(void)
{
isp1301_configure_lpc32xx();
}
static inline void isp1301_vbus_on(void)
{
i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1,
OTG1_VBUS_DRV);
}
static inline void isp1301_vbus_off(void)
{
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
OTG1_VBUS_DRV);
}
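/*
 * The ISP1301 maps each control register at two I2C addresses: a write to
 * the base address sets bits, while a write to
 * (base | ISP1301_I2C_REG_CLEAR_ADDR) clears them, which is why the
 * sequences above and below alternate between the two forms. A minimal
 * helper pair making the pattern explicit could look like this (the names
 * are hypothetical and not used elsewhere in this driver):
 */
static inline int isp1301_set_bits(u8 reg, u8 bits)
{
	return i2c_smbus_write_byte_data(isp1301_i2c_client, reg, bits);
}

static inline int isp1301_clear_bits(u8 reg, u8 bits)
{
	return i2c_smbus_write_byte_data(isp1301_i2c_client,
					 reg | ISP1301_I2C_REG_CLEAR_ADDR,
					 bits);
}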
static void ohci_nxp_start_hc(void)
{
void __iomem *usb_otg_stat_control = ioremap(USB_CONFIG_BASE + 0x110, 4);
unsigned long tmp;
if (WARN_ON(!usb_otg_stat_control))
return;
tmp = __raw_readl(usb_otg_stat_control) | HOST_EN;
__raw_writel(tmp, usb_otg_stat_control);
isp1301_vbus_on();
iounmap(usb_otg_stat_control);
}
static void ohci_nxp_stop_hc(void)
{
void __iomem *usb_otg_stat_control = ioremap(USB_CONFIG_BASE + 0x110, 4);
unsigned long tmp;
if (WARN_ON(!usb_otg_stat_control))
return;
isp1301_vbus_off();
tmp = __raw_readl(usb_otg_stat_control) & ~HOST_EN;
__raw_writel(tmp, usb_otg_stat_control);
iounmap(usb_otg_stat_control);
}
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd = NULL;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
struct device_node *isp1301_node;
if (pdev->dev.of_node) {
isp1301_node = of_parse_phandle(pdev->dev.of_node,
"transceiver", 0);
} else {
isp1301_node = NULL;
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
of_node_put(isp1301_node);
if (!isp1301_i2c_client)
return -EPROBE_DEFER;
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto fail_disable;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
dev_err(&pdev->dev, "USB is disabled\n");
ret = -ENODEV;
goto fail_disable;
}
/* Enable USB host clock */
usb_host_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usb_host_clk)) {
dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
ret = PTR_ERR(usb_host_clk);
goto fail_disable;
}
ret = clk_prepare_enable(usb_host_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
goto fail_disable;
}
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
goto fail_hcd;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
		ret = irq;
goto fail_resource;
}
ohci_nxp_start_hc();
platform_set_drvdata(pdev, hcd);
dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0) {
device_wakeup_enable(hcd->self.controller);
return ret;
}
ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
fail_hcd:
clk_disable_unprepare(usb_host_clk);
fail_disable:
isp1301_i2c_client = NULL;
return ret;
}
static void ohci_hcd_nxp_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:usb-ohci");
#ifdef CONFIG_OF
static const struct of_device_id ohci_hcd_nxp_match[] = {
{ .compatible = "nxp,ohci-nxp" },
{},
};
MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
#endif
static struct platform_driver ohci_hcd_nxp_driver = {
.driver = {
.name = "usb-ohci",
.of_match_table = of_match_ptr(ohci_hcd_nxp_match),
},
.probe = ohci_hcd_nxp_probe,
.remove_new = ohci_hcd_nxp_remove,
};
static int __init ohci_nxp_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_nxp_hc_driver, NULL);
return platform_driver_register(&ohci_hcd_nxp_driver);
}
module_init(ohci_nxp_init);
static void __exit ohci_nxp_cleanup(void)
{
platform_driver_unregister(&ohci_hcd_nxp_driver);
}
module_exit(ohci_nxp_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/ohci-nxp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* TI DA8xx (OMAP-L1x) Bus Glue
*
* Derived from: ohci-omap.c and ohci-s3c2410.c
* Copyright (C) 2008-2009 MontaVista Software, Inc. <[email protected]>
*/
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/usb-davinci.h>
#include <linux/regulator/consumer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <asm/unaligned.h>
#include "ohci.h"
#define DRIVER_DESC "DA8XX"
#define DRV_NAME "ohci-da8xx"
static struct hc_driver __read_mostly ohci_da8xx_hc_driver;
static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength);
static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
struct da8xx_ohci_hcd {
struct usb_hcd *hcd;
struct clk *usb11_clk;
struct phy *usb11_phy;
struct regulator *vbus_reg;
struct notifier_block nb;
struct gpio_desc *oc_gpio;
};
#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
/* Over-current indicator change bitmask */
static volatile u16 ocic_mask;
static int ohci_da8xx_enable(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
int ret;
ret = clk_prepare_enable(da8xx_ohci->usb11_clk);
if (ret)
return ret;
ret = phy_init(da8xx_ohci->usb11_phy);
if (ret)
goto err_phy_init;
ret = phy_power_on(da8xx_ohci->usb11_phy);
if (ret)
goto err_phy_power_on;
return 0;
err_phy_power_on:
phy_exit(da8xx_ohci->usb11_phy);
err_phy_init:
clk_disable_unprepare(da8xx_ohci->usb11_clk);
return ret;
}
static void ohci_da8xx_disable(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
phy_power_off(da8xx_ohci->usb11_phy);
phy_exit(da8xx_ohci->usb11_phy);
clk_disable_unprepare(da8xx_ohci->usb11_clk);
}
static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
struct device *dev = hcd->self.controller;
int ret;
if (!da8xx_ohci->vbus_reg)
return 0;
if (on) {
ret = regulator_enable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return ret;
}
} else {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to disable regulator: %d\n", ret);
return ret;
}
}
return 0;
}
static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
return 1;
}
static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
unsigned int flags;
int ret;
if (da8xx_ohci->oc_gpio)
return gpiod_get_value_cansleep(da8xx_ohci->oc_gpio);
if (!da8xx_ohci->vbus_reg)
return 0;
ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags);
if (ret)
return ret;
if (flags & REGULATOR_ERROR_OVER_CURRENT)
return 1;
return 0;
}
static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
if (da8xx_ohci->vbus_reg)
return 1;
return 0;
}
static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
if (da8xx_ohci->oc_gpio)
return 1;
if (da8xx_ohci->vbus_reg)
return 1;
return 0;
}
static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
if (hub && hub->potpgt)
return 1;
return 0;
}
static int ohci_da8xx_regulator_event(struct notifier_block *nb,
unsigned long event, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci =
container_of(nb, struct da8xx_ohci_hcd, nb);
if (event & REGULATOR_EVENT_OVER_CURRENT) {
ocic_mask |= 1 << 1;
ohci_da8xx_set_power(da8xx_ohci->hcd, 0);
}
return 0;
}
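/*
 * The "1 << 1" above encodes port 1: in a USB hub status-change bitmap,
 * bit 0 refers to the hub itself and bit N to port N. This is also why
 * ohci_da8xx_hub_status_data() below tests ocic_mask & (1 << 1) and sets
 * buf[0] |= 1 << 1.
 */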
static irqreturn_t ohci_da8xx_oc_thread(int irq, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci = data;
struct device *dev = da8xx_ohci->hcd->self.controller;
int ret;
if (gpiod_get_value_cansleep(da8xx_ohci->oc_gpio) &&
da8xx_ohci->vbus_reg) {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret)
dev_err(dev, "Failed to disable regulator: %d\n", ret);
}
return IRQ_HANDLED;
}
static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
struct device *dev = hcd->self.controller;
int ret = 0;
if (!da8xx_ohci->oc_gpio && da8xx_ohci->vbus_reg) {
da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
&da8xx_ohci->nb);
}
if (ret)
dev_err(dev, "Failed to register notifier: %d\n", ret);
return ret;
}
static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int result;
u32 rh_a;
dev_dbg(dev, "starting USB controller\n");
result = ohci_da8xx_enable(hcd);
if (result < 0)
return result;
/*
	 * The DA8xx has only 1 port connected to the pins, but the HC root
	 * hub register A reports 2 ports, thus we'll have to override it...
*/
ohci->num_ports = 1;
result = ohci_setup(hcd);
if (result < 0) {
ohci_da8xx_disable(hcd);
return result;
}
/*
* Since we're providing a board-specific root hub port power control
* and over-current reporting, we have to override the HC root hub A
	 * register's default value, so that ohci_hub_control() can return
	 * the correct hub descriptor.
*/
rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
if (ohci_da8xx_has_set_power(hcd)) {
rh_a &= ~RH_A_NPS;
rh_a |= RH_A_PSM;
}
if (ohci_da8xx_has_oci(hcd)) {
rh_a &= ~RH_A_NOCP;
rh_a |= RH_A_OCPM;
}
if (ohci_da8xx_has_potpgt(hcd)) {
rh_a &= ~RH_A_POTPGT;
rh_a |= hub->potpgt << 24;
}
ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
return result;
}
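/*
 * Note on the POTPGT field above: per the OHCI spec, roothub register A
 * bits 31:24 hold the power-on-to-power-good time in units of 2 ms, so a
 * board supplying hub->potpgt = 8 advertises a 16 ms settling time.
 * A quick illustrative check:
 *
 *	u32 rh_a = 8 << 24;
 *	unsigned int potpgt_ms = ((rh_a >> 24) & 0xff) * 2;	/* 16 ms */
 */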
/*
* Update the status data from the hub with the over-current indicator change.
*/
static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
{
int length = orig_ohci_hub_status_data(hcd, buf);
/* See if we have OCIC bit set on port 1 */
if (ocic_mask & (1 << 1)) {
		dev_dbg(hcd->self.controller,
			"over-current indicator change on port 1\n");
if (!length)
length = 1;
buf[0] |= 1 << 1;
}
return length;
}
/*
* Look at the control requests to the root hub and see if we need to override.
*/
static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct device *dev = hcd->self.controller;
int temp;
switch (typeReq) {
case GetPortStatus:
/* Check the port number */
if (wIndex != 1)
break;
dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
/* The port power status (PPS) bit defaults to 1 */
if (!ohci_da8xx_get_power(hcd))
temp &= ~RH_PS_PPS;
/* The port over-current indicator (POCI) bit is always 0 */
if (ohci_da8xx_get_oci(hcd) > 0)
temp |= RH_PS_POCI;
/* The over-current indicator change (OCIC) bit is 0 too */
if (ocic_mask & (1 << wIndex))
temp |= RH_PS_OCIC;
put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
return 0;
case SetPortFeature:
temp = 1;
goto check_port;
case ClearPortFeature:
temp = 0;
check_port:
/* Check the port number */
if (wIndex != 1)
break;
switch (wValue) {
case USB_PORT_FEAT_POWER:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex, "POWER");
return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex,
"C_OVER_CURRENT");
if (temp)
ocic_mask |= 1 << wIndex;
else
ocic_mask &= ~(1 << wIndex);
return 0;
}
}
return orig_ohci_hub_control(hcd, typeReq, wValue,
wIndex, buf, wLength);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OF
static const struct of_device_id da8xx_ohci_ids[] = {
{ .compatible = "ti,da830-ohci" },
{ }
};
MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
#endif
static int ohci_da8xx_probe(struct platform_device *pdev)
{
struct da8xx_ohci_hcd *da8xx_ohci;
struct device *dev = &pdev->dev;
int error, hcd_irq, oc_irq;
struct usb_hcd *hcd;
struct resource *mem;
hcd = usb_create_hcd(&ohci_da8xx_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
da8xx_ohci = to_da8xx_ohci(hcd);
da8xx_ohci->hcd = hcd;
da8xx_ohci->usb11_clk = devm_clk_get(dev, NULL);
if (IS_ERR(da8xx_ohci->usb11_clk)) {
error = PTR_ERR(da8xx_ohci->usb11_clk);
if (error != -EPROBE_DEFER)
dev_err(dev, "Failed to get clock.\n");
goto err;
}
da8xx_ohci->usb11_phy = devm_phy_get(dev, "usb-phy");
if (IS_ERR(da8xx_ohci->usb11_phy)) {
error = PTR_ERR(da8xx_ohci->usb11_phy);
if (error != -EPROBE_DEFER)
dev_err(dev, "Failed to get phy.\n");
goto err;
}
da8xx_ohci->vbus_reg = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(da8xx_ohci->vbus_reg)) {
error = PTR_ERR(da8xx_ohci->vbus_reg);
if (error == -ENODEV) {
da8xx_ohci->vbus_reg = NULL;
} else if (error == -EPROBE_DEFER) {
goto err;
} else {
dev_err(dev, "Failed to get regulator\n");
goto err;
}
}
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
if (IS_ERR(da8xx_ohci->oc_gpio)) {
error = PTR_ERR(da8xx_ohci->oc_gpio);
goto err;
}
if (da8xx_ohci->oc_gpio) {
oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
if (oc_irq < 0) {
error = oc_irq;
goto err;
}
error = devm_request_threaded_irq(dev, oc_irq, NULL,
ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"OHCI over-current indicator", da8xx_ohci);
if (error)
goto err;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(hcd->regs)) {
error = PTR_ERR(hcd->regs);
goto err;
}
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
hcd_irq = platform_get_irq(pdev, 0);
if (hcd_irq < 0) {
		error = hcd_irq;
goto err;
}
error = usb_add_hcd(hcd, hcd_irq, 0);
if (error)
goto err;
device_wakeup_enable(hcd->self.controller);
error = ohci_da8xx_register_notify(hcd);
if (error)
goto err_remove_hcd;
return 0;
err_remove_hcd:
usb_remove_hcd(hcd);
err:
usb_put_hcd(hcd);
return error;
}
static void ohci_da8xx_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
#ifdef CONFIG_PM
static int ohci_da8xx_suspend(struct platform_device *pdev,
pm_message_t message)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
bool do_wakeup = device_may_wakeup(&pdev->dev);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
ohci_da8xx_disable(hcd);
hcd->state = HC_STATE_SUSPENDED;
return ret;
}
static int ohci_da8xx_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_da8xx_enable(hcd);
if (ret)
return ret;
ohci_resume(hcd, false);
return 0;
}
#endif
static const struct ohci_driver_overrides da8xx_overrides __initconst = {
.reset = ohci_da8xx_reset,
.extra_priv_size = sizeof(struct da8xx_ohci_hcd),
};
/*
* Driver definition to register with platform structure.
*/
static struct platform_driver ohci_hcd_da8xx_driver = {
.probe = ohci_da8xx_probe,
.remove_new = ohci_da8xx_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_da8xx_suspend,
.resume = ohci_da8xx_resume,
#endif
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(da8xx_ohci_ids),
},
};
static int __init ohci_da8xx_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
/*
* The Davinci da8xx HW has some unusual quirks, which require
* da8xx-specific workarounds. We override certain hc_driver
* functions here to achieve that. We explicitly do not enhance
* ohci_driver_overrides to allow this more easily, since this
* is an unusual case, and we don't want to encourage others to
* override these functions by making it too easy.
*/
orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control;
orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data;
ohci_da8xx_hc_driver.hub_status_data = ohci_da8xx_hub_status_data;
ohci_da8xx_hc_driver.hub_control = ohci_da8xx_hub_control;
return platform_driver_register(&ohci_hcd_da8xx_driver);
}
module_init(ohci_da8xx_init);
static void __exit ohci_da8xx_exit(void)
{
platform_driver_unregister(&ohci_hcd_da8xx_driver);
}
module_exit(ohci_da8xx_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/usb/host/ohci-da8xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ehci driver
*
* Copyright 2007 Steven Brown <[email protected]>
* Copyright 2010-2012 Hauke Mehrtens <[email protected]>
* Copyright 2014 Hans de Goede <[email protected]>
*
* Derived from the ohci-ssb driver
* Copyright 2007 Michael Buesch <[email protected]>
*
* Derived from the EHCI-PCI driver
* Copyright (c) 2000-2004 by David Brownell
*
* Derived from the ohci-pci driver
* Copyright 1999 Roman Weissgaerber
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
*/
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/of.h>
#include "ehci.h"
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
#define BCM_USB_FIFO_THRESHOLD 0x00800040
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
struct reset_control *rsts;
bool reset_on_resume;
bool quirk_poll;
struct timer_list poll_timer;
struct delayed_work poll_work;
};
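/*
 * hcd_to_ehci_priv() above works because ehci_init_driver() (via
 * platform_overrides->extra_priv_size, see below) asks the core to
 * allocate sizeof(struct ehci_platform_priv) extra bytes at the tail of
 * struct ehci_hcd; ->priv is the flexible array covering that tail.
 * A minimal sketch of the round trip, assuming a valid hcd pointer:
 *
 *	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
 *
 *	priv->reset_on_resume = true;
 */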
static int ehci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
if (pdata->pre_setup) {
retval = pdata->pre_setup(hcd);
if (retval < 0)
return retval;
}
ehci->caps = hcd->regs + pdata->caps_offset;
retval = ehci_setup(hcd);
if (retval)
return retval;
if (pdata->no_io_watchdog)
ehci->need_io_watchdog = 0;
if (of_device_is_compatible(pdev->dev.of_node, "brcm,xgs-iproc-ehci"))
ehci_writel(ehci, BCM_USB_FIFO_THRESHOLD,
&ehci->regs->brcm_insnreg[1]);
return 0;
}
static int ehci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk, ret;
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
if (ret)
goto err_disable_clks;
}
return 0;
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
return ret;
}
static void ehci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
if (priv->clks[clk])
clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ehci_platform_hc_driver;
static const struct ehci_driver_overrides platform_overrides __initconst = {
.reset = ehci_platform_reset,
.extra_priv_size = sizeof(struct ehci_platform_priv),
};
static struct usb_ehci_pdata ehci_platform_defaults = {
.power_on = ehci_platform_power_on,
.power_suspend = ehci_platform_power_off,
.power_off = ehci_platform_power_off,
};
/**
* quirk_poll_check_port_status - Poll port_status if the device sticks
* @ehci: the ehci hcd pointer
*
 * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
 * after a full- or low-speed USB device is disconnected. To detect such
 * a situation, the driver has to poll the EHCI PORTSC register.
*
* Return: true if the controller's port_status indicated getting stuck
*/
static bool quirk_poll_check_port_status(struct ehci_hcd *ehci)
{
u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]);
if (!(port_status & PORT_OWNER) &&
(port_status & PORT_POWER) &&
!(port_status & PORT_CONNECT) &&
(port_status & PORT_LS_MASK))
return true;
return false;
}
/**
 * quirk_poll_rebind_companion - rebind companion device to recover
 * @ehci: the ehci hcd pointer
 *
 * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
 * after a full- or low-speed USB device is disconnected. To recover from
 * such a situation, the OHCI companion's functional state must be changed.
*/
static void quirk_poll_rebind_companion(struct ehci_hcd *ehci)
{
struct device *companion_dev;
struct usb_hcd *hcd = ehci_to_hcd(ehci);
companion_dev = usb_of_get_companion_dev(hcd->self.controller);
if (!companion_dev)
return;
device_release_driver(companion_dev);
if (device_attach(companion_dev) < 0)
ehci_err(ehci, "%s: failed\n", __func__);
put_device(companion_dev);
}
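/*
 * Unbinding and re-attaching the companion device forces a full reprobe
 * of the OHCI driver, which walks the controller back through reset to
 * the operational functional state and thereby clears the stuck condition
 * described above.
 */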
static void quirk_poll_work(struct work_struct *work)
{
struct ehci_platform_priv *priv =
container_of(to_delayed_work(work), struct ehci_platform_priv,
poll_work);
struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
priv);
/* check the status twice to reduce misdetection rate */
if (!quirk_poll_check_port_status(ehci))
return;
udelay(10);
if (!quirk_poll_check_port_status(ehci))
return;
ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__);
quirk_poll_rebind_companion(ehci);
}
static void quirk_poll_timer(struct timer_list *t)
{
struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer);
struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
priv);
if (quirk_poll_check_port_status(ehci)) {
/*
		 * Schedule the work to test the port again. Note that the
		 * status update may be delayed during reconnection, so use
		 * delayed work with a 5 ms delay to avoid misdetection.
*/
schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5));
}
mod_timer(&priv->poll_timer, jiffies + HZ);
}
static void quirk_poll_init(struct ehci_platform_priv *priv)
{
INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work);
timer_setup(&priv->poll_timer, quirk_poll_timer, 0);
mod_timer(&priv->poll_timer, jiffies + HZ);
}
static void quirk_poll_end(struct ehci_platform_priv *priv)
{
del_timer_sync(&priv->poll_timer);
cancel_delayed_work(&priv->poll_work);
}
static const struct soc_device_attribute quirk_poll_match[] = {
{ .family = "R-Car Gen3" },
{ /* sentinel*/ }
};
static int ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
/*
* Use reasonable defaults so platforms don't have to provide these
* with DT probing on ARM.
*/
if (!pdata)
pdata = &ehci_platform_defaults;
err = dma_coerce_mask_and_coherent(&dev->dev,
pdata->dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->dev, "Error: DMA mask configuration failed\n");
return err;
}
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(dev, hcd);
dev->dev.platform_data = pdata;
priv = hcd_to_ehci_priv(hcd);
ehci = hcd_to_ehci(hcd);
if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
ehci->big_endian_mmio = 1;
if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
ehci->big_endian_desc = 1;
if (of_property_read_bool(dev->dev.of_node, "big-endian"))
ehci->big_endian_mmio = ehci->big_endian_desc = 1;
if (of_property_read_bool(dev->dev.of_node, "spurious-oc"))
ehci->spurious_oc = 1;
if (of_property_read_bool(dev->dev.of_node,
"needs-reset-on-resume"))
priv->reset_on_resume = true;
if (of_property_read_bool(dev->dev.of_node,
"has-transaction-translator"))
hcd->has_tt = 1;
if (of_device_is_compatible(dev->dev.of_node,
"aspeed,ast2500-ehci") ||
of_device_is_compatible(dev->dev.of_node,
"aspeed,ast2600-ehci"))
ehci->is_aspeed = 1;
if (soc_device_match(quirk_poll_match))
priv->quirk_poll = true;
for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
err = PTR_ERR(priv->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->clks[clk] = NULL;
break;
}
}
}
priv->rsts = devm_reset_control_array_get_optional_shared(&dev->dev);
if (IS_ERR(priv->rsts)) {
err = PTR_ERR(priv->rsts);
goto err_put_clks;
}
err = reset_control_deassert(priv->rsts);
if (err)
goto err_put_clks;
if (pdata->big_endian_desc)
ehci->big_endian_desc = 1;
if (pdata->big_endian_mmio)
ehci->big_endian_mmio = 1;
if (pdata->has_tt)
hcd->has_tt = 1;
if (pdata->reset_on_resume)
priv->reset_on_resume = true;
if (pdata->spurious_oc)
ehci->spurious_oc = 1;
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
if (ehci->big_endian_mmio) {
dev_err(&dev->dev,
"Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
err = -EINVAL;
goto err_reset;
}
#endif
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
if (ehci->big_endian_desc) {
dev_err(&dev->dev,
"Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
err = -EINVAL;
goto err_reset;
}
#endif
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
goto err_reset;
}
hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
hcd->tpl_support = of_usb_host_tpl_support(dev->dev.of_node);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
device_wakeup_enable(hcd->self.controller);
device_enable_async_suspend(hcd->self.controller);
platform_set_drvdata(dev, hcd);
if (priv->quirk_poll)
quirk_poll_init(priv);
return err;
err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
reset_control_assert(priv->rsts);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
usb_put_hcd(hcd);
return err;
}
static void ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
if (priv->quirk_poll)
quirk_poll_end(priv);
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
reset_control_assert(priv->rsts);
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
usb_put_hcd(hcd);
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
}
static int __maybe_unused ehci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
if (priv->quirk_poll)
quirk_poll_end(priv);
ret = ehci_suspend(hcd, do_wakeup);
if (ret)
return ret;
if (pdata->power_suspend)
pdata->power_suspend(pdev);
return ret;
}
static int __maybe_unused ehci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
struct device *companion_dev;
if (pdata->power_on) {
int err = pdata->power_on(pdev);
if (err < 0)
return err;
}
companion_dev = usb_of_get_companion_dev(hcd->self.controller);
if (companion_dev) {
device_pm_wait_for_dev(hcd->self.controller, companion_dev);
put_device(companion_dev);
}
ehci_resume(hcd, priv->reset_on_resume);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
if (priv->quirk_poll)
quirk_poll_init(priv);
return 0;
}
static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
{ .compatible = "wm,prizm-ehci", },
{ .compatible = "generic-ehci", },
{ .compatible = "cavium,octeon-6335-ehci", },
{}
};
MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id ehci_acpi_match[] = {
{ "PNP0D20", 0 }, /* EHCI controller without debug */
{ }
};
MODULE_DEVICE_TABLE(acpi, ehci_acpi_match);
#endif
static const struct platform_device_id ehci_platform_table[] = {
{ "ehci-platform", 0 },
{ }
};
MODULE_DEVICE_TABLE(platform, ehci_platform_table);
static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend,
ehci_platform_resume);
static struct platform_driver ehci_platform_driver = {
.id_table = ehci_platform_table,
.probe = ehci_platform_probe,
.remove_new = ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-platform",
.pm = pm_ptr(&ehci_platform_pm_ops),
.of_match_table = vt8500_ehci_ids,
.acpi_match_table = ACPI_PTR(ehci_acpi_match),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
static int __init ehci_platform_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
module_init(ehci_platform_init);
static void __exit ehci_platform_cleanup(void)
{
platform_driver_unregister(&ehci_platform_driver);
}
module_exit(ehci_platform_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-platform.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
 * Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "fhci.h"
#define DUMMY_BD_BUFFER 0xdeadbeef
#define DUMMY2_BD_BUFFER 0xbaadf00d
/* Transaction Descriptors bits */
#define TD_R 0x8000 /* ready bit */
#define TD_W 0x2000 /* wrap bit */
#define TD_I 0x1000 /* interrupt on completion */
#define TD_L 0x0800 /* last */
#define TD_TC 0x0400 /* transmit CRC */
#define TD_CNF 0x0200 /* CNF - Must be always 1 */
#define TD_LSP 0x0100 /* Low-speed transaction */
#define TD_PID 0x00c0 /* packet id */
#define TD_RXER 0x0020 /* Rx error or not */
#define TD_NAK 0x0010 /* No ack. */
#define TD_STAL 0x0008 /* Stall received */
#define TD_TO 0x0004 /* time out */
#define TD_UN 0x0002 /* underrun */
#define TD_NO 0x0010 /* Rx Non Octet Aligned Packet */
#define TD_AB 0x0008 /* Frame Aborted */
#define TD_CR 0x0004 /* CRC Error */
#define TD_OV 0x0002 /* Overrun */
#define TD_BOV 0x0001 /* Buffer Overrun */
#define TD_ERRORS (TD_NAK | TD_STAL | TD_TO | TD_UN | \
TD_NO | TD_AB | TD_CR | TD_OV | TD_BOV)
#define TD_PID_DATA0 0x0080 /* Data 0 toggle */
#define TD_PID_DATA1 0x00c0 /* Data 1 toggle */
#define TD_PID_TOGGLE 0x00c0 /* Data 0/1 toggle mask */
#define TD_TOK_SETUP 0x0000
#define TD_TOK_OUT 0x4000
#define TD_TOK_IN 0x8000
#define TD_ISO 0x1000
#define TD_ENDP 0x0780
#define TD_ADDR 0x007f
#define TD_ENDP_SHIFT 7
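/*
 * Worked example of the packing used in the TD "extra" word: an IN token
 * to endpoint 2 of the device at address 5 would be encoded as
 *
 *	extra = (2 << TD_ENDP_SHIFT) | 5 | TD_TOK_IN
 *	      = 0x0100 | 0x0005 | 0x8000 = 0x8105
 *
 * (see fhci_host_transaction() below for the real composition).
 */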
struct usb_td {
__be16 status;
__be16 length;
__be32 buf_ptr;
__be16 extra;
__be16 reserved;
};
static struct usb_td __iomem *next_bd(struct usb_td __iomem *base,
struct usb_td __iomem *td,
u16 status)
{
if (status & TD_W)
return base;
else
return ++td;
}
void fhci_push_dummy_bd(struct endpoint *ep)
{
if (!ep->already_pushed_dummy_bd) {
u16 td_status = in_be16(&ep->empty_td->status);
out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER);
/* get the next TD in the ring */
ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
ep->already_pushed_dummy_bd = true;
}
}
/* destroy an USB endpoint */
void fhci_ep0_free(struct fhci_usb *usb)
{
struct endpoint *ep;
int size;
ep = usb->ep0;
if (ep) {
if (ep->td_base)
cpm_muram_free(cpm_muram_offset(ep->td_base));
if (kfifo_initialized(&ep->conf_frame_Q)) {
size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
struct packet *pkt = cq_get(&ep->conf_frame_Q);
kfree(pkt);
}
cq_delete(&ep->conf_frame_Q);
}
if (kfifo_initialized(&ep->empty_frame_Q)) {
size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
struct packet *pkt = cq_get(&ep->empty_frame_Q);
kfree(pkt);
}
cq_delete(&ep->empty_frame_Q);
}
if (kfifo_initialized(&ep->dummy_packets_Q)) {
size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
u8 *buff = cq_get(&ep->dummy_packets_Q);
kfree(buff);
}
cq_delete(&ep->dummy_packets_Q);
}
kfree(ep);
usb->ep0 = NULL;
}
}
/*
* create the endpoint structure
*
* arguments:
* usb A pointer to the data structure of the USB
* data_mem The data memory partition(BUS)
* ring_len TD ring length
*/
u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
u32 ring_len)
{
struct endpoint *ep;
struct usb_td __iomem *td;
unsigned long ep_offset;
char *err_for = "endpoint PRAM";
int ep_mem_size;
u32 i;
/* we need at least 3 TDs in the ring */
	if (ring_len < 3) {
fhci_err(usb->fhci, "illegal TD ring length parameters\n");
return -EINVAL;
}
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
ep_mem_size = ring_len * sizeof(*td) + sizeof(struct fhci_ep_pram);
ep_offset = cpm_muram_alloc(ep_mem_size, 32);
if (IS_ERR_VALUE(ep_offset))
goto err;
ep->td_base = cpm_muram_addr(ep_offset);
/* zero all queue pointers */
if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
cq_new(&ep->empty_frame_Q, ring_len + 2) ||
cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
err_for = "frame_queues";
goto err;
}
for (i = 0; i < (ring_len + 1); i++) {
struct packet *pkt;
u8 *buff;
pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt) {
err_for = "frame";
goto err;
}
buff = kmalloc_array(1028, sizeof(*buff), GFP_KERNEL);
if (!buff) {
kfree(pkt);
err_for = "buffer";
goto err;
}
cq_put(&ep->empty_frame_Q, pkt);
cq_put(&ep->dummy_packets_Q, buff);
}
/* we put the endpoint parameter RAM right behind the TD ring */
ep->ep_pram_ptr = (void __iomem *)ep->td_base + sizeof(*td) * ring_len;
ep->conf_td = ep->td_base;
ep->empty_td = ep->td_base;
ep->already_pushed_dummy_bd = false;
/* initialize tds */
td = ep->td_base;
for (i = 0; i < ring_len; i++) {
out_be32(&td->buf_ptr, 0);
out_be16(&td->status, 0);
out_be16(&td->length, 0);
out_be16(&td->extra, 0);
td++;
}
td--;
out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
out_be16(&td->length, 0);
/* endpoint structure has been created */
usb->ep0 = ep;
return 0;
err:
fhci_ep0_free(usb);
kfree(ep);
fhci_err(usb->fhci, "no memory for the %s\n", err_for);
return -ENOMEM;
}
/*
* initialize the endpoint register according to the given parameters
*
 * arguments:
 * usb		A pointer to the data structure of the USB
 * ep		A pointer to the endpoint structure
* data_mem The data memory partition(BUS)
*/
void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
enum fhci_mem_alloc data_mem)
{
u8 rt;
/* set the endpoint registers according to the endpoint */
out_be16(&usb->fhci->regs->usb_usep[0],
USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE);
out_be16(&usb->fhci->pram->ep_ptr[0],
cpm_muram_offset(ep->ep_pram_ptr));
rt = (BUS_MODE_BO_BE | BUS_MODE_GBL);
#ifdef MULTI_DATA_BUS
if (data_mem == MEM_SECONDARY)
rt |= BUS_MODE_DTB;
#endif
out_8(&ep->ep_pram_ptr->rx_func_code, rt);
out_8(&ep->ep_pram_ptr->tx_func_code, rt);
out_be16(&ep->ep_pram_ptr->rx_buff_len, 1028);
out_be16(&ep->ep_pram_ptr->rx_base, 0);
out_be16(&ep->ep_pram_ptr->tx_base, cpm_muram_offset(ep->td_base));
out_be16(&ep->ep_pram_ptr->rx_bd_ptr, 0);
out_be16(&ep->ep_pram_ptr->tx_bd_ptr, cpm_muram_offset(ep->td_base));
out_be32(&ep->ep_pram_ptr->tx_state, 0);
}
/*
 * Collect the submitted frames and inform the application about them.
 * It also prepares the TDs for new frames. If the Tx interrupts are
 * disabled, the application should call this routine to get confirmation
 * of the submitted frames. Otherwise, it is called from the interrupt
 * service routine during the Tx interrupt, and the application is
 * informed through the application-specific 'fhci_transaction_confirm'
 * routine.
*/
static void fhci_td_transaction_confirm(struct fhci_usb *usb)
{
struct endpoint *ep = usb->ep0;
struct packet *pkt;
struct usb_td __iomem *td;
u16 extra_data;
u16 td_status;
u16 td_length;
u32 buf;
/*
	 * Collect transmitted BDs from the chip. The routine clears all BDs
	 * whose R bit is 0 and whose data buffer pointer is non-NULL, i.e.
	 * BDs that point to an already transmitted data buffer.
*/
while (1) {
td = ep->conf_td;
td_status = in_be16(&td->status);
td_length = in_be16(&td->length);
buf = in_be32(&td->buf_ptr);
extra_data = in_be16(&td->extra);
/* check if the TD is empty */
if (!(!(td_status & TD_R) && ((td_status & ~TD_W) || buf)))
break;
/* check if it is a dummy buffer */
else if ((buf == DUMMY_BD_BUFFER) && !(td_status & ~TD_W))
break;
/* mark TD as empty */
clrbits16(&td->status, ~TD_W);
out_be16(&td->length, 0);
out_be32(&td->buf_ptr, 0);
out_be16(&td->extra, 0);
/* advance the TD pointer */
ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status);
/* check if it is a dummy buffer(type2) */
if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
continue;
pkt = cq_get(&ep->conf_frame_Q);
if (!pkt)
fhci_err(usb->fhci, "no frame to confirm\n");
if (td_status & TD_ERRORS) {
if (td_status & TD_RXER) {
if (td_status & TD_CR)
pkt->status = USB_TD_RX_ER_CRC;
else if (td_status & TD_AB)
pkt->status = USB_TD_RX_ER_BITSTUFF;
else if (td_status & TD_OV)
pkt->status = USB_TD_RX_ER_OVERUN;
else if (td_status & TD_BOV)
pkt->status = USB_TD_RX_DATA_OVERUN;
else if (td_status & TD_NO)
pkt->status = USB_TD_RX_ER_NONOCT;
else
fhci_err(usb->fhci, "illegal error "
"occurred\n");
} else if (td_status & TD_NAK)
pkt->status = USB_TD_TX_ER_NAK;
else if (td_status & TD_TO)
pkt->status = USB_TD_TX_ER_TIMEOUT;
else if (td_status & TD_UN)
pkt->status = USB_TD_TX_ER_UNDERUN;
else if (td_status & TD_STAL)
pkt->status = USB_TD_TX_ER_STALL;
else
fhci_err(usb->fhci, "illegal error occurred\n");
} else if ((extra_data & TD_TOK_IN) &&
pkt->len > td_length - CRC_SIZE) {
pkt->status = USB_TD_RX_DATA_UNDERUN;
}
if (extra_data & TD_TOK_IN)
pkt->len = td_length - CRC_SIZE;
else if (pkt->info & PKT_ZLP)
pkt->len = 0;
else
pkt->len = td_length;
fhci_transaction_confirm(usb, pkt);
}
}
/*
* Submitting a data frame to a specified endpoint of a USB device
* The frame is put in the driver's transmit queue for this endpoint
*
* Arguments:
* usb A pointer to the USB structure
* pkt A pointer to the user frame structure
 * trans_type	Transaction type - IN, OUT or SETUP
 * dest_addr	Device address - 0~127
 * dest_ep	Endpoint number of the device - 0~16
 * trans_mode	Pipe type - ISO, interrupt, bulk or control
 * dest_speed	USB speed - low speed or full speed
* data_toggle Data sequence toggle - 0 or 1
*/
u32 fhci_host_transaction(struct fhci_usb *usb,
struct packet *pkt,
enum fhci_ta_type trans_type,
u8 dest_addr,
u8 dest_ep,
enum fhci_tf_mode trans_mode,
enum fhci_speed dest_speed, u8 data_toggle)
{
struct endpoint *ep = usb->ep0;
struct usb_td __iomem *td;
u16 extra_data;
u16 td_status;
fhci_usb_disable_interrupt(usb);
/* start from the next BD that should be filled */
td = ep->empty_td;
td_status = in_be16(&td->status);
if (td_status & TD_R && in_be16(&td->length)) {
/* if the TD is not free */
fhci_usb_enable_interrupt(usb);
return -1;
}
/* get the next TD in the ring */
ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
fhci_usb_enable_interrupt(usb);
pkt->priv_data = td;
out_be32(&td->buf_ptr, virt_to_phys(pkt->data));
/* sets up transaction parameters - addr,endp,dir,and type */
extra_data = (dest_ep << TD_ENDP_SHIFT) | dest_addr;
switch (trans_type) {
case FHCI_TA_IN:
extra_data |= TD_TOK_IN;
break;
case FHCI_TA_OUT:
extra_data |= TD_TOK_OUT;
break;
case FHCI_TA_SETUP:
extra_data |= TD_TOK_SETUP;
break;
}
if (trans_mode == FHCI_TF_ISO)
extra_data |= TD_ISO;
out_be16(&td->extra, extra_data);
/* sets up the buffer descriptor */
td_status = ((td_status & TD_W) | TD_R | TD_L | TD_I | TD_CNF);
if (!(pkt->info & PKT_NO_CRC))
td_status |= TD_TC;
switch (trans_type) {
case FHCI_TA_IN:
if (data_toggle)
pkt->info |= PKT_PID_DATA1;
else
pkt->info |= PKT_PID_DATA0;
break;
default:
if (data_toggle) {
td_status |= TD_PID_DATA1;
pkt->info |= PKT_PID_DATA1;
} else {
td_status |= TD_PID_DATA0;
pkt->info |= PKT_PID_DATA0;
}
break;
}
if ((dest_speed == FHCI_LOW_SPEED) &&
(usb->port_status == FHCI_PORT_FULL))
td_status |= TD_LSP;
out_be16(&td->status, td_status);
/* set up buffer length */
if (trans_type == FHCI_TA_IN)
out_be16(&td->length, pkt->len + CRC_SIZE);
else
out_be16(&td->length, pkt->len);
/* put the frame to the confirmation queue */
cq_put(&ep->conf_frame_Q, pkt);
if (cq_howmany(&ep->conf_frame_Q) == 1)
out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
return 0;
}
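/*
 * Illustrative call, not taken from this driver: queueing a SETUP
 * transaction to endpoint 0 of the device at address 3, with DATA0
 * toggle. "pkt" is assumed to have been taken from the endpoint's
 * empty-frame queue and filled in by the caller; the mode and speed
 * values are left open since they depend on the pipe and device.
 *
 *	enum fhci_tf_mode mode = ...;	// e.g. a control pipe
 *	enum fhci_speed speed = ...;	// matches the device's speed
 *
 *	if (fhci_host_transaction(usb, pkt, FHCI_TA_SETUP, 3, 0,
 *				  mode, speed, 0))
 *		; // TD ring was full, retry later
 */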
/* Reset the Tx BD ring */
void fhci_flush_bds(struct fhci_usb *usb)
{
u16 td_status;
struct usb_td __iomem *td;
struct endpoint *ep = usb->ep0;
td = ep->td_base;
while (1) {
td_status = in_be16(&td->status);
in_be32(&td->buf_ptr);
in_be16(&td->extra);
/* if the TD is not empty - we'll confirm it as Timeout */
if (td_status & TD_R)
out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
/* if this TD is dummy - let's skip this TD */
else if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER)
out_be32(&td->buf_ptr, DUMMY2_BD_BUFFER);
/* if this is the last TD - break */
if (td_status & TD_W)
break;
td++;
}
fhci_td_transaction_confirm(usb);
td = ep->td_base;
do {
out_be16(&td->status, 0);
out_be16(&td->length, 0);
out_be32(&td->buf_ptr, 0);
out_be16(&td->extra, 0);
td++;
} while (!(in_be16(&td->status) & TD_W));
out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
out_be16(&td->length, 0);
out_be32(&td->buf_ptr, 0);
out_be16(&td->extra, 0);
out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
in_be16(&ep->ep_pram_ptr->tx_base));
out_be32(&ep->ep_pram_ptr->tx_state, 0);
out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
ep->empty_td = ep->td_base;
ep->conf_td = ep->td_base;
}
/*
* Flush all transmitted packets from TDs in the actual frame.
 * This routine is called when something is wrong with the controller and
 * we want to discard the current frame and start again on the next one.
*/
void fhci_flush_actual_frame(struct fhci_usb *usb)
{
u8 mode;
u16 tb_ptr;
u16 td_status;
u32 buf_ptr;
struct usb_td __iomem *td;
struct endpoint *ep = usb->ep0;
/* disable the USB controller */
mode = in_8(&usb->fhci->regs->usb_usmod);
out_8(&usb->fhci->regs->usb_usmod, mode & ~USB_MODE_EN);
tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
td = cpm_muram_addr(tb_ptr);
td_status = in_be16(&td->status);
buf_ptr = in_be32(&td->buf_ptr);
in_be16(&td->extra);
do {
if (td_status & TD_R) {
out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
} else {
out_be32(&td->buf_ptr, 0);
ep->already_pushed_dummy_bd = false;
break;
}
/* advance the TD pointer */
td = next_bd(ep->td_base, td, td_status);
td_status = in_be16(&td->status);
buf_ptr = in_be32(&td->buf_ptr);
in_be16(&td->extra);
} while ((td_status & TD_R) || buf_ptr);
fhci_td_transaction_confirm(usb);
out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
in_be16(&ep->ep_pram_ptr->tx_base));
out_be32(&ep->ep_pram_ptr->tx_state, 0);
out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
ep->empty_td = ep->td_base;
ep->conf_td = ep->td_base;
usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
/* reset the event register */
out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/* handles Tx confirm and Tx error interrupt */
void fhci_tx_conf_interrupt(struct fhci_usb *usb)
{
fhci_td_transaction_confirm(usb);
/*
	 * Schedule another transaction for this frame only if we have
	 * already confirmed all transactions in the frame.
*/
if (((fhci_get_sof_timer_count(usb) < usb->max_frame_usage) ||
(usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)) &&
(list_empty(&usb->actual_frame->tds_list)))
fhci_schedule_transactions(usb);
}
void fhci_host_transmit_actual_frame(struct fhci_usb *usb)
{
u16 tb_ptr;
u16 td_status;
struct usb_td __iomem *td;
struct endpoint *ep = usb->ep0;
tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
td = cpm_muram_addr(tb_ptr);
if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER) {
struct usb_td __iomem *old_td = td;
ep->already_pushed_dummy_bd = false;
td_status = in_be16(&td->status);
/* gets the next TD in the ring */
td = next_bd(ep->td_base, td, td_status);
tb_ptr = cpm_muram_offset(td);
out_be16(&ep->ep_pram_ptr->tx_bd_ptr, tb_ptr);
/* start transmit only if we have something in the TDs */
if (in_be16(&td->status) & TD_R)
out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) {
out_be32(&old_td->buf_ptr, 0);
ep->conf_td = next_bd(ep->td_base, ep->conf_td,
td_status);
} else {
out_be32(&old_td->buf_ptr, DUMMY2_BD_BUFFER);
}
}
}
| linux-master | drivers/usb/host/fhci-tds.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for HiSilicon STB SoCs
*
* Copyright (C) 2017-2018 HiSilicon Co., Ltd. http://www.hisilicon.com
*
* Authors: Jianguo Sun <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "xhci.h"
#define GTXTHRCFG 0xc108
#define GRXTHRCFG 0xc10c
#define REG_GUSB2PHYCFG0 0xc200
#define BIT_UTMI_8_16 BIT(3)
#define BIT_UTMI_ULPI BIT(4)
#define BIT_FREECLK_EXIST BIT(30)
#define REG_GUSB3PIPECTL0 0xc2c0
#define USB3_DEEMPHASIS_MASK GENMASK(2, 1)
#define USB3_DEEMPHASIS0 BIT(1)
#define USB3_TX_MARGIN1 BIT(4)
struct xhci_hcd_histb {
struct device *dev;
struct usb_hcd *hcd;
void __iomem *ctrl;
struct clk *bus_clk;
struct clk *utmi_clk;
struct clk *pipe_clk;
struct clk *suspend_clk;
struct reset_control *soft_reset;
};
static inline struct xhci_hcd_histb *hcd_to_histb(struct usb_hcd *hcd)
{
return dev_get_drvdata(hcd->self.controller);
}
static int xhci_histb_config(struct xhci_hcd_histb *histb)
{
struct device_node *np = histb->dev->of_node;
u32 regval;
if (of_property_match_string(np, "phys-names", "inno") >= 0) {
/* USB2 PHY chose ulpi 8bit interface */
regval = readl(histb->ctrl + REG_GUSB2PHYCFG0);
regval &= ~BIT_UTMI_ULPI;
regval &= ~(BIT_UTMI_8_16);
regval &= ~BIT_FREECLK_EXIST;
writel(regval, histb->ctrl + REG_GUSB2PHYCFG0);
}
if (of_property_match_string(np, "phys-names", "combo") >= 0) {
/*
* write 0x010c0012 to GUSB3PIPECTL0
		 * GUSB3PIPECTL0[5:3] = 010 : Tx Margin = 900 mV,
		 * decrease Tx voltage
		 * GUSB3PIPECTL0[2:1] = 01 : Tx Deemphasis = -3.5 dB,
		 * refer to the xHCI spec
*/
regval = readl(histb->ctrl + REG_GUSB3PIPECTL0);
regval &= ~USB3_DEEMPHASIS_MASK;
regval |= USB3_DEEMPHASIS0;
regval |= USB3_TX_MARGIN1;
writel(regval, histb->ctrl + REG_GUSB3PIPECTL0);
}
writel(0x23100000, histb->ctrl + GTXTHRCFG);
writel(0x23100000, histb->ctrl + GRXTHRCFG);
return 0;
}
static int xhci_histb_clks_get(struct xhci_hcd_histb *histb)
{
struct device *dev = histb->dev;
histb->bus_clk = devm_clk_get(dev, "bus");
if (IS_ERR(histb->bus_clk)) {
dev_err(dev, "fail to get bus clk\n");
return PTR_ERR(histb->bus_clk);
}
histb->utmi_clk = devm_clk_get(dev, "utmi");
if (IS_ERR(histb->utmi_clk)) {
dev_err(dev, "fail to get utmi clk\n");
return PTR_ERR(histb->utmi_clk);
}
histb->pipe_clk = devm_clk_get(dev, "pipe");
if (IS_ERR(histb->pipe_clk)) {
dev_err(dev, "fail to get pipe clk\n");
return PTR_ERR(histb->pipe_clk);
}
histb->suspend_clk = devm_clk_get(dev, "suspend");
if (IS_ERR(histb->suspend_clk)) {
dev_err(dev, "fail to get suspend clk\n");
return PTR_ERR(histb->suspend_clk);
}
return 0;
}
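/*
 * Illustrative alternative, not used by this driver: the four clocks could
 * also be acquired in one call with the bulk clock API, at the cost of the
 * per-clock error messages above:
 *
 *	struct clk_bulk_data clks[] = {
 *		{ .id = "bus" }, { .id = "utmi" },
 *		{ .id = "pipe" }, { .id = "suspend" },
 *	};
 *	int ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
 */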
static int xhci_histb_host_enable(struct xhci_hcd_histb *histb)
{
int ret;
ret = clk_prepare_enable(histb->bus_clk);
if (ret) {
dev_err(histb->dev, "failed to enable bus clk\n");
return ret;
}
ret = clk_prepare_enable(histb->utmi_clk);
if (ret) {
dev_err(histb->dev, "failed to enable utmi clk\n");
goto err_utmi_clk;
}
ret = clk_prepare_enable(histb->pipe_clk);
if (ret) {
dev_err(histb->dev, "failed to enable pipe clk\n");
goto err_pipe_clk;
}
ret = clk_prepare_enable(histb->suspend_clk);
if (ret) {
dev_err(histb->dev, "failed to enable suspend clk\n");
goto err_suspend_clk;
}
reset_control_deassert(histb->soft_reset);
return 0;
err_suspend_clk:
clk_disable_unprepare(histb->pipe_clk);
err_pipe_clk:
clk_disable_unprepare(histb->utmi_clk);
err_utmi_clk:
clk_disable_unprepare(histb->bus_clk);
return ret;
}
static void xhci_histb_host_disable(struct xhci_hcd_histb *histb)
{
reset_control_assert(histb->soft_reset);
clk_disable_unprepare(histb->suspend_clk);
clk_disable_unprepare(histb->pipe_clk);
clk_disable_unprepare(histb->utmi_clk);
clk_disable_unprepare(histb->bus_clk);
}
/* called during probe() after chip reset completes */
static int xhci_histb_setup(struct usb_hcd *hcd)
{
struct xhci_hcd_histb *histb = hcd_to_histb(hcd);
int ret;
if (usb_hcd_is_primary_hcd(hcd)) {
ret = xhci_histb_config(histb);
if (ret)
return ret;
}
return xhci_gen_setup(hcd, NULL);
}
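/*
 * Only the ->reset hook is overridden here; xhci_init_driver() fills in
 * the rest of xhci_histb_hc_driver from the generic xHCI core when the
 * module loads.
 */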
static const struct xhci_driver_overrides xhci_histb_overrides __initconst = {
.reset = xhci_histb_setup,
};
static struct hc_driver __read_mostly xhci_histb_hc_driver;
static int xhci_histb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct xhci_hcd_histb *histb;
const struct hc_driver *driver;
struct usb_hcd *hcd;
struct xhci_hcd *xhci;
struct resource *res;
int irq;
int ret = -ENODEV;
if (usb_disabled())
return -ENODEV;
driver = &xhci_histb_hc_driver;
histb = devm_kzalloc(dev, sizeof(*histb), GFP_KERNEL);
if (!histb)
return -ENOMEM;
histb->dev = dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
histb->ctrl = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(histb->ctrl))
return PTR_ERR(histb->ctrl);
ret = xhci_histb_clks_get(histb);
if (ret)
return ret;
histb->soft_reset = devm_reset_control_get(dev, "soft");
if (IS_ERR(histb->soft_reset)) {
dev_err(dev, "failed to get soft reset\n");
return PTR_ERR(histb->soft_reset);
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
device_enable_async_suspend(dev);
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto disable_pm;
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
goto disable_pm;
}
hcd->regs = histb->ctrl;
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
histb->hcd = hcd;
dev_set_drvdata(hcd->self.controller, histb);
ret = xhci_histb_host_enable(histb);
if (ret)
goto put_hcd;
xhci = hcd_to_xhci(hcd);
device_wakeup_enable(hcd->self.controller);
xhci->main_hcd = hcd;
xhci->shared_hcd = usb_create_shared_hcd(driver, dev, dev_name(dev),
hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto disable_host;
}
if (device_property_read_bool(dev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
if (device_property_read_bool(dev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
device_property_read_u32(dev, "imod-interval-ns",
&xhci->imod_interval);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
device_enable_async_suspend(dev);
pm_runtime_put_noidle(dev);
/*
	 * Prevent runtime PM from being on by default; users should enable
	 * runtime PM using power/control in sysfs.
*/
pm_runtime_forbid(dev);
return 0;
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_host:
xhci_histb_host_disable(histb);
put_hcd:
usb_put_hcd(hcd);
disable_pm:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
static void xhci_histb_remove(struct platform_device *dev)
{
struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
device_wakeup_disable(&dev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(shared_hcd);
xhci_histb_host_disable(histb);
usb_put_hcd(hcd);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
}
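/*
 * System sleep: when the device may not wake the system, the clocks and
 * soft reset are dropped across suspend and re-established before
 * xhci_resume(); with wakeup enabled the controller stays powered.
 */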
static int __maybe_unused xhci_histb_suspend(struct device *dev)
{
struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
ret = xhci_suspend(xhci, device_may_wakeup(dev));
if (!device_may_wakeup(dev))
xhci_histb_host_disable(histb);
return ret;
}
static int __maybe_unused xhci_histb_resume(struct device *dev)
{
struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (!device_may_wakeup(dev))
xhci_histb_host_enable(histb);
return xhci_resume(xhci, PMSG_RESUME);
}
static const struct dev_pm_ops xhci_histb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_histb_suspend, xhci_histb_resume)
};
#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &xhci_histb_pm_ops : NULL)
#ifdef CONFIG_OF
static const struct of_device_id histb_xhci_of_match[] = {
{ .compatible = "hisilicon,hi3798cv200-xhci"},
{ },
};
MODULE_DEVICE_TABLE(of, histb_xhci_of_match);
#endif
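/*
 * A minimal, hypothetical device-tree node for this controller (the
 * clock-names and reset-names must match the strings requested above;
 * addresses and phandles are placeholders, not board values):
 *
 *	usb@... {
 *		compatible = "hisilicon,hi3798cv200-xhci";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *		clock-names = "bus", "utmi", "pipe", "suspend";
 *		resets = <...>;
 *		reset-names = "soft";
 *	};
 */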
static struct platform_driver histb_xhci_driver = {
.probe = xhci_histb_probe,
.remove_new = xhci_histb_remove,
.driver = {
.name = "xhci-histb",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(histb_xhci_of_match),
},
};
MODULE_ALIAS("platform:xhci-histb");
static int __init xhci_histb_init(void)
{
xhci_init_driver(&xhci_histb_hc_driver, &xhci_histb_overrides);
return platform_driver_register(&histb_xhci_driver);
}
module_init(xhci_histb_init);
static void __exit xhci_histb_exit(void)
{
platform_driver_unregister(&histb_xhci_driver);
}
module_exit(xhci_histb_exit);
MODULE_DESCRIPTION("HiSilicon STB xHCI Host Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/xhci-histb.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
* (C) Copyright 2002 Hewlett-Packard Company
*
* SA1111 Bus Glue
*
* Written by Christopher Hoover <[email protected]>
* Based on fragments of previous driver by Russell King et al.
*
* This file is licenced under the GPL.
*/
#include <asm/mach-types.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif
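/* SA-1111 USB host register offsets and bit definitions */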
#define USB_STATUS 0x0118
#define USB_RESET 0x011c
#define USB_IRQTEST 0x0120
#define USB_RESET_FORCEIFRESET (1 << 0)
#define USB_RESET_FORCEHCRESET (1 << 1)
#define USB_RESET_CLKGENRESET (1 << 2)
#define USB_RESET_SIMSCALEDOWN (1 << 3)
#define USB_RESET_USBINTTEST (1 << 4)
#define USB_RESET_SLEEPSTBYEN (1 << 5)
#define USB_RESET_PWRSENSELOW (1 << 6)
#define USB_RESET_PWRCTRLLOW (1 << 7)
#define USB_STATUS_IRQHCIRMTWKUP (1 << 7)
#define USB_STATUS_IRQHCIBUFFACC (1 << 8)
#define USB_STATUS_NIRQHCIM (1 << 9)
#define USB_STATUS_NHCIMFCLR (1 << 10)
#define USB_STATUS_USBPWRSENSE (1 << 11)
#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
unsigned long status = readl_relaxed(hcd->regs + USB_STATUS);
printk(KERN_DEBUG "%s USB_STATUS = { %s%s%s%s%s}\n", label,
((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
#endif
static int ohci_sa1111_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
return ohci_init(ohci);
}
static int ohci_sa1111_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
ret = ohci_run(ohci);
if (ret < 0) {
ohci_err(ohci, "can't start\n");
ohci_stop(hcd);
}
return ret;
}
static const struct hc_driver ohci_sa1111_hc_driver = {
.description = hcd_name,
.product_desc = "SA-1111 OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_DMA | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.reset = ohci_sa1111_reset,
.start = ohci_sa1111_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
static int sa1111_start_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst = 0;
int ret;
dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
if (machine_is_assabet())
usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
/*
* Configure the power sense and control lines. Place the USB
* host controller in reset.
*/
writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
* Now, carefully enable the USB clock, and take
* the USB host controller out of reset.
*/
ret = sa1111_enable_device(dev);
if (ret == 0) {
udelay(11);
writel_relaxed(usb_rst, dev->mapbase + USB_RESET);
}
return ret;
}
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
/*
* Put the USB host controller into reset.
*/
usb_rst = readl_relaxed(dev->mapbase + USB_RESET);
writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
* Stop the USB clock.
*/
sa1111_disable_device(dev);
}
/**
* ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it.
*/
static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
int ret, irq;
if (usb_disabled())
return -ENODEV;
/*
* We don't call dma_set_mask_and_coherent() here because the
	 * DMA mask has already been appropriately set up by the core
* SA-1111 bus code (which includes bug workarounds.)
*/
hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = resource_size(&dev->res);
irq = sa1111_get_irq(dev, 1);
if (irq <= 0) {
ret = irq ? : -ENXIO;
goto err1;
}
/*
* According to the "Intel StrongARM SA-1111 Microprocessor Companion
* Chip Specification Update" (June 2000), erratum #7, there is a
	 * significant bug in the SA1111 SDRAM shared memory controller. For
	 * an access to a region of memory above 1MB relative to the bank base,
	 * it is important that address bit 10 _NOT_ be asserted. Depending
* on the configuration of the RAM, bit 10 may correspond to one
* of several different (processor-relative) address bits.
*
* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
* User's Guide" mentions that jumpers R51 and R52 control the
* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
* SDRAM bank 1 on Neponset). The default configuration selects
* Assabet, so any address in bank 1 is necessarily invalid.
*
* As a workaround, use a bounce buffer in addressable memory
* as local_mem, relying on ZONE_DMA to provide an area that
* fits within the above constraints.
*
* SZ_64K is an estimate for what size this might need.
*/
ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
if (ret)
goto err1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto err1;
}
hcd->regs = dev->mapbase;
ret = sa1111_start_hc(dev);
if (ret)
goto err2;
ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0) {
device_wakeup_enable(hcd->self.controller);
return ret;
}
sa1111_stop_hc(dev);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
}
/**
* ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
* @dev: USB Host Controller being removed
*
* Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
* the HCD's stop() method.
*/
static void ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
usb_remove_hcd(hcd);
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
static void ohci_hcd_sa1111_shutdown(struct device *_dev)
{
struct sa1111_dev *dev = to_sa1111_device(_dev);
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
hcd->driver->shutdown(hcd);
sa1111_stop_hc(dev);
}
}
static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
.owner = THIS_MODULE,
.shutdown = ohci_hcd_sa1111_shutdown,
},
.devid = SA1111_DEVID_USB,
.probe = ohci_hcd_sa1111_probe,
.remove = ohci_hcd_sa1111_remove,
};
| linux-master | drivers/usb/host/ohci-sa1111.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* SAMSUNG EXYNOS USB HOST OHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#define DRIVER_DESC "OHCI Exynos driver"
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
#define to_exynos_ohci(hcd) ((struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv))
#define PHY_NUMBER 3
struct exynos_ohci_hcd {
struct clk *clk;
struct device_node *of_node;
struct phy *phy[PHY_NUMBER];
bool legacy_phy;
};
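/*
 * PHY lookup: the generic "phys" property is tried first; if absent,
 * legacy per-port child nodes carrying a "reg" index are used and
 * ->legacy_phy is set so probe can hide the child nodes from the core.
 */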
static int exynos_ohci_get_phy(struct device *dev,
struct exynos_ohci_hcd *exynos_ohci)
{
struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
/* Get PHYs for the controller */
num_phys = of_count_phandle_with_args(dev->of_node, "phys",
"#phy-cells");
for (phy_number = 0; phy_number < num_phys; phy_number++) {
phy = devm_of_phy_get_by_index(dev, dev->of_node, phy_number);
if (IS_ERR(phy))
return PTR_ERR(phy);
exynos_ohci->phy[phy_number] = phy;
}
if (num_phys > 0)
return 0;
/* Get PHYs using legacy bindings */
for_each_available_child_of_node(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
if (IS_ERR(phy)) {
of_node_put(child);
return PTR_ERR(phy);
}
}
exynos_ohci->legacy_phy = true;
return 0;
}
static int exynos_ohci_phy_enable(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
int i;
int ret = 0;
for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
ret = phy_power_on(exynos_ohci->phy[i]);
if (ret)
for (i--; i >= 0; i--)
phy_power_off(exynos_ohci->phy[i]);
return ret;
}
static void exynos_ohci_phy_disable(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
int i;
for (i = 0; i < PHY_NUMBER; i++)
phy_power_off(exynos_ohci->phy[i]);
}
static int exynos_ohci_probe(struct platform_device *pdev)
{
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
struct resource *res;
int irq;
int err;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
	 * Once we move to full device tree support, this will go away.
*/
err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return err;
hcd = usb_create_hcd(&exynos_ohci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
exynos_ohci = to_exynos_ohci(hcd);
err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
if (err)
goto fail_clk;
exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ohci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(exynos_ohci->clk);
goto fail_clk;
}
err = clk_prepare_enable(exynos_ohci->clk);
if (err)
goto fail_clk;
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto fail_io;
}
platform_set_drvdata(pdev, hcd);
err = exynos_ohci_phy_enable(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "Failed to enable USB phy\n");
goto fail_io;
}
/*
* Workaround: reset of_node pointer to avoid conflict between legacy
* Exynos OHCI port subnodes and generic USB device bindings
*/
exynos_ohci->of_node = pdev->dev.of_node;
if (exynos_ohci->legacy_phy)
pdev->dev.of_node = NULL;
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
device_wakeup_enable(hcd->self.controller);
return 0;
fail_add_hcd:
exynos_ohci_phy_disable(&pdev->dev);
pdev->dev.of_node = exynos_ohci->of_node;
fail_io:
clk_disable_unprepare(exynos_ohci->clk);
fail_clk:
usb_put_hcd(hcd);
return err;
}
static void exynos_ohci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
pdev->dev.of_node = exynos_ohci->of_node;
usb_remove_hcd(hcd);
exynos_ohci_phy_disable(&pdev->dev);
clk_disable_unprepare(exynos_ohci->clk);
usb_put_hcd(hcd);
}
static void exynos_ohci_shutdown(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
#ifdef CONFIG_PM
static int exynos_ohci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
bool do_wakeup = device_may_wakeup(dev);
int rc = ohci_suspend(hcd, do_wakeup);
if (rc)
return rc;
exynos_ohci_phy_disable(dev);
clk_disable_unprepare(exynos_ohci->clk);
return 0;
}
static int exynos_ohci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
int ret;
clk_prepare_enable(exynos_ohci->clk);
ret = exynos_ohci_phy_enable(dev);
if (ret) {
dev_err(dev, "Failed to enable USB phy\n");
clk_disable_unprepare(exynos_ohci->clk);
return ret;
}
ohci_resume(hcd, false);
return 0;
}
#else
#define exynos_ohci_suspend NULL
#define exynos_ohci_resume NULL
#endif
static const struct ohci_driver_overrides exynos_overrides __initconst = {
.extra_priv_size = sizeof(struct exynos_ohci_hcd),
};
static const struct dev_pm_ops exynos_ohci_pm_ops = {
.suspend = exynos_ohci_suspend,
.resume = exynos_ohci_resume,
};
#ifdef CONFIG_OF
static const struct of_device_id exynos_ohci_match[] = {
{ .compatible = "samsung,exynos4210-ohci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ohci_match);
#endif
static struct platform_driver exynos_ohci_driver = {
.probe = exynos_ohci_probe,
.remove_new = exynos_ohci_remove,
.shutdown = exynos_ohci_shutdown,
.driver = {
.name = "exynos-ohci",
.pm = &exynos_ohci_pm_ops,
.of_match_table = of_match_ptr(exynos_ohci_match),
}
};
static int __init ohci_exynos_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ohci_driver);
}
module_init(ohci_exynos_init);
static void __exit ohci_exynos_cleanup(void)
{
platform_driver_unregister(&exynos_ohci_driver);
}
module_exit(ohci_exynos_cleanup);
MODULE_ALIAS("platform:exynos-ohci");
MODULE_AUTHOR("Jingoo Han <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/ohci-exynos.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 HCD (Host Controller Driver)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
* Portions Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
* Portions Copyright (C) 2004-2005 David Brownell
* Portions Copyright (C) 1999 Roman Weissgaerber
*
* Author : Yoshihiro Shimoda <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include "r8a66597.h"
MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:r8a66597_hcd");
#define DRIVER_VERSION "2009-05-26"
static const char hcd_name[] = "r8a66597_hcd";
static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
static int r8a66597_get_frame(struct usb_hcd *hcd);
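/*
 * enable_pipe_irq()/disable_pipe_irq() below mask the global BEMPE,
 * NRDYE and BRDYE bits in INTENB0 while the per-pipe enable register is
 * updated, so a pipe interrupt cannot fire mid-update.
 */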
/* this function must be called with interrupt disabled */
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
unsigned long reg)
{
u16 tmp;
tmp = r8a66597_read(r8a66597, INTENB0);
r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
r8a66597_bset(r8a66597, 1 << pipenum, reg);
r8a66597_write(r8a66597, tmp, INTENB0);
}
/* this function must be called with interrupt disabled */
static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
unsigned long reg)
{
u16 tmp;
tmp = r8a66597_read(r8a66597, INTENB0);
r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
r8a66597_bclr(r8a66597, 1 << pipenum, reg);
r8a66597_write(r8a66597, tmp, INTENB0);
}
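/*
 * DEVADD field layout, as encoded below: upstream hub address at bit 11,
 * hub port at bit 8, USB speed at bit 6 and the root port select in
 * bit 0.
 */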
static void set_devadd_reg(struct r8a66597 *r8a66597, u8 r8a66597_address,
u16 usbspd, u8 upphub, u8 hubport, int port)
{
u16 val;
unsigned long devadd_reg = get_devadd_addr(r8a66597_address);
val = (upphub << 11) | (hubport << 8) | (usbspd << 6) | (port & 0x0001);
r8a66597_write(r8a66597, val, devadd_reg);
}
static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
{
u16 tmp;
int i = 0;
if (r8a66597->pdata->on_chip) {
clk_prepare_enable(r8a66597->clk);
do {
r8a66597_write(r8a66597, SCKE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
if (i++ > 1000) {
printk(KERN_ERR "r8a66597: reg access fail.\n");
return -ENXIO;
}
} while ((tmp & SCKE) != SCKE);
r8a66597_write(r8a66597, 0x04, 0x02);
} else {
do {
r8a66597_write(r8a66597, USBE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
if (i++ > 1000) {
printk(KERN_ERR "r8a66597: reg access fail.\n");
return -ENXIO;
}
} while ((tmp & USBE) != USBE);
r8a66597_bclr(r8a66597, USBE, SYSCFG0);
r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
XTAL, SYSCFG0);
i = 0;
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
do {
msleep(1);
tmp = r8a66597_read(r8a66597, SYSCFG0);
if (i++ > 500) {
printk(KERN_ERR "r8a66597: reg access fail.\n");
return -ENXIO;
}
} while ((tmp & SCKE) != SCKE);
}
return 0;
}
static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
{
r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
udelay(1);
if (r8a66597->pdata->on_chip) {
clk_disable_unprepare(r8a66597->clk);
} else {
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
r8a66597_bclr(r8a66597, USBE, SYSCFG0);
}
}
static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port)
{
u16 val;
val = port ? DRPD : DCFM | DRPD;
r8a66597_bset(r8a66597, val, get_syscfg_reg(port));
r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port));
r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR, get_dmacfg_reg(port));
r8a66597_bclr(r8a66597, DTCHE, get_intenb_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
}
static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
{
u16 val, tmp;
r8a66597_write(r8a66597, 0, get_intenb_reg(port));
r8a66597_write(r8a66597, 0, get_intsts_reg(port));
r8a66597_port_power(r8a66597, port, 0);
do {
tmp = r8a66597_read(r8a66597, SOFCFG) & EDGESTS;
udelay(640);
} while (tmp == EDGESTS);
val = port ? DRPD : DCFM | DRPD;
r8a66597_bclr(r8a66597, val, get_syscfg_reg(port));
r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
}
static int enable_controller(struct r8a66597 *r8a66597)
{
int ret, port;
u16 vif = r8a66597->pdata->vif ? LDRV : 0;
u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
ret = r8a66597_clock_enable(r8a66597);
if (ret < 0)
return ret;
r8a66597_bset(r8a66597, vif & LDRV, PINCFG);
r8a66597_bset(r8a66597, USBE, SYSCFG0);
r8a66597_bset(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
r8a66597_bset(r8a66597, irq_sense & INTL, SOFCFG);
r8a66597_bset(r8a66597, BRDY0, BRDYENB);
r8a66597_bset(r8a66597, BEMP0, BEMPENB);
r8a66597_bset(r8a66597, endian & BIGEND, CFIFOSEL);
r8a66597_bset(r8a66597, endian & BIGEND, D0FIFOSEL);
r8a66597_bset(r8a66597, endian & BIGEND, D1FIFOSEL);
r8a66597_bset(r8a66597, TRNENSEL, SOFCFG);
r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1);
for (port = 0; port < r8a66597->max_root_hub; port++)
r8a66597_enable_port(r8a66597, port);
return 0;
}
static void disable_controller(struct r8a66597 *r8a66597)
{
int port;
/* disable interrupts */
r8a66597_write(r8a66597, 0, INTENB0);
r8a66597_write(r8a66597, 0, INTENB1);
r8a66597_write(r8a66597, 0, BRDYENB);
r8a66597_write(r8a66597, 0, BEMPENB);
r8a66597_write(r8a66597, 0, NRDYENB);
/* clear status */
r8a66597_write(r8a66597, 0, BRDYSTS);
r8a66597_write(r8a66597, 0, NRDYSTS);
r8a66597_write(r8a66597, 0, BEMPSTS);
for (port = 0; port < r8a66597->max_root_hub; port++)
r8a66597_disable_port(r8a66597, port);
r8a66597_clock_disable(r8a66597);
}
static int get_parent_r8a66597_address(struct r8a66597 *r8a66597,
struct usb_device *udev)
{
struct r8a66597_device *dev;
if (udev->parent && udev->parent->devnum != 1)
udev = udev->parent;
dev = dev_get_drvdata(&udev->dev);
if (dev)
return dev->address;
else
return 0;
}
static int is_child_device(char *devpath)
{
return (devpath[2] ? 1 : 0);
}
static int is_hub_limit(char *devpath)
{
return ((strlen(devpath) >= 4) ? 1 : 0);
}
static void get_port_number(struct r8a66597 *r8a66597,
char *devpath, u16 *root_port, u16 *hub_port)
{
if (root_port) {
*root_port = (devpath[0] & 0x0F) - 1;
if (*root_port >= r8a66597->max_root_hub)
printk(KERN_ERR "r8a66597: Illegal root port number.\n");
}
if (hub_port)
*hub_port = devpath[2] & 0x0F;
}
static u16 get_r8a66597_usb_speed(enum usb_device_speed speed)
{
u16 usbspd = 0;
switch (speed) {
case USB_SPEED_LOW:
usbspd = LSMODE;
break;
case USB_SPEED_FULL:
usbspd = FSMODE;
break;
case USB_SPEED_HIGH:
usbspd = HSMODE;
break;
default:
printk(KERN_ERR "r8a66597: unknown speed\n");
break;
}
return usbspd;
}
static void set_child_connect_map(struct r8a66597 *r8a66597, int address)
{
int idx;
idx = address / 32;
r8a66597->child_connect_map[idx] |= 1 << (address % 32);
}
static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
{
int idx;
idx = address / 32;
r8a66597->child_connect_map[idx] &= ~(1 << (address % 32));
}
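/*
 * Bind a pipe to its FIFO port: DMA channels 0 and 1 map to D0FIFO and
 * D1FIFO, anything else (R8A66597_PIPE_NO_DMA) falls back to the CPU's
 * CFIFO.
 */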
static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
{
u16 pipenum = pipe->info.pipenum;
const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */
dma_ch = R8A66597_PIPE_NO_DMA;
pipe->fifoaddr = fifoaddr[dma_ch];
pipe->fifosel = fifosel[dma_ch];
pipe->fifoctr = fifoctr[dma_ch];
if (pipenum == 0)
pipe->pipectr = DCPCTR;
else
pipe->pipectr = get_pipectr_addr(pipenum);
if (check_bulk_or_isoc(pipenum)) {
pipe->pipetre = get_pipetre_addr(pipenum);
pipe->pipetrn = get_pipetrn_addr(pipenum);
} else {
pipe->pipetre = 0;
pipe->pipetrn = 0;
}
}
static struct r8a66597_device *
get_urb_to_r8a66597_dev(struct r8a66597 *r8a66597, struct urb *urb)
{
if (usb_pipedevice(urb->pipe) == 0)
return &r8a66597->device0;
return dev_get_drvdata(&urb->dev->dev);
}
static int make_r8a66597_device(struct r8a66597 *r8a66597,
struct urb *urb, u8 addr)
{
struct r8a66597_device *dev;
int usb_address = urb->setup_packet[2]; /* urb->pipe is address 0 */
dev = kzalloc(sizeof(struct r8a66597_device), GFP_ATOMIC);
if (dev == NULL)
return -ENOMEM;
dev_set_drvdata(&urb->dev->dev, dev);
dev->udev = urb->dev;
dev->address = addr;
dev->usb_address = usb_address;
dev->state = USB_STATE_ADDRESS;
dev->ep_in_toggle = 0;
dev->ep_out_toggle = 0;
INIT_LIST_HEAD(&dev->device_list);
list_add_tail(&dev->device_list, &r8a66597->child_device);
get_port_number(r8a66597, urb->dev->devpath,
&dev->root_port, &dev->hub_port);
if (!is_child_device(urb->dev->devpath))
r8a66597->root_hub[dev->root_port].dev = dev;
set_devadd_reg(r8a66597, dev->address,
get_r8a66597_usb_speed(urb->dev->speed),
get_parent_r8a66597_address(r8a66597, urb->dev),
dev->hub_port, dev->root_port);
return 0;
}
/* this function must be called with interrupt disabled */
static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
{
u8 addr; /* R8A66597's address */
struct r8a66597_device *dev;
if (is_hub_limit(urb->dev->devpath)) {
dev_err(&urb->dev->dev, "External hub limit reached.\n");
return 0;
}
dev = get_urb_to_r8a66597_dev(r8a66597, urb);
if (dev && dev->state >= USB_STATE_ADDRESS)
return dev->address;
for (addr = 1; addr <= R8A66597_MAX_DEVICE; addr++) {
if (r8a66597->address_map & (1 << addr))
continue;
dev_dbg(&urb->dev->dev, "alloc_address: r8a66597_addr=%d\n", addr);
r8a66597->address_map |= 1 << addr;
if (make_r8a66597_device(r8a66597, urb, addr) < 0)
return 0;
return addr;
}
dev_err(&urb->dev->dev,
"cannot communicate with a USB device more than 10.(%x)\n",
r8a66597->address_map);
return 0;
}
/* this function must be called with interrupt disabled */
static void free_usb_address(struct r8a66597 *r8a66597,
struct r8a66597_device *dev, int reset)
{
int port;
if (!dev)
return;
dev_dbg(&dev->udev->dev, "free_addr: addr=%d\n", dev->address);
dev->state = USB_STATE_DEFAULT;
r8a66597->address_map &= ~(1 << dev->address);
dev->address = 0;
/*
	 * Only when resetting USB is it necessary to erase the drvdata. When
	 * a USB device behind a hub is disconnected, "dev->udev" has already
	 * been freed by usb_disconnect(), so we cannot access that data.
*/
if (reset)
dev_set_drvdata(&dev->udev->dev, NULL);
list_del(&dev->device_list);
kfree(dev);
for (port = 0; port < r8a66597->max_root_hub; port++) {
if (r8a66597->root_hub[port].dev == dev) {
r8a66597->root_hub[port].dev = NULL;
break;
}
}
}
static void r8a66597_reg_wait(struct r8a66597 *r8a66597, unsigned long reg,
u16 mask, u16 loop)
{
u16 tmp;
int i = 0;
do {
tmp = r8a66597_read(r8a66597, reg);
if (i++ > 1000000) {
printk(KERN_ERR "r8a66597: register%lx, loop %x "
"is timeout\n", reg, loop);
break;
}
ndelay(1);
} while ((tmp & mask) != loop);
}
/* this function must be called with interrupt disabled */
static void pipe_start(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
{
u16 tmp;
tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
	if ((pipe->info.pipenum != 0) && ((tmp & PID_STALL) != 0)) /* stall? */
r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
r8a66597_mdfy(r8a66597, PID_BUF, PID, pipe->pipectr);
}
/* this function must be called with interrupt disabled */
static void pipe_stop(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
{
u16 tmp;
tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
if ((tmp & PID_STALL11) != PID_STALL11) /* force stall? */
r8a66597_mdfy(r8a66597, PID_STALL, PID, pipe->pipectr);
r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
r8a66597_reg_wait(r8a66597, pipe->pipectr, PBUSY, 0);
}
/* this function must be called with interrupt disabled */
static void clear_all_buffer(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe)
{
if (!pipe || pipe->info.pipenum == 0)
return;
pipe_stop(r8a66597, pipe);
r8a66597_bset(r8a66597, ACLRM, pipe->pipectr);
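	/* dummy reads, apparently to let the ACLRM buffer clear settle */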
r8a66597_read(r8a66597, pipe->pipectr);
r8a66597_read(r8a66597, pipe->pipectr);
r8a66597_read(r8a66597, pipe->pipectr);
r8a66597_bclr(r8a66597, ACLRM, pipe->pipectr);
}
/* this function must be called with interrupt disabled */
static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe, int toggle)
{
if (toggle)
r8a66597_bset(r8a66597, SQSET, pipe->pipectr);
else
r8a66597_bset(r8a66597, SQCLR, pipe->pipectr);
}
static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
if (r8a66597->pdata->on_chip)
return MBW_32;
else
return MBW_16;
}
/* this function must be called with interrupt disabled */
static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum)
{
unsigned short mbw = mbw_value(r8a66597);
r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL);
r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum);
}
/* this function must be called with interrupt disabled */
static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe)
{
unsigned short mbw = mbw_value(r8a66597);
cfifo_change(r8a66597, 0);
r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D0FIFOSEL);
r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D1FIFOSEL);
r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, mbw | CURPIPE,
pipe->fifosel);
r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum);
}
static u16 r8a66597_get_pipenum(struct urb *urb, struct usb_host_endpoint *hep)
{
struct r8a66597_pipe *pipe = hep->hcpriv;
if (usb_pipeendpoint(urb->pipe) == 0)
return 0;
else
return pipe->info.pipenum;
}
static u16 get_urb_to_r8a66597_addr(struct r8a66597 *r8a66597, struct urb *urb)
{
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
return (usb_pipedevice(urb->pipe) == 0) ? 0 : dev->address;
}
static unsigned short *get_toggle_pointer(struct r8a66597_device *dev,
int urb_pipe)
{
if (!dev)
return NULL;
return usb_pipein(urb_pipe) ? &dev->ep_in_toggle : &dev->ep_out_toggle;
}
/* this function must be called with interrupt disabled */
static void pipe_toggle_set(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe,
struct urb *urb, int set)
{
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
unsigned char endpoint = usb_pipeendpoint(urb->pipe);
unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
if (!toggle)
return;
if (set)
*toggle |= 1 << endpoint;
else
*toggle &= ~(1 << endpoint);
}
/* this function must be called with interrupt disabled */
static void pipe_toggle_save(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe,
struct urb *urb)
{
if (r8a66597_read(r8a66597, pipe->pipectr) & SQMON)
pipe_toggle_set(r8a66597, pipe, urb, 1);
else
pipe_toggle_set(r8a66597, pipe, urb, 0);
}
/* this function must be called with interrupt disabled */
static void pipe_toggle_restore(struct r8a66597 *r8a66597,
struct r8a66597_pipe *pipe,
struct urb *urb)
{
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
unsigned char endpoint = usb_pipeendpoint(urb->pipe);
unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
if (!toggle)
return;
r8a66597_pipe_toggle(r8a66597, pipe, *toggle & (1 << endpoint));
}
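/*
 * Program the pipe hardware: PIPESEL selects the pipe after pulsing
 * ACLRM to clear its buffer, then PIPECFG (type/direction/endpoint),
 * PIPEBUF (buffer number and size), PIPEMAXP (device select and max
 * packet) and PIPEPERI (interval) are written.
 */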
/* this function must be called with interrupt disabled */
static void pipe_buffer_setting(struct r8a66597 *r8a66597,
struct r8a66597_pipe_info *info)
{
u16 val = 0;
if (info->pipenum == 0)
return;
r8a66597_bset(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
r8a66597_bclr(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
r8a66597_write(r8a66597, info->pipenum, PIPESEL);
if (!info->dir_in)
val |= R8A66597_DIR;
if (info->type == R8A66597_BULK && info->dir_in)
val |= R8A66597_DBLB | R8A66597_SHTNAK;
val |= info->type | info->epnum;
r8a66597_write(r8a66597, val, PIPECFG);
r8a66597_write(r8a66597, (info->buf_bsize << 10) | (info->bufnum),
PIPEBUF);
r8a66597_write(r8a66597, make_devsel(info->address) | info->maxpacket,
PIPEMAXP);
r8a66597_write(r8a66597, info->interval, PIPEPERI);
}
/* this function must be called with interrupt disabled */
static void pipe_setting(struct r8a66597 *r8a66597, struct r8a66597_td *td)
{
struct r8a66597_pipe_info *info;
struct urb *urb = td->urb;
if (td->pipenum > 0) {
info = &td->pipe->info;
cfifo_change(r8a66597, 0);
pipe_buffer_setting(r8a66597, info);
if (!usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)) &&
!usb_pipecontrol(urb->pipe)) {
r8a66597_pipe_toggle(r8a66597, td->pipe, 0);
pipe_toggle_set(r8a66597, td->pipe, urb, 0);
clear_all_buffer(r8a66597, td->pipe);
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 1);
}
pipe_toggle_restore(r8a66597, td->pipe, urb);
}
}
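/*
 * Fixed pipe-number pools per transfer type (bulk IN: 4; bulk OUT: 3 and
 * 5; interrupt IN: 6-8; interrupt OUT: 9; isoc IN: 2; isoc OUT: 1); the
 * least-used candidate is returned.
 */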
/* this function must be called with interrupt disabled */
static u16 get_empty_pipenum(struct r8a66597 *r8a66597,
struct usb_endpoint_descriptor *ep)
{
u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min;
memset(array, 0, sizeof(array));
switch (usb_endpoint_type(ep)) {
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(ep))
array[i++] = 4;
else {
array[i++] = 3;
array[i++] = 5;
}
break;
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(ep)) {
array[i++] = 6;
array[i++] = 7;
array[i++] = 8;
} else
array[i++] = 9;
break;
case USB_ENDPOINT_XFER_ISOC:
if (usb_endpoint_dir_in(ep))
array[i++] = 2;
else
array[i++] = 1;
break;
default:
printk(KERN_ERR "r8a66597: Illegal type\n");
return 0;
}
i = 1;
min = array[0];
while (array[i] != 0) {
if (r8a66597->pipe_cnt[min] > r8a66597->pipe_cnt[array[i]])
min = array[i];
i++;
}
return min;
}
static u16 get_r8a66597_type(__u8 type)
{
u16 r8a66597_type;
switch (type) {
case USB_ENDPOINT_XFER_BULK:
r8a66597_type = R8A66597_BULK;
break;
case USB_ENDPOINT_XFER_INT:
r8a66597_type = R8A66597_INT;
break;
case USB_ENDPOINT_XFER_ISOC:
r8a66597_type = R8A66597_ISO;
break;
default:
printk(KERN_ERR "r8a66597: Illegal type\n");
r8a66597_type = 0x0000;
break;
}
return r8a66597_type;
}
static u16 get_bufnum(u16 pipenum)
{
u16 bufnum = 0;
if (pipenum == 0)
bufnum = 0;
else if (check_bulk_or_isoc(pipenum))
		bufnum = 8 + (pipenum - 1) * R8A66597_BUF_BSIZE * 2;
else if (check_interrupt(pipenum))
bufnum = 4 + (pipenum - 6);
else
printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
return bufnum;
}
static u16 get_buf_bsize(u16 pipenum)
{
u16 buf_bsize = 0;
if (pipenum == 0)
buf_bsize = 3;
else if (check_bulk_or_isoc(pipenum))
buf_bsize = R8A66597_BUF_BSIZE - 1;
else if (check_interrupt(pipenum))
buf_bsize = 0;
else
printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
return buf_bsize;
}
/* this function must be called with interrupt disabled */
static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597,
struct r8a66597_device *dev,
struct r8a66597_pipe *pipe,
struct urb *urb)
{
int i;
struct r8a66597_pipe_info *info = &pipe->info;
unsigned short mbw = mbw_value(r8a66597);
	/* pipe DMA is only for external controllers */
if (r8a66597->pdata->on_chip)
return;
if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) {
for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) {
if ((r8a66597->dma_map & (1 << i)) != 0)
continue;
dev_info(&dev->udev->dev,
"address %d, EndpointAddress 0x%02x use "
"DMA FIFO\n", usb_pipedevice(urb->pipe),
info->dir_in ?
USB_ENDPOINT_DIR_MASK + info->epnum
: info->epnum);
r8a66597->dma_map |= 1 << i;
dev->dma_map |= 1 << i;
set_pipe_reg_addr(pipe, i);
cfifo_change(r8a66597, 0);
r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum,
mbw | CURPIPE, pipe->fifosel);
r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE,
pipe->info.pipenum);
r8a66597_bset(r8a66597, BCLR, pipe->fifoctr);
break;
}
}
}
/* this function must be called with interrupt disabled */
static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
struct usb_host_endpoint *hep,
struct r8a66597_pipe_info *info)
{
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
struct r8a66597_pipe *pipe = hep->hcpriv;
dev_dbg(&dev->udev->dev, "enable_pipe:\n");
pipe->info = *info;
set_pipe_reg_addr(pipe, R8A66597_PIPE_NO_DMA);
r8a66597->pipe_cnt[pipe->info.pipenum]++;
dev->pipe_cnt[pipe->info.pipenum]++;
enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
}
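/*
 * Completion path: data for non-control IN transfers was copied from the
 * FIFO by the CPU, so flush the D-cache over the transfer buffer before
 * handing the URB back to the USB core.
 */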
static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
void *ptr;
for (ptr = urb->transfer_buffer;
ptr < urb->transfer_buffer + urb->transfer_buffer_length;
ptr += PAGE_SIZE)
flush_dcache_page(virt_to_page(ptr));
}
usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
spin_unlock(&r8a66597->lock);
usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
spin_lock(&r8a66597->lock);
}
/* this function must be called with interrupt disabled */
static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
{
struct r8a66597_td *td, *next;
struct urb *urb;
struct list_head *list = &r8a66597->pipe_queue[pipenum];
if (list_empty(list))
return;
list_for_each_entry_safe(td, next, list, queue) {
if (td->address != address)
continue;
urb = td->urb;
list_del(&td->queue);
kfree(td);
if (urb)
r8a66597_urb_done(r8a66597, urb, -ENODEV);
break;
}
}
/* this function must be called with interrupt disabled */
static void disable_r8a66597_pipe_all(struct r8a66597 *r8a66597,
struct r8a66597_device *dev)
{
int check_ep0 = 0;
u16 pipenum;
if (!dev)
return;
for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!dev->pipe_cnt[pipenum])
continue;
if (!check_ep0) {
check_ep0 = 1;
force_dequeue(r8a66597, 0, dev->address);
}
r8a66597->pipe_cnt[pipenum] -= dev->pipe_cnt[pipenum];
dev->pipe_cnt[pipenum] = 0;
force_dequeue(r8a66597, pipenum, dev->address);
}
dev_dbg(&dev->udev->dev, "disable_pipe\n");
r8a66597->dma_map &= ~(dev->dma_map);
dev->dma_map = 0;
}
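/*
 * Convert a descriptor bInterval into a PIPEPERI IITV value: high speed
 * stores the exponent directly (e.g. bInterval 4 -> IITV 3, i.e. 2^3
 * microframes), while full/low speed rounds down to the nearest
 * power-of-two frame count.
 */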
static u16 get_interval(struct urb *urb, __u8 interval)
{
u16 time = 1;
int i;
if (urb->dev->speed == USB_SPEED_HIGH) {
if (interval > IITV)
time = IITV;
else
time = interval ? interval - 1 : 0;
} else {
if (interval > 128) {
time = IITV;
} else {
/* calculate the nearest value for PIPEPERI */
for (i = 0; i < 7; i++) {
if ((1 << i) < interval &&
(1 << (i + 1) > interval))
time = 1 << i;
}
}
}
return time;
}
static unsigned long get_timer_interval(struct urb *urb, __u8 interval)
{
__u8 i;
unsigned long time = 1;
if (usb_pipeisoc(urb->pipe))
return 0;
if (get_r8a66597_usb_speed(urb->dev->speed) == HSMODE) {
for (i = 0; i < (interval - 1); i++)
time *= 2;
time = time * 125 / 1000; /* uSOF -> msec */
} else {
time = interval;
}
return time;
}
/* this function must be called with interrupt disabled */
static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
struct usb_host_endpoint *hep,
struct usb_endpoint_descriptor *ep)
{
struct r8a66597_pipe_info info;
info.pipenum = get_empty_pipenum(r8a66597, ep);
info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
info.epnum = usb_endpoint_num(ep);
info.maxpacket = usb_endpoint_maxp(ep);
info.type = get_r8a66597_type(usb_endpoint_type(ep));
info.bufnum = get_bufnum(info.pipenum);
info.buf_bsize = get_buf_bsize(info.pipenum);
if (info.type == R8A66597_BULK) {
info.interval = 0;
info.timer_interval = 0;
} else {
info.interval = get_interval(urb, ep->bInterval);
info.timer_interval = get_timer_interval(urb, ep->bInterval);
}
if (usb_endpoint_dir_in(ep))
info.dir_in = 1;
else
info.dir_in = 0;
enable_r8a66597_pipe(r8a66597, urb, hep, &info);
}
static void init_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
{
struct r8a66597_device *dev;
dev = get_urb_to_r8a66597_dev(r8a66597, urb);
dev->state = USB_STATE_CONFIGURED;
}
static void pipe_irq_enable(struct r8a66597 *r8a66597, struct urb *urb,
u16 pipenum)
{
if (pipenum == 0 && usb_pipeout(urb->pipe))
enable_irq_empty(r8a66597, pipenum);
else
enable_irq_ready(r8a66597, pipenum);
if (!usb_pipeisoc(urb->pipe))
enable_irq_nrdy(r8a66597, pipenum);
}
static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
disable_irq_ready(r8a66597, pipenum);
disable_irq_nrdy(r8a66597, pipenum);
}
static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
{
mod_timer(&r8a66597->rh_timer,
jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
}
static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
int connect)
{
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
rh->scount = R8A66597_MAX_SAMPLING;
if (connect)
rh->port |= USB_PORT_STAT_CONNECTION;
else
rh->port &= ~USB_PORT_STAT_CONNECTION;
rh->port |= USB_PORT_STAT_C_CONNECTION << 16;
r8a66597_root_hub_start_polling(r8a66597);
}
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
if (syssts == SE0) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
} else {
if (syssts == FS_JSTS)
r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port));
else if (syssts == LS_JSTS)
r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port));
if (r8a66597->bus_suspended)
usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
}
spin_unlock(&r8a66597->lock);
usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
spin_lock(&r8a66597->lock);
}
/* this function must be called with interrupt disabled */
static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
{
u16 speed = get_rh_usb_speed(r8a66597, port);
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED);
if (speed == HSMODE)
rh->port |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == LSMODE)
rh->port |= USB_PORT_STAT_LOW_SPEED;
rh->port &= ~USB_PORT_STAT_RESET;
rh->port |= USB_PORT_STAT_ENABLE;
}
/* this function must be called with interrupt disabled */
static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
{
struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev, 0);
start_root_hub_sampling(r8a66597, port, 0);
}
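/*
 * The 8-byte SETUP packet is loaded 16 bits at a time into the four
 * request registers starting at USBREQ; writing SUREQ to DCPCTR then
 * kicks off transmission of the SETUP transaction.
 */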
/* this function must be called with interrupt disabled */
static void prepare_setup_packet(struct r8a66597 *r8a66597,
struct r8a66597_td *td)
{
int i;
__le16 *p = (__le16 *)td->urb->setup_packet;
unsigned long setup_addr = USBREQ;
r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket,
DCPMAXP);
r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1);
for (i = 0; i < 4; i++) {
r8a66597_write(r8a66597, le16_to_cpu(p[i]), setup_addr);
setup_addr += 2;
}
r8a66597_write(r8a66597, SUREQ, DCPCTR);
}
/* this function must be called with interrupt disabled */
static void prepare_packet_read(struct r8a66597 *r8a66597,
struct r8a66597_td *td)
{
struct urb *urb = td->urb;
if (usb_pipecontrol(urb->pipe)) {
r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
if (urb->actual_length == 0) {
r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
r8a66597_write(r8a66597, BCLR, CFIFOCTR);
}
pipe_irq_disable(r8a66597, td->pipenum);
pipe_start(r8a66597, td->pipe);
pipe_irq_enable(r8a66597, urb, td->pipenum);
} else {
if (urb->actual_length == 0) {
pipe_irq_disable(r8a66597, td->pipenum);
pipe_setting(r8a66597, td);
pipe_stop(r8a66597, td->pipe);
r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
if (td->pipe->pipetre) {
r8a66597_write(r8a66597, TRCLR,
td->pipe->pipetre);
				r8a66597_write(r8a66597,
					       DIV_ROUND_UP(urb->transfer_buffer_length,
							    td->maxpacket),
					       td->pipe->pipetrn);
r8a66597_bset(r8a66597, TRENB,
td->pipe->pipetre);
}
pipe_start(r8a66597, td->pipe);
pipe_irq_enable(r8a66597, urb, td->pipenum);
}
}
}
/* this function must be called with interrupt disabled */
static void prepare_packet_write(struct r8a66597 *r8a66597,
struct r8a66597_td *td)
{
u16 tmp;
struct urb *urb = td->urb;
if (usb_pipecontrol(urb->pipe)) {
pipe_stop(r8a66597, td->pipe);
r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
if (urb->actual_length == 0) {
r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
r8a66597_write(r8a66597, BCLR, CFIFOCTR);
}
} else {
if (urb->actual_length == 0)
pipe_setting(r8a66597, td);
if (td->pipe->pipetre)
r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre);
}
r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
fifo_change_from_pipe(r8a66597, td->pipe);
tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
if (unlikely((tmp & FRDY) == 0))
pipe_irq_enable(r8a66597, urb, td->pipenum);
else
packet_write(r8a66597, td->pipenum);
pipe_start(r8a66597, td->pipe);
}
/* this function must be called with interrupt disabled */
static void prepare_status_packet(struct r8a66597 *r8a66597,
struct r8a66597_td *td)
{
struct urb *urb = td->urb;
r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
pipe_stop(r8a66597, td->pipe);
if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) {
r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
r8a66597_write(r8a66597, BCLR | BVAL, CFIFOCTR);
enable_irq_empty(r8a66597, 0);
} else {
r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
r8a66597_write(r8a66597, BCLR, CFIFOCTR);
enable_irq_ready(r8a66597, 0);
}
enable_irq_nrdy(r8a66597, 0);
pipe_start(r8a66597, td->pipe);
}
static int is_set_address(unsigned char *setup_packet)
{
if (((setup_packet[0] & USB_TYPE_MASK) == USB_TYPE_STANDARD) &&
setup_packet[1] == USB_REQ_SET_ADDRESS)
return 1;
else
return 0;
}
/* this function must be called with interrupt disabled */
static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
{
BUG_ON(!td);
switch (td->type) {
case USB_PID_SETUP:
if (is_set_address(td->urb->setup_packet)) {
td->set_address = 1;
td->urb->setup_packet[2] = alloc_usb_address(r8a66597,
td->urb);
if (td->urb->setup_packet[2] == 0)
return -EPIPE;
}
prepare_setup_packet(r8a66597, td);
break;
case USB_PID_IN:
prepare_packet_read(r8a66597, td);
break;
case USB_PID_OUT:
prepare_packet_write(r8a66597, td);
break;
case USB_PID_ACK:
prepare_status_packet(r8a66597, td);
break;
default:
printk(KERN_ERR "r8a66597: invalid type.\n");
break;
}
return 0;
}
static int check_transfer_finish(struct r8a66597_td *td, struct urb *urb)
{
if (usb_pipeisoc(urb->pipe)) {
if (urb->number_of_packets == td->iso_cnt)
return 1;
}
/* control or bulk or interrupt */
if ((urb->transfer_buffer_length <= urb->actual_length) ||
(td->short_packet) || (td->zero_packet))
return 1;
return 0;
}
/* this function must be called with interrupt disabled */
static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
{
unsigned long time;
BUG_ON(!td);
if (!list_empty(&r8a66597->pipe_queue[td->pipenum]) &&
!usb_pipecontrol(td->urb->pipe) && usb_pipein(td->urb->pipe)) {
r8a66597->timeout_map |= 1 << td->pipenum;
switch (usb_pipetype(td->urb->pipe)) {
case PIPE_INTERRUPT:
case PIPE_ISOCHRONOUS:
time = 30;
break;
default:
time = 50;
break;
}
mod_timer(&r8a66597->timers[td->pipenum].td,
jiffies + msecs_to_jiffies(time));
}
}
/* this function must be called with interrupt disabled */
static void finish_request(struct r8a66597 *r8a66597, struct r8a66597_td *td,
u16 pipenum, struct urb *urb, int status)
__releases(r8a66597->lock) __acquires(r8a66597->lock)
{
int restart = 0;
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
r8a66597->timeout_map &= ~(1 << pipenum);
if (likely(td)) {
if (td->set_address && (status != 0 || urb->unlinked))
r8a66597->address_map &= ~(1 << urb->setup_packet[2]);
pipe_toggle_save(r8a66597, td->pipe, urb);
list_del(&td->queue);
kfree(td);
}
if (!list_empty(&r8a66597->pipe_queue[pipenum]))
restart = 1;
if (likely(urb)) {
if (usb_pipeisoc(urb->pipe))
urb->start_frame = r8a66597_get_frame(hcd);
r8a66597_urb_done(r8a66597, urb, status);
}
if (restart) {
td = r8a66597_get_td(r8a66597, pipenum);
if (unlikely(!td))
return;
start_transfer(r8a66597, td);
set_td_timer(r8a66597, td);
}
}
static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
{
u16 tmp;
int rcv_len, bufsize, urb_len, size;
u16 *buf;
struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
struct urb *urb;
int finish = 0;
int status = 0;
if (unlikely(!td))
return;
urb = td->urb;
fifo_change_from_pipe(r8a66597, td->pipe);
tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
if (unlikely((tmp & FRDY) == 0)) {
pipe_stop(r8a66597, td->pipe);
pipe_irq_disable(r8a66597, pipenum);
printk(KERN_ERR "r8a66597: in fifo not ready (%d)\n", pipenum);
finish_request(r8a66597, td, pipenum, td->urb, -EPIPE);
return;
}
/* prepare parameters */
rcv_len = tmp & DTLN;
if (usb_pipeisoc(urb->pipe)) {
buf = (u16 *)(urb->transfer_buffer +
urb->iso_frame_desc[td->iso_cnt].offset);
urb_len = urb->iso_frame_desc[td->iso_cnt].length;
} else {
buf = (void *)urb->transfer_buffer + urb->actual_length;
urb_len = urb->transfer_buffer_length - urb->actual_length;
}
bufsize = min(urb_len, (int) td->maxpacket);
if (rcv_len <= bufsize) {
size = rcv_len;
} else {
size = bufsize;
status = -EOVERFLOW;
finish = 1;
}
/* update parameters */
urb->actual_length += size;
if (rcv_len == 0)
td->zero_packet = 1;
	if (rcv_len < bufsize)
		td->short_packet = 1;
if (usb_pipeisoc(urb->pipe)) {
urb->iso_frame_desc[td->iso_cnt].actual_length = size;
urb->iso_frame_desc[td->iso_cnt].status = status;
td->iso_cnt++;
finish = 0;
}
/* check transfer finish */
if (finish || check_transfer_finish(td, urb)) {
pipe_stop(r8a66597, td->pipe);
pipe_irq_disable(r8a66597, pipenum);
finish = 1;
}
/* read fifo */
if (urb->transfer_buffer) {
if (size == 0)
r8a66597_write(r8a66597, BCLR, td->pipe->fifoctr);
else
r8a66597_read_fifo(r8a66597, td->pipe->fifoaddr,
buf, size);
}
if (finish && pipenum != 0)
finish_request(r8a66597, td, pipenum, urb, status);
}
static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
{
u16 tmp;
int bufsize, size;
u16 *buf;
struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
struct urb *urb;
if (unlikely(!td))
return;
urb = td->urb;
fifo_change_from_pipe(r8a66597, td->pipe);
tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
if (unlikely((tmp & FRDY) == 0)) {
pipe_stop(r8a66597, td->pipe);
pipe_irq_disable(r8a66597, pipenum);
printk(KERN_ERR "r8a66597: out fifo not ready (%d)\n", pipenum);
finish_request(r8a66597, td, pipenum, urb, -EPIPE);
return;
}
/* prepare parameters */
bufsize = td->maxpacket;
if (usb_pipeisoc(urb->pipe)) {
buf = (u16 *)(urb->transfer_buffer +
urb->iso_frame_desc[td->iso_cnt].offset);
size = min(bufsize,
(int)urb->iso_frame_desc[td->iso_cnt].length);
} else {
buf = (u16 *)(urb->transfer_buffer + urb->actual_length);
size = min_t(u32, bufsize,
urb->transfer_buffer_length - urb->actual_length);
}
/* write fifo */
if (pipenum > 0)
r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS);
if (urb->transfer_buffer) {
r8a66597_write_fifo(r8a66597, td->pipe, buf, size);
if (!usb_pipebulk(urb->pipe) || td->maxpacket != size)
r8a66597_write(r8a66597, BVAL, td->pipe->fifoctr);
}
/* update parameters */
urb->actual_length += size;
if (usb_pipeisoc(urb->pipe)) {
urb->iso_frame_desc[td->iso_cnt].actual_length = size;
urb->iso_frame_desc[td->iso_cnt].status = 0;
td->iso_cnt++;
}
/* check transfer finish */
if (check_transfer_finish(td, urb)) {
disable_irq_ready(r8a66597, pipenum);
enable_irq_empty(r8a66597, pipenum);
if (!usb_pipeisoc(urb->pipe))
enable_irq_nrdy(r8a66597, pipenum);
} else
pipe_irq_enable(r8a66597, urb, pipenum);
}
static void check_next_phase(struct r8a66597 *r8a66597, int status)
{
struct r8a66597_td *td = r8a66597_get_td(r8a66597, 0);
struct urb *urb;
u8 finish = 0;
if (unlikely(!td))
return;
urb = td->urb;
switch (td->type) {
case USB_PID_IN:
case USB_PID_OUT:
if (check_transfer_finish(td, urb))
td->type = USB_PID_ACK;
break;
case USB_PID_SETUP:
if (urb->transfer_buffer_length == urb->actual_length)
td->type = USB_PID_ACK;
else if (usb_pipeout(urb->pipe))
td->type = USB_PID_OUT;
else
td->type = USB_PID_IN;
break;
case USB_PID_ACK:
finish = 1;
break;
}
if (finish || status != 0 || urb->unlinked)
finish_request(r8a66597, td, 0, urb, status);
else
start_transfer(r8a66597, td);
}
static int get_urb_error(struct r8a66597 *r8a66597, u16 pipenum)
{
struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
if (td) {
u16 pid = r8a66597_read(r8a66597, td->pipe->pipectr) & PID;
if (pid == PID_NAK)
return -ECONNRESET;
else
return -EPIPE;
}
return 0;
}
static void irq_pipe_ready(struct r8a66597 *r8a66597)
{
u16 check;
u16 pipenum;
u16 mask;
struct r8a66597_td *td;
mask = r8a66597_read(r8a66597, BRDYSTS)
& r8a66597_read(r8a66597, BRDYENB);
r8a66597_write(r8a66597, ~mask, BRDYSTS);
if (mask & BRDY0) {
td = r8a66597_get_td(r8a66597, 0);
if (td && td->type == USB_PID_IN)
packet_read(r8a66597, 0);
else
pipe_irq_disable(r8a66597, 0);
check_next_phase(r8a66597, 0);
}
for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
check = 1 << pipenum;
if (mask & check) {
td = r8a66597_get_td(r8a66597, pipenum);
if (unlikely(!td))
continue;
if (td->type == USB_PID_IN)
packet_read(r8a66597, pipenum);
else if (td->type == USB_PID_OUT)
packet_write(r8a66597, pipenum);
}
}
}
static void irq_pipe_empty(struct r8a66597 *r8a66597)
{
u16 tmp;
u16 check;
u16 pipenum;
u16 mask;
struct r8a66597_td *td;
mask = r8a66597_read(r8a66597, BEMPSTS)
& r8a66597_read(r8a66597, BEMPENB);
r8a66597_write(r8a66597, ~mask, BEMPSTS);
if (mask & BEMP0) {
cfifo_change(r8a66597, 0);
td = r8a66597_get_td(r8a66597, 0);
if (td && td->type != USB_PID_OUT)
disable_irq_empty(r8a66597, 0);
check_next_phase(r8a66597, 0);
}
for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
check = 1 << pipenum;
if (mask & check) {
struct r8a66597_td *td;
td = r8a66597_get_td(r8a66597, pipenum);
if (unlikely(!td))
continue;
tmp = r8a66597_read(r8a66597, td->pipe->pipectr);
if ((tmp & INBUFM) == 0) {
disable_irq_empty(r8a66597, pipenum);
pipe_irq_disable(r8a66597, pipenum);
finish_request(r8a66597, td, pipenum, td->urb,
0);
}
}
}
}
static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
{
u16 check;
u16 pipenum;
u16 mask;
int status;
mask = r8a66597_read(r8a66597, NRDYSTS)
& r8a66597_read(r8a66597, NRDYENB);
r8a66597_write(r8a66597, ~mask, NRDYSTS);
if (mask & NRDY0) {
cfifo_change(r8a66597, 0);
status = get_urb_error(r8a66597, 0);
pipe_irq_disable(r8a66597, 0);
check_next_phase(r8a66597, status);
}
for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
check = 1 << pipenum;
if (mask & check) {
struct r8a66597_td *td;
td = r8a66597_get_td(r8a66597, pipenum);
if (unlikely(!td))
continue;
status = get_urb_error(r8a66597, pipenum);
pipe_irq_disable(r8a66597, pipenum);
pipe_stop(r8a66597, td->pipe);
finish_request(r8a66597, td, pipenum, td->urb, status);
}
}
}
static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
u16 intsts0, intsts1, intsts2;
u16 intenb0, intenb1, intenb2;
u16 mask0, mask1, mask2;
int status;
spin_lock(&r8a66597->lock);
intsts0 = r8a66597_read(r8a66597, INTSTS0);
intsts1 = r8a66597_read(r8a66597, INTSTS1);
intsts2 = r8a66597_read(r8a66597, INTSTS2);
intenb0 = r8a66597_read(r8a66597, INTENB0);
intenb1 = r8a66597_read(r8a66597, INTENB1);
intenb2 = r8a66597_read(r8a66597, INTENB2);
mask2 = intsts2 & intenb2;
mask1 = intsts1 & intenb1;
mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY);
if (mask2) {
if (mask2 & ATTCH) {
r8a66597_write(r8a66597, ~ATTCH, INTSTS2);
r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
/* start usb bus sampling */
start_root_hub_sampling(r8a66597, 1, 1);
}
if (mask2 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS2);
r8a66597_bclr(r8a66597, DTCHE, INTENB2);
r8a66597_usb_disconnect(r8a66597, 1);
}
if (mask2 & BCHG) {
r8a66597_write(r8a66597, ~BCHG, INTSTS2);
r8a66597_bclr(r8a66597, BCHGE, INTENB2);
usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
}
}
if (mask1) {
if (mask1 & ATTCH) {
r8a66597_write(r8a66597, ~ATTCH, INTSTS1);
r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
/* start usb bus sampling */
start_root_hub_sampling(r8a66597, 0, 1);
}
if (mask1 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS1);
r8a66597_bclr(r8a66597, DTCHE, INTENB1);
r8a66597_usb_disconnect(r8a66597, 0);
}
if (mask1 & BCHG) {
r8a66597_write(r8a66597, ~BCHG, INTSTS1);
r8a66597_bclr(r8a66597, BCHGE, INTENB1);
usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
}
if (mask1 & SIGN) {
r8a66597_write(r8a66597, ~SIGN, INTSTS1);
status = get_urb_error(r8a66597, 0);
check_next_phase(r8a66597, status);
}
if (mask1 & SACK) {
r8a66597_write(r8a66597, ~SACK, INTSTS1);
check_next_phase(r8a66597, 0);
}
}
if (mask0) {
if (mask0 & BRDY)
irq_pipe_ready(r8a66597);
if (mask0 & BEMP)
irq_pipe_empty(r8a66597);
if (mask0 & NRDY)
irq_pipe_nrdy(r8a66597);
}
spin_unlock(&r8a66597->lock);
return IRQ_HANDLED;
}
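/*
 * Interrupt dispatch summary (added note, derived from the handler
 * above): INTSTS2/INTENB2 gate attach/detach/bus-change events for the
 * second root-hub port, INTSTS1/INTENB1 the same events for the first
 * port plus the control-pipe SIGN (setup error) and SACK (setup ACK)
 * completions, and INTSTS0/INTENB0 gate the per-pipe BRDY/BEMP/NRDY
 * sources that fan out through irq_pipe_ready(), irq_pipe_empty() and
 * irq_pipe_nrdy().
 */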
/* this function must be called with interrupt disabled */
static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
{
u16 tmp;
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
if (rh->port & USB_PORT_STAT_RESET) {
unsigned long dvstctr_reg = get_dvstctr_reg(port);
tmp = r8a66597_read(r8a66597, dvstctr_reg);
if ((tmp & USBRST) == USBRST) {
r8a66597_mdfy(r8a66597, UACT, USBRST | UACT,
dvstctr_reg);
r8a66597_root_hub_start_polling(r8a66597);
} else
r8a66597_usb_connect(r8a66597, port);
}
if (!(rh->port & USB_PORT_STAT_CONNECTION)) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
}
if (rh->scount > 0) {
tmp = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
if (tmp == rh->old_syssts) {
rh->scount--;
if (rh->scount == 0)
r8a66597_check_syssts(r8a66597, port, tmp);
else
r8a66597_root_hub_start_polling(r8a66597);
} else {
rh->scount = R8A66597_MAX_SAMPLING;
rh->old_syssts = tmp;
r8a66597_root_hub_start_polling(r8a66597);
}
}
}
static void r8a66597_interval_timer(struct timer_list *t)
{
struct r8a66597_timers *timers = from_timer(timers, t, interval);
struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td;
spin_lock_irqsave(&r8a66597->lock, flags);
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->interval_map & (1 << pipenum)))
continue;
if (timer_pending(&r8a66597->timers[pipenum].interval))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
if (td)
start_transfer(r8a66597, td);
}
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
static void r8a66597_td_timer(struct timer_list *t)
{
struct r8a66597_timers *timers = from_timer(timers, t, td);
struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td, *new_td = NULL;
struct r8a66597_pipe *pipe;
spin_lock_irqsave(&r8a66597->lock, flags);
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->timeout_map & (1 << pipenum)))
continue;
if (timer_pending(&r8a66597->timers[pipenum].td))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
if (!td) {
r8a66597->timeout_map &= ~(1 << pipenum);
continue;
}
if (td->urb->actual_length) {
set_td_timer(r8a66597, td);
break;
}
pipe = td->pipe;
pipe_stop(r8a66597, pipe);
/* Select a different address or endpoint */
new_td = td;
do {
list_move_tail(&new_td->queue,
&r8a66597->pipe_queue[pipenum]);
new_td = r8a66597_get_td(r8a66597, pipenum);
if (!new_td) {
new_td = td;
break;
}
} while (td != new_td && td->address == new_td->address &&
td->pipe->info.epnum == new_td->pipe->info.epnum);
start_transfer(r8a66597, new_td);
if (td == new_td)
r8a66597->timeout_map &= ~(1 << pipenum);
else
set_td_timer(r8a66597, new_td);
break;
}
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
static void r8a66597_timer(struct timer_list *t)
{
struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
unsigned long flags;
int port;
spin_lock_irqsave(&r8a66597->lock, flags);
for (port = 0; port < r8a66597->max_root_hub; port++)
r8a66597_root_hub_control(r8a66597, port);
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
static int check_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
{
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
if (dev && dev->address && dev->state != USB_STATE_CONFIGURED &&
(urb->dev->state == USB_STATE_CONFIGURED))
return 1;
else
return 0;
}
static int r8a66597_start(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
hcd->state = HC_STATE_RUNNING;
return enable_controller(r8a66597);
}
static void r8a66597_stop(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
disable_controller(r8a66597);
}
static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb)
{
unsigned int usb_address = usb_pipedevice(urb->pipe);
u16 root_port, hub_port;
if (usb_address == 0) {
get_port_number(r8a66597, urb->dev->devpath,
&root_port, &hub_port);
set_devadd_reg(r8a66597, 0,
get_r8a66597_usb_speed(urb->dev->speed),
get_parent_r8a66597_address(r8a66597, urb->dev),
hub_port, root_port);
}
}
static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
struct urb *urb,
struct usb_host_endpoint *hep)
{
struct r8a66597_td *td;
u16 pipenum;
td = kzalloc(sizeof(struct r8a66597_td), GFP_ATOMIC);
if (td == NULL)
return NULL;
pipenum = r8a66597_get_pipenum(urb, hep);
td->pipenum = pipenum;
td->pipe = hep->hcpriv;
td->urb = urb;
td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
td->maxpacket = usb_maxpacket(urb->dev, urb->pipe);
if (usb_pipecontrol(urb->pipe))
td->type = USB_PID_SETUP;
else if (usb_pipein(urb->pipe))
td->type = USB_PID_IN;
else
td->type = USB_PID_OUT;
INIT_LIST_HEAD(&td->queue);
return td;
}
static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct usb_host_endpoint *hep = urb->ep;
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td = NULL;
int ret, request = 0;
unsigned long flags;
spin_lock_irqsave(&r8a66597->lock, flags);
if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
ret = -ENODEV;
goto error_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto error_not_linked;
if (!hep->hcpriv) {
hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
GFP_ATOMIC);
if (!hep->hcpriv) {
ret = -ENOMEM;
goto error;
}
set_pipe_reg_addr(hep->hcpriv, R8A66597_PIPE_NO_DMA);
if (usb_pipeendpoint(urb->pipe))
init_pipe_info(r8a66597, urb, hep, &hep->desc);
}
if (unlikely(check_pipe_config(r8a66597, urb)))
init_pipe_config(r8a66597, urb);
set_address_zero(r8a66597, urb);
td = r8a66597_make_td(r8a66597, urb, hep);
if (td == NULL) {
ret = -ENOMEM;
goto error;
}
if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
request = 1;
list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
urb->hcpriv = td;
if (request) {
if (td->pipe->info.timer_interval) {
r8a66597->interval_map |= 1 << td->pipenum;
mod_timer(&r8a66597->timers[td->pipenum].interval,
jiffies + msecs_to_jiffies(
td->pipe->info.timer_interval));
} else {
ret = start_transfer(r8a66597, td);
if (ret < 0) {
list_del(&td->queue);
kfree(td);
}
}
} else
set_td_timer(r8a66597, td);
error:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
error_not_linked:
spin_unlock_irqrestore(&r8a66597->lock, flags);
return ret;
}
static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td;
unsigned long flags;
int rc;
spin_lock_irqsave(&r8a66597->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (urb->hcpriv) {
td = urb->hcpriv;
pipe_stop(r8a66597, td->pipe);
pipe_irq_disable(r8a66597, td->pipenum);
disable_irq_empty(r8a66597, td->pipenum);
finish_request(r8a66597, td, td->pipenum, urb, status);
}
done:
spin_unlock_irqrestore(&r8a66597->lock, flags);
return rc;
}
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
__acquires(r8a66597->lock)
__releases(r8a66597->lock)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
struct r8a66597_td *td;
struct urb *urb = NULL;
u16 pipenum;
unsigned long flags;
if (pipe == NULL)
return;
pipenum = pipe->info.pipenum;
spin_lock_irqsave(&r8a66597->lock, flags);
if (pipenum == 0) {
kfree(hep->hcpriv);
hep->hcpriv = NULL;
spin_unlock_irqrestore(&r8a66597->lock, flags);
return;
}
pipe_stop(r8a66597, pipe);
pipe_irq_disable(r8a66597, pipenum);
disable_irq_empty(r8a66597, pipenum);
td = r8a66597_get_td(r8a66597, pipenum);
if (td)
urb = td->urb;
finish_request(r8a66597, td, pipenum, urb, -ESHUTDOWN);
kfree(hep->hcpriv);
hep->hcpriv = NULL;
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
static int r8a66597_get_frame(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}
static void collect_usb_address_map(struct usb_device *udev, unsigned long *map)
{
int chix;
struct usb_device *childdev;
if (udev->state == USB_STATE_CONFIGURED &&
udev->parent && udev->parent->devnum > 1 &&
udev->parent->descriptor.bDeviceClass == USB_CLASS_HUB)
map[udev->devnum/32] |= (1 << (udev->devnum % 32));
usb_hub_for_each_child(udev, chix, childdev)
collect_usb_address_map(childdev, map);
}
/* this function must be called with interrupt disabled */
static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
int addr)
{
struct r8a66597_device *dev;
struct list_head *list = &r8a66597->child_device;
list_for_each_entry(dev, list, device_list) {
if (dev->usb_address != addr)
continue;
return dev;
}
printk(KERN_ERR "r8a66597: get_r8a66597_device fail.(%d)\n", addr);
return NULL;
}
static void update_usb_address_map(struct r8a66597 *r8a66597,
struct usb_device *root_hub,
unsigned long *map)
{
int i, j, addr;
unsigned long diff;
unsigned long flags;
for (i = 0; i < 4; i++) {
diff = r8a66597->child_connect_map[i] ^ map[i];
if (!diff)
continue;
for (j = 0; j < 32; j++) {
if (!(diff & (1 << j)))
continue;
addr = i * 32 + j;
if (map[i] & (1 << j))
set_child_connect_map(r8a66597, addr);
else {
struct r8a66597_device *dev;
spin_lock_irqsave(&r8a66597->lock, flags);
dev = get_r8a66597_device(r8a66597, addr);
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev, 0);
put_child_connect_map(r8a66597, addr);
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
}
}
}
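/*
 * Illustrative sketch (not part of the original driver): the XOR in
 * update_usb_address_map() above is the generic "diff two bitmaps"
 * idiom. A minimal standalone version of the same technique, assuming
 * the driver's four-word map layout with 32 used bits per word:
 */
static void __maybe_unused
example_for_each_changed_addr(const unsigned long *old_map,
			      const unsigned long *new_map,
			      void (*handler)(int addr, int connected))
{
	int i, j;

	for (i = 0; i < 4; i++) {
		unsigned long diff = old_map[i] ^ new_map[i];

		if (!diff)
			continue;
		for (j = 0; j < 32; j++) {
			if (!(diff & (1UL << j)))
				continue;
			/* a bit set in new_map means newly connected */
			handler(i * 32 + j, !!(new_map[i] & (1UL << j)));
		}
	}
}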
static void r8a66597_check_detect_child(struct r8a66597 *r8a66597,
struct usb_hcd *hcd)
{
struct usb_bus *bus;
unsigned long now_map[4];
memset(now_map, 0, sizeof(now_map));
mutex_lock(&usb_bus_idr_lock);
bus = idr_find(&usb_bus_idr, hcd->self.busnum);
if (bus && bus->root_hub) {
collect_usb_address_map(bus->root_hub, now_map);
update_usb_address_map(r8a66597, bus->root_hub, now_map);
}
mutex_unlock(&usb_bus_idr_lock);
}
static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
unsigned long flags;
int i;
r8a66597_check_detect_child(r8a66597, hcd);
spin_lock_irqsave(&r8a66597->lock, flags);
*buf = 0; /* initialize (no change) */
for (i = 0; i < r8a66597->max_root_hub; i++) {
if (r8a66597->root_hub[i].port & 0xffff0000)
*buf |= 1 << (i + 1);
}
spin_unlock_irqrestore(&r8a66597->lock, flags);
return (*buf != 0);
}
static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
struct usb_hub_descriptor *desc)
{
desc->bDescriptorType = USB_DT_HUB;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = r8a66597->max_root_hub;
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics =
cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM);
desc->u.hs.DeviceRemovable[0] =
((1 << r8a66597->max_root_hub) - 1) << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
}
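/*
 * Worked example (added note): with max_root_hub == 2 the expression
 * above evaluates to ((1 << 2) - 1) << 1 == 0b110, i.e. bits 1 and 2
 * set -- one bit per root port, with bit 0 left clear as the hub
 * descriptor convention reserves it.
 */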
static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
int ret;
int port = (wIndex & 0x00FF) - 1;
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long flags;
ret = 0;
spin_lock_irqsave(&r8a66597->lock, flags);
switch (typeReq) {
case ClearHubFeature:
case SetHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (wIndex > r8a66597->max_root_hub)
goto error;
if (wLength != 0)
goto error;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
rh->port &= ~USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_SUSPEND:
break;
case USB_PORT_FEAT_POWER:
r8a66597_port_power(r8a66597, port, 0);
break;
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_SUSPEND:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_RESET:
break;
default:
goto error;
}
rh->port &= ~(1 << wValue);
break;
case GetHubDescriptor:
r8a66597_hub_descriptor(r8a66597,
(struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
*buf = 0x00;
break;
case GetPortStatus:
if (wIndex > r8a66597->max_root_hub)
goto error;
*(__le32 *)buf = cpu_to_le32(rh->port);
break;
case SetPortFeature:
if (wIndex > r8a66597->max_root_hub)
goto error;
if (wLength != 0)
goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
break;
case USB_PORT_FEAT_POWER:
r8a66597_port_power(r8a66597, port, 1);
rh->port |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_RESET: {
struct r8a66597_device *dev = rh->dev;
rh->port |= USB_PORT_STAT_RESET;
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev, 1);
r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
get_dvstctr_reg(port));
mod_timer(&r8a66597->rh_timer,
jiffies + msecs_to_jiffies(50));
}
break;
default:
goto error;
}
rh->port |= 1 << wValue;
break;
default:
error:
ret = -EPIPE;
break;
}
spin_unlock_irqrestore(&r8a66597->lock, flags);
return ret;
}
#if defined(CONFIG_PM)
static int r8a66597_bus_suspend(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
int port;
dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
for (port = 0; port < r8a66597->max_root_hub; port++) {
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
if (!(rh->port & USB_PORT_STAT_ENABLE))
continue;
dev_dbg(&rh->dev->udev->dev, "suspend port = %d\n", port);
r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */
rh->port |= USB_PORT_STAT_SUSPEND;
if (rh->dev->udev->do_remote_wakeup) {
msleep(3); /* waiting last SOF */
r8a66597_bset(r8a66597, RWUPE, dvstctr_reg);
r8a66597_write(r8a66597, ~BCHG, get_intsts_reg(port));
r8a66597_bset(r8a66597, BCHGE, get_intenb_reg(port));
}
}
r8a66597->bus_suspended = 1;
return 0;
}
static int r8a66597_bus_resume(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
int port;
dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
for (port = 0; port < r8a66597->max_root_hub; port++) {
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
if (!(rh->port & USB_PORT_STAT_SUSPEND))
continue;
dev_dbg(&rh->dev->udev->dev, "resume port = %d\n", port);
rh->port &= ~USB_PORT_STAT_SUSPEND;
rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
msleep(USB_RESUME_TIMEOUT);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
}
return 0;
}
#else
#define r8a66597_bus_suspend NULL
#define r8a66597_bus_resume NULL
#endif
static const struct hc_driver r8a66597_hc_driver = {
.description = hcd_name,
.hcd_priv_size = sizeof(struct r8a66597),
.irq = r8a66597_irq,
/*
* generic hardware linkage
*/
.flags = HCD_USB2,
.start = r8a66597_start,
.stop = r8a66597_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = r8a66597_urb_enqueue,
.urb_dequeue = r8a66597_urb_dequeue,
.endpoint_disable = r8a66597_endpoint_disable,
/*
* periodic schedule support
*/
.get_frame_number = r8a66597_get_frame,
/*
* root hub support
*/
.hub_status_data = r8a66597_hub_status_data,
.hub_control = r8a66597_hub_control,
.bus_suspend = r8a66597_bus_suspend,
.bus_resume = r8a66597_bus_resume,
};
#if defined(CONFIG_PM)
static int r8a66597_suspend(struct device *dev)
{
struct r8a66597 *r8a66597 = dev_get_drvdata(dev);
int port;
dev_dbg(dev, "%s\n", __func__);
disable_controller(r8a66597);
for (port = 0; port < r8a66597->max_root_hub; port++) {
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
rh->port = 0x00000000;
}
return 0;
}
static int r8a66597_resume(struct device *dev)
{
struct r8a66597 *r8a66597 = dev_get_drvdata(dev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
dev_dbg(dev, "%s\n", __func__);
enable_controller(r8a66597);
usb_root_hub_lost_power(hcd->self.root_hub);
return 0;
}
static const struct dev_pm_ops r8a66597_dev_pm_ops = {
.suspend = r8a66597_suspend,
.resume = r8a66597_resume,
.poweroff = r8a66597_suspend,
.restore = r8a66597_resume,
};
#define R8A66597_DEV_PM_OPS (&r8a66597_dev_pm_ops)
#else /* if defined(CONFIG_PM) */
#define R8A66597_DEV_PM_OPS NULL
#endif
static void r8a66597_remove(struct platform_device *pdev)
{
struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
del_timer_sync(&r8a66597->rh_timer);
usb_remove_hcd(hcd);
iounmap(r8a66597->reg);
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
usb_put_hcd(hcd);
}
static int r8a66597_probe(struct platform_device *pdev)
{
char clk_name[8];
struct resource *res = NULL, *ires;
int irq = -1;
void __iomem *reg = NULL;
struct usb_hcd *hcd = NULL;
struct r8a66597 *r8a66597;
int ret = 0;
int i;
unsigned long irq_trigger;
if (usb_disabled())
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
dev_err(&pdev->dev, "platform_get_resource error.\n");
goto clean_up;
}
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!ires) {
ret = -ENODEV;
dev_err(&pdev->dev,
"platform_get_resource IORESOURCE_IRQ error.\n");
goto clean_up;
}
irq = ires->start;
irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
reg = ioremap(res->start, resource_size(res));
if (reg == NULL) {
ret = -ENOMEM;
dev_err(&pdev->dev, "ioremap error.\n");
goto clean_up;
}
if (pdev->dev.platform_data == NULL) {
dev_err(&pdev->dev, "no platform data\n");
ret = -ENODEV;
goto clean_up;
}
/* initialize hcd */
hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
if (!hcd) {
ret = -ENOMEM;
dev_err(&pdev->dev, "Failed to create hcd\n");
goto clean_up;
}
r8a66597 = hcd_to_r8a66597(hcd);
memset(r8a66597, 0, sizeof(struct r8a66597));
platform_set_drvdata(pdev, r8a66597);
r8a66597->pdata = dev_get_platdata(&pdev->dev);
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
if (r8a66597->pdata->on_chip) {
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
r8a66597->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(r8a66597->clk)) {
dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
clk_name);
ret = PTR_ERR(r8a66597->clk);
goto clean_up2;
}
r8a66597->max_root_hub = 1;
} else
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
ret = r8a66597_clock_enable(r8a66597);
if (ret < 0)
goto clean_up3;
disable_controller(r8a66597);
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
r8a66597->timers[i].r8a66597 = r8a66597;
timer_setup(&r8a66597->timers[i].td, r8a66597_td_timer, 0);
timer_setup(&r8a66597->timers[i].interval,
r8a66597_interval_timer, 0);
}
INIT_LIST_HEAD(&r8a66597->child_device);
hcd->rsrc_start = res->start;
hcd->has_tt = 1;
ret = usb_add_hcd(hcd, irq, irq_trigger);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
goto clean_up3;
}
device_wakeup_enable(hcd->self.controller);
return 0;
clean_up3:
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
clean_up2:
usb_put_hcd(hcd);
clean_up:
if (reg)
iounmap(reg);
return ret;
}
static struct platform_driver r8a66597_driver = {
.probe = r8a66597_probe,
.remove_new = r8a66597_remove,
.driver = {
.name = hcd_name,
.pm = R8A66597_DEV_PM_OPS,
},
};
module_platform_driver(r8a66597_driver);
| linux-master | drivers/usb/host/r8a66597-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ISP116x HCD (Host Controller Driver) for USB.
*
* Derived from the SL811 HCD, rewritten for ISP116x.
* Copyright (C) 2005 Olav Kongas <[email protected]>
*
* Portions:
* Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
* Copyright (C) 2004 David Brownell
*
* Periodic scheduling is based on Roman's OHCI code
* Copyright (C) 1999 Roman Weissgaerber
*
*/
/*
* The driver basically works. A number of people have used it with a range
* of devices.
*
* The driver passes all usbtests 1-14.
*
 * Suspending/resuming of the root hub via sysfs works, as does remote
 * wakeup. Suspending/resuming of the platform device works too.
 * Suspend/resume via the HCD operations vector is not implemented.
*
 * Iso transfer support is not implemented. Adding it would include
 * implementing recovery from failing to service the processed ITL
 * fifo ram in time, which would involve a chip reset.
*
* TODO:
 * + More testing of suspend/resume.
*/
/*
  ISP116x chips require certain delays between accesses to their
  registers. The following timing options exist.
  1. Configure your memory controller (the best)
  2. Implement a platform-specific delay function, possibly
     combined with configuring the memory controller; see
     include/linux/usb-isp116x.h for more info. Some broken
     memory controllers like the LH7A400 SMC need this. For
     that to work, also uncomment the USE_PLATFORM_DELAY
     macro below.
3. Use ndelay (easiest, poorest). For that, uncomment
the following USE_NDELAY macro.
*/
#define USE_PLATFORM_DELAY
//#define USE_NDELAY
//#define DEBUG
//#define VERBOSE
/* Transfer descriptors. See dump_ptd() for printout format */
//#define PTD_TRACE
/* enqueuing/finishing log of urbs */
//#define URB_TRACE
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/isp116x.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include "isp116x.h"
#define DRIVER_VERSION "03 Nov 2005"
#define DRIVER_DESC "ISP116x USB Host Controller Driver"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static const char hcd_name[] = "isp116x-hcd";
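/*
 * Illustrative sketch (not part of the original driver): boards taking
 * timing option 2 above supply a delay callback through struct
 * isp116x_platform_data (see include/linux/usb-isp116x.h). A
 * hypothetical board file might wire it up as below; the plain
 * ndelay() body is a placeholder, not a datasheet-derived value.
 */
static void __maybe_unused example_board_delay(struct device *dev, int delay)
{
	ndelay(delay);
}

static struct isp116x_platform_data __maybe_unused example_board_pdata = {
	.sel15Kres	= 1,
	.oc_enable	= 1,
	.delay		= example_board_delay,
};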
/*-----------------------------------------------------------------*/
/*
Write len bytes to fifo, pad till 32-bit boundary
*/
static void write_ptddata_to_fifo(struct isp116x *isp116x, void *buf, int len)
{
u8 *dp = (u8 *) buf;
u16 *dp2 = (u16 *) buf;
u16 w;
int quot = len % 4;
	/*
	 * The buffer is already in 'usb data order', which is LE.
	 * When accessing the buffer as u16, we have to take care that
	 * the byte order doesn't get mixed up.
	 */
if ((unsigned long)dp2 & 1) {
/* not aligned */
for (; len > 1; len -= 2) {
w = *dp++;
w |= *dp++ << 8;
isp116x_raw_write_data16(isp116x, w);
}
if (len)
			isp116x_write_data16(isp116x, (u16)*dp);
} else {
/* aligned */
for (; len > 1; len -= 2) {
/* Keep byte order ! */
isp116x_raw_write_data16(isp116x, cpu_to_le16(*dp2++));
}
if (len)
isp116x_write_data16(isp116x, 0xff & *((u8 *) dp2));
}
if (quot == 1 || quot == 2)
isp116x_raw_write_data16(isp116x, 0);
}
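/*
 * Worked example (added note): for len == 5 the loop above emits two
 * 16-bit words (4 bytes), the tail write emits the fifth byte as a
 * third word, and quot == 1 adds one dummy word, so four words
 * (8 bytes) cross the bus -- len padded up to the 32-bit boundary.
 */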
/*
Read len bytes from fifo and then read till 32-bit boundary.
*/
static void read_ptddata_from_fifo(struct isp116x *isp116x, void *buf, int len)
{
u8 *dp = (u8 *) buf;
u16 *dp2 = (u16 *) buf;
u16 w;
int quot = len % 4;
	/*
	 * The buffer is already in 'usb data order', which is LE.
	 * When accessing the buffer as u16, we have to take care that
	 * the byte order doesn't get mixed up.
	 */
if ((unsigned long)dp2 & 1) {
/* not aligned */
for (; len > 1; len -= 2) {
w = isp116x_raw_read_data16(isp116x);
*dp++ = w & 0xff;
*dp++ = (w >> 8) & 0xff;
}
if (len)
*dp = 0xff & isp116x_read_data16(isp116x);
} else {
/* aligned */
for (; len > 1; len -= 2) {
/* Keep byte order! */
*dp2++ = le16_to_cpu(isp116x_raw_read_data16(isp116x));
}
if (len)
*(u8 *) dp2 = 0xff & isp116x_read_data16(isp116x);
}
if (quot == 1 || quot == 2)
isp116x_raw_read_data16(isp116x);
}
/*
Write ptd's and data for scheduled transfers into
the fifo ram. Fifo must be empty and ready.
*/
static void pack_fifo(struct isp116x *isp116x)
{
struct isp116x_ep *ep;
struct ptd *ptd;
int buflen = isp116x->atl_last_dir == PTD_DIR_IN
? isp116x->atl_bufshrt : isp116x->atl_buflen;
isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT);
isp116x_write_reg16(isp116x, HCXFERCTR, buflen);
isp116x_write_addr(isp116x, HCATLPORT | ISP116x_WRITE_OFFSET);
for (ep = isp116x->atl_active; ep; ep = ep->active) {
ptd = &ep->ptd;
dump_ptd(ptd);
dump_ptd_out_data(ptd, ep->data);
isp116x_write_data16(isp116x, ptd->count);
isp116x_write_data16(isp116x, ptd->mps);
isp116x_write_data16(isp116x, ptd->len);
isp116x_write_data16(isp116x, ptd->faddr);
buflen -= sizeof(struct ptd);
/* Skip writing data for last IN PTD */
if (ep->active || (isp116x->atl_last_dir != PTD_DIR_IN)) {
write_ptddata_to_fifo(isp116x, ep->data, ep->length);
buflen -= ALIGN(ep->length, 4);
}
}
BUG_ON(buflen);
}
/*
Read the processed ptd's and data from fifo ram back to
URBs' buffers. Fifo must be full and done
*/
static void unpack_fifo(struct isp116x *isp116x)
{
struct isp116x_ep *ep;
struct ptd *ptd;
int buflen = isp116x->atl_last_dir == PTD_DIR_IN
? isp116x->atl_buflen : isp116x->atl_bufshrt;
isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT);
isp116x_write_reg16(isp116x, HCXFERCTR, buflen);
isp116x_write_addr(isp116x, HCATLPORT);
for (ep = isp116x->atl_active; ep; ep = ep->active) {
ptd = &ep->ptd;
ptd->count = isp116x_read_data16(isp116x);
ptd->mps = isp116x_read_data16(isp116x);
ptd->len = isp116x_read_data16(isp116x);
ptd->faddr = isp116x_read_data16(isp116x);
buflen -= sizeof(struct ptd);
/* Skip reading data for last Setup or Out PTD */
if (ep->active || (isp116x->atl_last_dir == PTD_DIR_IN)) {
read_ptddata_from_fifo(isp116x, ep->data, ep->length);
buflen -= ALIGN(ep->length, 4);
}
dump_ptd(ptd);
dump_ptd_in_data(ptd, ep->data);
}
BUG_ON(buflen);
}
/*---------------------------------------------------------------*/
/*
Set up PTD's.
*/
static void preproc_atl_queue(struct isp116x *isp116x)
{
struct isp116x_ep *ep;
struct urb *urb;
struct ptd *ptd;
u16 len;
for (ep = isp116x->atl_active; ep; ep = ep->active) {
u16 toggle = 0, dir = PTD_DIR_SETUP;
BUG_ON(list_empty(&ep->hep->urb_list));
urb = container_of(ep->hep->urb_list.next,
struct urb, urb_list);
ptd = &ep->ptd;
len = ep->length;
ep->data = (unsigned char *)urb->transfer_buffer
+ urb->actual_length;
switch (ep->nextpid) {
case USB_PID_IN:
toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
dir = PTD_DIR_IN;
break;
case USB_PID_OUT:
toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
dir = PTD_DIR_OUT;
break;
case USB_PID_SETUP:
len = sizeof(struct usb_ctrlrequest);
ep->data = urb->setup_packet;
break;
case USB_PID_ACK:
toggle = 1;
len = 0;
dir = (urb->transfer_buffer_length
&& usb_pipein(urb->pipe))
? PTD_DIR_OUT : PTD_DIR_IN;
break;
default:
ERR("%s %d: ep->nextpid %d\n", __func__, __LINE__,
ep->nextpid);
BUG();
}
ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
ptd->mps = PTD_MPS(ep->maxpacket)
| PTD_SPD(urb->dev->speed == USB_SPEED_LOW)
| PTD_EP(ep->epnum);
ptd->len = PTD_LEN(len) | PTD_DIR(dir);
ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
if (!ep->active) {
ptd->mps |= PTD_LAST_MSK;
isp116x->atl_last_dir = dir;
}
isp116x->atl_bufshrt = sizeof(struct ptd) + isp116x->atl_buflen;
isp116x->atl_buflen = isp116x->atl_bufshrt + ALIGN(len, 4);
}
}
/*
Take done or failed requests out of schedule. Give back
processed urbs.
*/
static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
struct urb *urb, int status)
__releases(isp116x->lock) __acquires(isp116x->lock)
{
unsigned i;
ep->error_count = 0;
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_SETUP;
urb_dbg(urb, "Finish");
usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb);
spin_unlock(&isp116x->lock);
usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb, status);
spin_lock(&isp116x->lock);
/* take idle endpoints out of the schedule */
if (!list_empty(&ep->hep->urb_list))
return;
/* async deschedule */
if (!list_empty(&ep->schedule)) {
list_del_init(&ep->schedule);
return;
}
/* periodic deschedule */
DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct isp116x_ep *temp;
struct isp116x_ep **prev = &isp116x->periodic[i];
while (*prev && ((temp = *prev) != ep))
prev = &temp->next;
if (*prev)
*prev = ep->next;
isp116x->load[i] -= ep->load;
}
ep->branch = PERIODIC_SIZE;
isp116x_to_hcd(isp116x)->self.bandwidth_allocated -=
ep->load / ep->period;
/* switch irq type? */
if (!--isp116x->periodic_count) {
isp116x->irqenb &= ~HCuPINT_SOF;
isp116x->irqenb |= HCuPINT_ATL;
}
}
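/*
 * Added note on the periodic deschedule above: walking the branch with
 * a struct isp116x_ep **prev (a pointer to the "next" link itself)
 * lets the ep be unlinked with a single "*prev = ep->next" store and
 * no special case for the list head -- the classic indirect-pointer
 * removal idiom for singly linked lists.
 */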
/*
Analyze transfer results, handle partial transfers and errors
*/
static void postproc_atl_queue(struct isp116x *isp116x)
{
struct isp116x_ep *ep;
struct urb *urb;
struct usb_device *udev;
struct ptd *ptd;
int short_not_ok;
int status;
u8 cc;
for (ep = isp116x->atl_active; ep; ep = ep->active) {
BUG_ON(list_empty(&ep->hep->urb_list));
urb =
container_of(ep->hep->urb_list.next, struct urb, urb_list);
udev = urb->dev;
ptd = &ep->ptd;
cc = PTD_GET_CC(ptd);
short_not_ok = 1;
status = -EINPROGRESS;
		/* Data underrun is special. For an allowed underrun
		   we clear the error and continue as normal. For a
		   forbidden underrun we finish the DATA stage
		   immediately, while for a control transfer we do
		   a STATUS stage. */
if (cc == TD_DATAUNDERRUN) {
if (!(urb->transfer_flags & URB_SHORT_NOT_OK) ||
usb_pipecontrol(urb->pipe)) {
DBG("Allowed or control data underrun\n");
cc = TD_CC_NOERROR;
short_not_ok = 0;
} else {
ep->error_count = 1;
usb_settoggle(udev, ep->epnum,
ep->nextpid == USB_PID_OUT,
PTD_GET_TOGGLE(ptd));
urb->actual_length += PTD_GET_COUNT(ptd);
status = cc_to_error[TD_DATAUNDERRUN];
goto done;
}
}
if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED
&& (++ep->error_count >= 3 || cc == TD_CC_STALL
|| cc == TD_DATAOVERRUN)) {
status = cc_to_error[cc];
if (ep->nextpid == USB_PID_ACK)
ep->nextpid = 0;
goto done;
}
		/* According to the USB spec, a zero-length Int transfer
		   signals completion of the urb. Does this apply only
		   to IN endpoints? */
if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) {
status = 0;
goto done;
}
		/* Relax after a previously failed, but later succeeded
		   or correctly NAK'ed retransmission attempt */
if (ep->error_count
&& (cc == TD_CC_NOERROR || cc == TD_NOTACCESSED))
ep->error_count = 0;
		/* Take into account idiosyncrasies of the isp116x chip
		   regarding the toggle bit for failed transfers */
if (ep->nextpid == USB_PID_OUT)
usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)
^ (ep->error_count > 0));
else if (ep->nextpid == USB_PID_IN)
usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)
^ (ep->error_count > 0));
switch (ep->nextpid) {
case USB_PID_IN:
case USB_PID_OUT:
urb->actual_length += PTD_GET_COUNT(ptd);
if (PTD_GET_ACTIVE(ptd)
|| (cc != TD_CC_NOERROR && cc < 0x0E))
break;
if (urb->transfer_buffer_length != urb->actual_length) {
if (short_not_ok)
break;
} else {
if (urb->transfer_flags & URB_ZERO_PACKET
&& ep->nextpid == USB_PID_OUT
&& !(PTD_GET_COUNT(ptd) % ep->maxpacket)) {
DBG("Zero packet requested\n");
break;
}
}
/* All data for this URB is transferred, let's finish */
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_ACK;
else
status = 0;
break;
case USB_PID_SETUP:
if (PTD_GET_ACTIVE(ptd)
|| (cc != TD_CC_NOERROR && cc < 0x0E))
break;
if (urb->transfer_buffer_length == urb->actual_length)
ep->nextpid = USB_PID_ACK;
else if (usb_pipeout(urb->pipe)) {
usb_settoggle(udev, 0, 1, 1);
ep->nextpid = USB_PID_OUT;
} else {
usb_settoggle(udev, 0, 0, 1);
ep->nextpid = USB_PID_IN;
}
break;
case USB_PID_ACK:
if (PTD_GET_ACTIVE(ptd)
|| (cc != TD_CC_NOERROR && cc < 0x0E))
break;
status = 0;
ep->nextpid = 0;
break;
default:
BUG();
}
done:
if (status != -EINPROGRESS || urb->unlinked)
finish_request(isp116x, ep, urb, status);
}
}
/*
Scan transfer lists, schedule transfers, send data off
to chip.
*/
static void start_atl_transfers(struct isp116x *isp116x)
{
struct isp116x_ep *last_ep = NULL, *ep;
struct urb *urb;
u16 load = 0;
int len, index, speed, byte_time;
if (atomic_read(&isp116x->atl_finishing))
return;
if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state))
return;
/* FIFO not empty? */
if (isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_FULL)
return;
isp116x->atl_active = NULL;
isp116x->atl_buflen = isp116x->atl_bufshrt = 0;
/* Schedule int transfers */
if (isp116x->periodic_count) {
isp116x->fmindex = index =
(isp116x->fmindex + 1) & (PERIODIC_SIZE - 1);
load = isp116x->load[index];
if (load) {
/* Bring all int transfers for this frame
into the active queue */
isp116x->atl_active = last_ep =
isp116x->periodic[index];
while (last_ep->next)
last_ep = (last_ep->active = last_ep->next);
last_ep->active = NULL;
}
}
/* Schedule control/bulk transfers */
list_for_each_entry(ep, &isp116x->async, schedule) {
urb = container_of(ep->hep->urb_list.next,
struct urb, urb_list);
speed = urb->dev->speed;
byte_time = speed == USB_SPEED_LOW
? BYTE_TIME_LOWSPEED : BYTE_TIME_FULLSPEED;
if (ep->nextpid == USB_PID_SETUP) {
len = sizeof(struct usb_ctrlrequest);
} else if (ep->nextpid == USB_PID_ACK) {
len = 0;
} else {
/* Find current free length ... */
len = (MAX_LOAD_LIMIT - load) / byte_time;
/* ... then limit it to configured max size ... */
len = min(len, speed == USB_SPEED_LOW ?
MAX_TRANSFER_SIZE_LOWSPEED :
MAX_TRANSFER_SIZE_FULLSPEED);
/* ... and finally cut to the multiple of MaxPacketSize,
or to the real length if there's enough room. */
if (len <
(urb->transfer_buffer_length -
urb->actual_length)) {
len -= len % ep->maxpacket;
if (!len)
continue;
} else
len = urb->transfer_buffer_length -
urb->actual_length;
BUG_ON(len < 0);
}
load += len * byte_time;
if (load > MAX_LOAD_LIMIT)
break;
ep->active = NULL;
ep->length = len;
if (last_ep)
last_ep->active = ep;
else
isp116x->atl_active = ep;
last_ep = ep;
}
	/* Avoid starving endpoints: rotate the async list so another
	   endpoint gets to go first next time */
if ((&isp116x->async)->next != (&isp116x->async)->prev)
list_move(&isp116x->async, (&isp116x->async)->next);
if (isp116x->atl_active) {
preproc_atl_queue(isp116x);
pack_fifo(isp116x);
}
}
/*
Finish the processed transfers
*/
static void finish_atl_transfers(struct isp116x *isp116x)
{
if (!isp116x->atl_active)
return;
/* Fifo not ready? */
if (!(isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_DONE))
return;
atomic_inc(&isp116x->atl_finishing);
unpack_fifo(isp116x);
postproc_atl_queue(isp116x);
atomic_dec(&isp116x->atl_finishing);
}
static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
u16 irqstat;
irqreturn_t ret = IRQ_NONE;
spin_lock(&isp116x->lock);
isp116x_write_reg16(isp116x, HCuPINTENB, 0);
irqstat = isp116x_read_reg16(isp116x, HCuPINT);
isp116x_write_reg16(isp116x, HCuPINT, irqstat);
if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
ret = IRQ_HANDLED;
finish_atl_transfers(isp116x);
}
if (irqstat & HCuPINT_OPR) {
u32 intstat = isp116x_read_reg32(isp116x, HCINTSTAT);
isp116x_write_reg32(isp116x, HCINTSTAT, intstat);
if (intstat & HCINT_UE) {
ERR("Unrecoverable error, HC is dead!\n");
/* IRQ's are off, we do no DMA,
perfectly ready to die ... */
hcd->state = HC_STATE_HALT;
usb_hc_died(hcd);
ret = IRQ_HANDLED;
goto done;
}
if (intstat & HCINT_RHSC)
/* When root hub or any of its ports is going
to come out of suspend, it may take more
than 10ms for status bits to stabilize. */
mod_timer(&hcd->rh_timer, jiffies
+ msecs_to_jiffies(20) + 1);
if (intstat & HCINT_RD) {
DBG("---- remote wakeup\n");
usb_hcd_resume_root_hub(hcd);
}
irqstat &= ~HCuPINT_OPR;
ret = IRQ_HANDLED;
}
if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
start_atl_transfers(isp116x);
}
isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb);
done:
spin_unlock(&isp116x->lock);
return ret;
}
/*-----------------------------------------------------------------*/
/* usb 1.1 says max 90% of a frame is available for periodic transfers.
* this driver doesn't promise that much since it's got to handle an
* IRQ per packet; irq handling latencies also use up that time.
*/
/* out of 1000 us */
#define MAX_PERIODIC_LOAD 600
static int balance(struct isp116x *isp116x, u16 period, u16 load)
{
int i, branch = -ENOSPC;
/* search for the least loaded schedule branch of that period
which has enough bandwidth left unreserved. */
for (i = 0; i < period; i++) {
if (branch < 0 || isp116x->load[branch] > isp116x->load[i]) {
int j;
for (j = i; j < PERIODIC_SIZE; j += period) {
if ((isp116x->load[j] + load)
> MAX_PERIODIC_LOAD)
break;
}
if (j < PERIODIC_SIZE)
continue;
branch = i;
}
}
return branch;
}
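/*
 * Worked example (added note): for period == 4 and PERIODIC_SIZE == 32,
 * balance() above considers branches 0..3 and returns the least-loaded
 * one whose slots branch, branch + 4, branch + 8, ... all stay at or
 * below MAX_PERIODIC_LOAD after adding 'load'; it returns -ENOSPC when
 * no branch fits.
 */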
/* NB! ALL the code above this point runs with isp116x->lock
held, irqs off
*/
/*-----------------------------------------------------------------*/
static int isp116x_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct usb_device *udev = urb->dev;
unsigned int pipe = urb->pipe;
int is_out = !usb_pipein(pipe);
int type = usb_pipetype(pipe);
int epnum = usb_pipeendpoint(pipe);
struct usb_host_endpoint *hep = urb->ep;
struct isp116x_ep *ep = NULL;
unsigned long flags;
int i;
int ret = 0;
urb_dbg(urb, "Enqueue");
if (type == PIPE_ISOCHRONOUS) {
ERR("Isochronous transfers not supported\n");
urb_dbg(urb, "Refused to enqueue");
return -ENXIO;
}
/* avoid all allocations within spinlocks: request or endpoint */
if (!hep->hcpriv) {
ep = kzalloc(sizeof *ep, mem_flags);
if (!ep)
return -ENOMEM;
}
spin_lock_irqsave(&isp116x->lock, flags);
if (!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
ret = -ENODEV;
goto fail_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret) {
kfree(ep);
goto fail_not_linked;
}
if (hep->hcpriv)
ep = hep->hcpriv;
else {
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
ep->maxpacket = usb_maxpacket(udev, urb->pipe);
usb_settoggle(udev, epnum, is_out, 0);
if (type == PIPE_CONTROL) {
ep->nextpid = USB_PID_SETUP;
} else if (is_out) {
ep->nextpid = USB_PID_OUT;
} else {
ep->nextpid = USB_PID_IN;
}
if (urb->interval) {
/*
With INT URBs submitted, the driver works with SOF
interrupt enabled and ATL interrupt disabled. After
the PTDs are written to fifo ram, the chip starts
fifo processing and usb transfers after the next
SOF and continues until the transfers are finished
(succeeded or failed) or the frame ends. Therefore,
the transfers occur only in every second frame,
while fifo reading/writing and data processing
occur in every other second frame. */
if (urb->interval < 2)
urb->interval = 2;
if (urb->interval > 2 * PERIODIC_SIZE)
urb->interval = 2 * PERIODIC_SIZE;
ep->period = urb->interval >> 1;
ep->branch = PERIODIC_SIZE;
ep->load = usb_calc_bus_time(udev->speed,
!is_out,
(type == PIPE_ISOCHRONOUS),
usb_maxpacket(udev, pipe)) /
1000;
}
hep->hcpriv = ep;
ep->hep = hep;
}
/* maybe put endpoint into schedule */
switch (type) {
case PIPE_CONTROL:
case PIPE_BULK:
if (list_empty(&ep->schedule))
list_add_tail(&ep->schedule, &isp116x->async);
break;
case PIPE_INTERRUPT:
urb->interval = ep->period;
ep->length = min_t(u32, ep->maxpacket,
urb->transfer_buffer_length);
/* urb submitted for already existing endpoint */
if (ep->branch < PERIODIC_SIZE)
break;
ep->branch = ret = balance(isp116x, ep->period, ep->load);
if (ret < 0)
goto fail;
ret = 0;
urb->start_frame = (isp116x->fmindex & (PERIODIC_SIZE - 1))
+ ep->branch;
/* sort each schedule branch by period (slow before fast)
to share the faster parts of the tree without needing
dummy/placeholder nodes */
DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct isp116x_ep **prev = &isp116x->periodic[i];
struct isp116x_ep *here = *prev;
while (here && ep != here) {
if (ep->period > here->period)
break;
prev = &here->next;
here = *prev;
}
if (ep != here) {
ep->next = here;
*prev = ep;
}
isp116x->load[i] += ep->load;
}
hcd->self.bandwidth_allocated += ep->load / ep->period;
/* switch over to SOFint */
if (!isp116x->periodic_count++) {
isp116x->irqenb &= ~HCuPINT_ATL;
isp116x->irqenb |= HCuPINT_SOF;
isp116x_write_reg16(isp116x, HCuPINTENB,
isp116x->irqenb);
}
}
urb->hcpriv = hep;
start_atl_transfers(isp116x);
fail:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
fail_not_linked:
spin_unlock_irqrestore(&isp116x->lock, flags);
return ret;
}
/*
Dequeue URBs.
*/
static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct usb_host_endpoint *hep;
struct isp116x_ep *ep, *ep_act;
unsigned long flags;
int rc;
spin_lock_irqsave(&isp116x->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
hep = urb->hcpriv;
ep = hep->hcpriv;
WARN_ON(hep != ep->hep);
/* In front of queue? */
if (ep->hep->urb_list.next == &urb->urb_list)
/* active? */
for (ep_act = isp116x->atl_active; ep_act;
ep_act = ep_act->active)
if (ep_act == ep) {
VDBG("dequeue, urb %p active; wait for irq\n",
urb);
urb = NULL;
break;
}
if (urb)
finish_request(isp116x, ep, urb, status);
done:
spin_unlock_irqrestore(&isp116x->lock, flags);
return rc;
}
static void isp116x_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
{
int i;
struct isp116x_ep *ep = hep->hcpriv;
if (!ep)
return;
/* assume we'd just wait for the irq */
for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++)
msleep(3);
if (!list_empty(&hep->urb_list))
WARNING("ep %p not empty?\n", ep);
kfree(ep);
hep->hcpriv = NULL;
}
static int isp116x_get_frame(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
u32 fmnum;
unsigned long flags;
spin_lock_irqsave(&isp116x->lock, flags);
fmnum = isp116x_read_reg32(isp116x, HCFMNUM);
spin_unlock_irqrestore(&isp116x->lock, flags);
return (int)fmnum;
}
/*
Adapted from ohci-hub.c. Currently we don't support autosuspend.
*/
static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
int ports, i, changed = 0;
unsigned long flags;
if (!HC_IS_RUNNING(hcd->state))
return -ESHUTDOWN;
	/* Report no status change now if we are scheduled to be
	   called later */
if (timer_pending(&hcd->rh_timer))
return 0;
ports = isp116x->rhdesca & RH_A_NDP;
spin_lock_irqsave(&isp116x->lock, flags);
isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS);
if (isp116x->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
buf[0] = changed = 1;
else
buf[0] = 0;
for (i = 0; i < ports; i++) {
u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
| RH_PS_OCIC | RH_PS_PRSC)) {
changed = 1;
buf[0] |= 1 << (i + 1);
}
}
spin_unlock_irqrestore(&isp116x->lock, flags);
return changed;
}
static void isp116x_hub_descriptor(struct isp116x *isp116x,
struct usb_hub_descriptor *desc)
{
u32 reg = isp116x->rhdesca;
desc->bDescriptorType = USB_DT_HUB;
desc->bDescLength = 9;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = (u8) (reg & 0x3);
/* Power switching, device type, overcurrent. */
desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) &
(HUB_CHAR_LPSM |
HUB_CHAR_COMPOUND |
HUB_CHAR_OCPM)));
desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
/* ports removable, and legacy PortPwrCtrlMask */
desc->u.hs.DeviceRemovable[0] = 0;
desc->u.hs.DeviceRemovable[1] = ~0;
}
/* Perform reset of a given port.
It would be great to just start the reset and let the
USB core to clear the reset in due time. However,
root hub ports should be reset for at least 50 ms, while
   our chip stays in reset for only about 10 ms. I.e., we must
   repeatedly reset it ourselves here.
*/
static inline void root_port_reset(struct isp116x *isp116x, unsigned port)
{
u32 tmp;
unsigned long flags, t;
/* Root hub reset should be 50 ms, but some devices
want it even longer. */
t = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, t)) {
spin_lock_irqsave(&isp116x->lock, flags);
/* spin until any current reset finishes */
for (;;) {
tmp = isp116x_read_reg32(isp116x, port ?
HCRHPORT2 : HCRHPORT1);
if (!(tmp & RH_PS_PRS))
break;
udelay(500);
}
/* Don't reset a disconnected port */
if (!(tmp & RH_PS_CCS)) {
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
}
		/* Reset lasts 10 ms (so the datasheet claims) */
isp116x_write_reg32(isp116x, port ? HCRHPORT2 :
HCRHPORT1, (RH_PS_PRS));
spin_unlock_irqrestore(&isp116x->lock, flags);
msleep(10);
}
}
/* Adapted from ohci-hub.c */
static int isp116x_hub_control(struct usb_hcd *hcd,
u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
int ret = 0;
unsigned long flags;
int ports = isp116x->rhdesca & RH_A_NDP;
u32 tmp = 0;
switch (typeReq) {
case ClearHubFeature:
DBG("ClearHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
DBG("C_HUB_OVER_CURRENT\n");
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp116x->lock, flags);
fallthrough;
case C_HUB_LOCAL_POWER:
DBG("C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
}
break;
case SetHubFeature:
DBG("SetHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
DBG("C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
}
break;
case GetHubDescriptor:
DBG("GetHubDescriptor\n");
isp116x_hub_descriptor(isp116x,
(struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
DBG("GetHubStatus\n");
*(__le32 *) buf = 0;
break;
case GetPortStatus:
DBG("GetPortStatus\n");
if (!wIndex || wIndex > ports)
goto error;
spin_lock_irqsave(&isp116x->lock, flags);
tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1);
spin_unlock_irqrestore(&isp116x->lock, flags);
*(__le32 *) buf = cpu_to_le32(tmp);
DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp);
break;
case ClearPortFeature:
DBG("ClearPortFeature: ");
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
DBG("USB_PORT_FEAT_ENABLE\n");
tmp = RH_PS_CCS;
break;
case USB_PORT_FEAT_C_ENABLE:
DBG("USB_PORT_FEAT_C_ENABLE\n");
tmp = RH_PS_PESC;
break;
case USB_PORT_FEAT_SUSPEND:
DBG("USB_PORT_FEAT_SUSPEND\n");
tmp = RH_PS_POCI;
break;
case USB_PORT_FEAT_C_SUSPEND:
DBG("USB_PORT_FEAT_C_SUSPEND\n");
tmp = RH_PS_PSSC;
break;
case USB_PORT_FEAT_POWER:
DBG("USB_PORT_FEAT_POWER\n");
tmp = RH_PS_LSDA;
break;
case USB_PORT_FEAT_C_CONNECTION:
DBG("USB_PORT_FEAT_C_CONNECTION\n");
tmp = RH_PS_CSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
DBG("USB_PORT_FEAT_C_OVER_CURRENT\n");
tmp = RH_PS_OCIC;
break;
case USB_PORT_FEAT_C_RESET:
DBG("USB_PORT_FEAT_C_RESET\n");
tmp = RH_PS_PRSC;
break;
default:
goto error;
}
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, wIndex
? HCRHPORT2 : HCRHPORT1, tmp);
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
case SetPortFeature:
DBG("SetPortFeature: ");
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
DBG("USB_PORT_FEAT_SUSPEND\n");
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, wIndex
? HCRHPORT2 : HCRHPORT1, RH_PS_PSS);
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
case USB_PORT_FEAT_POWER:
DBG("USB_PORT_FEAT_POWER\n");
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, wIndex
? HCRHPORT2 : HCRHPORT1, RH_PS_PPS);
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
case USB_PORT_FEAT_RESET:
DBG("USB_PORT_FEAT_RESET\n");
root_port_reset(isp116x, wIndex);
break;
default:
goto error;
}
break;
default:
error:
/* "protocol stall" on error */
DBG("PROTOCOL STALL\n");
ret = -EPIPE;
}
return ret;
}
/*-----------------------------------------------------------------*/
#ifdef CONFIG_DEBUG_FS
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
seq_printf(s, "%s %04x%s%s%s%s%s%s\n", label, mask,
mask & HCuPINT_CLKRDY ? " clkrdy" : "",
mask & HCuPINT_SUSP ? " susp" : "",
mask & HCuPINT_OPR ? " opr" : "",
mask & HCuPINT_AIIEOT ? " eot" : "",
mask & HCuPINT_ATL ? " atl" : "",
mask & HCuPINT_SOF ? " sof" : "");
}
static void dump_int(struct seq_file *s, char *label, u32 mask)
{
seq_printf(s, "%s %08x%s%s%s%s%s%s%s\n", label, mask,
mask & HCINT_MIE ? " MIE" : "",
mask & HCINT_RHSC ? " rhsc" : "",
mask & HCINT_FNO ? " fno" : "",
mask & HCINT_UE ? " ue" : "",
mask & HCINT_RD ? " rd" : "",
mask & HCINT_SF ? " sof" : "", mask & HCINT_SO ? " so" : "");
}
static int isp116x_debug_show(struct seq_file *s, void *unused)
{
struct isp116x *isp116x = s->private;
seq_printf(s, "%s\n%s version %s\n",
isp116x_to_hcd(isp116x)->product_desc, hcd_name,
DRIVER_VERSION);
if (HC_IS_SUSPENDED(isp116x_to_hcd(isp116x)->state)) {
seq_printf(s, "HCD is suspended\n");
return 0;
}
if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state)) {
seq_printf(s, "HCD not running\n");
return 0;
}
spin_lock_irq(&isp116x->lock);
dump_irq(s, "hc_irq_enable", isp116x_read_reg16(isp116x, HCuPINTENB));
dump_irq(s, "hc_irq_status", isp116x_read_reg16(isp116x, HCuPINT));
dump_int(s, "hc_int_enable", isp116x_read_reg32(isp116x, HCINTENB));
dump_int(s, "hc_int_status", isp116x_read_reg32(isp116x, HCINTSTAT));
isp116x_show_regs_seq(isp116x, s);
spin_unlock_irq(&isp116x->lock);
seq_printf(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(isp116x_debug);
static void create_debug_file(struct isp116x *isp116x)
{
debugfs_create_file(hcd_name, S_IRUGO, usb_debug_root, isp116x,
&isp116x_debug_fops);
}
static void remove_debug_file(struct isp116x *isp116x)
{
debugfs_lookup_and_remove(hcd_name, usb_debug_root);
}
#else
static inline void create_debug_file(struct isp116x *isp116x) { }
static inline void remove_debug_file(struct isp116x *isp116x) { }
#endif /* CONFIG_DEBUG_FS */
/*-----------------------------------------------------------------*/
/*
  Software reset - can be called from any context.
*/
static int isp116x_sw_reset(struct isp116x *isp116x)
{
int retries = 15;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg16(isp116x, HCSWRES, HCSWRES_MAGIC);
isp116x_write_reg32(isp116x, HCCMDSTAT, HCCMDSTAT_HCR);
while (--retries) {
/* It usually resets within 1 ms */
mdelay(1);
if (!(isp116x_read_reg32(isp116x, HCCMDSTAT) & HCCMDSTAT_HCR))
break;
}
if (!retries) {
ERR("Software reset timeout\n");
ret = -ETIME;
}
spin_unlock_irqrestore(&isp116x->lock, flags);
return ret;
}
static int isp116x_reset(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
unsigned long t;
u16 clkrdy = 0;
int ret, timeout = 15 /* ms */ ;
ret = isp116x_sw_reset(isp116x);
if (ret)
return ret;
t = jiffies + msecs_to_jiffies(timeout);
while (time_before_eq(jiffies, t)) {
msleep(4);
spin_lock_irq(&isp116x->lock);
clkrdy = isp116x_read_reg16(isp116x, HCuPINT) & HCuPINT_CLKRDY;
spin_unlock_irq(&isp116x->lock);
if (clkrdy)
break;
}
if (!clkrdy) {
ERR("Clock not ready after %dms\n", timeout);
		/* After sw_reset the clock won't report ready if the
		   H_WAKEUP pin is high. */
ERR("Please make sure that the H_WAKEUP pin is pulled low!\n");
ret = -ENODEV;
}
return ret;
}
static void isp116x_stop(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
unsigned long flags;
u32 val;
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg16(isp116x, HCuPINTENB, 0);
	/* Switch off port power; some devices don't come up
	   after the next 'insmod' without this */
val = isp116x_read_reg32(isp116x, HCRHDESCA);
val &= ~(RH_A_NPS | RH_A_PSM);
isp116x_write_reg32(isp116x, HCRHDESCA, val);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_LPS);
spin_unlock_irqrestore(&isp116x->lock, flags);
isp116x_sw_reset(isp116x);
}
/*
Configure the chip. The chip must be successfully reset by now.
*/
static int isp116x_start(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct isp116x_platform_data *board = isp116x->board;
u32 val;
unsigned long flags;
spin_lock_irqsave(&isp116x->lock, flags);
/* clear interrupt status and disable all interrupt sources */
isp116x_write_reg16(isp116x, HCuPINT, 0xff);
isp116x_write_reg16(isp116x, HCuPINTENB, 0);
val = isp116x_read_reg16(isp116x, HCCHIPID);
if ((val & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
ERR("Invalid chip ID %04x\n", val);
spin_unlock_irqrestore(&isp116x->lock, flags);
return -ENODEV;
}
/* To be removed in future */
hcd->uses_new_polling = 1;
isp116x_write_reg16(isp116x, HCITLBUFLEN, ISP116x_ITL_BUFSIZE);
isp116x_write_reg16(isp116x, HCATLBUFLEN, ISP116x_ATL_BUFSIZE);
/* ----- HW conf */
val = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
if (board->sel15Kres)
val |= HCHWCFG_15KRSEL;
	/* Remote wakeup won't work without a working clock */
if (board->remote_wakeup_enable)
val |= HCHWCFG_CLKNOTSTOP;
if (board->oc_enable)
val |= HCHWCFG_ANALOG_OC;
if (board->int_act_high)
val |= HCHWCFG_INT_POL;
if (board->int_edge_triggered)
val |= HCHWCFG_INT_TRIGGER;
isp116x_write_reg16(isp116x, HCHWCFG, val);
/* ----- Root hub conf */
val = (25 << 24) & RH_A_POTPGT;
	/* AN10003_1.pdf recommends that RH_A_NPS (no power switching)
	   always be set. We instead request individual port
	   power switching. */
val |= RH_A_PSM;
/* Report overcurrent per port */
val |= RH_A_OCPM;
isp116x_write_reg32(isp116x, HCRHDESCA, val);
isp116x->rhdesca = isp116x_read_reg32(isp116x, HCRHDESCA);
val = RH_B_PPCM;
isp116x_write_reg32(isp116x, HCRHDESCB, val);
isp116x->rhdescb = isp116x_read_reg32(isp116x, HCRHDESCB);
val = 0;
if (board->remote_wakeup_enable) {
if (!device_can_wakeup(hcd->self.controller))
device_init_wakeup(hcd->self.controller, 1);
val |= RH_HS_DRWE;
}
isp116x_write_reg32(isp116x, HCRHSTATUS, val);
isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS);
isp116x_write_reg32(isp116x, HCFMINTVL, 0x27782edf);
hcd->state = HC_STATE_RUNNING;
/* Set up interrupts */
isp116x->intenb = HCINT_MIE | HCINT_RHSC | HCINT_UE;
if (board->remote_wakeup_enable)
isp116x->intenb |= HCINT_RD;
isp116x->irqenb = HCuPINT_ATL | HCuPINT_OPR; /* | HCuPINT_SUSP; */
isp116x_write_reg32(isp116x, HCINTENB, isp116x->intenb);
isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb);
/* Go operational */
val = HCCONTROL_USB_OPER;
if (board->remote_wakeup_enable)
val |= HCCONTROL_RWE;
isp116x_write_reg32(isp116x, HCCONTROL, val);
/* Disable ports to avoid race in device enumeration */
isp116x_write_reg32(isp116x, HCRHPORT1, RH_PS_CCS);
isp116x_write_reg32(isp116x, HCRHPORT2, RH_PS_CCS);
isp116x_show_regs_log(isp116x);
spin_unlock_irqrestore(&isp116x->lock, flags);
return 0;
}
#ifdef CONFIG_PM
static int isp116x_bus_suspend(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
unsigned long flags;
u32 val;
int ret = 0;
spin_lock_irqsave(&isp116x->lock, flags);
val = isp116x_read_reg32(isp116x, HCCONTROL);
switch (val & HCCONTROL_HCFS) {
case HCCONTROL_USB_OPER:
spin_unlock_irqrestore(&isp116x->lock, flags);
val &= (~HCCONTROL_HCFS & ~HCCONTROL_RWE);
val |= HCCONTROL_USB_SUSPEND;
if (hcd->self.root_hub->do_remote_wakeup)
val |= HCCONTROL_RWE;
/* Wait for usb transfers to finish */
msleep(2);
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCCONTROL, val);
spin_unlock_irqrestore(&isp116x->lock, flags);
/* Wait for devices to suspend */
msleep(5);
break;
case HCCONTROL_USB_RESUME:
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) |
HCCONTROL_USB_RESET);
fallthrough;
case HCCONTROL_USB_RESET:
ret = -EBUSY;
fallthrough;
default: /* HCCONTROL_USB_SUSPEND */
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
}
return ret;
}
static int isp116x_bus_resume(struct usb_hcd *hcd)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
u32 val;
msleep(5);
spin_lock_irq(&isp116x->lock);
val = isp116x_read_reg32(isp116x, HCCONTROL);
switch (val & HCCONTROL_HCFS) {
case HCCONTROL_USB_SUSPEND:
val &= ~HCCONTROL_HCFS;
val |= HCCONTROL_USB_RESUME;
isp116x_write_reg32(isp116x, HCCONTROL, val);
break;
case HCCONTROL_USB_RESUME:
break;
case HCCONTROL_USB_OPER:
spin_unlock_irq(&isp116x->lock);
return 0;
default:
/* HCCONTROL_USB_RESET: this may happen, when during
suspension the HC lost power. Reinitialize completely */
spin_unlock_irq(&isp116x->lock);
DBG("Chip has been reset while suspended. Reinit from scratch.\n");
isp116x_reset(hcd);
isp116x_start(hcd);
isp116x_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_POWER, 1, NULL, 0);
if ((isp116x->rhdesca & RH_A_NDP) == 2)
isp116x_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_POWER, 2, NULL, 0);
return 0;
}
val = isp116x->rhdesca & RH_A_NDP;
while (val--) {
u32 stat =
isp116x_read_reg32(isp116x, val ? HCRHPORT2 : HCRHPORT1);
/* force global, not selective, resume */
if (!(stat & RH_PS_PSS))
continue;
DBG("%s: Resuming port %d\n", __func__, val);
isp116x_write_reg32(isp116x, val ? HCRHPORT2 : HCRHPORT1,
    RH_PS_POCI);
}
spin_unlock_irq(&isp116x->lock);
hcd->state = HC_STATE_RESUMING;
msleep(USB_RESUME_TIMEOUT);
/* Go operational */
spin_lock_irq(&isp116x->lock);
val = isp116x_read_reg32(isp116x, HCCONTROL);
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) | HCCONTROL_USB_OPER);
spin_unlock_irq(&isp116x->lock);
hcd->state = HC_STATE_RUNNING;
return 0;
}
#else
#define isp116x_bus_suspend NULL
#define isp116x_bus_resume NULL
#endif
static const struct hc_driver isp116x_hc_driver = {
.description = hcd_name,
.product_desc = "ISP116x Host Controller",
.hcd_priv_size = sizeof(struct isp116x),
.irq = isp116x_irq,
.flags = HCD_USB11,
.reset = isp116x_reset,
.start = isp116x_start,
.stop = isp116x_stop,
.urb_enqueue = isp116x_urb_enqueue,
.urb_dequeue = isp116x_urb_dequeue,
.endpoint_disable = isp116x_endpoint_disable,
.get_frame_number = isp116x_get_frame,
.hub_status_data = isp116x_hub_status_data,
.hub_control = isp116x_hub_control,
.bus_suspend = isp116x_bus_suspend,
.bus_resume = isp116x_bus_resume,
};
/*----------------------------------------------------------------*/
static void isp116x_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp116x *isp116x;
struct resource *res;
if (!hcd)
return;
isp116x = hcd_to_isp116x(hcd);
remove_debug_file(isp116x);
usb_remove_hcd(hcd);
iounmap(isp116x->data_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res)
release_mem_region(res->start, 2);
iounmap(isp116x->addr_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, 2);
usb_put_hcd(hcd);
}
static int isp116x_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp116x *isp116x;
struct resource *addr, *data, *ires;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
int ret = 0;
unsigned long irqflags;
if (usb_disabled())
return -ENODEV;
if (pdev->num_resources < 3) {
ret = -ENODEV;
goto err1;
}
data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!addr || !data || !ires) {
ret = -ENODEV;
goto err1;
}
irq = ires->start;
irqflags = ires->flags & IRQF_TRIGGER_MASK;
if (!request_mem_region(addr->start, 2, hcd_name)) {
ret = -EBUSY;
goto err1;
}
addr_reg = ioremap(addr->start, resource_size(addr));
if (addr_reg == NULL) {
ret = -ENOMEM;
goto err2;
}
if (!request_mem_region(data->start, 2, hcd_name)) {
ret = -EBUSY;
goto err3;
}
data_reg = ioremap(data->start, resource_size(data));
if (data_reg == NULL) {
ret = -ENOMEM;
goto err4;
}
/* allocate and initialize hcd */
hcd = usb_create_hcd(&isp116x_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
ret = -ENOMEM;
goto err5;
}
/* this rsrc_start is bogus */
hcd->rsrc_start = addr->start;
isp116x = hcd_to_isp116x(hcd);
isp116x->data_reg = data_reg;
isp116x->addr_reg = addr_reg;
spin_lock_init(&isp116x->lock);
INIT_LIST_HEAD(&isp116x->async);
isp116x->board = dev_get_platdata(&pdev->dev);
if (!isp116x->board) {
ERR("Platform data structure not initialized\n");
ret = -ENODEV;
goto err6;
}
if (isp116x_check_platform_delay(isp116x)) {
ERR("USE_PLATFORM_DELAY defined, but delay function not "
"implemented.\n");
ERR("See comments in drivers/usb/host/isp116x-hcd.c\n");
ret = -ENODEV;
goto err6;
}
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err6;
device_wakeup_enable(hcd->self.controller);
create_debug_file(isp116x);
return 0;
err6:
usb_put_hcd(hcd);
err5:
iounmap(data_reg);
err4:
release_mem_region(data->start, 2);
err3:
iounmap(addr_reg);
err2:
release_mem_region(addr->start, 2);
err1:
ERR("init error, %d\n", ret);
return ret;
}
#ifdef CONFIG_PM
/*
Suspend of platform device
*/
static int isp116x_suspend(struct platform_device *dev, pm_message_t state)
{
VDBG("%s: state %x\n", __func__, state.event);
return 0;
}
/*
Resume platform device
*/
static int isp116x_resume(struct platform_device *dev)
{
VDBG("%s\n", __func__);
return 0;
}
#else
#define isp116x_suspend NULL
#define isp116x_resume NULL
#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:isp116x-hcd");
static struct platform_driver isp116x_driver = {
.probe = isp116x_probe,
.remove_new = isp116x_remove,
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
.name = hcd_name,
},
};
module_platform_driver(isp116x_driver);
| linux-master | drivers/usb/host/isp116x-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbgtty.c - tty glue for xHCI debug capability
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>
#include "xhci.h"
#include "xhci-dbgcap.h"
static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);
static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
return dbc->priv;
}
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
unsigned int len;
len = kfifo_len(&port->write_fifo);
if (len < size)
size = len;
if (size != 0)
size = kfifo_out(&port->write_fifo, packet, size);
return size;
}
static int dbc_start_tx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
{
int len;
struct dbc_request *req;
int status = 0;
bool do_tty_wake = false;
struct list_head *pool = &port->write_pool;
while (!list_empty(pool)) {
req = list_entry(pool->next, struct dbc_request, list_pool);
len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
if (len == 0)
break;
do_tty_wake = true;
req->length = len;
list_del(&req->list_pool);
spin_unlock(&port->port_lock);
status = dbc_ep_queue(req);
spin_lock(&port->port_lock);
if (status) {
list_add(&req->list_pool, pool);
break;
}
}
if (do_tty_wake && port->port.tty)
tty_wakeup(port->port.tty);
return status;
}
static void dbc_start_rx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
{
struct dbc_request *req;
int status;
struct list_head *pool = &port->read_pool;
while (!list_empty(pool)) {
if (!port->port.tty)
break;
req = list_entry(pool->next, struct dbc_request, list_pool);
list_del(&req->list_pool);
req->length = DBC_MAX_PACKET;
spin_unlock(&port->port_lock);
status = dbc_ep_queue(req);
spin_lock(&port->port_lock);
if (status) {
list_add(&req->list_pool, pool);
break;
}
}
}
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
spin_lock_irqsave(&port->port_lock, flags);
list_add_tail(&req->list_pool, &port->read_queue);
tasklet_schedule(&port->push);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
spin_lock_irqsave(&port->port_lock, flags);
list_add(&req->list_pool, &port->write_pool);
switch (req->status) {
case 0:
dbc_start_tx(port);
break;
case -ESHUTDOWN:
break;
default:
dev_warn(dbc->dev, "unexpected write complete status %d\n",
req->status);
break;
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
static void xhci_dbc_free_req(struct dbc_request *req)
{
kfree(req->buf);
dbc_free_request(req);
}
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
struct list_head *head,
void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
int i;
struct dbc_request *req;
for (i = 0; i < DBC_QUEUE_SIZE; i++) {
req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
if (!req)
break;
req->length = DBC_MAX_PACKET;
req->buf = kmalloc(req->length, GFP_KERNEL);
if (!req->buf) {
dbc_free_request(req);
break;
}
req->complete = fn;
list_add_tail(&req->list_pool, head);
}
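/* Partial allocation is tolerated; fail only if no request at all
 * could be set up.
 */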
return list_empty(head) ? -ENOMEM : 0;
}
static void
xhci_dbc_free_requests(struct list_head *head)
{
struct dbc_request *req;
while (!list_empty(head)) {
req = list_entry(head->next, struct dbc_request, list_pool);
list_del(&req->list_pool);
xhci_dbc_free_req(req);
}
}
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct dbc_port *port;
mutex_lock(&dbc_tty_minors_lock);
port = idr_find(&dbc_tty_minors, tty->index);
mutex_unlock(&dbc_tty_minors_lock);
if (!port)
return -ENXIO;
tty->driver_data = port;
return tty_port_install(&port->port, driver, tty);
}
static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
struct dbc_port *port = tty->driver_data;
return tty_port_open(&port->port, tty, file);
}
static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
struct dbc_port *port = tty->driver_data;
tty_port_close(&port->port, tty, file);
}
static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
if (count)
count = kfifo_in(&port->write_fifo, buf, count);
dbc_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return count;
}
static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
int status;
spin_lock_irqsave(&port->port_lock, flags);
status = kfifo_put(&port->write_fifo, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
}
static void dbc_tty_flush_chars(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
dbc_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return room;
}
static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return chars;
}
static void dbc_tty_unthrottle(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
tasklet_schedule(&port->push);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static const struct tty_operations dbc_tty_ops = {
.install = dbc_tty_install,
.open = dbc_tty_open,
.close = dbc_tty_close,
.write = dbc_tty_write,
.put_char = dbc_tty_put_char,
.flush_chars = dbc_tty_flush_chars,
.write_room = dbc_tty_write_room,
.chars_in_buffer = dbc_tty_chars_in_buffer,
.unthrottle = dbc_tty_unthrottle,
};
static void dbc_rx_push(struct tasklet_struct *t)
{
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
bool do_push = false;
bool disconnect = false;
struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
spin_lock_irqsave(&port->port_lock, flags);
tty = port->port.tty;
while (!list_empty(queue)) {
req = list_first_entry(queue, struct dbc_request, list_pool);
if (tty && tty_throttled(tty))
break;
switch (req->status) {
case 0:
break;
case -ESHUTDOWN:
disconnect = true;
break;
default:
pr_warn("ttyDBC0: unexpected RX status %d\n",
req->status);
break;
}
if (req->actual) {
char *packet = req->buf;
unsigned int n, size = req->actual;
int count;
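/* n_read records how much of this request was already pushed to
 * the tty on an earlier pass, when the flip buffer filled up.
 */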
n = port->n_read;
if (n) {
packet += n;
size -= n;
}
count = tty_insert_flip_string(&port->port, packet,
size);
if (count)
do_push = true;
if (count != size) {
port->n_read += count;
break;
}
port->n_read = 0;
}
list_move(&req->list_pool, &port->read_pool);
}
if (do_push)
tty_flip_buffer_push(&port->port);
if (!list_empty(queue) && tty) {
if (!tty_throttled(tty)) {
if (do_push)
tasklet_schedule(&port->push);
else
pr_warn("ttyDBC0: RX not scheduled?\n");
}
}
if (!disconnect)
dbc_start_rx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
unsigned long flags;
struct dbc_port *port = container_of(_port, struct dbc_port, port);
spin_lock_irqsave(&port->port_lock, flags);
dbc_start_rx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return 0;
}
static const struct tty_port_operations dbc_port_ops = {
.activate = dbc_port_activate,
};
static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
tty_port_init(&port->port);
spin_lock_init(&port->port_lock);
tasklet_setup(&port->push, dbc_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port.ops = &dbc_port_ops;
port->n_read = 0;
}
static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
tasklet_kill(&port->push);
tty_port_destroy(&port->port);
}
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
int ret;
struct device *tty_dev;
struct dbc_port *port = dbc_to_port(dbc);
if (port->registered)
return -EBUSY;
xhci_dbc_tty_init_port(dbc, port);
mutex_lock(&dbc_tty_minors_lock);
port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
mutex_unlock(&dbc_tty_minors_lock);
if (port->minor < 0) {
ret = port->minor;
goto err_idr;
}
ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
if (ret)
goto err_exit_port;
ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
dbc_read_complete);
if (ret)
goto err_free_fifo;
ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
dbc_write_complete);
if (ret)
goto err_free_requests;
tty_dev = tty_port_register_device(&port->port,
dbc_tty_driver, port->minor, NULL);
if (IS_ERR(tty_dev)) {
ret = PTR_ERR(tty_dev);
goto err_free_requests;
}
port->registered = true;
return 0;
err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
kfifo_free(&port->write_fifo);
err_exit_port:
idr_remove(&dbc_tty_minors, port->minor);
err_idr:
xhci_dbc_tty_exit_port(port);
dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
return ret;
}
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
struct dbc_port *port = dbc_to_port(dbc);
if (!port->registered)
return;
tty_unregister_device(dbc_tty_driver, port->minor);
xhci_dbc_tty_exit_port(port);
port->registered = false;
mutex_lock(&dbc_tty_minors_lock);
idr_remove(&dbc_tty_minors, port->minor);
mutex_unlock(&dbc_tty_minors_lock);
kfifo_free(&port->write_fifo);
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->read_queue);
xhci_dbc_free_requests(&port->write_pool);
}
static const struct dbc_driver dbc_driver = {
.configure = xhci_dbc_tty_register_device,
.disconnect = xhci_dbc_tty_unregister_device,
};
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
struct xhci_dbc *dbc;
struct dbc_port *port;
int status;
if (!dbc_tty_driver)
return -ENODEV;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
if (!dbc) {
status = -ENOMEM;
goto out2;
}
dbc->priv = port;
/* get rid of xhci once this is a real driver binding to a device */
xhci->dbc = dbc;
return 0;
out2:
kfree(port);
return status;
}
/*
 * Undo what probe did; assume the dbc is stopped already.
 * We also assume tty_unregister_device() was called before this.
*/
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
struct dbc_port *port = dbc_to_port(dbc);
xhci_dbc_remove(dbc);
kfree(port);
}
int dbc_tty_init(void)
{
int ret;
idr_init(&dbc_tty_minors);
dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(dbc_tty_driver)) {
idr_destroy(&dbc_tty_minors);
return PTR_ERR(dbc_tty_driver);
}
dbc_tty_driver->driver_name = "dbc_serial";
dbc_tty_driver->name = "ttyDBC";
dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
dbc_tty_driver->init_termios = tty_std_termios;
dbc_tty_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
dbc_tty_driver->init_termios.c_ispeed = 9600;
dbc_tty_driver->init_termios.c_ospeed = 9600;
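/* The 9600 baud termios setting is only nominal; DbC data actually
 * moves at USB bulk-transfer speed regardless of the line speed.
 */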
tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
ret = tty_register_driver(dbc_tty_driver);
if (ret) {
pr_err("Can't register dbc tty driver\n");
tty_driver_kref_put(dbc_tty_driver);
idr_destroy(&dbc_tty_minors);
}
return ret;
}
void dbc_tty_exit(void)
{
if (dbc_tty_driver) {
tty_unregister_driver(dbc_tty_driver);
tty_driver_kref_put(dbc_tty_driver);
dbc_tty_driver = NULL;
}
idr_destroy(&dbc_tty_minors);
}
| linux-master | drivers/usb/host/xhci-dbgtty.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
 * Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include "fhci.h"
/* virtual root hub specific descriptor */
static u8 root_hub_des[] = {
0x09, /* bLength */
USB_DT_HUB, /* bDescriptorType;hub-descriptor */
0x01, /* bNbrPorts */
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM, /* wHubCharacteristics */
0x00, /* per-port power, no overcurrent */
0x01, /* bPwrOn2pwrGood;2ms */
0x00, /* bHubContrCurrent;0mA */
0x00, /* DeviceRemoveable */
0xff, /* PortPwrCtrlMask */
};
static void fhci_gpio_set_value(struct fhci_hcd *fhci, int gpio_nr, bool on)
{
struct gpio_desc *gpiod = fhci->gpiods[gpio_nr];
if (!gpiod)
return;
gpiod_set_value(gpiod, on);
mdelay(5);
}
void fhci_config_transceiver(struct fhci_hcd *fhci,
enum fhci_port_status status)
{
fhci_dbg(fhci, "-> %s: %d\n", __func__, status);
switch (status) {
case FHCI_PORT_POWER_OFF:
fhci_gpio_set_value(fhci, GPIO_POWER, false);
break;
case FHCI_PORT_DISABLED:
case FHCI_PORT_WAITING:
fhci_gpio_set_value(fhci, GPIO_POWER, true);
break;
case FHCI_PORT_LOW:
fhci_gpio_set_value(fhci, GPIO_SPEED, false);
break;
case FHCI_PORT_FULL:
fhci_gpio_set_value(fhci, GPIO_SPEED, true);
break;
default:
WARN_ON(1);
break;
}
fhci_dbg(fhci, "<- %s: %d\n", __func__, status);
}
/* disable the USB port by clearing the EN bit in the USBMOD register */
void fhci_port_disable(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = (struct fhci_usb *)fhci->usb_lld;
enum fhci_port_status port_status;
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_stop_sof_timer(fhci);
fhci_flush_all_transmissions(usb);
fhci_usb_disable_interrupt((struct fhci_usb *)fhci->usb_lld);
port_status = usb->port_status;
usb->port_status = FHCI_PORT_DISABLED;
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* check if a new device was attached during the disconnection process */
if (port_status == FHCI_PORT_WAITING)
fhci_device_connected_interrupt(fhci);
usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_ENABLE;
usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
fhci_usb_enable_interrupt((struct fhci_usb *)fhci->usb_lld);
fhci_dbg(fhci, "<- %s\n", __func__);
}
/* enable the USB port by setting the EN bit in the USBMOD register */
void fhci_port_enable(void *lld)
{
struct fhci_usb *usb = (struct fhci_usb *)lld;
struct fhci_hcd *fhci = usb->fhci;
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_config_transceiver(fhci, usb->port_status);
if ((usb->port_status != FHCI_PORT_FULL) &&
(usb->port_status != FHCI_PORT_LOW))
fhci_start_sof_timer(fhci);
usb->vroot_hub->port.wPortStatus |= USB_PORT_STAT_ENABLE;
usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
fhci_dbg(fhci, "<- %s\n", __func__);
}
void fhci_io_port_generate_reset(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
gpiod_direction_output(fhci->gpiods[GPIO_USBOE], 0);
gpiod_direction_output(fhci->gpiods[GPIO_USBTP], 0);
gpiod_direction_output(fhci->gpiods[GPIO_USBTN], 0);
mdelay(5);
qe_pin_set_dedicated(fhci->pins[PIN_USBOE]);
qe_pin_set_dedicated(fhci->pins[PIN_USBTP]);
qe_pin_set_dedicated(fhci->pins[PIN_USBTN]);
fhci_dbg(fhci, "<- %s\n", __func__);
}
/* generate the RESET condition on the bus */
void fhci_port_reset(void *lld)
{
struct fhci_usb *usb = (struct fhci_usb *)lld;
struct fhci_hcd *fhci = usb->fhci;
u8 mode;
u16 mask;
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_stop_sof_timer(fhci);
/* disable the USB controller */
mode = in_8(&fhci->regs->usb_usmod);
out_8(&fhci->regs->usb_usmod, mode & (~USB_MODE_EN));
/* disable idle interrupts */
mask = in_be16(&fhci->regs->usb_usbmr);
out_be16(&fhci->regs->usb_usbmr, mask & (~USB_E_IDLE_MASK));
fhci_io_port_generate_reset(fhci);
/* enable interrupt on this endpoint */
out_be16(&fhci->regs->usb_usbmr, mask);
/* enable the USB controller */
mode = in_8(&fhci->regs->usb_usmod);
out_8(&fhci->regs->usb_usmod, mode | USB_MODE_EN);
fhci_start_sof_timer(fhci);
fhci_dbg(fhci, "<- %s\n", __func__);
}
int fhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
int ret = 0;
unsigned long flags;
fhci_dbg(fhci, "-> %s\n", __func__);
spin_lock_irqsave(&fhci->lock, flags);
if (fhci->vroot_hub->port.wPortChange & (USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE | USB_PORT_STAT_C_SUSPEND |
USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_OVERCURRENT)) {
*buf = 1 << 1;
ret = 1;
fhci_dbg(fhci, "-- %s\n", __func__);
}
spin_unlock_irqrestore(&fhci->lock, flags);
fhci_dbg(fhci, "<- %s\n", __func__);
return ret;
}
int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
int retval = 0;
struct usb_hub_status *hub_status;
struct usb_port_status *port_status;
unsigned long flags;
spin_lock_irqsave(&fhci->lock, flags);
fhci_dbg(fhci, "-> %s\n", __func__);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
break;
default:
goto error;
}
break;
case ClearPortFeature:
fhci->vroot_hub->feature &= (1 << wValue);
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
fhci->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_ENABLE;
fhci_port_disable(fhci);
break;
case USB_PORT_FEAT_C_ENABLE:
fhci->vroot_hub->port.wPortChange &=
~USB_PORT_STAT_C_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
fhci->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_SUSPEND;
fhci_stop_sof_timer(fhci);
break;
case USB_PORT_FEAT_C_SUSPEND:
fhci->vroot_hub->port.wPortChange &=
~USB_PORT_STAT_C_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
fhci->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_POWER;
fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
break;
case USB_PORT_FEAT_C_CONNECTION:
fhci->vroot_hub->port.wPortChange &=
~USB_PORT_STAT_C_CONNECTION;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
fhci->vroot_hub->port.wPortChange &=
~USB_PORT_STAT_C_OVERCURRENT;
break;
case USB_PORT_FEAT_C_RESET:
fhci->vroot_hub->port.wPortChange &=
~USB_PORT_STAT_C_RESET;
break;
default:
goto error;
}
break;
case GetHubDescriptor:
memcpy(buf, root_hub_des, sizeof(root_hub_des));
break;
case GetHubStatus:
hub_status = (struct usb_hub_status *)buf;
hub_status->wHubStatus =
cpu_to_le16(fhci->vroot_hub->hub.wHubStatus);
hub_status->wHubChange =
cpu_to_le16(fhci->vroot_hub->hub.wHubChange);
break;
case GetPortStatus:
port_status = (struct usb_port_status *)buf;
port_status->wPortStatus =
cpu_to_le16(fhci->vroot_hub->port.wPortStatus);
port_status->wPortChange =
cpu_to_le16(fhci->vroot_hub->port.wPortChange);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
break;
default:
goto error;
}
break;
case SetPortFeature:
fhci->vroot_hub->feature |= (1 << wValue);
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
fhci->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_ENABLE;
fhci_port_enable(fhci->usb_lld);
break;
case USB_PORT_FEAT_SUSPEND:
fhci->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_SUSPEND;
fhci_stop_sof_timer(fhci);
break;
case USB_PORT_FEAT_RESET:
fhci->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_RESET;
fhci_port_reset(fhci->usb_lld);
fhci->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_ENABLE;
fhci->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_RESET;
break;
case USB_PORT_FEAT_POWER:
fhci->vroot_hub->port.wPortStatus |=
USB_PORT_STAT_POWER;
fhci_config_transceiver(fhci, FHCI_PORT_WAITING);
break;
default:
goto error;
}
break;
default:
error:
retval = -EPIPE;
}
fhci_dbg(fhci, "<- %s\n", __func__);
spin_unlock_irqrestore(&fhci->lock, flags);
return retval;
}
| linux-master | drivers/usb/host/fhci-hub.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ehci-omap.c - driver for USBHOST on OMAP3/4 processors
*
* Bus Glue for the EHCI controllers in OMAP3/4
* Tested on several OMAP3 boards, and OMAP4 Pandaboard
*
* Copyright (C) 2007-2013 Texas Instruments, Inc.
* Author: Vikram Pandita <[email protected]>
* Author: Anand Gadiyar <[email protected]>
* Author: Keshava Munegowda <[email protected]>
* Author: Roger Quadros <[email protected]>
*
* Copyright (C) 2009 Nokia Corporation
* Contact: Felipe Balbi <[email protected]>
*
* Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include "ehci.h"
#include <linux/platform_data/usb-omap.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
#define EHCI_INSNREG05_ULPI (0xA4)
#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
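/* A ULPI register access via INSNREG05 is presumably composed by
 * OR-ing these fields, e.g. for a hypothetical write:
 *
 *	(1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT) |
 *	(port << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT) |
 *	(op << EHCI_INSNREG05_ULPI_OPSEL_SHIFT) |
 *	(reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT) |
 *	(data << EHCI_INSNREG05_ULPI_WRDATA_SHIFT)
 */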
#define DRIVER_DESC "OMAP-EHCI Host Controller driver"
static const char hcd_name[] = "ehci-omap";
/*-------------------------------------------------------------------------*/
struct omap_hcd {
struct usb_phy *phy[OMAP3_HS_USB_PORTS]; /* one PHY for each port */
int nports;
};
static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
{
__raw_writel(val, base + reg);
}
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
static struct hc_driver __read_mostly ehci_omap_hc_driver;
static const struct ehci_driver_overrides ehci_omap_overrides __initconst = {
.extra_priv_size = sizeof(struct omap_hcd),
};
/**
* ehci_hcd_omap_probe - initialize TI-based HCDs
* @pdev: Pointer to this platform device's information
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
int ret;
int irq;
int i;
struct omap_hcd *omap;
if (usb_disabled())
return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
return -ENODEV;
}
/* For DT boot, get platform data from parent. i.e. usbhshost */
if (dev->of_node) {
pdata = dev_get_platdata(dev->parent);
dev->platform_data = pdata;
}
if (!pdata) {
dev_err(dev, "Missing platform data\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
ret = -ENODEV;
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
if (!hcd) {
dev_err(dev, "Failed to create HCD\n");
return -ENOMEM;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
hcd_to_ehci(hcd)->caps = regs;
omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
omap->nports = pdata->nports;
platform_set_drvdata(pdev, hcd);
/* get the PHY devices if needed */
for (i = 0; i < omap->nports; i++) {
struct usb_phy *phy;
/* get the PHY device */
phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -ENODEV) { /* no PHY */
phy = NULL;
continue;
}
if (ret != -EPROBE_DEFER)
dev_err(dev, "Can't get PHY for port %d: %d\n",
i, ret);
goto err_phy;
}
omap->phy[i] = phy;
if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
usb_phy_init(omap->phy[i]);
/* bring PHY out of suspend */
usb_phy_set_suspend(omap->phy[i], 0);
}
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
/*
 * An undocumented "feature" in the OMAP3 EHCI controller
* causes suspended ports to be taken out of suspend when
* the USBCMD.Run/Stop bit is cleared (for example when
* we do ehci_bus_suspend).
* This breaks suspend-resume if the root-hub is allowed
* to suspend. Writing 1 to this undocumented register bit
* disables this feature and restores normal behavior.
*/
ehci_write(regs, EHCI_INSNREG04,
EHCI_INSNREG04_DISABLE_UNSUSPEND);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_pm_runtime;
}
device_wakeup_enable(hcd->self.controller);
/*
 * Bring PHYs out of reset for non-PHY modes.
* Even though HSIC mode is a PHY-less mode, the reset
* line exists between the chips and can be modelled
* as a PHY device for reset control.
*/
for (i = 0; i < omap->nports; i++) {
if (!omap->phy[i] ||
pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
continue;
usb_phy_init(omap->phy[i]);
/* bring PHY out of suspend */
usb_phy_set_suspend(omap->phy[i], 0);
}
return 0;
err_pm_runtime:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
err_phy:
for (i = 0; i < omap->nports; i++) {
if (omap->phy[i])
usb_phy_shutdown(omap->phy[i]);
}
usb_put_hcd(hcd);
return ret;
}
/**
* ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
* @pdev: USB Host Controller being removed
*
* Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
static void ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct omap_hcd *omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
int i;
usb_remove_hcd(hcd);
for (i = 0; i < omap->nports; i++) {
if (omap->phy[i])
usb_phy_shutdown(omap->phy[i]);
}
usb_put_hcd(hcd);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
}
static const struct of_device_id omap_ehci_dt_ids[] = {
{ .compatible = "ti,ehci-omap" },
{ }
};
MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
.remove_new = ehci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
.driver = {
.name = hcd_name,
.of_match_table = omap_ehci_dt_ids,
}
};
/*-------------------------------------------------------------------------*/
static int __init ehci_omap_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
return platform_driver_register(&ehci_hcd_omap_driver);
}
module_init(ehci_omap_init);
static void __exit ehci_omap_cleanup(void)
{
platform_driver_unregister(&ehci_hcd_omap_driver);
}
module_exit(ehci_omap_cleanup);
MODULE_ALIAS("platform:ehci-omap");
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_AUTHOR("Roger Quadros <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ehci-omap.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
 * Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "fhci.h"
/* maps the hardware error code to the USB error code */
static int status_to_error(u32 status)
{
if (status == USB_TD_OK)
return 0;
else if (status & USB_TD_RX_ER_CRC)
return -EILSEQ;
else if (status & USB_TD_RX_ER_NONOCT)
return -EPROTO;
else if (status & USB_TD_RX_ER_OVERUN)
return -ECOMM;
else if (status & USB_TD_RX_ER_BITSTUFF)
return -EPROTO;
else if (status & USB_TD_RX_ER_PID)
return -EILSEQ;
else if (status & (USB_TD_TX_ER_NAK | USB_TD_TX_ER_TIMEOUT))
return -ETIMEDOUT;
else if (status & USB_TD_TX_ER_STALL)
return -EPIPE;
else if (status & USB_TD_TX_ER_UNDERUN)
return -ENOSR;
else if (status & USB_TD_RX_DATA_UNDERUN)
return -EREMOTEIO;
else if (status & USB_TD_RX_DATA_OVERUN)
return -EOVERFLOW;
else
return -EINVAL;
}
void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td)
{
list_add_tail(&td->frame_lh, &frame->tds_list);
}
void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number)
{
int i;
for (i = 0; i < number; i++) {
struct td *td = td_list[i];
list_add_tail(&td->node, &ed->td_list);
}
if (ed->td_head == NULL)
ed->td_head = td_list[0];
}
static struct td *peek_td_from_ed(struct ed *ed)
{
struct td *td;
if (!list_empty(&ed->td_list))
td = list_entry(ed->td_list.next, struct td, node);
else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame)
{
struct td *td;
if (!list_empty(&frame->tds_list)) {
td = list_entry(frame->tds_list.next, struct td, frame_lh);
list_del_init(frame->tds_list.next);
} else
td = NULL;
return td;
}
struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame)
{
struct td *td;
if (!list_empty(&frame->tds_list))
td = list_entry(frame->tds_list.next, struct td, frame_lh);
else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_ed(struct ed *ed)
{
struct td *td;
if (!list_empty(&ed->td_list)) {
td = list_entry(ed->td_list.next, struct td, node);
list_del_init(ed->td_list.next);
/* if this TD was the ED's head, find next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td,
node);
else
ed->td_head = NULL;
} else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list)
{
struct td *td;
if (!list_empty(&p_list->done_list)) {
td = list_entry(p_list->done_list.next, struct td, node);
list_del_init(p_list->done_list.next);
} else
td = NULL;
return td;
}
void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed)
{
struct td *td;
td = ed->td_head;
list_del_init(&td->node);
/* If this TD was the ED's head, find next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td, node);
else {
ed->td_head = NULL;
ed->state = FHCI_ED_SKIP;
}
ed->toggle_carry = td->toggle;
list_add_tail(&td->node, &usb->hc_list->done_list);
if (td->ioc)
usb->transfer_confirm(usb->fhci);
}
/* free a done FHCI URB's resources, such as its ED and TDs */
static void free_urb_priv(struct fhci_hcd *fhci, struct urb *urb)
{
int i;
struct urb_priv *urb_priv = urb->hcpriv;
struct ed *ed = urb_priv->ed;
for (i = 0; i < urb_priv->num_of_tds; i++) {
list_del_init(&urb_priv->tds[i]->node);
fhci_recycle_empty_td(fhci, urb_priv->tds[i]);
}
/* if this TD was the ED's head, find the next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td, node);
else
ed->td_head = NULL;
kfree(urb_priv->tds);
kfree(urb_priv);
urb->hcpriv = NULL;
/* if the ED has no TDs left, unlink it */
if (ed->td_head == NULL)
list_del_init(&ed->node);
fhci->active_urbs--;
}
/* this routine is called to complete and free a done URB */
void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb)
{
free_urb_priv(fhci, urb);
if (urb->status == -EINPROGRESS) {
if (urb->actual_length != urb->transfer_buffer_length &&
urb->transfer_flags & URB_SHORT_NOT_OK)
urb->status = -EREMOTEIO;
else
urb->status = 0;
}
usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);
spin_unlock(&fhci->lock);
usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, urb->status);
spin_lock(&fhci->lock);
}
/*
 * calculate transfer length/stats and update the urb
 * Precondition: irqsafe (only for urb->status locking)
*/
void fhci_done_td(struct urb *urb, struct td *td)
{
struct ed *ed = td->ed;
u32 cc = td->status;
/* ISO...drivers see per-TD length/status */
if (ed->mode == FHCI_TF_ISO) {
u32 len;
if (!(urb->transfer_flags & URB_SHORT_NOT_OK &&
cc == USB_TD_RX_DATA_UNDERUN))
cc = USB_TD_OK;
if (usb_pipeout(urb->pipe))
len = urb->iso_frame_desc[td->iso_index].length;
else
len = td->actual_len;
urb->actual_length += len;
urb->iso_frame_desc[td->iso_index].actual_length = len;
urb->iso_frame_desc[td->iso_index].status =
status_to_error(cc);
}
/* BULK, INT, CONTROL... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
* might not be reported as errors.
*/
else {
if (td->error_cnt >= 3)
urb->error_count = 3;
/* control endpoints only have soft stalls */
/* update packet status if needed (short may be ok) */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK) &&
cc == USB_TD_RX_DATA_UNDERUN) {
ed->state = FHCI_ED_OPER;
cc = USB_TD_OK;
}
if (cc != USB_TD_OK) {
if (urb->status == -EINPROGRESS)
urb->status = status_to_error(cc);
}
/* count all non-empty packets except control SETUP packet */
if (td->type != FHCI_TA_SETUP || td->iso_index != 0)
urb->actual_length += td->actual_len;
}
}
/* there are some pending requests to unlink */
void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed)
{
struct td *td = peek_td_from_ed(ed);
struct urb *urb = td->urb;
struct urb_priv *urb_priv = urb->hcpriv;
if (urb_priv->state == URB_DEL) {
td = fhci_remove_td_from_ed(ed);
/* HC may have partly processed this TD */
if (td->status != USB_TD_INPROGRESS)
fhci_done_td(urb, td);
/* URB is done; clean up */
if (++(urb_priv->tds_cnt) == urb_priv->num_of_tds)
fhci_urb_complete_free(fhci, urb);
}
}
| linux-master | drivers/usb/host/fhci-q.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include "xhci.h"
#include "xhci-trace.h"
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
PORT_RC | PORT_PLC | PORT_PE)
/* Default sublink speed attribute of each lane */
static u32 ssp_cap_default_ssa[] = {
0x00050034, /* USB 3.0 SS Gen1x1 id:4 symmetric rx 5Gbps */
0x000500b4, /* USB 3.0 SS Gen1x1 id:4 symmetric tx 5Gbps */
0x000a4035, /* USB 3.1 SSP Gen2x1 id:5 symmetric rx 10Gbps */
0x000a40b5, /* USB 3.1 SSP Gen2x1 id:5 symmetric tx 10Gbps */
0x00054036, /* USB 3.2 SSP Gen1x2 id:6 symmetric rx 5Gbps */
0x000540b6, /* USB 3.2 SSP Gen1x2 id:6 symmetric tx 5Gbps */
0x000a4037, /* USB 3.2 SSP Gen2x2 id:7 symmetric rx 10Gbps */
0x000a40b7, /* USB 3.2 SSP Gen2x2 id:7 symmetric tx 10Gbps */
};
static int xhci_create_usb3x_bos_desc(struct xhci_hcd *xhci, char *buf,
u16 wLength)
{
struct usb_bos_descriptor *bos;
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
struct xhci_port_cap *port_cap = NULL;
u16 bcdUSB;
u32 reg;
u32 min_rate = 0;
u8 min_ssid;
u8 ssac;
u8 ssic;
int offset;
int i;
/* BOS descriptor */
bos = (struct usb_bos_descriptor *)buf;
bos->bLength = USB_DT_BOS_SIZE;
bos->bDescriptorType = USB_DT_BOS;
bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE +
USB_DT_USB_SS_CAP_SIZE);
bos->bNumDeviceCaps = 1;
/* Create the descriptor for port with the highest revision */
for (i = 0; i < xhci->num_port_caps; i++) {
u8 major = xhci->port_caps[i].maj_rev;
u8 minor = xhci->port_caps[i].min_rev;
u16 rev = (major << 8) | minor;
if (i == 0 || bcdUSB < rev) {
bcdUSB = rev;
port_cap = &xhci->port_caps[i];
}
}
if (bcdUSB >= 0x0310) {
if (port_cap->psi_count) {
u8 num_sym_ssa = 0;
for (i = 0; i < port_cap->psi_count; i++) {
if ((port_cap->psi[i] & PLT_MASK) == PLT_SYM)
num_sym_ssa++;
}
ssac = port_cap->psi_count + num_sym_ssa - 1;
ssic = port_cap->psi_uid_count - 1;
} else {
if (bcdUSB >= 0x0320)
ssac = 7;
else
ssac = 3;
ssic = (ssac + 1) / 2 - 1;
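/* e.g. with the eight default attributes: ssac = 7 (eight
 * attributes, zero-based) and ssic = 3 (four speed IDs).
 */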
}
bos->bNumDeviceCaps++;
bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE +
USB_DT_USB_SS_CAP_SIZE +
USB_DT_USB_SSP_CAP_SIZE(ssac));
}
if (wLength < USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE)
return wLength;
/* SuperSpeed USB Device Capability */
ss_cap = (struct usb_ss_cap_descriptor *)&buf[USB_DT_BOS_SIZE];
ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
ss_cap->bmAttributes = 0; /* set later */
ss_cap->wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION);
ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
ss_cap->bU1devExitLat = 0; /* set later */
ss_cap->bU2DevExitLat = 0; /* set later */
reg = readl(&xhci->cap_regs->hcc_params);
if (HCC_LTC(reg))
ss_cap->bmAttributes |= USB_LTM_SUPPORT;
if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
reg = readl(&xhci->cap_regs->hcs_params3);
ss_cap->bU1devExitLat = HCS_U1_LATENCY(reg);
ss_cap->bU2DevExitLat = cpu_to_le16(HCS_U2_LATENCY(reg));
}
if (wLength < le16_to_cpu(bos->wTotalLength))
return wLength;
if (bcdUSB < 0x0310)
return le16_to_cpu(bos->wTotalLength);
ssp_cap = (struct usb_ssp_cap_descriptor *)&buf[USB_DT_BOS_SIZE +
USB_DT_USB_SS_CAP_SIZE];
ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac);
ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
ssp_cap->bReserved = 0;
ssp_cap->wReserved = 0;
ssp_cap->bmAttributes =
cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) |
FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic));
if (!port_cap->psi_count) {
for (i = 0; i < ssac + 1; i++)
ssp_cap->bmSublinkSpeedAttr[i] =
cpu_to_le32(ssp_cap_default_ssa[i]);
min_ssid = 4;
goto out;
}
offset = 0;
for (i = 0; i < port_cap->psi_count; i++) {
u32 psi;
u32 attr;
u8 ssid;
u8 lp;
u8 lse;
u8 psie;
u16 lane_mantissa;
u16 psim;
u16 plt;
psi = port_cap->psi[i];
ssid = XHCI_EXT_PORT_PSIV(psi);
lp = XHCI_EXT_PORT_LP(psi);
psie = XHCI_EXT_PORT_PSIE(psi);
psim = XHCI_EXT_PORT_PSIM(psi);
plt = psi & PLT_MASK;
lse = psie;
lane_mantissa = psim;
/* Shift to Gbps and set SSP Link Protocol if 10 Gbps */
for (; psie < USB_SSP_SUBLINK_SPEED_LSE_GBPS; psie++)
psim /= 1000;
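/* e.g. PSIE = 2 (Mb/s) with PSIM = 5000 is shifted once to
 * give a 5 Gbps mantissa.
 */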
if (!min_rate || psim < min_rate) {
min_ssid = ssid;
min_rate = psim;
}
/* Some host controllers don't set the link protocol for SSP */
if (psim >= 10)
lp = USB_SSP_SUBLINK_SPEED_LP_SSP;
/*
* PSIM and PSIE represent the total speed of PSI. The BOS
* descriptor SSP sublink speed attribute lane mantissa
* describes the lane speed. E.g. PSIM and PSIE for gen2x2
* is 20Gbps, but the BOS descriptor lane speed mantissa is
* 10Gbps. Check and modify the mantissa value to match the
* lane speed.
*/
if (bcdUSB == 0x0320 && plt == PLT_SYM) {
/*
* The PSI dword for gen1x2 and gen2x1 share the same
* values. But the lane speed for gen1x2 is 5Gbps while
* gen2x1 is 10Gbps. If the previous PSI dword SSID is
* 5 and the PSIE and PSIM match with SSID 6, let's
* assume that the controller follows the default speed
* id with SSID 6 for gen1x2.
*/
if (ssid == 6 && psie == 3 && psim == 10 && i) {
u32 prev = port_cap->psi[i - 1];
if ((prev & PLT_MASK) == PLT_SYM &&
XHCI_EXT_PORT_PSIV(prev) == 5 &&
XHCI_EXT_PORT_PSIE(prev) == 3 &&
XHCI_EXT_PORT_PSIM(prev) == 10) {
lse = USB_SSP_SUBLINK_SPEED_LSE_GBPS;
lane_mantissa = 5;
}
}
if (psie == 3 && psim > 10) {
lse = USB_SSP_SUBLINK_SPEED_LSE_GBPS;
lane_mantissa = 10;
}
}
attr = (FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) |
FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP, lp) |
FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE, lse) |
FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, lane_mantissa));
switch (plt) {
case PLT_SYM:
attr |= FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST,
USB_SSP_SUBLINK_SPEED_ST_SYM_RX);
ssp_cap->bmSublinkSpeedAttr[offset++] = cpu_to_le32(attr);
attr &= ~USB_SSP_SUBLINK_SPEED_ST;
attr |= FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST,
USB_SSP_SUBLINK_SPEED_ST_SYM_TX);
ssp_cap->bmSublinkSpeedAttr[offset++] = cpu_to_le32(attr);
break;
case PLT_ASYM_RX:
attr |= FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST,
USB_SSP_SUBLINK_SPEED_ST_ASYM_RX);
ssp_cap->bmSublinkSpeedAttr[offset++] = cpu_to_le32(attr);
break;
case PLT_ASYM_TX:
attr |= FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST,
USB_SSP_SUBLINK_SPEED_ST_ASYM_TX);
ssp_cap->bmSublinkSpeedAttr[offset++] = cpu_to_le32(attr);
break;
}
}
out:
ssp_cap->wFunctionalitySupport =
cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID,
min_ssid) |
FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) |
FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1));
return le16_to_cpu(bos->wTotalLength);
}
static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc, int ports)
{
u16 temp;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 0;
/* Bits 1:0 - support per-port power switching, or power always on */
if (HCC_PPC(xhci->hcc_params))
temp |= HUB_CHAR_INDV_PORT_LPSM;
else
temp |= HUB_CHAR_NO_LPSM;
/* Bit 2 - root hubs are not part of a compound device */
/* Bits 4:3 - individual port over current protection */
temp |= HUB_CHAR_INDV_PORT_OCPM;
/* Bits 6:5 - no TTs in root ports */
/* Bit 7 - no port indicators */
desc->wHubCharacteristics = cpu_to_le16(temp);
}
/* Fill in the USB 2.0 roothub descriptor */
static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc)
{
int ports;
u16 temp;
__u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
u32 portsc;
unsigned int i;
struct xhci_hub *rhub;
rhub = &xhci->usb2_rhub;
ports = rhub->num_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
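/* e.g. a 10-port hub needs temp = 2 bytes for each of the two
 * variable-length bitmaps (reserved bit 0 plus bits 1-10).
 */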
desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.8 says 20ms */
/* The Device Removable bits are reported on a byte granularity.
* If the port doesn't exist within that byte, the bit is set to 0.
*/
memset(port_removable, 0, sizeof(port_removable));
for (i = 0; i < ports; i++) {
portsc = readl(rhub->ports[i]->addr);
/* If a device is removable, PORTSC reports a 0, same as in the
* hub descriptor DeviceRemovable bits.
*/
if (portsc & PORT_DEV_REMOVE)
/* This math is hairy because bit 0 of DeviceRemovable
* is reserved, and bit 1 is for port 1, etc.
*/
port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
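/* e.g. port 1 sets bit 1 of byte 0, and port 8 wraps
 * into bit 0 of byte 1.
 */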
}
/* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN
* ports on it. The USB 2.0 specification says that there are two
* variable length fields at the end of the hub descriptor:
* DeviceRemovable and PortPwrCtrlMask. But since we can have less than
* USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array
* to set PortPwrCtrlMask bits. PortPwrCtrlMask must always be set to
* 0xFF, so we initialize the both arrays (DeviceRemovable and
* PortPwrCtrlMask) to 0xFF. Then we set the DeviceRemovable for each
* set of ports that actually exist.
*/
memset(desc->u.hs.DeviceRemovable, 0xff,
sizeof(desc->u.hs.DeviceRemovable));
memset(desc->u.hs.PortPwrCtrlMask, 0xff,
sizeof(desc->u.hs.PortPwrCtrlMask));
for (i = 0; i < (ports + 1 + 7) / 8; i++)
memset(&desc->u.hs.DeviceRemovable[i], port_removable[i],
sizeof(__u8));
}
/* Fill in the USB 3.0 roothub descriptor */
static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc)
{
int ports;
u16 port_removable;
u32 portsc;
unsigned int i;
struct xhci_hub *rhub;
rhub = &xhci->usb3_rhub;
ports = rhub->num_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
desc->bDescriptorType = USB_DT_SS_HUB;
desc->bDescLength = USB_DT_SS_HUB_SIZE;
desc->bPwrOn2PwrGood = 50; /* usb 3.1 may fail if less than 100ms */
/* header decode latency should be zero for roothubs,
* see section 4.23.5.2.
*/
desc->u.ss.bHubHdrDecLat = 0;
desc->u.ss.wHubDelay = 0;
port_removable = 0;
/* bit 0 is reserved, bit 1 is for port 1, etc. */
for (i = 0; i < ports; i++) {
portsc = readl(rhub->ports[i]->addr);
if (portsc & PORT_DEV_REMOVE)
port_removable |= 1 << (i + 1);
}
desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
}
static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc)
{
if (hcd->speed >= HCD_USB3)
xhci_usb3_hub_descriptor(hcd, xhci, desc);
else
xhci_usb2_hub_descriptor(hcd, xhci, desc);
}
static unsigned int xhci_port_speed(unsigned int port_status)
{
if (DEV_LOWSPEED(port_status))
return USB_PORT_STAT_LOW_SPEED;
if (DEV_HIGHSPEED(port_status))
return USB_PORT_STAT_HIGH_SPEED;
/*
* FIXME: Yes, we should check for full speed, but the core uses that as
* a default in portspeed() in usb/core/hub.c (which is the only place
* USB_PORT_STAT_*_SPEED is used).
*/
return 0;
}
/*
* These bits are Read Only (RO) and should be saved and written to the
* registers: 0, 3, 10:13, 30
* connect status, over-current status, port speed, and device removable.
* connect status and port speed are also sticky - meaning they're in
* the AUX well and they aren't changed by a hot, warm, or cold reset.
*/
#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
/*
* These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
* bits 5:8, 9, 14:15, 25:27
* link state, port power, port indicator state, "wake on" enable state
*/
#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
/*
* These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
* bit 4 (port reset)
*/
#define XHCI_PORT_RW1S ((1<<4))
/*
* These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
* bits 1, 17, 18, 19, 20, 21, 22, 23
* port enable/disable, and
* change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
* over-current, reset, link state, and L1 change
*/
#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
/*
* Bit 16 is RW, and writing a '1' to it causes the link state control to be
* latched in
*/
#define XHCI_PORT_RW ((1<<16))
/*
* These bits are Reserved Zero (RsvdZ) and zero should be written to them:
* bits 2, 24, 28:31
*/
#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
/**
 * xhci_port_state_to_neutral() - Clean up a read portsc value back into a writeable one
 * @state: u32 port value read from the portsc register, to be cleaned up
*
* Given a port state, this function returns a value that would result in the
* port being in the same state, if the value was written to the port status
* control register.
* Save Read Only (RO) bits and save read/write bits where
* writing a 0 clears the bit and writing a 1 sets the bit (RWS).
* For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
*
* Return: u32 value that can be written back to portsc register without
* changing port state.
*/
u32 xhci_port_state_to_neutral(u32 state)
{
/* Save read-only status and port state */
return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
}
EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral);
/**
* xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port
* @hcd: pointer to hcd of the roothub
* @xhci: pointer to xhci structure
* @port: one-based port number of the port in this roothub.
*
* Return: Slot id of the usb device connected to the root port, 0 if not found
*/
int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
u16 port)
{
int slot_id;
int i;
enum usb_device_speed speed;
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i] || !xhci->devs[i]->udev)
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
&& xhci->devs[i]->fake_port == port) {
slot_id = i;
break;
}
}
return slot_id;
}
EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
/*
* Stop device
 * It issues a stop endpoint command for each of EP 0 to 30 and waits for
 * the last command to complete.
 * Set suspend to 1 if the suspend bit needs to be set in the commands.
*/
static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
{
struct xhci_virt_device *virt_dev;
struct xhci_command *cmd;
unsigned long flags;
int ret;
int i;
ret = 0;
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return -ENODEV;
trace_xhci_stop_device(virt_dev);
cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!cmd)
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
/* Check that the ep is running; required by AMD SNPS 3.1 xHC */
if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
continue;
command = xhci_alloc_command(xhci, false, GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
ret = -ENOMEM;
goto cmd_cleanup;
}
ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
i, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, command);
goto cmd_cleanup;
}
}
}
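/* Stop EP0 last, using the pre-allocated command whose completion
 * is waited on below.
 */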
ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
goto cmd_cleanup;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for last stop endpoint command to finish */
wait_for_completion(cmd->completion);
if (cmd->status == COMP_COMMAND_ABORTED ||
cmd->status == COMP_COMMAND_RING_STOPPED) {
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
/*
 * Ring device: ring all of the device's doorbells unconditionally.
*/
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
{
int i, s;
struct xhci_virt_ep *ep;
for (i = 0; i < LAST_EP_INDEX + 1; i++) {
ep = &xhci->devs[slot_id]->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
for (s = 1; s < ep->stream_info->num_streams; s++)
xhci_ring_ep_doorbell(xhci, slot_id, i, s);
} else if (ep->ring && ep->ring->dequeue) {
xhci_ring_ep_doorbell(xhci, slot_id, i, 0);
}
}
return;
}
static void xhci_disable_port(struct xhci_hcd *xhci, struct xhci_port *port)
{
struct usb_hcd *hcd;
u32 portsc;
hcd = port->rhub->hcd;
/* Don't allow the USB core to disable SuperSpeed ports. */
if (hcd->speed >= HCD_USB3) {
xhci_dbg(xhci, "Ignoring request to disable SuperSpeed port.\n");
return;
}
if (xhci->quirks & XHCI_BROKEN_PORT_PED) {
xhci_dbg(xhci,
"Broken Port Enabled/Disabled, ignoring port disable request.\n");
return;
}
portsc = readl(port->addr);
portsc = xhci_port_state_to_neutral(portsc);
/* Write 1 to disable the port */
writel(portsc | PORT_PE, port->addr);
portsc = readl(port->addr);
xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n",
hcd->self.busnum, port->hcd_portnum + 1, portsc);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
char *port_change_bit;
u32 status;
switch (wValue) {
case USB_PORT_FEAT_C_RESET:
status = PORT_RC;
port_change_bit = "reset";
break;
case USB_PORT_FEAT_C_BH_PORT_RESET:
status = PORT_WRC;
port_change_bit = "warm(BH) reset";
break;
case USB_PORT_FEAT_C_CONNECTION:
status = PORT_CSC;
port_change_bit = "connect";
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
status = PORT_OCC;
port_change_bit = "over-current";
break;
case USB_PORT_FEAT_C_ENABLE:
status = PORT_PEC;
port_change_bit = "enable/disable";
break;
case USB_PORT_FEAT_C_SUSPEND:
status = PORT_PLC;
port_change_bit = "suspend/resume";
break;
case USB_PORT_FEAT_C_PORT_LINK_STATE:
status = PORT_PLC;
port_change_bit = "link state";
break;
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
status = PORT_CEC;
port_change_bit = "config error";
break;
default:
/* Should never happen */
return;
}
/* Change bits are all write 1 to clear */
writel(port_status | status, addr);
port_status = readl(addr);
xhci_dbg(xhci, "clear port%d %s change, portsc: 0x%x\n",
wIndex + 1, port_change_bit, port_status);
}
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (hcd->speed >= HCD_USB3)
return &xhci->usb3_rhub;
return &xhci->usb2_rhub;
}
/*
 * xhci_set_port_power() must be called with xhci->lock held.
 * It will release and re-acquire the lock while calling the ACPI
 * power-state method.
 */
static void xhci_set_port_power(struct xhci_hcd *xhci, struct xhci_port *port,
bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
struct usb_hcd *hcd;
u32 temp;
hcd = port->rhub->hcd;
temp = readl(port->addr);
xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
hcd->self.busnum, port->hcd_portnum + 1, on ? "ON" : "OFF", temp);
temp = xhci_port_state_to_neutral(temp);
if (on) {
/* Power on */
writel(temp | PORT_POWER, port->addr);
readl(port->addr);
} else {
/* Power off */
writel(temp & ~PORT_POWER, port->addr);
}
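	/* the ACPI calls below may sleep, so drop the lock around them */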
spin_unlock_irqrestore(&xhci->lock, *flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
port->hcd_portnum);
if (temp)
usb_acpi_set_power_state(hcd->self.root_hub,
port->hcd_portnum, on);
spin_lock_irqsave(&xhci->lock, *flags);
}
static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex)
{
u32 temp;
struct xhci_port *port;
/* xhci only supports test mode for usb2 ports */
port = xhci->usb2_rhub.ports[wIndex];
temp = readl(port->addr + PORTPMSC);
temp |= test_mode << PORT_TEST_MODE_SHIFT;
writel(temp, port->addr + PORTPMSC);
xhci->test_mode = test_mode;
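	/* Force Enable needs the xHC running so the port keeps transmitting SOFs */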
if (test_mode == USB_TEST_FORCE_ENABLE)
xhci_start(xhci);
}
static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
__must_hold(&xhci->lock)
{
int i, retval;
/* Disable all Device Slots */
xhci_dbg(xhci, "Disable all slots\n");
spin_unlock_irqrestore(&xhci->lock, *flags);
for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
if (!xhci->devs[i])
continue;
retval = xhci_disable_slot(xhci, i);
xhci_free_virt_device(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
}
spin_lock_irqsave(&xhci->lock, *flags);
	/* Put all ports into the Disabled state by clearing PP */
	xhci_dbg(xhci, "Disable all ports (PP = 0)\n");
	/* Power off USB3 ports */
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->usb3_rhub.ports[i], false, flags);
	/* Power off USB2 ports */
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->usb2_rhub.ports[i], false, flags);
/* Stop the controller */
xhci_dbg(xhci, "Stop controller\n");
retval = xhci_halt(xhci);
if (retval)
return retval;
/* Disable runtime PM for test mode */
pm_runtime_forbid(xhci_to_hcd(xhci)->self.controller);
/* Set PORTPMSC.PTC field to enter selected test mode */
/* Port is selected by wIndex. port_id = wIndex + 1 */
xhci_dbg(xhci, "Enter Test Mode: %d, Port_id=%d\n",
test_mode, wIndex + 1);
xhci_port_set_test_mode(xhci, test_mode, wIndex);
return retval;
}
static int xhci_exit_test_mode(struct xhci_hcd *xhci)
{
int retval;
if (!xhci->test_mode) {
xhci_err(xhci, "Not in test mode, do nothing.\n");
return 0;
}
if (xhci->test_mode == USB_TEST_FORCE_ENABLE &&
!(xhci->xhc_state & XHCI_STATE_HALTED)) {
retval = xhci_halt(xhci);
if (retval)
return retval;
}
pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
xhci->test_mode = 0;
return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
u32 temp;
u32 portsc;
portsc = readl(port->addr);
temp = xhci_port_state_to_neutral(portsc);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
writel(temp, port->addr);
xhci_dbg(xhci, "Set port %d-%d link state, portsc: 0x%x, write 0x%x",
port->rhub->hcd->self.busnum, port->hcd_portnum + 1,
portsc, temp);
}
static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
struct xhci_port *port, u16 wake_mask)
{
u32 temp;
temp = readl(port->addr);
temp = xhci_port_state_to_neutral(temp);
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
temp |= PORT_WKCONN_E;
else
temp &= ~PORT_WKCONN_E;
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT)
temp |= PORT_WKDISC_E;
else
temp &= ~PORT_WKDISC_E;
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT)
temp |= PORT_WKOC_E;
else
temp &= ~PORT_WKOC_E;
writel(temp, port->addr);
}
/* Test and clear port RWC bit */
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
u32 port_bit)
{
u32 temp;
temp = readl(port->addr);
if (temp & port_bit) {
temp = xhci_port_state_to_neutral(temp);
temp |= port_bit;
writel(temp, port->addr);
}
}
/* Update the link status for a SuperSpeed port */
static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
u32 *status, u32 status_reg)
{
u32 pls = status_reg & PORT_PLS_MASK;
	/* When the CAS bit is set, a warm reset
	 * should be performed on the port
	 */
if (status_reg & PORT_CAS) {
		/* The CAS bit can be set while the port is
		 * in any link state.
		 * Only roothubs have a CAS bit, so we
		 * pretend to be in compliance mode
		 * unless we're already in compliance
		 * or the inactive state.
		 */
if (pls != USB_SS_PORT_LS_COMP_MOD &&
pls != USB_SS_PORT_LS_SS_INACTIVE) {
pls = USB_SS_PORT_LS_COMP_MOD;
}
		/* Also report the connection bit -
		 * the hub state machine resets the port
		 * when this bit is set.
		 */
pls |= USB_PORT_STAT_CONNECTION;
} else {
/*
* Resume state is an xHCI internal state. Do not report it to
* usb core, instead, pretend to be U3, thus usb core knows
* it's not ready for transfer.
*/
if (pls == XDEV_RESUME) {
*status |= USB_SS_PORT_LS_U3;
return;
}
/*
* If CAS bit isn't set but the Port is already at
* Compliance Mode, fake a connection so the USB core
* notices the Compliance state and resets the port.
* This resolves an issue generated by the SN65LVPE502CP
* in which sometimes the port enters compliance mode
* caused by a delay on the host-device negotiation.
*/
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(pls == USB_SS_PORT_LS_COMP_MOD))
pls |= USB_PORT_STAT_CONNECTION;
}
/* update status field */
*status |= pls;
}
/*
 * Function for the Compliance Mode Quirk.
 *
 * This function checks whether all xHC USB3 ports have entered U0; if so,
 * the compliance mode recovery timer is deleted. A port won't enter
 * compliance mode if it has previously entered U0.
 */
static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
u16 wIndex)
{
u32 all_ports_seen_u0 = ((1 << xhci->usb3_rhub.num_ports) - 1);
bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
return;
if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
xhci->port_status_u0 |= 1 << wIndex;
if (xhci->port_status_u0 == all_ports_seen_u0) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"All USB3 ports have entered U0 already!");
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance Mode Recovery Timer Deleted.");
}
}
}
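/*
 * Handle a USB2 port in the RESUME link state: time the resume signalling,
 * then move the link back to U0 and ring the device's doorbells once the
 * transition completes. Returns 0 on success, -EINVAL if the port is held in
 * reset or not enabled, and -ENODEV if no slot is bound to the port.
 */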
static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
u32 portsc,
unsigned long *flags)
{
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
int slot_id;
u32 wIndex;
hcd = port->rhub->hcd;
bus_state = &port->rhub->bus_state;
xhci = hcd_to_xhci(hcd);
wIndex = port->hcd_portnum;
if ((portsc & PORT_RESET) || !(portsc & PORT_PE)) {
return -EINVAL;
}
/* did port event handler already start resume timing? */
if (!port->resume_timestamp) {
		/* If not, maybe we are in a host initiated resume? */
		if (test_bit(wIndex, &bus_state->resuming_ports)) {
			/* Host initiated resume doesn't time the resume
			 * signalling using resume_done[].
			 * It manually sets RESUME state, sleeps 20ms
			 * and sets U0 state. This should probably be
			 * changed, but not right now.
			 */
} else {
			/* Port resume was discovered just now, so
			 * start the resume timing here.
			 */
unsigned long timeout = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(wIndex, &bus_state->resuming_ports);
port->resume_timestamp = timeout;
mod_timer(&hcd->rh_timer, timeout);
usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
} else if (time_after_eq(jiffies, port->resume_timestamp)) {
int time_left;
xhci_dbg(xhci, "resume USB2 port %d-%d\n",
hcd->self.busnum, wIndex + 1);
port->resume_timestamp = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
reinit_completion(&port->rexit_done);
port->rexit_active = true;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, *flags);
time_left = wait_for_completion_timeout(
&port->rexit_done,
msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
return -ENODEV;
}
xhci_ring_device(xhci, slot_id);
} else {
int port_status = readl(port->addr);
xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n",
hcd->self.busnum, wIndex + 1, port_status);
/*
* keep rexit_active set if U0 transition failed so we
* know to report PORT_STAT_SUSPEND status back to
* usbcore. It will be cleared later once the port is
* out of RESUME/U3 state
*/
}
usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
}
return 0;
}
static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li)
{
u32 ext_stat = 0;
int speed_id;
	/* the usb3.1 spec only supports rx and tx lane counts of 1 */
speed_id = DEV_PORT_SPEED(raw_port_status);
ext_stat |= speed_id; /* bits 3:0, RX speed id */
ext_stat |= speed_id << 4; /* bits 7:4, TX speed id */
ext_stat |= PORT_RX_LANES(port_li) << 8; /* bits 11:8 Rx lane count */
ext_stat |= PORT_TX_LANES(port_li) << 12; /* bits 15:12 Tx lane count */
return ext_stat;
}
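/*
 * For example (illustrative values): a link reporting speed id 5 with one
 * lane in each direction packs as 0x1155:
 *	bits  3:0 = 5 (RX speed id)	bits  7:4 = 5 (TX speed id)
 *	bits 11:8 = 1 (RX lanes)	bits 15:12 = 1 (TX lanes)
 */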
static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
u32 portsc)
{
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
u32 link_state;
u32 portnum;
bus_state = &port->rhub->bus_state;
xhci = hcd_to_xhci(port->rhub->hcd);
hcd = port->rhub->hcd;
link_state = portsc & PORT_PLS_MASK;
portnum = port->hcd_portnum;
	/* USB3 specific wPortChange bits
	 *
	 * A port link change with the port in resume state should not be
	 * reported to usbcore, as this is an internal state to be handled
	 * by the xhci driver. Reporting PLC to usbcore may cause usbcore
	 * to clear PLC first, so the port change event irq won't be
	 * generated.
	 */
if (portsc & PORT_PLC && (link_state != XDEV_RESUME))
*status |= USB_PORT_STAT_C_LINK_STATE << 16;
if (portsc & PORT_WRC)
*status |= USB_PORT_STAT_C_BH_RESET << 16;
if (portsc & PORT_CEC)
*status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
/* USB3 specific wPortStatus bits */
if (portsc & PORT_POWER) {
*status |= USB_SS_PORT_STAT_POWER;
/* link state handling */
if (link_state == XDEV_U0)
bus_state->suspended_ports &= ~(1 << portnum);
}
/* remote wake resume signaling complete */
if (bus_state->port_remote_wakeup & (1 << portnum) &&
link_state != XDEV_RESUME &&
link_state != XDEV_RECOVERY) {
bus_state->port_remote_wakeup &= ~(1 << portnum);
usb_hcd_end_port_resume(&hcd->self, portnum);
}
xhci_hub_report_usb3_link_state(xhci, status, portsc);
xhci_del_comp_mod_timer(xhci, portsc, portnum);
}
static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
u32 portsc, unsigned long *flags)
{
struct xhci_bus_state *bus_state;
u32 link_state;
u32 portnum;
int err;
bus_state = &port->rhub->bus_state;
link_state = portsc & PORT_PLS_MASK;
portnum = port->hcd_portnum;
/* USB2 wPortStatus bits */
if (portsc & PORT_POWER) {
*status |= USB_PORT_STAT_POWER;
/* link state is only valid if port is powered */
if (link_state == XDEV_U3)
*status |= USB_PORT_STAT_SUSPEND;
if (link_state == XDEV_U2)
*status |= USB_PORT_STAT_L1;
if (link_state == XDEV_U0) {
if (bus_state->suspended_ports & (1 << portnum)) {
bus_state->suspended_ports &= ~(1 << portnum);
bus_state->port_c_suspend |= 1 << portnum;
}
}
if (link_state == XDEV_RESUME) {
err = xhci_handle_usb2_port_link_resume(port, portsc,
flags);
if (err < 0)
*status = 0xffffffff;
else if (port->resume_timestamp || port->rexit_active)
*status |= USB_PORT_STAT_SUSPEND;
}
}
	/*
	 * Clear usb2 resume signalling variables if the port is no longer
	 * suspended or resuming. The port either resumed to U0/U1/U2,
	 * disconnected, or entered an error state; resume related variables
	 * should be cleared in all those cases.
	 */
if (link_state != XDEV_U3 && link_state != XDEV_RESUME) {
if (port->resume_timestamp ||
test_bit(portnum, &bus_state->resuming_ports)) {
port->resume_timestamp = 0;
clear_bit(portnum, &bus_state->resuming_ports);
usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
}
port->rexit_active = 0;
}
}
/*
* Converts a raw xHCI port status into the format that external USB 2.0 or USB
* 3.0 hubs use.
*
* Possible side effects:
* - Mark a port as being done with device resume,
* and ring the endpoint doorbells.
* - Stop the Synopsys redriver Compliance Mode polling.
* - Drop and reacquire the xHCI lock, in order to wait for port resume.
*/
static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_bus_state *bus_state,
u16 wIndex, u32 raw_port_status,
unsigned long *flags)
__releases(&xhci->lock)
__acquires(&xhci->lock)
{
u32 status = 0;
struct xhci_hub *rhub;
struct xhci_port *port;
rhub = xhci_get_rhub(hcd);
port = rhub->ports[wIndex];
/* common wPortChange bits */
if (raw_port_status & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
if (raw_port_status & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
if ((raw_port_status & PORT_OCC))
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
if ((raw_port_status & PORT_RC))
status |= USB_PORT_STAT_C_RESET << 16;
/* common wPortStatus bits */
if (raw_port_status & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
status |= xhci_port_speed(raw_port_status);
}
if (raw_port_status & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
if (raw_port_status & PORT_OC)
status |= USB_PORT_STAT_OVERCURRENT;
if (raw_port_status & PORT_RESET)
status |= USB_PORT_STAT_RESET;
/* USB2 and USB3 specific bits, including Port Link State */
if (hcd->speed >= HCD_USB3)
xhci_get_usb3_port_status(port, &status, raw_port_status);
else
xhci_get_usb2_port_status(port, &status, raw_port_status,
flags);
if (bus_state->port_c_suspend & (1 << wIndex))
status |= USB_PORT_STAT_C_SUSPEND << 16;
return status;
}
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports;
unsigned long flags;
u32 temp, status;
int retval = 0;
int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
u16 timeout = 0;
u16 test_mode = 0;
struct xhci_hub *rhub;
struct xhci_port **ports;
struct xhci_port *port;
int portnum1;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
portnum1 = wIndex & 0xff;
spin_lock_irqsave(&xhci->lock, flags);
switch (typeReq) {
case GetHubStatus:
/* No power source, over-current reported per port */
memset(buf, 0, 4);
break;
case GetHubDescriptor:
/* Check to make sure userspace is asking for the USB 3.0 hub
* descriptor for the USB 3.0 roothub. If not, we stall the
* endpoint, like external hubs do.
*/
if (hcd->speed >= HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
xhci_dbg(xhci, "Wrong hub descriptor type for "
"USB 3.0 roothub.\n");
goto error;
}
xhci_hub_descriptor(hcd, xhci,
(struct usb_hub_descriptor *) buf);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
if ((wValue & 0xff00) != (USB_DT_BOS << 8))
goto error;
if (hcd->speed < HCD_USB3)
goto error;
retval = xhci_create_usb3x_bos_desc(xhci, buf, wLength);
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
case GetPortStatus:
if (!portnum1 || portnum1 > max_ports)
goto error;
wIndex--;
port = ports[portnum1 - 1];
temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
trace_xhci_get_port_status(wIndex, temp);
status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
&flags);
if (status == 0xffffffff)
goto error;
xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x",
hcd->self.busnum, portnum1, temp, status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		/* if USB 3.1 extended port status is requested, return 4 more bytes */
if (wValue == 0x02) {
u32 port_li;
if (hcd->speed < HCD_USB31 || wLength != 8) {
xhci_err(xhci, "get ext port status invalid parameter\n");
retval = -EINVAL;
break;
}
port_li = readl(port->addr + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
put_unaligned_le32(status, &buf[4]);
}
break;
case SetPortFeature:
if (wValue == USB_PORT_FEAT_LINK_STATE)
link_state = (wIndex & 0xff00) >> 3;
if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
wake_mask = wIndex & 0xff00;
if (wValue == USB_PORT_FEAT_TEST)
test_mode = (wIndex & 0xff00) >> 8;
/* The MSB of wIndex is the U1/U2 timeout */
timeout = (wIndex & 0xff00) >> 8;
wIndex &= 0xff;
if (!portnum1 || portnum1 > max_ports)
goto error;
port = ports[portnum1 - 1];
wIndex--;
temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
temp = xhci_port_state_to_neutral(temp);
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
spin_lock_irqsave(&xhci->lock, flags);
}
			/* Per the spec, software should not attempt to suspend
			 * a port unless the port reports that it is in the
			 * enabled (PED = '1', PLS < '3') state.
			 */
temp = readl(port->addr);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n",
hcd->self.busnum, portnum1);
goto error;
}
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
portnum1);
if (!slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
}
/* unlock to execute stop endpoint commands */
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(10); /* wait for the device to enter U3 */
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
temp = readl(port->addr);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
xhci_dbg(xhci, "Disable port %d-%d\n",
hcd->self.busnum, portnum1);
temp = xhci_port_state_to_neutral(temp);
/*
* Clear all change bits, so that we get a new
* connection event.
*/
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
writel(temp | PORT_PE, port->addr);
temp = readl(port->addr);
break;
}
/* Put link in RxDetect (enable port) */
if (link_state == USB_SS_PORT_LS_RX_DETECT) {
xhci_dbg(xhci, "Enable port %d-%d\n",
hcd->self.busnum, portnum1);
xhci_set_link_state(xhci, port, link_state);
temp = readl(port->addr);
break;
}
/*
* For xHCI 1.1 according to section 4.19.1.2.4.1 a
* root hub port's transition to compliance mode upon
			 * detecting LFPS timeout may be controlled by a
* Compliance Transition Enabled (CTE) flag (not
* software visible). This flag is set by writing 0xA
* to PORTSC PLS field which will allow transition to
* compliance mode the next time LFPS timeout is
* encountered. A warm reset will clear it.
*
* The CTE flag is only supported if the HCCPARAMS2 CTC
* flag is set, otherwise, the compliance substate is
* automatically entered as on 1.0 and prior.
*/
if (link_state == USB_SS_PORT_LS_COMP_MOD) {
if (!HCC2_CTC(xhci->hcc_params2)) {
xhci_dbg(xhci, "CTC flag is 0, port already supports entering compliance mode\n");
break;
}
if ((temp & PORT_CONNECT)) {
xhci_warn(xhci, "Can't set compliance mode when port is connected\n");
goto error;
}
xhci_dbg(xhci, "Enable compliance mode transition for port %d-%d\n",
hcd->self.busnum, portnum1);
xhci_set_link_state(xhci, port, link_state);
temp = readl(port->addr);
break;
}
/* Port must be enabled */
if (!(temp & PORT_PE)) {
retval = -ENODEV;
break;
}
/* Can't set port link state above '3' (U3) */
if (link_state > USB_SS_PORT_LS_U3) {
xhci_warn(xhci, "Cannot set port %d-%d link state %d\n",
hcd->self.busnum, portnum1, link_state);
goto error;
}
/*
* set link to U0, steps depend on current link state.
* U3: set link to U0 and wait for u3exit completion.
* U1/U2: no PLC complete event, only set link to U0.
* Resume/Recovery: device initiated U0, only wait for
* completion
*/
if (link_state == USB_SS_PORT_LS_U0) {
u32 pls = temp & PORT_PLS_MASK;
bool wait_u0 = false;
/* already in U0 */
if (pls == XDEV_U0)
break;
if (pls == XDEV_U3 ||
pls == XDEV_RESUME ||
pls == XDEV_RECOVERY) {
wait_u0 = true;
reinit_completion(&port->u3exit_done);
}
if (pls <= XDEV_U3) /* U1, U2, U3 */
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U0);
if (!wait_u0) {
if (pls > XDEV_U3)
goto error;
break;
}
spin_unlock_irqrestore(&xhci->lock, flags);
if (!wait_for_completion_timeout(&port->u3exit_done,
msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
hcd->self.busnum, portnum1);
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(port->addr);
break;
}
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
portnum1);
if (slot_id) {
/* unlock to execute stop endpoint
* commands */
spin_unlock_irqrestore(&xhci->lock,
flags);
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
while (retries--) {
usleep_range(4000, 8000);
temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) == XDEV_U3)
break;
}
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
}
break;
case USB_PORT_FEAT_POWER:
/*
* Turn on ports, even if there isn't per-port switching.
* HC will report connect events even before this is set.
* However, hub_wq will ignore the roothub events until
* the roothub is registered.
*/
xhci_set_port_power(xhci, port, true, &flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
writel(temp, port->addr);
temp = readl(port->addr);
xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n",
hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
xhci_set_remote_wake_mask(xhci, port, wake_mask);
temp = readl(port->addr);
xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n",
hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
writel(temp, port->addr);
temp = readl(port->addr);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_TEST:
/* 4.19.6 Port Test Modes (USB2 Test Mode) */
if (hcd->speed != HCD_USB2)
goto error;
if (test_mode > USB_TEST_FORCE_ENABLE ||
test_mode < USB_TEST_J)
goto error;
retval = xhci_enter_test_mode(xhci, test_mode, wIndex,
&flags);
break;
default:
goto error;
}
/* unblock any posted writes */
temp = readl(port->addr);
break;
case ClearPortFeature:
if (!portnum1 || portnum1 > max_ports)
goto error;
port = ports[portnum1 - 1];
wIndex--;
temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
/* FIXME: What new port features do we need to support? */
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
temp = readl(port->addr);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
goto error;
if ((temp & PORT_PLS_MASK) == XDEV_U3) {
if ((temp & PORT_PE) == 0)
goto error;
set_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_start_port_resume(&hcd->self, wIndex);
xhci_set_link_state(xhci, port, XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
portnum1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
xhci_ring_device(xhci, slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
fallthrough;
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
port->addr, temp);
break;
case USB_PORT_FEAT_ENABLE:
xhci_disable_port(xhci, port);
break;
case USB_PORT_FEAT_POWER:
xhci_set_port_power(xhci, port, false, &flags);
break;
case USB_PORT_FEAT_TEST:
retval = xhci_exit_test_mode(xhci);
break;
default:
goto error;
}
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(xhci_hub_control);
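/*
 * Note on the SetPortFeature encoding handled above (illustrative example):
 * the high byte of wIndex is overloaded. A request with
 * wValue = USB_PORT_FEAT_LINK_STATE and wIndex = (3 << 8) | 2 asks to put
 * port 2 into U3; the "(wIndex & 0xff00) >> 3" shift turns the raw selector
 * (3, in bits 11:8) into the USB_SS_PORT_LS_U3 (0x60) encoding that the
 * link state comparisons use.
 */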
/*
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
* Ports are 0-indexed from the HCD point of view,
 * and 1-indexed from the USB core point of view.
*
* Note that the status change bits will be cleared as soon as a port status
* change event is generated, so we use the saved status from that event.
*/
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
unsigned long flags;
u32 temp, status;
u32 mask;
int i, retval;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports;
struct xhci_bus_state *bus_state;
bool reset_change = false;
struct xhci_hub *rhub;
struct xhci_port **ports;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
/* Initial status is no changes */
retval = (max_ports + 8) / 8;
memset(buf, 0, retval);
/*
* Inform the usbcore about resume-in-progress by returning
* a non-zero value even if there are no status changes.
*/
spin_lock_irqsave(&xhci->lock, flags);
status = bus_state->resuming_ports;
	/*
	 * SS devices are only visible to the roothub after link training
	 * completes. Keep polling the roothubs for a grace period after
	 * xHC start.
	 */
if (xhci->run_graceperiod) {
if (time_before(jiffies, xhci->run_graceperiod))
status = 1;
else
xhci->run_graceperiod = 0;
}
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
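	/*
	 * Bit 0 of buf is reserved for changes on the hub itself, so port i
	 * (0-based) maps to bit i + 1; e.g. a change on port 7 lands in
	 * buf[1] bit 0.
	 */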
for (i = 0; i < max_ports; i++) {
temp = readl(ports[i]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
trace_xhci_hub_status_data(i, temp);
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
(ports[i]->resume_timestamp && time_after_eq(
jiffies, ports[i]->resume_timestamp))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
if ((temp & PORT_RC))
reset_change = true;
if (temp & PORT_OC)
status = 1;
}
if (!status && !reset_change) {
xhci_dbg(xhci, "%s: stopping usb%d port polling\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return status ? retval : 0;
}
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports, port_index;
struct xhci_bus_state *bus_state;
unsigned long flags;
struct xhci_hub *rhub;
struct xhci_port **ports;
u32 portsc_buf[USB_MAXCHILDREN];
bool wake_enabled;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
wake_enabled = hcd->self.root_hub->do_remote_wakeup;
spin_lock_irqsave(&xhci->lock, flags);
if (wake_enabled) {
if (bus_state->resuming_ports || /* USB2 */
bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "usb%d bus suspend to fail because a port is resuming\n",
hcd->self.busnum);
return -EBUSY;
}
}
/*
* Prepare ports for suspend, but don't write anything before all ports
* are checked and we know bus suspend can proceed
*/
bus_state->bus_suspended = 0;
port_index = max_ports;
while (port_index--) {
u32 t1, t2;
int retries = 10;
retry:
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
/*
* Give a USB3 port in link training time to finish, but don't
* prevent suspend as port might be stuck
*/
if ((hcd->speed >= HCD_USB3) && retries-- &&
(t1 & PORT_PLS_MASK) == XDEV_POLLING) {
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(XHCI_PORT_POLLING_LFPS_TIME);
spin_lock_irqsave(&xhci->lock, flags);
xhci_dbg(xhci, "port %d-%d polling in bus suspend, waiting\n",
hcd->self.busnum, port_index + 1);
goto retry;
}
		/* bail out if the port detected an over-current condition */
if (t1 & PORT_OC) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n");
return -EBUSY;
}
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
return -EBUSY;
}
xhci_dbg(xhci, "port %d-%d not suspended\n",
hcd->self.busnum, port_index + 1);
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
set_bit(port_index, &bus_state->bus_suspended);
}
/* USB core sets remote wake mask for USB 3.0 hubs,
* including the USB 3.0 roothub, but only if CONFIG_PM
* is enabled, so also enable remote wake here.
*/
if (wake_enabled) {
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
} else {
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
(hcd->speed < HCD_USB3)) {
if (usb_amd_pt_check_port(hcd->self.controller,
port_index))
t2 &= ~PORT_WAKE_BITS;
}
} else
t2 &= ~PORT_WAKE_BITS;
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
portsc_buf[port_index] = t2;
}
/* write port settings, stopping and suspending ports if needed */
port_index = max_ports;
while (port_index--) {
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
int slot_id;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
port_index + 1);
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
}
writel(portsc_buf[port_index], ports[port_index]->addr);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irqrestore(&xhci->lock, flags);
if (bus_state->bus_suspended)
usleep_range(5000, 10000);
return 0;
}
/*
 * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged
 * while in S3: warm reset a USB3 port stuck in polling or compliance mode
 * after resume. Returns true if a warm reset was started on the port.
 * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
 */
static bool xhci_port_missing_cas_quirk(struct xhci_port *port)
{
u32 portsc;
portsc = readl(port->addr);
/* if any of these are set we are not stuck */
if (portsc & (PORT_CONNECT | PORT_CAS))
return false;
if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
return false;
/* clear wakeup/change bits, and do a warm port reset */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
portsc |= PORT_WR;
writel(portsc, port->addr);
/* flush write */
readl(port->addr);
return true;
}
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_bus_state *bus_state;
unsigned long flags;
int max_ports, port_index;
int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
struct xhci_hub *rhub;
struct xhci_port **ports;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
if (time_before(jiffies, bus_state->next_statechange))
msleep(5);
spin_lock_irqsave(&xhci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd)) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -ESHUTDOWN;
}
	/* temporarily disable event interrupts (clear EIE) */
temp = readl(&xhci->op_regs->command);
temp &= ~CMD_EIE;
writel(temp, &xhci->op_regs->command);
/* bus specific resume for ports we suspended at bus_suspend */
if (hcd->speed >= HCD_USB3)
next_state = XDEV_U0;
else
next_state = XDEV_RESUME;
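	/*
	 * USB3 links can be written straight back to U0, while USB2 links
	 * must signal resume (XDEV_RESUME) for USB_RESUME_TIMEOUT ms first;
	 * the USB2 U0 transition happens further down.
	 */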
port_index = max_ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
/* warm reset CAS limited ports stuck in polling/compliance */
if ((xhci->quirks & XHCI_MISSING_CAS) &&
(hcd->speed >= HCD_USB3) &&
xhci_port_missing_cas_quirk(ports[port_index])) {
xhci_dbg(xhci, "reset stuck port %d-%d\n",
hcd->self.busnum, port_index + 1);
clear_bit(port_index, &bus_state->bus_suspended);
continue;
}
/* resume if we suspended the link, and it is still suspended */
if (test_bit(port_index, &bus_state->bus_suspended))
switch (portsc & PORT_PLS_MASK) {
case XDEV_U3:
portsc = xhci_port_state_to_neutral(portsc);
portsc &= ~PORT_PLS_MASK;
portsc |= PORT_LINK_STROBE | next_state;
break;
case XDEV_RESUME:
/* resume already initiated */
break;
default:
				/* not in a resumable state, ignore it */
clear_bit(port_index,
&bus_state->bus_suspended);
break;
}
/* disable wake for all ports, write new link state if needed */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
writel(portsc, ports[port_index]->addr);
}
/* USB2 specific resume signaling delay and U0 link state transition */
if (hcd->speed < HCD_USB3) {
if (bus_state->bus_suspended) {
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
for_each_set_bit(port_index, &bus_state->bus_suspended,
BITS_PER_LONG) {
/* Clear PLC to poll it later for U0 transition */
xhci_test_and_clear_bit(xhci, ports[port_index],
PORT_PLC);
xhci_set_link_state(xhci, ports[port_index], XDEV_U0);
}
}
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
PORT_PLC, 10 * 1000);
if (sret) {
xhci_warn(xhci, "port %d-%d resume PLC timeout\n",
hcd->self.busnum, port_index + 1);
continue;
}
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
if (slot_id)
xhci_ring_device(xhci, slot_id);
}
(void) readl(&xhci->op_regs->command);
bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
/* re-enable irqs */
temp = readl(&xhci->op_regs->command);
temp |= CMD_EIE;
writel(temp, &xhci->op_regs->command);
temp = readl(&xhci->op_regs->command);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
{
struct xhci_hub *rhub = xhci_get_rhub(hcd);
/* USB3 port wakeups are reported via usb_wakeup_notification() */
return rhub->bus_state.resuming_ports; /* USB2 ports only */
}
#endif /* CONFIG_PM */
| linux-master | drivers/usb/host/xhci-hub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UHCI HCD (Host Controller Driver) PCI Bus Glue.
*
* Extracted from uhci-hcd.c:
* Maintainer: Alan Stern <[email protected]>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, [email protected]
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, [email protected]
* (C) Copyright 1999 Deti Fliegl, [email protected]
* (C) Copyright 1999 Thomas Sailer, [email protected]
* (C) Copyright 1999 Roman Weissgaerber, [email protected]
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, [email protected]).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
* (C) Copyright 2004-2007 Alan Stern, [email protected]
*/
#include "pci-quirks.h"
/*
* Make sure the controller is completely inactive, unable to
* generate interrupts or do DMA.
*/
static void uhci_pci_reset_hc(struct uhci_hcd *uhci)
{
uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
}
/*
* Initialize a controller that was newly discovered or has just been
* resumed. In either case we can't be sure of its previous state.
*
* Returns: 1 if the controller was reset, 0 otherwise.
*/
static int uhci_pci_check_and_reset_hc(struct uhci_hcd *uhci)
{
return uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)),
uhci->io_addr);
}
/*
* Store the basic register settings needed by the controller.
* This function is called at the end of configure_hc in uhci-hcd.c.
*/
static void uhci_pci_configure_hc(struct uhci_hcd *uhci)
{
struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
/* Enable PIRQ */
pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
/* Disable platform-specific non-PME# wakeup */
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
pci_write_config_byte(pdev, USBRES_INTEL, 0);
}
static int uhci_pci_resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
int port;
switch (to_pci_dev(uhci_dev(uhci))->vendor) {
default:
break;
case PCI_VENDOR_ID_GENESYS:
/* Genesys Logic's GL880S controllers don't generate
* resume-detect interrupts.
*/
return 1;
case PCI_VENDOR_ID_INTEL:
/* Some of Intel's USB controllers have a bug that causes
* resume-detect interrupts if any port has an over-current
* condition. To make matters worse, some motherboards
* hardwire unused USB ports' over-current inputs active!
* To prevent problems, we will not enable resume-detect
* interrupts if any ports are OC.
*/
for (port = 0; port < uhci->rh_numports; ++port) {
if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
USBPORTSC_OC)
return 1;
}
break;
}
return 0;
}
static int uhci_pci_global_suspend_mode_is_broken(struct uhci_hcd *uhci)
{
int port;
const char *sys_info;
static const char bad_Asus_board[] = "A7V8X";
/* One of Asus's motherboards has a bug which causes it to
* wake up immediately from suspend-to-RAM if any of the ports
* are connected. In such cases we will not set EGSM.
*/
sys_info = dmi_get_system_info(DMI_BOARD_NAME);
if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
for (port = 0; port < uhci->rh_numports; ++port) {
if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
USBPORTSC_CCS)
return 1;
}
}
return 0;
}
static int uhci_pci_init(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
uhci->io_addr = (unsigned long) hcd->rsrc_start;
uhci->rh_numports = uhci_count_ports(hcd);
	/*
	 * Intel controllers report the OverCurrent bit as active-high. VIA
	 * and ZHAOXIN controllers report it as active-low, so we'll adjust
	 * the bit value. (It's not standardized in the UHCI spec.)
	 */
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
uhci->oc_low = 1;
/* HP's server management chip requires a longer port reset delay. */
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
uhci->wait_for_hp = 1;
/* Intel controllers use non-PME wakeup signalling */
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
device_set_wakeup_capable(uhci_dev(uhci), true);
/* Set up pointers to PCI-specific functions */
uhci->reset_hc = uhci_pci_reset_hc;
uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
uhci->configure_hc = uhci_pci_configure_hc;
uhci->resume_detect_interrupts_are_broken =
uhci_pci_resume_detect_interrupts_are_broken;
uhci->global_suspend_mode_is_broken =
uhci_pci_global_suspend_mode_is_broken;
/* Kick BIOS off this hardware and reset if the controller
* isn't already safely quiescent.
*/
check_and_reset_hc(uhci);
return 0;
}
/* Make sure the controller is quiescent and that we're not using it
* any more. This is mainly for the benefit of programs which, like kexec,
* expect the hardware to be idle: not doing DMA or generating IRQs.
*
* This routine may be called in a damaged or failing kernel. Hence we
* do not acquire the spinlock before shutting down the controller.
*/
static void uhci_shutdown(struct pci_dev *pdev)
{
struct usb_hcd *hcd = pci_get_drvdata(pdev);
uhci_hc_died(hcd_to_uhci(hcd));
}
#ifdef CONFIG_PM
static int uhci_pci_resume(struct usb_hcd *hcd, pm_message_t state);
static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
int rc = 0;
dev_dbg(uhci_dev(uhci), "%s\n", __func__);
spin_lock_irq(&uhci->lock);
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done_okay; /* Already suspended or dead */
/* All PCI host controllers are required to disable IRQ generation
* at the source, so we must turn off PIRQ.
*/
pci_write_config_word(pdev, USBLEGSUP, 0);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* Enable platform-specific non-PME# wakeup */
if (do_wakeup) {
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
pci_write_config_byte(pdev, USBRES_INTEL,
USBPORT1EN | USBPORT2EN);
}
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irq(&uhci->lock);
synchronize_irq(hcd->irq);
/* Check for race with a wakeup request */
if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
uhci_pci_resume(hcd, PMSG_SUSPEND);
rc = -EBUSY;
}
return rc;
}
static int uhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
bool hibernated = (msg.event == PM_EVENT_RESTORE);
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
dev_dbg(uhci_dev(uhci), "%s\n", __func__);
/* Since we aren't in D3 any more, it's safe to set this flag
* even if the controller was dead.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_lock_irq(&uhci->lock);
/* Make sure resume from hibernation re-enumerates everything */
if (hibernated) {
uhci->reset_hc(uhci);
finish_reset(uhci);
}
/* The firmware may have changed the controller settings during
* a system wakeup. Check it and reconfigure to avoid problems.
*/
else {
check_and_reset_hc(uhci);
}
configure_hc(uhci);
/* Tell the core if the controller had to be reset */
if (uhci->rh_state == UHCI_RH_RESET)
usb_root_hub_lost_power(hcd->self.root_hub);
spin_unlock_irq(&uhci->lock);
/* If interrupts don't work and remote wakeup is enabled then
* the suspended root hub needs to be polled.
*/
if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* Does the root hub have a port wakeup pending? */
usb_hcd_poll_rh_status(hcd);
return 0;
}
#endif
static const struct hc_driver uhci_driver = {
.description = hcd_name,
.product_desc = "UHCI Host Controller",
.hcd_priv_size = sizeof(struct uhci_hcd),
/* Generic hardware linkage */
.irq = uhci_irq,
.flags = HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_pci_init,
.start = uhci_start,
#ifdef CONFIG_PM
.pci_suspend = uhci_pci_suspend,
.pci_resume = uhci_pci_resume,
.bus_suspend = uhci_rh_suspend,
.bus_resume = uhci_rh_resume,
#endif
.stop = uhci_stop,
.urb_enqueue = uhci_urb_enqueue,
.urb_dequeue = uhci_urb_dequeue,
.endpoint_disable = uhci_hcd_endpoint_disable,
.get_frame_number = uhci_hcd_get_frame_number,
.hub_status_data = uhci_hub_status_data,
.hub_control = uhci_hub_control,
};
static const struct pci_device_id uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static int uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
return usb_hcd_pci_probe(dev, &uhci_driver);
}
static struct pci_driver uhci_pci_driver = {
.name = hcd_name,
.id_table = uhci_pci_ids,
.probe = uhci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
#endif
};
MODULE_SOFTDEP("pre: ehci_pci");
| linux-master | drivers/usb/host/uhci-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XHCI extended capability handling
*
* Copyright (c) 2017 Hans de Goede <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pci.h>
#include "xhci.h"
#define USB_SW_DRV_NAME "intel_xhci_usb_sw"
#define USB_SW_RESOURCE_SIZE 0x400
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
static const struct property_entry role_switch_props[] = {
PROPERTY_ENTRY_BOOL("sw_switch_disable"),
{},
};
static void xhci_intel_unregister_pdev(void *arg)
{
platform_device_unregister(arg);
}
static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct device *dev = hcd->self.controller;
struct platform_device *pdev;
struct pci_dev *pci = to_pci_dev(dev);
struct resource res = { 0, };
int ret;
pdev = platform_device_alloc(USB_SW_DRV_NAME, PLATFORM_DEVID_NONE);
if (!pdev) {
xhci_err(xhci, "couldn't allocate %s platform device\n",
USB_SW_DRV_NAME);
return -ENOMEM;
}
res.start = hcd->rsrc_start + cap_offset;
res.end = res.start + USB_SW_RESOURCE_SIZE - 1;
res.name = USB_SW_DRV_NAME;
res.flags = IORESOURCE_MEM;
ret = platform_device_add_resources(pdev, &res, 1);
if (ret) {
dev_err(dev, "couldn't add resources to intel_xhci_usb_sw pdev\n");
platform_device_put(pdev);
return ret;
}
if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
ret = device_create_managed_software_node(&pdev->dev, role_switch_props,
NULL);
if (ret) {
dev_err(dev, "failed to register device properties\n");
platform_device_put(pdev);
return ret;
}
}
pdev->dev.parent = dev;
ret = platform_device_add(pdev);
if (ret) {
dev_err(dev, "couldn't register intel_xhci_usb_sw pdev\n");
platform_device_put(pdev);
return ret;
}
ret = devm_add_action_or_reset(dev, xhci_intel_unregister_pdev, pdev);
if (ret) {
dev_err(dev, "couldn't add unregister action for intel_xhci_usb_sw pdev\n");
return ret;
}
return 0;
}
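/*
 * Walk the xHC's extended capability list and set up the capabilities this
 * driver knows how to handle. xhci_find_next_ext_cap() returns the byte
 * offset of the next capability from the capability register base, or 0 at
 * the end of the list.
 */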
int xhci_ext_cap_init(struct xhci_hcd *xhci)
{
void __iomem *base = &xhci->cap_regs->hc_capbase;
u32 offset, val;
int ret;
offset = xhci_find_next_ext_cap(base, 0, 0);
while (offset) {
val = readl(base + offset);
switch (XHCI_EXT_CAPS_ID(val)) {
case XHCI_EXT_CAPS_VENDOR_INTEL:
if (xhci->quirks & XHCI_INTEL_USB_ROLE_SW) {
ret = xhci_create_intel_xhci_sw_pdev(xhci,
offset);
if (ret)
return ret;
}
break;
}
offset = xhci_find_next_ext_cap(base, offset, 0);
}
return 0;
}
EXPORT_SYMBOL_GPL(xhci_ext_cap_init);
| linux-master | drivers/usb/host/xhci-ext-caps.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2005 David Brownell
* (C) Copyright 2002 Hewlett-Packard Company
* (C) Copyright 2008 Magnus Damm
*
* SM501 Bus Glue - based on ohci-omap.c
*
* This file is licenced under the GPL.
*/
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
static int ohci_sm501_init(struct usb_hcd *hcd)
{
return ohci_init(hcd_to_ohci(hcd));
}
static int ohci_sm501_start(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
int ret;
ret = ohci_run(hcd_to_ohci(hcd));
if (ret < 0) {
dev_err(dev, "can't start %s", hcd->self.bus_name);
ohci_stop(hcd);
}
return ret;
}
/*-------------------------------------------------------------------------*/
static const struct hc_driver ohci_sm501_hc_driver = {
.description = hcd_name,
.product_desc = "SM501 OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.reset = ohci_sm501_init,
.start = ohci_sm501_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
/*-------------------------------------------------------------------------*/
static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
{
const struct hc_driver *driver = &ohci_sm501_hc_driver;
struct device *dev = &pdev->dev;
struct resource *res, *mem;
int retval, irq;
struct usb_hcd *hcd = NULL;
irq = retval = platform_get_irq(pdev, 0);
if (retval < 0)
goto err0;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (mem == NULL) {
dev_err(dev, "no resource definition for memory\n");
retval = -ENOENT;
goto err0;
}
if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err0;
}
/* allocate, reserve and remap resources for registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no resource definition for registers\n");
retval = -ENOENT;
goto err1;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto err1;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err3;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_err(dev, "cannot remap registers\n");
retval = -ENXIO;
goto err4;
}
ohci_hcd_init(hcd_to_ohci(hcd));
/* The sm501 chip is equipped with local memory that may be used
* by on-chip devices such as the video controller and the usb host.
* This driver uses genalloc so that usb allocations with
* gen_pool_dma_alloc() allocate from this local memory. The dma_handle
* returned by gen_pool_dma_alloc() will be an offset starting from 0
* for the first local memory byte.
*
* So as long as data is allocated using gen_pool_dma_alloc() all is
* fine. This is however not always the case - buffers may be allocated
* using kmalloc() - so the usb core needs to be told that it must copy
* data into our local memory if the buffers happen to be placed in
* regular memory. A non-null hcd->localmem_pool initialized by
* the call to usb_hcd_setup_local_mem() below does just that.
*/
retval = usb_hcd_setup_local_mem(hcd, mem->start,
mem->start - mem->parent->start,
resource_size(mem));
if (retval < 0)
goto err5;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err5;
device_wakeup_enable(hcd->self.controller);
/* enable power and unmask interrupts */
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 1);
sm501_modify_reg(dev->parent, SM501_IRQ_MASK, 1 << 6, 0);
return 0;
err5:
iounmap(hcd->regs);
err4:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err3:
usb_put_hcd(hcd);
err1:
release_mem_region(mem->start, resource_size(mem));
err0:
return retval;
}
static void ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct resource *mem;
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
release_mem_region(mem->start, resource_size(mem));
/* mask interrupts and disable power */
sm501_modify_reg(pdev->dev.parent, SM501_IRQ_MASK, 0, 1 << 6);
sm501_unit_power(pdev->dev.parent, SM501_GATE_USB_HOST, 0);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
return ret;
}
static int ohci_sm501_resume(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 1);
ohci_resume(hcd, false);
return 0;
}
#else
#define ohci_sm501_suspend NULL
#define ohci_sm501_resume NULL
#endif
/*-------------------------------------------------------------------------*/
/*
* Driver definition to register with the SM501 bus
*/
static struct platform_driver ohci_hcd_sm501_driver = {
.probe = ohci_hcd_sm501_drv_probe,
.remove_new = ohci_hcd_sm501_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_sm501_suspend,
.resume = ohci_sm501_resume,
.driver = {
.name = "sm501-usb",
},
};
MODULE_ALIAS("platform:sm501-usb");
| linux-master | drivers/usb/host/ohci-sm501.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SL811HS HCD (Host Controller Driver) for USB.
*
* Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
* Copyright (C) 2004-2005 David Brownell
*
* Periodic scheduling is based on Roman's OHCI code
* Copyright (C) 1999 Roman Weissgaerber
*
* The SL811HS controller handles host side USB (like the SL11H, but with
* another register set and SOF generation) as well as peripheral side USB
* (like the SL811S). This driver version doesn't implement the Gadget API
* for the peripheral role; or OTG (that'd need much external circuitry).
*
* For documentation, see the SL811HS spec and the "SL811HS Embedded Host"
* document (providing significant pieces missing from that spec); plus
* the SL811S spec if you want peripheral side info.
*/
/*
* Status: Passed basic stress testing, works with hubs, mice, keyboards,
* and usb-storage.
*
* TODO:
* - usb suspend/resume triggered by sl811
* - various issues noted in the code
* - performance work; use both register banks; ...
* - use urb->iso_frame_desc[] with ISO transfers
*/
#undef VERBOSE
#undef PACKET_TRACE
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/sl811.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "sl811.h"
MODULE_DESCRIPTION("SL811HS USB Host Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sl811-hcd");
#define DRIVER_VERSION "19 May 2005"
/* for now, use only one transfer register bank */
#undef USE_B
// #define QUIRK2
#define QUIRK3
static const char hcd_name[] = "sl811-hcd";
/*-------------------------------------------------------------------------*/
static void port_power(struct sl811 *sl811, int is_on)
{
struct usb_hcd *hcd = sl811_to_hcd(sl811);
/* hub is inactive unless the port is powered */
if (is_on) {
if (sl811->port1 & USB_PORT_STAT_POWER)
return;
sl811->port1 = USB_PORT_STAT_POWER;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 = 0;
sl811->irq_enable = 0;
hcd->state = HC_STATE_HALT;
}
sl811->ctrl1 = 0;
sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
sl811_write(sl811, SL11H_IRQ_STATUS, ~0);
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
dev_dbg(hcd->self.controller, "power %s\n",
is_on ? "on" : "off");
sl811->board->port_power(hcd->self.controller, is_on);
}
/* reset as thoroughly as we can */
if (sl811->board && sl811->board->reset)
sl811->board->reset(hcd->self.controller);
else {
sl811_write(sl811, SL11H_CTLREG1, SL11H_CTL1MASK_SE0);
mdelay(20);
}
sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811_write(sl811, SL811HS_CTLREG2, SL811HS_CTL2_INIT);
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
// if !is_on, put into lowpower mode now
}
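/* A minimal sketch of the board hooks used above; the names
 * board_vbus_power and GPIO_SL811_VBUS are hypothetical, not taken
 * from any real machine file:
 *
 *	static void board_vbus_power(struct device *dev, int is_on)
 *	{
 *		gpio_set_value(GPIO_SL811_VBUS, is_on);	// VBUS switch
 *	}
 *
 *	static struct sl811_platform_data board_sl811_data = {
 *		.potpg		= 10,	// 2 ms units: 20 ms to power-good
 *		.power		= 250,	// 2 mA units: 500 mA budget
 *		.port_power	= board_vbus_power,
 *	};
 */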
/*-------------------------------------------------------------------------*/
/* This is a PIO-only HCD. Queueing appends URBs to the endpoint's queue,
* and may start I/O. Endpoint queues are scanned during completion irq
* handlers (one per packet: ACK, NAK, faults, etc) and urb cancellation.
*
* Using an external DMA engine to copy a packet at a time could work,
* though setup/teardown costs may be too big to make it worthwhile.
*/
/* SETUP starts a new control request. Devices are not allowed to
* STALL or NAK these; they must cancel any pending control requests.
*/
static void setup_packet(
struct sl811 *sl811,
struct sl811h_ep *ep,
struct urb *urb,
u8 bank,
u8 control
)
{
u8 addr;
u8 len;
void __iomem *data_reg;
addr = SL811HS_PACKET_BUF(bank == 0);
len = sizeof(struct usb_ctrlrequest);
data_reg = sl811->data_reg;
sl811_write_buf(sl811, addr, urb->setup_packet, len);
/* autoincrementing */
sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
writeb(len, data_reg);
writeb(SL_SETUP /* | ep->epnum */, data_reg);
writeb(usb_pipedevice(urb->pipe), data_reg);
/* always OUT/data0 */
sl811_write(sl811, bank + SL11H_HOSTCTLREG,
control | SL11H_HCTLMASK_OUT);
ep->length = 0;
PACKET("SETUP qh%p\n", ep);
}
/* STATUS finishes control requests, often after IN or OUT data packets */
static void status_packet(
struct sl811 *sl811,
struct sl811h_ep *ep,
struct urb *urb,
u8 bank,
u8 control
)
{
int do_out;
void __iomem *data_reg;
do_out = urb->transfer_buffer_length && usb_pipein(urb->pipe);
data_reg = sl811->data_reg;
/* autoincrementing */
sl811_write(sl811, bank + SL11H_BUFADDRREG, 0);
writeb(0, data_reg);
writeb((do_out ? SL_OUT : SL_IN) /* | ep->epnum */, data_reg);
writeb(usb_pipedevice(urb->pipe), data_reg);
/* always data1; sometimes IN */
control |= SL11H_HCTLMASK_TOGGLE;
if (do_out)
control |= SL11H_HCTLMASK_OUT;
sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
ep->length = 0;
PACKET("STATUS%s/%s qh%p\n", ep->nak_count ? "/retry" : "",
do_out ? "out" : "in", ep);
}
/* IN packets can be used with any type of endpoint. Here we just
 * start the transfer; data from the peripheral may arrive later.
* urb->iso_frame_desc is currently ignored here...
*/
static void in_packet(
struct sl811 *sl811,
struct sl811h_ep *ep,
struct urb *urb,
u8 bank,
u8 control
)
{
u8 addr;
u8 len;
void __iomem *data_reg;
/* avoid losing data on overflow */
len = ep->maxpacket;
addr = SL811HS_PACKET_BUF(bank == 0);
if (!(control & SL11H_HCTLMASK_ISOCH)
&& usb_gettoggle(urb->dev, ep->epnum, 0))
control |= SL11H_HCTLMASK_TOGGLE;
data_reg = sl811->data_reg;
/* autoincrementing */
sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
writeb(len, data_reg);
writeb(SL_IN | ep->epnum, data_reg);
writeb(usb_pipedevice(urb->pipe), data_reg);
sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
ep->length = min_t(u32, len,
urb->transfer_buffer_length - urb->actual_length);
PACKET("IN%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
!!usb_gettoggle(urb->dev, ep->epnum, 0), ep, len);
}
/* OUT packets can be used with any type of endpoint.
* urb->iso_frame_desc is currently ignored here...
*/
static void out_packet(
struct sl811 *sl811,
struct sl811h_ep *ep,
struct urb *urb,
u8 bank,
u8 control
)
{
void *buf;
u8 addr;
u8 len;
void __iomem *data_reg;
buf = urb->transfer_buffer + urb->actual_length;
prefetch(buf);
len = min_t(u32, ep->maxpacket,
urb->transfer_buffer_length - urb->actual_length);
if (!(control & SL11H_HCTLMASK_ISOCH)
&& usb_gettoggle(urb->dev, ep->epnum, 1))
control |= SL11H_HCTLMASK_TOGGLE;
addr = SL811HS_PACKET_BUF(bank == 0);
data_reg = sl811->data_reg;
sl811_write_buf(sl811, addr, buf, len);
/* autoincrementing */
sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
writeb(len, data_reg);
writeb(SL_OUT | ep->epnum, data_reg);
writeb(usb_pipedevice(urb->pipe), data_reg);
sl811_write(sl811, bank + SL11H_HOSTCTLREG,
control | SL11H_HCTLMASK_OUT);
ep->length = len;
PACKET("OUT%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
!!usb_gettoggle(urb->dev, ep->epnum, 1), ep, len);
}
/*-------------------------------------------------------------------------*/
/* caller updates on-chip enables later */
static inline void sofirq_on(struct sl811 *sl811)
{
if (sl811->irq_enable & SL11H_INTMASK_SOFINTR)
return;
dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq on\n");
sl811->irq_enable |= SL11H_INTMASK_SOFINTR;
}
static inline void sofirq_off(struct sl811 *sl811)
{
if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR))
return;
dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq off\n");
sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR;
}
/*-------------------------------------------------------------------------*/
/* pick the next endpoint for a transaction, and issue it.
* frames start with periodic transfers (after whatever is pending
* from the previous frame), and the rest of the time is async
* transfers, scheduled round-robin.
*/
static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
{
struct sl811h_ep *ep;
struct urb *urb;
int fclock;
u8 control;
/* use endpoint at schedule head */
if (sl811->next_periodic) {
ep = sl811->next_periodic;
sl811->next_periodic = ep->next;
} else {
if (sl811->next_async)
ep = sl811->next_async;
else if (!list_empty(&sl811->async))
ep = container_of(sl811->async.next,
struct sl811h_ep, schedule);
else {
/* could set up the first fullspeed periodic
* transfer for the next frame ...
*/
return NULL;
}
#ifdef USE_B
if ((bank && sl811->active_b == ep) || sl811->active_a == ep)
return NULL;
#endif
if (ep->schedule.next == &sl811->async)
sl811->next_async = NULL;
else
sl811->next_async = container_of(ep->schedule.next,
struct sl811h_ep, schedule);
}
if (unlikely(list_empty(&ep->hep->urb_list))) {
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"empty %p queue?\n", ep);
return NULL;
}
urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);
control = ep->defctrl;
/* if this frame doesn't have enough time left to transfer this
* packet, wait till the next frame. too-simple algorithm...
*/
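	/* SL11H_SOFTMRREG apparently counts the remaining frame time in
	 * units of 64 clocks, hence the <<6 below (the debugfs dump
	 * prints the same value as "frame clocks remaining"); the packet
	 * costs subtracted afterwards are rough estimates in those units.
	 */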
fclock = sl811_read(sl811, SL11H_SOFTMRREG) << 6;
	fclock -= 100;		/* setup takes little time */
if (urb->dev->speed == USB_SPEED_LOW) {
if (control & SL11H_HCTLMASK_PREAMBLE) {
/* also note erratum 1: some hubs won't work */
fclock -= 800;
}
fclock -= ep->maxpacket << 8;
/* erratum 2: AFTERSOF only works for fullspeed */
if (fclock < 0) {
if (ep->period)
sl811->stat_overrun++;
sofirq_on(sl811);
return NULL;
}
} else {
fclock -= 12000 / 19; /* 19 64byte packets/msec */
if (fclock < 0) {
if (ep->period)
sl811->stat_overrun++;
control |= SL11H_HCTLMASK_AFTERSOF;
/* throttle bulk/control irq noise */
} else if (ep->nak_count)
control |= SL11H_HCTLMASK_AFTERSOF;
}
switch (ep->nextpid) {
case USB_PID_IN:
in_packet(sl811, ep, urb, bank, control);
break;
case USB_PID_OUT:
out_packet(sl811, ep, urb, bank, control);
break;
case USB_PID_SETUP:
setup_packet(sl811, ep, urb, bank, control);
break;
case USB_PID_ACK: /* for control status */
status_packet(sl811, ep, urb, bank, control);
break;
default:
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"bad ep%p pid %02x\n", ep, ep->nextpid);
ep = NULL;
}
return ep;
}
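/* watchdog floor for the per-bank completion timestamps checked by
 * checkdone(): at least 2 msec, and at least 2 jiffies even when HZ
 * is low enough that msecs_to_jiffies(2) rounds down to a single jiffy
 */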
#define MIN_JIFFIES ((msecs_to_jiffies(2) > 1) ? msecs_to_jiffies(2) : 2)
static inline void start_transfer(struct sl811 *sl811)
{
if (sl811->port1 & USB_PORT_STAT_SUSPEND)
return;
if (sl811->active_a == NULL) {
sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
if (sl811->active_a != NULL)
sl811->jiffies_a = jiffies + MIN_JIFFIES;
}
#ifdef USE_B
if (sl811->active_b == NULL) {
sl811->active_b = start(sl811, SL811_EP_B(SL811_HOST_BUF));
if (sl811->active_b != NULL)
sl811->jiffies_b = jiffies + MIN_JIFFIES;
}
#endif
}
static void finish_request(
struct sl811 *sl811,
struct sl811h_ep *ep,
struct urb *urb,
int status
) __releases(sl811->lock) __acquires(sl811->lock)
{
unsigned i;
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_SETUP;
usb_hcd_unlink_urb_from_ep(sl811_to_hcd(sl811), urb);
spin_unlock(&sl811->lock);
usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb, status);
spin_lock(&sl811->lock);
/* leave active endpoints in the schedule */
if (!list_empty(&ep->hep->urb_list))
return;
/* async deschedule? */
if (!list_empty(&ep->schedule)) {
list_del_init(&ep->schedule);
if (ep == sl811->next_async)
sl811->next_async = NULL;
return;
}
/* periodic deschedule */
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep *temp;
struct sl811h_ep **prev = &sl811->periodic[i];
while (*prev && ((temp = *prev) != ep))
prev = &temp->next;
if (*prev)
*prev = ep->next;
sl811->load[i] -= ep->load;
}
ep->branch = PERIODIC_SIZE;
sl811->periodic_count--;
sl811_to_hcd(sl811)->self.bandwidth_allocated
-= ep->load / ep->period;
if (ep == sl811->next_periodic)
sl811->next_periodic = ep->next;
/* we might turn SOFs back on again for the async schedule */
if (sl811->periodic_count == 0)
sofirq_off(sl811);
}
static void
done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
{
u8 status;
struct urb *urb;
int urbstat = -EINPROGRESS;
if (unlikely(!ep))
return;
status = sl811_read(sl811, bank + SL11H_PKTSTATREG);
urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);
/* we can safely ignore NAKs */
if (status & SL11H_STATMASK_NAK) {
// PACKET("...NAK_%02x qh%p\n", bank, ep);
if (!ep->period)
ep->nak_count++;
ep->error_count = 0;
/* ACK advances transfer, toggle, and maybe queue */
} else if (status & SL11H_STATMASK_ACK) {
struct usb_device *udev = urb->dev;
int len;
unsigned char *buf;
/* urb->iso_frame_desc is currently ignored here... */
ep->nak_count = ep->error_count = 0;
switch (ep->nextpid) {
case USB_PID_OUT:
// PACKET("...ACK/out_%02x qh%p\n", bank, ep);
urb->actual_length += ep->length;
usb_dotoggle(udev, ep->epnum, 1);
if (urb->actual_length
== urb->transfer_buffer_length) {
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_ACK;
/* some bulk protocols terminate OUT transfers
* by a short packet, using ZLPs not padding.
*/
else if (ep->length < ep->maxpacket
|| !(urb->transfer_flags
& URB_ZERO_PACKET))
urbstat = 0;
}
break;
case USB_PID_IN:
// PACKET("...ACK/in_%02x qh%p\n", bank, ep);
buf = urb->transfer_buffer + urb->actual_length;
prefetchw(buf);
len = ep->maxpacket - sl811_read(sl811,
bank + SL11H_XFERCNTREG);
if (len > ep->length) {
len = ep->length;
urbstat = -EOVERFLOW;
}
urb->actual_length += len;
sl811_read_buf(sl811, SL811HS_PACKET_BUF(bank == 0),
buf, len);
usb_dotoggle(udev, ep->epnum, 0);
if (urbstat == -EINPROGRESS &&
(len < ep->maxpacket ||
urb->actual_length ==
urb->transfer_buffer_length)) {
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_ACK;
else
urbstat = 0;
}
break;
case USB_PID_SETUP:
// PACKET("...ACK/setup_%02x qh%p\n", bank, ep);
if (urb->transfer_buffer_length == urb->actual_length)
ep->nextpid = USB_PID_ACK;
else if (usb_pipeout(urb->pipe)) {
usb_settoggle(udev, 0, 1, 1);
ep->nextpid = USB_PID_OUT;
} else {
usb_settoggle(udev, 0, 0, 1);
ep->nextpid = USB_PID_IN;
}
break;
case USB_PID_ACK:
// PACKET("...ACK/status_%02x qh%p\n", bank, ep);
urbstat = 0;
break;
}
/* STALL stops all transfers */
} else if (status & SL11H_STATMASK_STALL) {
PACKET("...STALL_%02x qh%p\n", bank, ep);
ep->nak_count = ep->error_count = 0;
urbstat = -EPIPE;
/* error? retry, until "3 strikes" */
} else if (++ep->error_count >= 3) {
if (status & SL11H_STATMASK_TMOUT)
urbstat = -ETIME;
else if (status & SL11H_STATMASK_OVF)
urbstat = -EOVERFLOW;
else
urbstat = -EPROTO;
ep->error_count = 0;
PACKET("...3STRIKES_%02x %02x qh%p stat %d\n",
bank, status, ep, urbstat);
}
if (urbstat != -EINPROGRESS || urb->unlinked)
finish_request(sl811, ep, urb, urbstat);
}
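/* For reference, the control-transfer sequencing driven above:
 * ep->nextpid advances SETUP -> IN or OUT (data stage, if any) -> ACK
 * (status stage). The SETUP completion seeds DATA1 for the data stage
 * via usb_settoggle(), and the status stage always uses DATA1 in the
 * direction opposite to the data stage (see status_packet()).
 */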
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
u8 irqstat = 0;
if (sl811->active_a && time_before_eq(sl811->jiffies_a, jiffies)) {
ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"%s DONE_A: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
irqstat |= SL11H_INTMASK_DONE_A;
}
#ifdef USE_B
if (sl811->active_b && time_before_eq(sl811->jiffies_b, jiffies)) {
ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"%s DONE_B: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
		irqstat |= SL11H_INTMASK_DONE_B;
}
#endif
return irqstat;
}
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
u8 irqstat;
irqreturn_t ret = IRQ_NONE;
unsigned retries = 5;
spin_lock(&sl811->lock);
retry:
irqstat = sl811_read(sl811, SL11H_IRQ_STATUS) & ~SL11H_INTMASK_DP;
if (irqstat) {
sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
irqstat &= sl811->irq_enable;
}
#ifdef QUIRK2
/* this may no longer be necessary ... */
if (irqstat == 0) {
irqstat = checkdone(sl811);
if (irqstat)
sl811->stat_lost++;
}
#endif
/* USB packets, not necessarily handled in the order they're
* issued ... that's fine if they're different endpoints.
*/
if (irqstat & SL11H_INTMASK_DONE_A) {
done(sl811, sl811->active_a, SL811_EP_A(SL811_HOST_BUF));
sl811->active_a = NULL;
sl811->stat_a++;
}
#ifdef USE_B
if (irqstat & SL11H_INTMASK_DONE_B) {
done(sl811, sl811->active_b, SL811_EP_B(SL811_HOST_BUF));
sl811->active_b = NULL;
sl811->stat_b++;
}
#endif
if (irqstat & SL11H_INTMASK_SOFINTR) {
unsigned index;
		index = sl811->frame++ & (PERIODIC_SIZE - 1);
sl811->stat_sof++;
/* be graceful about almost-inevitable periodic schedule
* overruns: continue the previous frame's transfers iff
* this one has nothing scheduled.
*/
if (sl811->next_periodic) {
// dev_err(hcd->self.controller, "overrun to slot %d\n", index);
sl811->stat_overrun++;
}
if (sl811->periodic[index])
sl811->next_periodic = sl811->periodic[index];
}
/* hub_wq manages debouncing and wakeup */
if (irqstat & SL11H_INTMASK_INSRMV) {
sl811->stat_insrmv++;
/* most stats are reset for each VBUS session */
sl811->stat_wake = 0;
sl811->stat_sof = 0;
sl811->stat_a = 0;
sl811->stat_b = 0;
sl811->stat_lost = 0;
sl811->ctrl1 = 0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811->irq_enable = SL11H_INTMASK_INSRMV;
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
/* usbcore nukes other pending transactions on disconnect */
if (sl811->active_a) {
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
finish_request(sl811, sl811->active_a,
container_of(sl811->active_a
->hep->urb_list.next,
struct urb, urb_list),
-ESHUTDOWN);
sl811->active_a = NULL;
}
#ifdef USE_B
if (sl811->active_b) {
sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
finish_request(sl811, sl811->active_b,
container_of(sl811->active_b
->hep->urb_list.next,
struct urb, urb_list),
				-ESHUTDOWN);
sl811->active_b = NULL;
}
#endif
/* port status seems weird until after reset, so
* force the reset and make hub_wq clean up later.
*/
if (irqstat & SL11H_INTMASK_RD)
sl811->port1 &= ~USB_PORT_STAT_CONNECTION;
else
sl811->port1 |= USB_PORT_STAT_CONNECTION;
sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16;
} else if (irqstat & SL11H_INTMASK_RD) {
if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
dev_dbg(hcd->self.controller, "wakeup\n");
sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
sl811->stat_wake++;
} else
irqstat &= ~SL11H_INTMASK_RD;
}
if (irqstat) {
if (sl811->port1 & USB_PORT_STAT_ENABLE)
start_transfer(sl811);
ret = IRQ_HANDLED;
if (retries--)
goto retry;
}
if (sl811->periodic_count == 0 && list_empty(&sl811->async))
sofirq_off(sl811);
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
spin_unlock(&sl811->lock);
return ret;
}
/*-------------------------------------------------------------------------*/
/* usb 1.1 says max 90% of a frame is available for periodic transfers.
* this driver doesn't promise that much since it's got to handle an
* IRQ per packet; irq handling latencies also use up that time.
*
* NOTE: the periodic schedule is a sparse tree, with the load for
* each branch minimized. see fig 3.5 in the OHCI spec for example.
*/
#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
static int balance(struct sl811 *sl811, u16 period, u16 load)
{
int i, branch = -ENOSPC;
/* search for the least loaded schedule branch of that period
* which has enough bandwidth left unreserved.
*/
for (i = 0; i < period ; i++) {
if (branch < 0 || sl811->load[branch] > sl811->load[i]) {
int j;
for (j = i; j < PERIODIC_SIZE; j += period) {
if ((sl811->load[j] + load)
> MAX_PERIODIC_LOAD)
break;
}
if (j < PERIODIC_SIZE)
continue;
branch = i;
}
}
return branch;
}
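/* Worked example with illustrative numbers: taking PERIODIC_SIZE as 32,
 * an endpoint with period 8 and load 50 usec tries branches i = 0..7;
 * a candidate qualifies only if slots i, i+8, i+16 and i+24 all stay
 * at or under MAX_PERIODIC_LOAD once the 50 usec is added, and among
 * qualifying candidates the one with the least-loaded head slot wins.
 */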
/*-------------------------------------------------------------------------*/
static int sl811h_urb_enqueue(
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct usb_device *udev = urb->dev;
unsigned int pipe = urb->pipe;
int is_out = !usb_pipein(pipe);
int type = usb_pipetype(pipe);
int epnum = usb_pipeendpoint(pipe);
struct sl811h_ep *ep = NULL;
unsigned long flags;
int i;
int retval;
struct usb_host_endpoint *hep = urb->ep;
#ifndef CONFIG_USB_SL811_HCD_ISO
if (type == PIPE_ISOCHRONOUS)
return -ENOSPC;
#endif
/* avoid all allocations within spinlocks */
if (!hep->hcpriv) {
ep = kzalloc(sizeof *ep, mem_flags);
if (ep == NULL)
return -ENOMEM;
}
spin_lock_irqsave(&sl811->lock, flags);
/* don't submit to a dead or disabled port */
if (!(sl811->port1 & USB_PORT_STAT_ENABLE)
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
kfree(ep);
goto fail_not_linked;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval) {
kfree(ep);
goto fail_not_linked;
}
if (hep->hcpriv) {
kfree(ep);
ep = hep->hcpriv;
} else if (!ep) {
retval = -ENOMEM;
goto fail;
} else {
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
usb_settoggle(udev, epnum, is_out, 0);
if (type == PIPE_CONTROL)
ep->nextpid = USB_PID_SETUP;
else if (is_out)
ep->nextpid = USB_PID_OUT;
else
ep->nextpid = USB_PID_IN;
if (ep->maxpacket > H_MAXPACKET) {
/* iso packets up to 240 bytes could work... */
dev_dbg(hcd->self.controller,
"dev %d ep%d maxpacket %d\n", udev->devnum,
epnum, ep->maxpacket);
retval = -EINVAL;
kfree(ep);
goto fail;
}
if (udev->speed == USB_SPEED_LOW) {
/* send preamble for external hub? */
if (!(sl811->ctrl1 & SL11H_CTL1MASK_LSPD))
ep->defctrl |= SL11H_HCTLMASK_PREAMBLE;
}
switch (type) {
case PIPE_ISOCHRONOUS:
case PIPE_INTERRUPT:
if (urb->interval > PERIODIC_SIZE)
urb->interval = PERIODIC_SIZE;
ep->period = urb->interval;
ep->branch = PERIODIC_SIZE;
if (type == PIPE_ISOCHRONOUS)
ep->defctrl |= SL11H_HCTLMASK_ISOCH;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
type == PIPE_ISOCHRONOUS,
usb_maxpacket(udev, pipe))
/ 1000;
break;
}
ep->hep = hep;
hep->hcpriv = ep;
}
/* maybe put endpoint into schedule */
switch (type) {
case PIPE_CONTROL:
case PIPE_BULK:
if (list_empty(&ep->schedule))
list_add_tail(&ep->schedule, &sl811->async);
break;
case PIPE_ISOCHRONOUS:
case PIPE_INTERRUPT:
urb->interval = ep->period;
if (ep->branch < PERIODIC_SIZE) {
/* NOTE: the phase is correct here, but the value
* needs offsetting by the transfer queue depth.
* All current drivers ignore start_frame, so this
* is unlikely to ever matter...
*/
urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+ ep->branch;
break;
}
retval = balance(sl811, ep->period, ep->load);
if (retval < 0)
goto fail;
ep->branch = retval;
retval = 0;
urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+ ep->branch;
/* sort each schedule branch by period (slow before fast)
* to share the faster parts of the tree without needing
* dummy/placeholder nodes
*/
dev_dbg(hcd->self.controller, "schedule qh%d/%p branch %d\n",
ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep **prev = &sl811->periodic[i];
struct sl811h_ep *here = *prev;
while (here && ep != here) {
if (ep->period > here->period)
break;
prev = &here->next;
here = *prev;
}
if (ep != here) {
ep->next = here;
*prev = ep;
}
sl811->load[i] += ep->load;
}
sl811->periodic_count++;
hcd->self.bandwidth_allocated += ep->load / ep->period;
sofirq_on(sl811);
}
urb->hcpriv = hep;
start_transfer(sl811);
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
fail:
if (retval)
usb_hcd_unlink_urb_from_ep(hcd, urb);
fail_not_linked:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct usb_host_endpoint *hep;
unsigned long flags;
struct sl811h_ep *ep;
int retval;
spin_lock_irqsave(&sl811->lock, flags);
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (retval)
goto fail;
hep = urb->hcpriv;
ep = hep->hcpriv;
if (ep) {
/* finish right away if this urb can't be active ...
* note that some drivers wrongly expect delays
*/
if (ep->hep->urb_list.next != &urb->urb_list) {
/* not front of queue? never active */
/* for active transfers, we expect an IRQ */
} else if (sl811->active_a == ep) {
if (time_before_eq(sl811->jiffies_a, jiffies)) {
/* happens a lot with lowspeed?? */
dev_dbg(hcd->self.controller,
"giveup on DONE_A: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_A(SL11H_HOSTCTLREG)),
sl811_read(sl811,
SL811_EP_A(SL11H_PKTSTATREG)));
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
0);
sl811->active_a = NULL;
} else
urb = NULL;
#ifdef USE_B
} else if (sl811->active_b == ep) {
			if (time_before_eq(sl811->jiffies_b, jiffies)) {
/* happens a lot with lowspeed?? */
dev_dbg(hcd->self.controller,
"giveup on DONE_B: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_B(SL11H_HOSTCTLREG)),
sl811_read(sl811,
SL811_EP_B(SL11H_PKTSTATREG)));
sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG),
0);
sl811->active_b = NULL;
} else
urb = NULL;
#endif
} else {
/* front of queue for inactive endpoint */
}
if (urb)
finish_request(sl811, ep, urb, 0);
else
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"dequeue, urb %p active %s; wait4irq\n", urb,
(sl811->active_a == ep) ? "A" : "B");
} else
retval = -EINVAL;
fail:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
static void
sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
struct sl811h_ep *ep = hep->hcpriv;
if (!ep)
return;
/* assume we'd just wait for the irq */
if (!list_empty(&hep->urb_list))
msleep(3);
if (!list_empty(&hep->urb_list))
dev_warn(hcd->self.controller, "ep %p not empty?\n", ep);
kfree(ep);
hep->hcpriv = NULL;
}
static int
sl811h_get_frame(struct usb_hcd *hcd)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
/* wrong except while periodic transfers are scheduled;
* never matches the on-the-wire frame;
* subject to overruns.
*/
return sl811->frame;
}
/*-------------------------------------------------------------------------*/
/* the virtual root hub timer IRQ checks for hub status */
static int
sl811h_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
#ifdef QUIRK3
unsigned long flags;
	/* non-SMP HACK: use the root hub timer as an i/o watchdog;
	 * this seems essential when SOF IRQs aren't in use...
*/
local_irq_save(flags);
if (!timer_pending(&sl811->timer)) {
if (sl811h_irq( /* ~0, */ hcd) != IRQ_NONE)
sl811->stat_lost++;
}
local_irq_restore(flags);
#endif
if (!(sl811->port1 & (0xffff << 16)))
return 0;
/* tell hub_wq port 1 changed */
*buf = (1 << 1);
return 1;
}
static void
sl811h_hub_descriptor (
struct sl811 *sl811,
struct usb_hub_descriptor *desc
) {
u16 temp = 0;
desc->bDescriptorType = USB_DT_HUB;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = 1;
desc->bDescLength = 9;
/* per-port power switching (gang of one!), or none */
desc->bPwrOn2PwrGood = 0;
if (sl811->board && sl811->board->port_power) {
desc->bPwrOn2PwrGood = sl811->board->potpg;
if (!desc->bPwrOn2PwrGood)
desc->bPwrOn2PwrGood = 10;
temp = HUB_CHAR_INDV_PORT_LPSM;
} else
temp = HUB_CHAR_NO_LPSM;
/* no overcurrent errors detection/handling */
temp |= HUB_CHAR_NO_OCPM;
desc->wHubCharacteristics = cpu_to_le16(temp);
/* ports removable, and legacy PortPwrCtrlMask */
desc->u.hs.DeviceRemovable[0] = 0 << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
}
static void
sl811h_timer(struct timer_list *t)
{
struct sl811 *sl811 = from_timer(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
const u32 mask = USB_PORT_STAT_CONNECTION
| USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED;
spin_lock_irqsave(&sl811->lock, flags);
/* stop special signaling */
sl811->ctrl1 &= ~SL11H_CTL1MASK_FORCE;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
udelay(3);
irqstat = sl811_read(sl811, SL11H_IRQ_STATUS);
switch (signaling) {
case SL11H_CTL1MASK_SE0:
dev_dbg(sl811_to_hcd(sl811)->self.controller, "end reset\n");
sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
| USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
/* don't wrongly ack RD */
if (irqstat & SL11H_INTMASK_INSRMV)
irqstat &= ~SL11H_INTMASK_RD;
break;
case SL11H_CTL1MASK_K:
dev_dbg(sl811_to_hcd(sl811)->self.controller, "end resume\n");
sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
break;
default:
dev_dbg(sl811_to_hcd(sl811)->self.controller,
"odd timer signaling: %02x\n", signaling);
break;
}
sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
if (irqstat & SL11H_INTMASK_RD) {
/* usbcore nukes all pending transactions on disconnect */
if (sl811->port1 & USB_PORT_STAT_CONNECTION)
sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16)
| (USB_PORT_STAT_C_ENABLE << 16);
sl811->port1 &= ~mask;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 |= mask;
if (irqstat & SL11H_INTMASK_DP)
sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED;
sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
}
if (sl811->port1 & USB_PORT_STAT_CONNECTION) {
u8 ctrl2 = SL811HS_CTL2_INIT;
sl811->irq_enable |= SL11H_INTMASK_DONE_A;
#ifdef USE_B
sl811->irq_enable |= SL11H_INTMASK_DONE_B;
#endif
if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) {
sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
ctrl2 |= SL811HS_CTL2MASK_DSWAP;
}
/* start SOFs flowing, kickstarting with A registers */
sl811->ctrl1 |= SL11H_CTL1MASK_SOF_ENA;
sl811_write(sl811, SL11H_SOFLOWREG, 0xe0);
sl811_write(sl811, SL811HS_CTLREG2, ctrl2);
/* autoincrementing */
sl811_write(sl811, SL811_EP_A(SL11H_BUFLNTHREG), 0);
writeb(SL_SOF, sl811->data_reg);
writeb(0, sl811->data_reg);
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
SL11H_HCTLMASK_ARM);
/* hub_wq provides debounce delay */
} else {
sl811->ctrl1 = 0;
}
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
/* reenable irqs */
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
spin_unlock_irqrestore(&sl811->lock, flags);
}
static int
sl811h_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
) {
struct sl811 *sl811 = hcd_to_sl811(hcd);
int retval = 0;
unsigned long flags;
spin_lock_irqsave(&sl811->lock, flags);
switch (typeReq) {
case ClearHubFeature:
case SetHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (wIndex != 1 || wLength != 0)
goto error;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
sl811->port1 &= USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811->irq_enable = SL11H_INTMASK_INSRMV;
sl811_write(sl811, SL11H_IRQ_ENABLE,
sl811->irq_enable);
break;
case USB_PORT_FEAT_SUSPEND:
if (!(sl811->port1 & USB_PORT_STAT_SUSPEND))
break;
/* 20 msec of resume/K signaling, other irqs blocked */
dev_dbg(hcd->self.controller, "start resume...\n");
sl811->irq_enable = 0;
sl811_write(sl811, SL11H_IRQ_ENABLE,
sl811->irq_enable);
sl811->ctrl1 |= SL11H_CTL1MASK_K;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
mod_timer(&sl811->timer, jiffies
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case USB_PORT_FEAT_POWER:
port_power(sl811, 0);
break;
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_SUSPEND:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_RESET:
break;
default:
goto error;
}
sl811->port1 &= ~(1 << wValue);
break;
case GetHubDescriptor:
sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
put_unaligned_le32(0, buf);
break;
case GetPortStatus:
if (wIndex != 1)
goto error;
put_unaligned_le32(sl811->port1, buf);
if (__is_defined(VERBOSE) ||
*(u16*)(buf+2)) /* only if wPortChange is interesting */
dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
sl811->port1);
break;
case SetPortFeature:
if (wIndex != 1 || wLength != 0)
goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (sl811->port1 & USB_PORT_STAT_RESET)
goto error;
if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
goto error;
dev_dbg(hcd->self.controller,"suspend...\n");
sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
break;
case USB_PORT_FEAT_POWER:
port_power(sl811, 1);
break;
case USB_PORT_FEAT_RESET:
if (sl811->port1 & USB_PORT_STAT_SUSPEND)
goto error;
if (!(sl811->port1 & USB_PORT_STAT_POWER))
break;
/* 50 msec of reset/SE0 signaling, irqs blocked */
sl811->irq_enable = 0;
sl811_write(sl811, SL11H_IRQ_ENABLE,
sl811->irq_enable);
sl811->ctrl1 = SL11H_CTL1MASK_SE0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811->port1 |= USB_PORT_STAT_RESET;
mod_timer(&sl811->timer, jiffies
+ msecs_to_jiffies(50));
break;
default:
goto error;
}
sl811->port1 |= 1 << wValue;
break;
default:
error:
/* "protocol stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
#ifdef CONFIG_PM
static int
sl811h_bus_suspend(struct usb_hcd *hcd)
{
// SOFs off
dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
static int
sl811h_bus_resume(struct usb_hcd *hcd)
{
// SOFs on
dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
#else
#define sl811h_bus_suspend NULL
#define sl811h_bus_resume NULL
#endif
/*-------------------------------------------------------------------------*/
static void dump_irq(struct seq_file *s, char *label, u8 mask)
{
seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask,
(mask & SL11H_INTMASK_DONE_A) ? " done_a" : "",
(mask & SL11H_INTMASK_DONE_B) ? " done_b" : "",
(mask & SL11H_INTMASK_SOFINTR) ? " sof" : "",
(mask & SL11H_INTMASK_INSRMV) ? " ins/rmv" : "",
(mask & SL11H_INTMASK_RD) ? " rd" : "",
(mask & SL11H_INTMASK_DP) ? " dp" : "");
}
static int sl811h_debug_show(struct seq_file *s, void *unused)
{
struct sl811 *sl811 = s->private;
struct sl811h_ep *ep;
unsigned i;
seq_printf(s, "%s\n%s version %s\nportstatus[1] = %08x\n",
sl811_to_hcd(sl811)->product_desc,
hcd_name, DRIVER_VERSION,
sl811->port1);
seq_printf(s, "insert/remove: %ld\n", sl811->stat_insrmv);
seq_printf(s, "current session: done_a %ld done_b %ld "
"wake %ld sof %ld overrun %ld lost %ld\n\n",
sl811->stat_a, sl811->stat_b,
sl811->stat_wake, sl811->stat_sof,
sl811->stat_overrun, sl811->stat_lost);
spin_lock_irq(&sl811->lock);
if (sl811->ctrl1 & SL11H_CTL1MASK_SUSPEND)
seq_printf(s, "(suspended)\n\n");
else {
u8 t = sl811_read(sl811, SL11H_CTLREG1);
seq_printf(s, "ctrl1 %02x%s%s%s%s\n", t,
(t & SL11H_CTL1MASK_SOF_ENA) ? " sofgen" : "",
({char *s; switch (t & SL11H_CTL1MASK_FORCE) {
case SL11H_CTL1MASK_NORMAL: s = ""; break;
case SL11H_CTL1MASK_SE0: s = " se0/reset"; break;
case SL11H_CTL1MASK_K: s = " k/resume"; break;
			default: s = " j"; break;
} s; }),
(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");
dump_irq(s, "irq_enable",
sl811_read(sl811, SL11H_IRQ_ENABLE));
dump_irq(s, "irq_status",
sl811_read(sl811, SL11H_IRQ_STATUS));
seq_printf(s, "frame clocks remaining: %d\n",
sl811_read(sl811, SL11H_SOFTMRREG) << 6);
}
seq_printf(s, "A: qh%p ctl %02x sts %02x\n", sl811->active_a,
sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG)),
sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
seq_printf(s, "B: qh%p ctl %02x sts %02x\n", sl811->active_b,
sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG)),
sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
seq_printf(s, "\n");
list_for_each_entry (ep, &sl811->async, schedule) {
struct urb *urb;
seq_printf(s, "%s%sqh%p, ep%d%s, maxpacket %d"
" nak %d err %d\n",
(ep == sl811->active_a) ? "(A) " : "",
(ep == sl811->active_b) ? "(B) " : "",
ep, ep->epnum,
({ char *s; switch (ep->nextpid) {
case USB_PID_IN: s = "in"; break;
case USB_PID_OUT: s = "out"; break;
case USB_PID_SETUP: s = "setup"; break;
case USB_PID_ACK: s = "status"; break;
default: s = "?"; break;
} s;}),
ep->maxpacket,
ep->nak_count, ep->error_count);
list_for_each_entry (urb, &ep->hep->urb_list, urb_list) {
seq_printf(s, " urb%p, %d/%d\n", urb,
urb->actual_length,
urb->transfer_buffer_length);
}
}
if (!list_empty(&sl811->async))
seq_printf(s, "\n");
seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
for (i = 0; i < PERIODIC_SIZE; i++) {
ep = sl811->periodic[i];
if (!ep)
continue;
seq_printf(s, "%2d [%3d]:\n", i, sl811->load[i]);
/* DUMB: prints shared entries multiple times */
do {
seq_printf(s,
" %s%sqh%d/%p (%sdev%d ep%d%s max %d) "
"err %d\n",
(ep == sl811->active_a) ? "(A) " : "",
(ep == sl811->active_b) ? "(B) " : "",
ep->period, ep,
(ep->udev->speed == USB_SPEED_FULL)
? "" : "ls ",
ep->udev->devnum, ep->epnum,
(ep->epnum == 0) ? ""
: ((ep->nextpid == USB_PID_IN)
? "in"
: "out"),
ep->maxpacket, ep->error_count);
ep = ep->next;
} while (ep);
}
spin_unlock_irq(&sl811->lock);
seq_printf(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(sl811h_debug);
/* expect just one sl811 per system */
static void create_debug_file(struct sl811 *sl811)
{
debugfs_create_file("sl811h", S_IRUGO, usb_debug_root, sl811,
&sl811h_debug_fops);
}
static void remove_debug_file(struct sl811 *sl811)
{
debugfs_lookup_and_remove("sl811h", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
static void
sl811h_stop(struct usb_hcd *hcd)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
unsigned long flags;
del_timer_sync(&hcd->rh_timer);
spin_lock_irqsave(&sl811->lock, flags);
port_power(sl811, 0);
spin_unlock_irqrestore(&sl811->lock, flags);
}
static int
sl811h_start(struct usb_hcd *hcd)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
/* chip has been reset, VBUS power is off */
hcd->state = HC_STATE_RUNNING;
if (sl811->board) {
if (!device_can_wakeup(hcd->self.controller))
device_init_wakeup(hcd->self.controller,
sl811->board->can_wakeup);
hcd->power_budget = sl811->board->power * 2;
}
/* enable power and interrupts */
port_power(sl811, 1);
return 0;
}
/*-------------------------------------------------------------------------*/
static const struct hc_driver sl811h_hc_driver = {
.description = hcd_name,
.hcd_priv_size = sizeof(struct sl811),
/*
* generic hardware linkage
*/
.irq = sl811h_irq,
.flags = HCD_USB11 | HCD_MEMORY,
/* Basic lifecycle operations */
.start = sl811h_start,
.stop = sl811h_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = sl811h_urb_enqueue,
.urb_dequeue = sl811h_urb_dequeue,
.endpoint_disable = sl811h_endpoint_disable,
/*
* periodic schedule support
*/
.get_frame_number = sl811h_get_frame,
/*
* root hub support
*/
.hub_status_data = sl811h_hub_status_data,
.hub_control = sl811h_hub_control,
.bus_suspend = sl811h_bus_suspend,
.bus_resume = sl811h_bus_resume,
};
/*-------------------------------------------------------------------------*/
static void
sl811h_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct resource *res;
remove_debug_file(sl811);
usb_remove_hcd(hcd);
/* some platforms may use IORESOURCE_IO */
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
if (res)
iounmap(sl811->data_reg);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res)
iounmap(sl811->addr_reg);
usb_put_hcd(hcd);
}
static int
sl811h_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct sl811 *sl811;
struct resource *addr, *data, *ires;
int irq;
void __iomem *addr_reg;
void __iomem *data_reg;
int retval;
u8 tmp, ioaddr;
unsigned long irqflags;
if (usb_disabled())
return -ENODEV;
/* the chip may be wired for either kind of addressing */
addr = platform_get_mem_or_io(dev, 0);
data = platform_get_mem_or_io(dev, 1);
if (!addr || !data || resource_type(addr) != resource_type(data))
return -ENODEV;
/* basic sanity checks first. board-specific init logic should
* have initialized these three resources and probably board
* specific platform_data. we don't probe for IRQs, and do only
* minimal sanity checking.
*/
ires = platform_get_resource(dev, IORESOURCE_IRQ, 0);
if (dev->num_resources < 3 || !ires)
return -ENODEV;
irq = ires->start;
irqflags = ires->flags & IRQF_TRIGGER_MASK;
ioaddr = resource_type(addr) == IORESOURCE_IO;
if (ioaddr) {
/*
* NOTE: 64-bit resource->start is getting truncated
* to avoid compiler warning, assuming that ->start
* is always 32-bit for this case
*/
addr_reg = (void __iomem *) (unsigned long) addr->start;
data_reg = (void __iomem *) (unsigned long) data->start;
} else {
addr_reg = ioremap(addr->start, 1);
if (addr_reg == NULL) {
retval = -ENOMEM;
goto err2;
}
data_reg = ioremap(data->start, 1);
if (data_reg == NULL) {
retval = -ENOMEM;
goto err4;
}
}
/* allocate and initialize hcd */
hcd = usb_create_hcd(&sl811h_hc_driver, &dev->dev, dev_name(&dev->dev));
if (!hcd) {
retval = -ENOMEM;
goto err5;
}
hcd->rsrc_start = addr->start;
sl811 = hcd_to_sl811(hcd);
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
timer_setup(&sl811->timer, sl811h_timer, 0);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
spin_lock_irq(&sl811->lock);
port_power(sl811, 0);
spin_unlock_irq(&sl811->lock);
msleep(200);
tmp = sl811_read(sl811, SL11H_HWREVREG);
switch (tmp >> 4) {
case 1:
hcd->product_desc = "SL811HS v1.2";
break;
case 2:
hcd->product_desc = "SL811HS v1.5";
break;
default:
/* reject case 0, SL11S is less functional */
dev_dbg(&dev->dev, "chiprev %02x\n", tmp);
retval = -ENXIO;
goto err6;
}
/* The chip's IRQ is level triggered, active high. A requirement
* for platform device setup is to cope with things like signal
* inverters (e.g. CF is active low) or working only with edge
* triggers (e.g. most ARM CPUs). Initial driver stress testing
* was on a system with single edge triggering, so most sorts of
* triggering arrangement should work.
*
* Use resource IRQ flags if set by platform device setup.
*/
irqflags |= IRQF_SHARED;
retval = usb_add_hcd(hcd, irq, irqflags);
if (retval != 0)
goto err6;
device_wakeup_enable(hcd->self.controller);
create_debug_file(sl811);
return retval;
err6:
usb_put_hcd(hcd);
err5:
if (!ioaddr)
iounmap(data_reg);
err4:
if (!ioaddr)
iounmap(addr_reg);
err2:
dev_dbg(&dev->dev, "init error, %d\n", retval);
return retval;
}
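/* A minimal sketch of the platform device a board would register for
 * this probe; the addresses, BOARD_SL811_IRQ, and board_sl811_data are
 * all hypothetical:
 *
 *	static struct resource board_sl811_resources[] = {
 *		[0] = DEFINE_RES_MEM(0x20000000, 1),	// address register
 *		[1] = DEFINE_RES_MEM(0x20000004, 1),	// data register
 *		[2] = DEFINE_RES_IRQ(BOARD_SL811_IRQ),
 *	};
 *
 *	static struct platform_device board_sl811_device = {
 *		.name		= "sl811-hcd",
 *		.id		= -1,
 *		.resource	= board_sl811_resources,
 *		.num_resources	= ARRAY_SIZE(board_sl811_resources),
 *		.dev		= {
 *			.platform_data	= &board_sl811_data,
 *		},
 *	};
 */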
#ifdef CONFIG_PM
/* for this device there's no useful distinction between the controller
* and its root hub.
*/
static int
sl811h_suspend(struct platform_device *dev, pm_message_t state)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct sl811 *sl811 = hcd_to_sl811(hcd);
int retval = 0;
switch (state.event) {
case PM_EVENT_FREEZE:
retval = sl811h_bus_suspend(hcd);
break;
case PM_EVENT_SUSPEND:
case PM_EVENT_HIBERNATE:
case PM_EVENT_PRETHAW: /* explicitly discard hw state */
port_power(sl811, 0);
break;
}
return retval;
}
static int
sl811h_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct sl811 *sl811 = hcd_to_sl811(hcd);
/* with no "check to see if VBUS is still powered" board hook,
* let's assume it'd only be powered to enable remote wakeup.
*/
if (!sl811->port1 || !device_can_wakeup(&hcd->self.root_hub->dev)) {
sl811->port1 = 0;
port_power(sl811, 1);
usb_root_hub_lost_power(hcd->self.root_hub);
return 0;
}
return sl811h_bus_resume(hcd);
}
#else
#define sl811h_suspend NULL
#define sl811h_resume NULL
#endif
/* this driver is exported so sl811_cs can depend on it */
struct platform_driver sl811h_driver = {
.probe = sl811h_probe,
.remove_new = sl811h_remove,
.suspend = sl811h_suspend,
.resume = sl811h_resume,
.driver = {
.name = hcd_name,
},
};
EXPORT_SYMBOL(sl811h_driver);
module_platform_driver(sl811h_driver);
| linux-master | drivers/usb/host/sl811-hcd.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2002 David Brownell <[email protected]>
* (C) Copyright 2002 Hewlett-Packard Company
*
* USB Bus Glue for Samsung S3C2410
*
* Written by Christopher Hoover <[email protected]>
* Based on fragments of previous driver by Russell King et al.
*
* Modified for S3C2410 from ohci-sa1111.c, ohci-omap.c and ohci-lh7a40.c
* by Ben Dooks, <[email protected]>
* Copyright (C) 2004 Simtec Electronics
*
* Thanks to [email protected] for updates to newer kernels
*
* This file is licenced under the GPL.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-ohci-s3c2410.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
/* clock device associated with the hcd */
#define DRIVER_DESC "OHCI S3C2410 driver"
static struct clk *clk;
static struct clk *usb_clk;
static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
/* forward definitions */
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
/* conversion functions */
static struct s3c2410_hcd_info *to_s3c2410_info(struct usb_hcd *hcd)
{
return dev_get_platdata(hcd->self.controller);
}
static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
{
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
clk_prepare_enable(usb_clk);
mdelay(2); /* let the bus clock stabilise */
clk_prepare_enable(clk);
if (info != NULL) {
info->hcd = hcd;
info->report_oc = s3c2410_hcd_oc;
if (info->enable_oc != NULL)
(info->enable_oc)(info, 1);
}
}
static void s3c2410_stop_hc(struct platform_device *dev)
{
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_stop_hc:\n");
if (info != NULL) {
info->report_oc = NULL;
info->hcd = NULL;
if (info->enable_oc != NULL)
(info->enable_oc)(info, 0);
}
clk_disable_unprepare(clk);
clk_disable_unprepare(usb_clk);
}
/* ohci_s3c2410_hub_status_data
*
* update the status data from the hub with anything that
* has been detected by our system
*/
static int
ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
struct s3c2410_hcd_port *port;
int orig;
int portno;
orig = ohci_hub_status_data(hcd, buf);
if (info == NULL)
return orig;
port = &info->port[0];
/* mark any changed port as changed */
for (portno = 0; portno < 2; port++, portno++) {
if (port->oc_changed == 1 &&
port->flags & S3C_HCDFLG_USED) {
dev_dbg(hcd->self.controller,
"oc change on port %d\n", portno);
if (orig < 1)
orig = 1;
buf[0] |= 1<<(portno+1);
}
}
return orig;
}
/* s3c2410_usb_set_power
*
* configure the power on a port, by calling the platform device
* routine registered with the platform device
*/
static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info,
int port, int to)
{
if (info == NULL)
return;
if (info->power_control != NULL) {
info->port[port-1].power = to;
(info->power_control)(port-1, to);
}
}
/* ohci_s3c2410_hub_control
*
* look at control requests to the hub, and see if we need
* to take any action or over-ride the results from the
* request.
*/
static int ohci_s3c2410_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength)
{
struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
struct usb_hub_descriptor *desc;
int ret = -EINVAL;
u32 *data = (u32 *)buf;
dev_dbg(hcd->self.controller,
"s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
hcd, typeReq, wValue, wIndex, buf, wLength);
	/* if we are only a humble host without any special capabilities,
	 * process the request straight away and exit */
if (info == NULL) {
ret = ohci_hub_control(hcd, typeReq, wValue,
wIndex, buf, wLength);
goto out;
}
/* check the request to see if it needs handling */
switch (typeReq) {
case SetPortFeature:
if (wValue == USB_PORT_FEAT_POWER) {
dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
s3c2410_usb_set_power(info, wIndex, 1);
goto out;
}
break;
case ClearPortFeature:
switch (wValue) {
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(hcd->self.controller,
"ClearPortFeature: C_OVER_CURRENT\n");
if (valid_port(wIndex)) {
info->port[wIndex-1].oc_changed = 0;
info->port[wIndex-1].oc_status = 0;
}
goto out;
case USB_PORT_FEAT_OVER_CURRENT:
dev_dbg(hcd->self.controller,
"ClearPortFeature: OVER_CURRENT\n");
if (valid_port(wIndex))
info->port[wIndex-1].oc_status = 0;
goto out;
case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller,
"ClearPortFeature: POWER\n");
if (valid_port(wIndex)) {
s3c2410_usb_set_power(info, wIndex, 0);
return 0;
}
}
break;
}
ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
if (ret)
goto out;
switch (typeReq) {
case GetHubDescriptor:
/* update the hub's descriptor */
desc = (struct usb_hub_descriptor *)buf;
if (info->power_control == NULL)
return ret;
dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
desc->wHubCharacteristics);
/* remove the old configurations for power-switching, and
* over-current protection, and insert our new configuration
*/
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
desc->wHubCharacteristics |= cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM);
if (info->enable_oc) {
desc->wHubCharacteristics &= ~cpu_to_le16(
HUB_CHAR_OCPM);
desc->wHubCharacteristics |= cpu_to_le16(
HUB_CHAR_INDV_PORT_OCPM);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
desc->wHubCharacteristics);
return ret;
case GetPortStatus:
/* check port status */
dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
if (valid_port(wIndex)) {
if (info->port[wIndex-1].oc_changed)
*data |= cpu_to_le32(RH_PS_OCIC);
if (info->port[wIndex-1].oc_status)
*data |= cpu_to_le32(RH_PS_POCI);
}
}
out:
return ret;
}
/* s3c2410_hcd_oc
*
* handle an over-current report
*/
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
{
struct s3c2410_hcd_port *port;
unsigned long flags;
int portno;
if (info == NULL)
return;
port = &info->port[0];
local_irq_save(flags);
for (portno = 0; portno < 2; port++, portno++) {
if (port_oc & (1<<portno) &&
port->flags & S3C_HCDFLG_USED) {
port->oc_status = 1;
port->oc_changed = 1;
			/* ok, once over-current is detected,
			 * the port needs to be powered down
			 */
s3c2410_usb_set_power(info, portno+1, 0);
}
}
local_irq_restore(flags);
}
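/* A minimal sketch (hypothetical machine code) of the platform data
 * this glue expects; board_usb_power and board_usb_enable_oc are
 * illustrative names only:
 *
 *	static struct s3c2410_hcd_info board_usb_info = {
 *		.port[0]	= { .flags = S3C_HCDFLG_USED },
 *		.port[1]	= { .flags = S3C_HCDFLG_USED },
 *		.power_control	= board_usb_power,	// (port, on/off)
 *		.enable_oc	= board_usb_enable_oc,	// arm OC reporting
 *	};
 *
 * The board's over-current handler then reports back through
 * info->report_oc(&board_usb_info, port_bitmap), which
 * s3c2410_start_hc() points at s3c2410_hcd_oc() above.
 */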
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */
/*
* ohci_hcd_s3c2410_remove - shutdown processing for HCD
* @dev: USB Host Controller being removed
*
* Context: task context, might sleep
*
 * Reverses the effect of ohci_hcd_s3c2410_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
static void
ohci_hcd_s3c2410_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
usb_put_hcd(hcd);
}
/*
* ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs
* @dev: USB Host Controller to be probed
*
* Context: task context, might sleep
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
int retval, irq;
s3c2410_usb_set_power(info, 1, 1);
s3c2410_usb_set_power(info, 2, 1);
hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx");
if (hcd == NULL)
return -ENOMEM;
hcd->rsrc_start = dev->resource[0].start;
hcd->rsrc_len = resource_size(&dev->resource[0]);
hcd->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put;
}
clk = devm_clk_get(&dev->dev, "usb-host");
if (IS_ERR(clk)) {
dev_err(&dev->dev, "cannot get usb-host clock\n");
retval = PTR_ERR(clk);
goto err_put;
}
usb_clk = devm_clk_get(&dev->dev, "usb-bus-host");
if (IS_ERR(usb_clk)) {
dev_err(&dev->dev, "cannot get usb-bus-host clock\n");
retval = PTR_ERR(usb_clk);
goto err_put;
}
irq = platform_get_irq(dev, 0);
if (irq < 0) {
retval = irq;
goto err_put;
}
s3c2410_start_hc(dev, hcd);
retval = usb_add_hcd(hcd, irq, 0);
if (retval != 0)
goto err_ioremap;
device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
s3c2410_stop_hc(dev);
err_put:
usb_put_hcd(hcd);
return retval;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int rc = 0;
rc = ohci_suspend(hcd, do_wakeup);
if (rc)
return rc;
s3c2410_stop_hc(pdev);
return rc;
}
static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
s3c2410_start_hc(pdev, hcd);
ohci_resume(hcd, false);
return 0;
}
#else
#define ohci_hcd_s3c2410_drv_suspend NULL
#define ohci_hcd_s3c2410_drv_resume NULL
#endif
static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
.suspend = ohci_hcd_s3c2410_drv_suspend,
.resume = ohci_hcd_s3c2410_drv_resume,
};
static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = {
{ .compatible = "samsung,s3c2410-ohci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_probe,
.remove_new = ohci_hcd_s3c2410_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s3c2410-ohci",
.pm = &ohci_hcd_s3c2410_pm_ops,
.of_match_table = ohci_hcd_s3c2410_dt_ids,
},
};
static int __init ohci_s3c2410_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
/*
* The Samsung HW has some unusual quirks, which require
	 * Samsung-specific workarounds. We override certain hc_driver
* functions here to achieve that. We explicitly do not enhance
* ohci_driver_overrides to allow this more easily, since this
* is an unusual case, and we don't want to encourage others to
* override these functions by making it too easy.
*/
ohci_s3c2410_hc_driver.hub_status_data = ohci_s3c2410_hub_status_data;
ohci_s3c2410_hc_driver.hub_control = ohci_s3c2410_hub_control;
return platform_driver_register(&ohci_hcd_s3c2410_driver);
}
module_init(ohci_s3c2410_init);
static void __exit ohci_s3c2410_cleanup(void)
{
platform_driver_unregister(&ohci_hcd_s3c2410_driver);
}
module_exit(ohci_s3c2410_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c2410-ohci");
| linux-master | drivers/usb/host/ohci-s3c2410.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UHCI-specific debugging code. Invaluable when something
* goes wrong, but don't get in my face.
*
* Kernel visible pointers are surrounded in []s and bus
* visible pointers are surrounded in ()s
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2001 Johannes Erdfelt
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include "uhci-hcd.h"
#define EXTRA_SPACE 1024
static struct dentry *uhci_debugfs_root;
#ifdef CONFIG_DYNAMIC_DEBUG
/* Handle REALLY large printks so we don't overflow buffers */
static void lprintk(char *buf)
{
char *p;
/* Just write one line at a time */
while (buf) {
p = strchr(buf, '\n');
if (p)
*p = 0;
printk(KERN_DEBUG "%s\n", buf);
buf = p;
if (buf)
buf++;
}
}
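/* lprintk() takes a buffer that may span many lines (for instance one
 * built by the uhci_show_*() helpers below) and emits it one
 * KERN_DEBUG line at a time, so no single printk call gets oversized.
 */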
static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
int len, int space)
{
char *out = buf;
char *spid;
u32 status, token;
status = td_status(uhci, td);
out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
hc32_to_cpu(uhci, td->link));
out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
((status >> 27) & 3),
(status & TD_CTRL_SPD) ? "SPD " : "",
(status & TD_CTRL_LS) ? "LS " : "",
(status & TD_CTRL_IOC) ? "IOC " : "",
(status & TD_CTRL_ACTIVE) ? "Active " : "",
(status & TD_CTRL_STALLED) ? "Stalled " : "",
(status & TD_CTRL_DBUFERR) ? "DataBufErr " : "",
(status & TD_CTRL_BABBLE) ? "Babble " : "",
(status & TD_CTRL_NAK) ? "NAK " : "",
(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
status & 0x7ff);
if (out - buf > len)
goto done;
token = td_token(uhci, td);
switch (uhci_packetid(token)) {
case USB_PID_SETUP:
spid = "SETUP";
break;
case USB_PID_OUT:
spid = "OUT";
break;
case USB_PID_IN:
spid = "IN";
break;
default:
spid = "?";
break;
}
out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
token >> 21,
((token >> 19) & 1),
(token >> 15) & 15,
(token >> 8) & 127,
(token & 0xff),
spid);
out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer));
done:
if (out - buf > len)
out += sprintf(out, " ...\n");
return out - buf;
}
static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
char *buf, int len, int space)
{
char *out = buf;
struct uhci_td *td;
int i, nactive, ninactive;
char *ptype;
out += sprintf(out, "urb_priv [%p] ", urbp);
out += sprintf(out, "urb [%p] ", urbp->urb);
out += sprintf(out, "qh [%p] ", urbp->qh);
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
if (out - buf > len)
goto done;
switch (usb_pipetype(urbp->urb->pipe)) {
case PIPE_ISOCHRONOUS: ptype = "ISO"; break;
case PIPE_INTERRUPT: ptype = "INT"; break;
case PIPE_BULK: ptype = "BLK"; break;
default:
case PIPE_CONTROL: ptype = "CTL"; break;
}
out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : ""));
out += sprintf(out, " Actlen=%d%s", urbp->urb->actual_length,
(urbp->qh->type == USB_ENDPOINT_XFER_CONTROL ?
"-8" : ""));
if (urbp->urb->unlinked)
out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
out += sprintf(out, "\n");
if (out - buf > len)
goto done;
i = nactive = ninactive = 0;
list_for_each_entry(td, &urbp->td_list, list) {
if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
(++i <= 10 || debug > 2)) {
out += sprintf(out, "%*s%d: ", space + 2, "", i);
out += uhci_show_td(uhci, td, out,
len - (out - buf), 0);
if (out - buf > len)
goto tail;
} else {
if (td_status(uhci, td) & TD_CTRL_ACTIVE)
++nactive;
else
++ninactive;
}
}
if (nactive + ninactive > 0)
out += sprintf(out,
"%*s[skipped %d inactive and %d active TDs]\n",
space, "", ninactive, nactive);
done:
if (out - buf > len)
out += sprintf(out, " ...\n");
tail:
return out - buf;
}
static int uhci_show_qh(struct uhci_hcd *uhci,
struct uhci_qh *qh, char *buf, int len, int space)
{
char *out = buf;
int i, nurbs;
__hc32 element = qh_element(qh);
char *qtype;
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break;
case USB_ENDPOINT_XFER_INT: qtype = "INT"; break;
case USB_ENDPOINT_XFER_BULK: qtype = "BLK"; break;
case USB_ENDPOINT_XFER_CONTROL: qtype = "CTL"; break;
default: qtype = "Skel" ; break;
}
out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
space, "", qh, qtype,
hc32_to_cpu(uhci, qh->link),
hc32_to_cpu(uhci, element));
if (qh->type == USB_ENDPOINT_XFER_ISOC)
out += sprintf(out,
"%*s period %d phase %d load %d us, frame %x desc [%p]\n",
space, "", qh->period, qh->phase, qh->load,
qh->iso_frame, qh->iso_packet_desc);
else if (qh->type == USB_ENDPOINT_XFER_INT)
out += sprintf(out, "%*s period %d phase %d load %d us\n",
space, "", qh->period, qh->phase, qh->load);
if (out - buf > len)
goto done;
if (element & UHCI_PTR_QH(uhci))
out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
if (element & UHCI_PTR_DEPTH(uhci))
out += sprintf(out, "%*s Depth traverse\n", space, "");
if (element & cpu_to_hc32(uhci, 8))
out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");
if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci))))
out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
if (out - buf > len)
goto done;
if (list_empty(&qh->queue)) {
out += sprintf(out, "%*s queue is empty\n", space, "");
if (qh == uhci->skel_async_qh) {
out += uhci_show_td(uhci, uhci->term_td, out,
len - (out - buf), 0);
if (out - buf > len)
goto tail;
}
} else {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
if (element != LINK_TO_TD(uhci, td))
out += sprintf(out, "%*s Element != First TD\n",
space, "");
i = nurbs = 0;
list_for_each_entry(urbp, &qh->queue, node) {
if (++i <= 10) {
out += uhci_show_urbp(uhci, urbp, out,
len - (out - buf), space + 2);
if (out - buf > len)
goto tail;
}
else
++nurbs;
}
if (nurbs > 0)
out += sprintf(out, "%*s Skipped %d URBs\n",
space, "", nurbs);
}
if (out - buf > len)
goto done;
if (qh->dummy_td) {
out += sprintf(out, "%*s Dummy TD\n", space, "");
out += uhci_show_td(uhci, qh->dummy_td, out,
len - (out - buf), 0);
if (out - buf > len)
goto tail;
}
done:
if (out - buf > len)
out += sprintf(out, " ...\n");
tail:
return out - buf;
}
static int uhci_show_sc(int port, unsigned short status, char *buf)
{
return sprintf(buf, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
port,
status,
(status & USBPORTSC_SUSP) ? " Suspend" : "",
(status & USBPORTSC_OCC) ? " OverCurrentChange" : "",
(status & USBPORTSC_OC) ? " OverCurrent" : "",
(status & USBPORTSC_PR) ? " Reset" : "",
(status & USBPORTSC_LSDA) ? " LowSpeed" : "",
(status & USBPORTSC_RD) ? " ResumeDetect" : "",
(status & USBPORTSC_PEC) ? " EnableChange" : "",
(status & USBPORTSC_PE) ? " Enabled" : "",
(status & USBPORTSC_CSC) ? " ConnectChange" : "",
(status & USBPORTSC_CCS) ? " Connected" : "");
}
static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf)
{
char *rh_state;
switch (uhci->rh_state) {
case UHCI_RH_RESET:
rh_state = "reset"; break;
case UHCI_RH_SUSPENDED:
rh_state = "suspended"; break;
case UHCI_RH_AUTO_STOPPED:
rh_state = "auto-stopped"; break;
case UHCI_RH_RESUMING:
rh_state = "resuming"; break;
case UHCI_RH_SUSPENDING:
rh_state = "suspending"; break;
case UHCI_RH_RUNNING:
rh_state = "running"; break;
case UHCI_RH_RUNNING_NODEVS:
rh_state = "running, no devs"; break;
default:
rh_state = "?"; break;
}
return sprintf(buf, "Root-hub state: %s FSBR: %d\n",
rh_state, uhci->fsbr_is_on);
}
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
unsigned short usbcmd, usbstat, usbint, usbfrnum;
unsigned int flbaseadd;
unsigned char sof;
unsigned short portsc1, portsc2;
usbcmd = uhci_readw(uhci, USBCMD);
usbstat = uhci_readw(uhci, USBSTS);
usbint = uhci_readw(uhci, USBINTR);
usbfrnum = uhci_readw(uhci, USBFRNUM);
flbaseadd = uhci_readl(uhci, USBFLBASEADD);
sof = uhci_readb(uhci, USBSOF);
portsc1 = uhci_readw(uhci, USBPORTSC1);
portsc2 = uhci_readw(uhci, USBPORTSC2);
out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
usbcmd,
(usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
(usbcmd & USBCMD_CF) ? "CF " : "",
(usbcmd & USBCMD_SWDBG) ? "SWDBG " : "",
(usbcmd & USBCMD_FGR) ? "FGR " : "",
(usbcmd & USBCMD_EGSM) ? "EGSM " : "",
(usbcmd & USBCMD_GRESET) ? "GRESET " : "",
(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
(usbcmd & USBCMD_RS) ? "RS " : "");
if (out - buf > len)
goto done;
out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
usbstat,
(usbstat & USBSTS_HCH) ? "HCHalted " : "",
(usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
(usbstat & USBSTS_HSE) ? "HostSystemError " : "",
(usbstat & USBSTS_RD) ? "ResumeDetect " : "",
(usbstat & USBSTS_ERROR) ? "USBError " : "",
(usbstat & USBSTS_USBINT) ? "USBINT " : "");
if (out - buf > len)
goto done;
out += sprintf(out, " usbint = %04x\n", usbint);
out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
0xfff & (4*(unsigned int)usbfrnum));
out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
out += sprintf(out, " sof = %02x\n", sof);
if (out - buf > len)
goto done;
out += uhci_show_sc(1, portsc1, out);
if (out - buf > len)
goto done;
out += uhci_show_sc(2, portsc2, out);
if (out - buf > len)
goto done;
out += sprintf(out,
"Most recent frame: %x (%d) Last ISO frame: %x (%d)\n",
uhci->frame_number, uhci->frame_number & 1023,
uhci->last_iso_frame, uhci->last_iso_frame & 1023);
done:
if (out - buf > len)
out += sprintf(out, " ...\n");
return out - buf;
}
static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
int i, j;
struct uhci_qh *qh;
struct uhci_td *td;
struct list_head *tmp, *head;
int nframes, nerrs;
__hc32 link;
__hc32 fsbr_link;
static const char * const qh_names[] = {
"unlink", "iso", "int128", "int64", "int32", "int16",
"int8", "int4", "int2", "async", "term"
};
out += uhci_show_root_hub_state(uhci, out);
if (out - buf > len)
goto done;
out += sprintf(out, "HC status\n");
out += uhci_show_status(uhci, out, len - (out - buf));
if (out - buf > len)
goto tail;
out += sprintf(out, "Periodic load table\n");
for (i = 0; i < MAX_PHASE; ++i) {
out += sprintf(out, "\t%d", uhci->load[i]);
if (i % 8 == 7)
*out++ = '\n';
}
out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
uhci->total_load,
uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
if (debug <= 1)
goto tail;
out += sprintf(out, "Frame List\n");
nframes = 10;
nerrs = 0;
for (i = 0; i < UHCI_NUMFRAMES; ++i) {
__hc32 qh_dma;
if (out - buf > len)
goto done;
j = 0;
td = uhci->frame_cpu[i];
link = uhci->frame[i];
if (!td)
goto check_link;
if (nframes > 0) {
out += sprintf(out, "- Frame %d -> (%08x)\n",
i, hc32_to_cpu(uhci, link));
j = 1;
}
head = &td->fl_list;
tmp = head;
do {
td = list_entry(tmp, struct uhci_td, fl_list);
tmp = tmp->next;
if (link != LINK_TO_TD(uhci, td)) {
if (nframes > 0) {
out += sprintf(out,
" link does not match list entry!\n");
if (out - buf > len)
goto done;
} else
++nerrs;
}
if (nframes > 0) {
out += uhci_show_td(uhci, td, out,
len - (out - buf), 4);
if (out - buf > len)
goto tail;
}
link = td->link;
} while (tmp != head);
check_link:
qh_dma = uhci_frame_skel_link(uhci, i);
if (link != qh_dma) {
if (nframes > 0) {
if (!j) {
out += sprintf(out,
"- Frame %d -> (%08x)\n",
i, hc32_to_cpu(uhci, link));
j = 1;
}
out += sprintf(out,
" link does not match QH (%08x)!\n",
hc32_to_cpu(uhci, qh_dma));
if (out - buf > len)
goto done;
} else
++nerrs;
}
nframes -= j;
}
if (nerrs > 0)
out += sprintf(out, "Skipped %d bad links\n", nerrs);
out += sprintf(out, "Skeleton QHs\n");
if (out - buf > len)
goto done;
fsbr_link = 0;
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
int cnt = 0;
qh = uhci->skelqh[i];
out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);
if (out - buf > len)
goto tail;
		/* The last QH is the terminating QH; it's handled differently */
if (i == SKEL_TERM) {
if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td)) {
out += sprintf(out,
" skel_term_qh element is not set to term_td!\n");
if (out - buf > len)
goto done;
}
link = fsbr_link;
if (!link)
link = LINK_TO_QH(uhci, uhci->skel_term_qh);
goto check_qh_link;
}
head = &qh->node;
tmp = head->next;
while (tmp != head) {
qh = list_entry(tmp, struct uhci_qh, node);
tmp = tmp->next;
if (++cnt <= 10) {
out += uhci_show_qh(uhci, qh, out,
len - (out - buf), 4);
if (out - buf > len)
goto tail;
}
if (!fsbr_link && qh->skel >= SKEL_FSBR)
fsbr_link = LINK_TO_QH(uhci, qh);
}
if ((cnt -= 10) > 0)
out += sprintf(out, " Skipped %d QHs\n", cnt);
link = UHCI_PTR_TERM(uhci);
if (i <= SKEL_ISO)
;
else if (i < SKEL_ASYNC)
link = LINK_TO_QH(uhci, uhci->skel_async_qh);
else if (!uhci->fsbr_is_on)
;
else
link = LINK_TO_QH(uhci, uhci->skel_term_qh);
check_qh_link:
if (qh->link != link)
out += sprintf(out,
" last QH not linked to next skeleton!\n");
if (out - buf > len)
goto done;
}
done:
if (out - buf > len)
out += sprintf(out, " ...\n");
tail:
return out - buf;
}
#ifdef CONFIG_DEBUG_FS
#define MAX_OUTPUT (64 * 1024)
struct uhci_debug {
int size;
char *data;
};
static int uhci_debug_open(struct inode *inode, struct file *file)
{
struct uhci_hcd *uhci = inode->i_private;
struct uhci_debug *up;
unsigned long flags;
up = kmalloc(sizeof(*up), GFP_KERNEL);
if (!up)
return -ENOMEM;
up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
if (!up->data) {
kfree(up);
return -ENOMEM;
}
up->size = 0;
spin_lock_irqsave(&uhci->lock, flags);
if (uhci->is_initialized)
up->size = uhci_sprint_schedule(uhci, up->data,
MAX_OUTPUT - EXTRA_SPACE);
spin_unlock_irqrestore(&uhci->lock, flags);
file->private_data = up;
return 0;
}
static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
{
struct uhci_debug *up = file->private_data;
return no_seek_end_llseek_size(file, off, whence, up->size);
}
static ssize_t uhci_debug_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct uhci_debug *up = file->private_data;
return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
}
static int uhci_debug_release(struct inode *inode, struct file *file)
{
struct uhci_debug *up = file->private_data;
kfree(up->data);
kfree(up);
return 0;
}
static const struct file_operations uhci_debug_operations = {
.owner = THIS_MODULE,
.open = uhci_debug_open,
.llseek = uhci_debug_lseek,
.read = uhci_debug_read,
.release = uhci_debug_release,
};
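/*
 * A hedged sketch (not part of this file): the fops above are normally
 * wired up from the driver's init path with something like the call
 * below, where "uhci_debugfs_root" stands in for whatever debugfs
 * dentry the driver actually created:
 *
 *	uhci->dentry = debugfs_create_file(hcd->self.bus_name,
 *			S_IFREG | S_IRUGO | S_IWUSR, uhci_debugfs_root,
 *			uhci, &uhci_debug_operations);
 */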
#define UHCI_DEBUG_OPS
#endif /* CONFIG_DEBUG_FS */
#else /* CONFIG_DYNAMIC_DEBUG */
static inline void lprintk(char *buf)
{}
static inline int uhci_show_qh(struct uhci_hcd *uhci,
struct uhci_qh *qh, char *buf, int len, int space)
{
return 0;
}
static inline int uhci_sprint_schedule(struct uhci_hcd *uhci,
char *buf, int len)
{
return 0;
}
#endif
| linux-master | drivers/usb/host/uhci-debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for R-Car SoCs
*
* Copyright (C) 2014 Renesas Electronics Corporation
*/
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/usb/phy.h>
#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-rzv2m.h"
#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
/*
* - The V3 firmware is for all R-Car Gen3
 * - The V2 firmware can be used on R-Car Gen2, but it causes performance
 *   degradation, so this driver keeps using the V1 firmware on R-Car Gen2.
 * - The V1 firmware cannot be used on R-Car Gen3.
*/
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1);
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/*** Register Offset ***/
#define RCAR_USB3_AXH_STA 0x104 /* AXI Host Control Status */
#define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */
#define RCAR_USB3_DL_CTRL 0x250 /* FW Download Control & Status */
#define RCAR_USB3_FW_DATA0 0x258 /* FW Data0 */
#define RCAR_USB3_LCLK 0xa44 /* LCLK Select */
#define RCAR_USB3_CONF1 0xa48 /* USB3.0 Configuration1 */
#define RCAR_USB3_CONF2 0xa5c /* USB3.0 Configuration2 */
#define RCAR_USB3_CONF3 0xaa8 /* USB3.0 Configuration3 */
#define RCAR_USB3_RX_POL 0xab0 /* USB3.0 RX Polarity */
#define RCAR_USB3_TX_POL 0xab8 /* USB3.0 TX Polarity */
/*** Register Settings ***/
/* AXI Host Control Status */
#define RCAR_USB3_AXH_STA_B3_PLL_ACTIVE 0x00010000
#define RCAR_USB3_AXH_STA_B2_PLL_ACTIVE 0x00000001
#define RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK (RCAR_USB3_AXH_STA_B3_PLL_ACTIVE | \
RCAR_USB3_AXH_STA_B2_PLL_ACTIVE)
/* Interrupt Enable */
#define RCAR_USB3_INT_XHC_ENA 0x00000001
#define RCAR_USB3_INT_PME_ENA 0x00000002
#define RCAR_USB3_INT_HSE_ENA 0x00000004
#define RCAR_USB3_INT_ENA_VAL (RCAR_USB3_INT_XHC_ENA | \
RCAR_USB3_INT_PME_ENA | RCAR_USB3_INT_HSE_ENA)
/* FW Download Control & Status */
#define RCAR_USB3_DL_CTRL_ENABLE 0x00000001
#define RCAR_USB3_DL_CTRL_FW_SUCCESS 0x00000010
#define RCAR_USB3_DL_CTRL_FW_SET_DATA0 0x00000100
/* LCLK Select */
#define RCAR_USB3_LCLK_ENA_VAL 0x01030001
/* USB3.0 Configuration */
#define RCAR_USB3_CONF1_VAL 0x00030204
#define RCAR_USB3_CONF2_VAL 0x00030300
#define RCAR_USB3_CONF3_VAL 0x13802007
/* USB3.0 Polarity */
#define RCAR_USB3_RX_POL_VAL BIT(21)
#define RCAR_USB3_TX_POL_VAL BIT(4)
static void xhci_rcar_start_gen2(struct usb_hcd *hcd)
{
/* LCLK Select */
writel(RCAR_USB3_LCLK_ENA_VAL, hcd->regs + RCAR_USB3_LCLK);
/* USB3.0 Configuration */
writel(RCAR_USB3_CONF1_VAL, hcd->regs + RCAR_USB3_CONF1);
writel(RCAR_USB3_CONF2_VAL, hcd->regs + RCAR_USB3_CONF2);
writel(RCAR_USB3_CONF3_VAL, hcd->regs + RCAR_USB3_CONF3);
/* USB3.0 Polarity */
writel(RCAR_USB3_RX_POL_VAL, hcd->regs + RCAR_USB3_RX_POL);
writel(RCAR_USB3_TX_POL_VAL, hcd->regs + RCAR_USB3_TX_POL);
}
static int xhci_rcar_is_gen2(struct device *dev)
{
struct device_node *node = dev->of_node;
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
static void xhci_rcar_start(struct usb_hcd *hcd)
{
u32 temp;
if (hcd->regs != NULL) {
/* Interrupt Enable */
temp = readl(hcd->regs + RCAR_USB3_INT_ENA);
temp |= RCAR_USB3_INT_ENA_VAL;
writel(temp, hcd->regs + RCAR_USB3_INT_ENA);
if (xhci_rcar_is_gen2(hcd->self.controller))
xhci_rcar_start_gen2(hcd);
}
}
static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
void __iomem *regs = hcd->regs;
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
const struct firmware *fw;
int retval, index, j;
u32 data, val, temp;
/*
* According to the datasheet, "Upon the completion of FW Download,
* there is no need to write or reload FW".
*/
if (readl(regs + RCAR_USB3_DL_CTRL) & RCAR_USB3_DL_CTRL_FW_SUCCESS)
return 0;
/* request R-Car USB3.0 firmware */
retval = request_firmware(&fw, priv->firmware_name, dev);
if (retval)
return retval;
/* download R-Car USB3.0 firmware */
temp = readl(regs + RCAR_USB3_DL_CTRL);
temp |= RCAR_USB3_DL_CTRL_ENABLE;
writel(temp, regs + RCAR_USB3_DL_CTRL);
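	/*
	 * The loop below packs the firmware little-endian into 32-bit
	 * words. Worked example (illustrative bytes): fw->data[] =
	 * {0x12, 0x34, 0x56, 0x78} is written to FW_DATA0 as 0x78563412.
	 */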
for (index = 0; index < fw->size; index += 4) {
/* to avoid reading beyond the end of the buffer */
for (data = 0, j = 3; j >= 0; j--) {
if ((j + index) < fw->size)
data |= fw->data[index + j] << (8 * j);
}
writel(data, regs + RCAR_USB3_FW_DATA0);
temp = readl(regs + RCAR_USB3_DL_CTRL);
temp |= RCAR_USB3_DL_CTRL_FW_SET_DATA0;
writel(temp, regs + RCAR_USB3_DL_CTRL);
retval = readl_poll_timeout_atomic(regs + RCAR_USB3_DL_CTRL,
val, !(val & RCAR_USB3_DL_CTRL_FW_SET_DATA0),
1, 10000);
if (retval < 0)
break;
}
temp = readl(regs + RCAR_USB3_DL_CTRL);
temp &= ~RCAR_USB3_DL_CTRL_ENABLE;
writel(temp, regs + RCAR_USB3_DL_CTRL);
retval = readl_poll_timeout_atomic((regs + RCAR_USB3_DL_CTRL),
val, val & RCAR_USB3_DL_CTRL_FW_SUCCESS, 1, 10000);
release_firmware(fw);
return retval;
}
static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
{
int retval;
u32 val, mask = RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK;
retval = readl_poll_timeout_atomic(hcd->regs + RCAR_USB3_AXH_STA,
val, (val & mask) == mask, 1, 1000);
return !retval;
}
/* The USB PHY must be initialized before this function is called */
static int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
	/* If hcd->regs is NULL, skip the register accesses below */
if (!hcd->regs)
return 0;
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
return xhci_rcar_download_firmware(hcd);
}
static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
{
int ret;
ret = xhci_rcar_download_firmware(hcd);
if (!ret)
xhci_rcar_start(hcd);
return ret;
}
/*
* On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
* to 1. However, these SoCs don't support 64-bit address memory
* pointers. So, this driver clears the AC64 bit of xhci->hcc_params
* to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
* xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
*
 * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
 * and its processing speed drops while the roothub port is in U3,
 * a long delay for the STS_HALT handshake is needed in xhci_suspend();
 * this is done via the XHCI_SLOW_SUSPEND quirk.
*/
#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
.firmware_name = firmware, \
.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
XHCI_SLOW_SUSPEND, \
.init_quirk = xhci_rcar_init_quirk, \
.plat_start = xhci_rcar_start, \
.resume_quirk = xhci_rcar_resume_quirk,
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
};
static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
XHCI_SLOW_SUSPEND,
.init_quirk = xhci_rzv2m_init_quirk,
.plat_start = xhci_rzv2m_start,
};
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "renesas,xhci-r8a7790",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
.compatible = "renesas,xhci-r8a7791",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
.compatible = "renesas,xhci-r8a7793",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
.compatible = "renesas,xhci-r8a7795",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
.compatible = "renesas,xhci-r8a7796",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
.compatible = "renesas,rcar-gen2-xhci",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
.compatible = "renesas,rcar-gen3-xhci",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
.compatible = "renesas,rzv2m-xhci",
.data = &xhci_plat_renesas_rzv2m,
},
{ },
};
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static int xhci_renesas_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
priv_match = of_device_get_match_data(&pdev->dev);
return xhci_plat_probe(pdev, NULL, priv_match);
}
static struct platform_driver usb_xhci_renesas_driver = {
.probe = xhci_renesas_probe,
.remove_new = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-renesas-hcd",
.pm = &xhci_plat_pm_ops,
.of_match_table = usb_xhci_of_match,
},
};
module_platform_driver(usb_xhci_renesas_driver);
MODULE_DESCRIPTION("xHCI Platform Host Controller Driver for Renesas R-Car and RZ");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/xhci-rcar.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for RZ/V2M
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/usb/rzv2m_usb3drd.h>
#include "xhci-plat.h"
#include "xhci-rzv2m.h"
#define RZV2M_USB3_INTEN 0x1044 /* Interrupt Enable */
#define RZV2M_USB3_INT_XHC_ENA BIT(0)
#define RZV2M_USB3_INT_HSE_ENA BIT(2)
#define RZV2M_USB3_INT_ENA_VAL (RZV2M_USB3_INT_XHC_ENA \
| RZV2M_USB3_INT_HSE_ENA)
int xhci_rzv2m_init_quirk(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
rzv2m_usb3drd_reset(dev->parent, true);
return 0;
}
void xhci_rzv2m_start(struct usb_hcd *hcd)
{
u32 int_en;
if (hcd->regs) {
/* Interrupt Enable */
int_en = readl(hcd->regs + RZV2M_USB3_INTEN);
int_en |= RZV2M_USB3_INT_ENA_VAL;
writel(int_en, hcd->regs + RZV2M_USB3_INTEN);
}
}
| linux-master | drivers/usb/host/xhci-rzv2m.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 by Alan Stern
*/
/* This file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/* Set a bit in the USBCMD register */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
ehci->command |= bit;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
/* unblock posted write */
ehci_readl(ehci, &ehci->regs->command);
}
/* Clear a bit in the USBCMD register */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
ehci->command &= ~bit;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
/* unblock posted write */
ehci_readl(ehci, &ehci->regs->command);
}
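/*
 * A minimal usage sketch (mirroring ehci_poll_ASS() and
 * ehci_disable_ASE() below): the async schedule is started with
 * ehci_set_command_bit(ehci, CMD_ASE) and later stopped with
 * ehci_clear_command_bit(ehci, CMD_ASE); going through these helpers
 * keeps the cached ehci->command value in sync with the register.
 */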
/*-------------------------------------------------------------------------*/
/*
* EHCI timer support... Now using hrtimers.
*
* Lots of different events are triggered from ehci->hrtimer. Whenever
* the timer routine runs, it checks each possible event; events that are
* currently enabled and whose expiration time has passed get handled.
* The set of enabled events is stored as a collection of bitflags in
* ehci->enabled_hrtimer_events, and they are numbered in order of
* increasing delay values (ranging between 1 ms and 100 ms).
*
* Rather than implementing a sorted list or tree of all pending events,
* we keep track only of the lowest-numbered pending event, in
* ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its
* expiration time is set to the timeout value for this event.
*
* As a result, events might not get handled right away; the actual delay
* could be anywhere up to twice the requested delay. This doesn't
* matter, because none of the events are especially time-critical. The
* ones that matter most all have a delay of 1 ms, so they will be
* handled after 2 ms at most, which is okay. In addition to this, we
* allow for an expiration range of 1 ms.
*/
/*
* Delay lengths for the hrtimer event types.
* Keep this list sorted by delay length, in the same order as
* the event types indexed by enum ehci_hrtimer_event in ehci.h.
*/
static unsigned event_delays_ns[] = {
1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */
1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */
1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ACTIVE_UNLINK */
5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */
6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */
15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */
100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */
};
/* Enable a pending hrtimer event */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
bool resched)
{
ktime_t *timeout = &ehci->hr_timeouts[event];
if (resched)
*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
ehci->enabled_hrtimer_events |= (1 << event);
/* Track only the lowest-numbered pending event */
if (event < ehci->next_hrtimer_event) {
ehci->next_hrtimer_event = event;
hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
NSEC_PER_MSEC, HRTIMER_MODE_ABS);
}
}
/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void ehci_poll_ASS(struct ehci_hcd *ehci)
{
unsigned actual, want;
/* Don't enable anything if the controller isn't running (e.g., died) */
if (ehci->rh_state != EHCI_RH_RUNNING)
return;
want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
if (want != actual) {
/* Poll again later, but give up after about 2-4 ms */
if (ehci->ASS_poll_count++ < 2) {
ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
return;
}
ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
want, actual);
}
ehci->ASS_poll_count = 0;
/* The status is up-to-date; restart or stop the schedule as needed */
if (want == 0) { /* Stopped */
if (ehci->async_count > 0)
ehci_set_command_bit(ehci, CMD_ASE);
} else { /* Running */
if (ehci->async_count == 0) {
/* Turn off the schedule after a while */
ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
true);
}
}
}
/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
ehci_clear_command_bit(ehci, CMD_ASE);
}
/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void ehci_poll_PSS(struct ehci_hcd *ehci)
{
unsigned actual, want;
/* Don't do anything if the controller isn't running (e.g., died) */
if (ehci->rh_state != EHCI_RH_RUNNING)
return;
want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
if (want != actual) {
/* Poll again later, but give up after about 2-4 ms */
if (ehci->PSS_poll_count++ < 2) {
ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
return;
}
ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
want, actual);
}
ehci->PSS_poll_count = 0;
/* The status is up-to-date; restart or stop the schedule as needed */
if (want == 0) { /* Stopped */
if (ehci->periodic_count > 0)
ehci_set_command_bit(ehci, CMD_PSE);
} else { /* Running */
if (ehci->periodic_count == 0) {
/* Turn off the schedule after a while */
ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
true);
}
}
}
/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
ehci_clear_command_bit(ehci, CMD_PSE);
}
/* Poll the STS_HALT status bit; see when a dead controller stops */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
/* Give up after a few milliseconds */
if (ehci->died_poll_count++ < 5) {
/* Try again later */
ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
return;
}
ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
}
/* Clean up the mess */
ehci->rh_state = EHCI_RH_HALTED;
ehci_writel(ehci, 0, &ehci->regs->configured_flag);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
ehci_work(ehci);
end_unlink_async(ehci);
/* Not in process context, so don't try to reset the controller */
}
/* start to unlink interrupt QHs */
static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
{
bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
/*
	 * Process all the QHs on the intr_unlink_wait list that were added
* before the current unlink cycle began. The list is in
* temporal order, so stop when we reach the first entry in the
* current cycle. But if the root hub isn't running then
* process all the QHs on the list.
*/
while (!list_empty(&ehci->intr_unlink_wait)) {
struct ehci_qh *qh;
qh = list_first_entry(&ehci->intr_unlink_wait,
struct ehci_qh, unlink_node);
if (!stopped && (qh->unlink_cycle ==
ehci->intr_unlink_wait_cycle))
break;
list_del_init(&qh->unlink_node);
qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
start_unlink_intr(ehci, qh);
}
/* Handle remaining entries later */
if (!list_empty(&ehci->intr_unlink_wait)) {
ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
++ehci->intr_unlink_wait_cycle;
}
}
/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
/*
* Process all the QHs on the intr_unlink list that were added
* before the current unlink cycle began. The list is in
* temporal order, so stop when we reach the first entry in the
* current cycle. But if the root hub isn't running then
* process all the QHs on the list.
*/
ehci->intr_unlinking = true;
while (!list_empty(&ehci->intr_unlink)) {
struct ehci_qh *qh;
qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
unlink_node);
if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
break;
list_del_init(&qh->unlink_node);
end_unlink_intr(ehci, qh);
}
/* Handle remaining entries later */
if (!list_empty(&ehci->intr_unlink)) {
ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
++ehci->intr_unlink_cycle;
}
ehci->intr_unlinking = false;
}
/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct ehci_hcd *ehci)
{
if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
ehci->last_itd_to_free = list_entry(
ehci->cached_itd_list.prev,
struct ehci_itd, itd_list);
ehci->last_sitd_to_free = list_entry(
ehci->cached_sitd_list.prev,
struct ehci_sitd, sitd_list);
ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
}
}
/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct ehci_hcd *ehci)
{
struct ehci_itd *itd, *n;
struct ehci_sitd *sitd, *sn;
if (ehci->rh_state < EHCI_RH_RUNNING) {
ehci->last_itd_to_free = NULL;
ehci->last_sitd_to_free = NULL;
}
list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
list_del(&itd->itd_list);
dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
if (itd == ehci->last_itd_to_free)
break;
}
list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
list_del(&sitd->sitd_list);
dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
if (sitd == ehci->last_sitd_to_free)
break;
}
if (!list_empty(&ehci->cached_itd_list) ||
!list_empty(&ehci->cached_sitd_list))
start_free_itds(ehci);
}
/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
u32 cmd, status;
/*
* Lost IAA irqs wedge things badly; seen first with a vt8235.
* So we need this watchdog, but must protect it against both
* (a) SMP races against real IAA firing and retriggering, and
* (b) clean HC shutdown, when IAA watchdog was pending.
*/
if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
return;
/* If we get here, IAA is *REALLY* late. It's barely
* conceivable that the system is so busy that CMD_IAAD
* is still legitimately set, so let's be sure it's
* clear before we read STS_IAA. (The HC should clear
* CMD_IAAD when it sets STS_IAA.)
*/
cmd = ehci_readl(ehci, &ehci->regs->command);
/*
* If IAA is set here it either legitimately triggered
* after the watchdog timer expired (_way_ late, so we'll
* still count it as lost) ... or a silicon erratum:
* - VIA seems to set IAA without triggering the IRQ;
* - IAAD potentially cleared without setting IAA.
*/
status = ehci_readl(ehci, &ehci->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
INCR(ehci->stats.lost_iaa);
ehci_writel(ehci, STS_IAA, &ehci->regs->status);
}
ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
end_iaa_cycle(ehci);
}
/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct ehci_hcd *ehci)
{
/* Not needed if the controller isn't running or it's already enabled */
if (ehci->rh_state != EHCI_RH_RUNNING ||
(ehci->enabled_hrtimer_events &
BIT(EHCI_HRTIMER_IO_WATCHDOG)))
return;
/*
* Isochronous transfers always need the watchdog.
* For other sorts we use it only if the flag is set.
*/
if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
ehci->async_count + ehci->intr_count > 0))
ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
}
/*
* Handler functions for the hrtimer event types.
* Keep this array in the same order as the event types indexed by
* enum ehci_hrtimer_event in ehci.h.
*/
static void (*event_handlers[])(struct ehci_hcd *) = {
ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */
ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */
ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
end_unlink_async, /* EHCI_HRTIMER_ACTIVE_UNLINK */
ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */
ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */
ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */
};
static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
ktime_t now;
unsigned long events;
unsigned long flags;
unsigned e;
spin_lock_irqsave(&ehci->lock, flags);
events = ehci->enabled_hrtimer_events;
ehci->enabled_hrtimer_events = 0;
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
/*
* Check each pending event. If its time has expired, handle
* the event; otherwise re-enable it.
*/
now = ktime_get();
for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
event_handlers[e](ehci);
else
ehci_enable_event(ehci, e, false);
}
spin_unlock_irqrestore(&ehci->lock, flags);
return HRTIMER_NORESTART;
}
| linux-master | drivers/usb/host/ehci-timer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MAX3421 Host Controller driver for USB.
*
* Author: David Mosberger-Tang <[email protected]>
*
* (C) Copyright 2014 David Mosberger-Tang <[email protected]>
*
* MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
* controller on a SPI bus.
*
* Based on:
* o MAX3421E datasheet
* https://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
* o MAX3421E Programming Guide
* https://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
* o gadget/dummy_hcd.c
* For USB HCD implementation.
* o Arduino MAX3421 driver
* https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
*
 * This file is licensed under the GPL v2.
*
* Important note on worst-case (full-speed) packet size constraints
* (See USB 2.0 Section 5.6.3 and following):
*
* - control: 64 bytes
* - isochronous: 1023 bytes
* - interrupt: 64 bytes
* - bulk: 64 bytes
*
 * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
* multi-FIFO writes/reads for a single USB packet *except* for isochronous
* transfers. We don't support isochronous transfers at this time, so we
* just assume that a USB packet always fits into a single FIFO buffer.
*
* NOTE: The June 2006 version of "MAX3421E Programming Guide"
* (AN3785) has conflicting info for the RCVDAVIRQ bit:
*
* The description of RCVDAVIRQ says "The CPU *must* clear
* this IRQ bit (by writing a 1 to it) before reading the
 *	RCVFIFO data."
*
* However, the earlier section on "Programming BULK-IN
* Transfers" says * that:
*
* After the CPU retrieves the data, it clears the
* RCVDAVIRQ bit.
*
* The December 2006 version has been corrected and it consistently
* states the second behavior is the correct one.
*
 * Synchronous SPI transactions sleep, so we can't perform any such
 * transactions while holding a spin-lock (and/or while interrupts are
 * masked). To deal with this, all SPI transactions are issued from a
* single thread (max3421_spi_thread).
*/
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of.h>
#include <linux/platform_data/max3421-hcd.h>
#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
#define DRIVER_VERSION "1.0"
/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
#define USB_MAX_FRAME_NUMBER 0x7ff
#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
/* Port-change mask: */
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
USB_PORT_STAT_C_ENABLE | \
USB_PORT_STAT_C_SUSPEND | \
USB_PORT_STAT_C_OVERCURRENT | \
USB_PORT_STAT_C_RESET) << 16)
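/*
 * Worked example (flag values from the USB hub definitions, nothing new
 * defined here): the lower 16 bits of max3421_hcd->port_status hold the
 * current port state and the upper 16 bits the change flags. A fresh
 * connect sets bit 0 (USB_PORT_STAT_CONNECTION) and, via the
 * "chg << 16" update at the end of max3421_detect_conn(), bit 16
 * (the connection-change flag), which PORT_C_MASK then matches.
 */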
#define MAX3421_GPOUT_COUNT 8
enum max3421_rh_state {
MAX3421_RH_RESET,
MAX3421_RH_SUSPENDED,
MAX3421_RH_RUNNING
};
enum pkt_state {
PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
PKT_STATE_TERMINATE /* waiting to terminate control transfer */
};
enum scheduling_pass {
SCHED_PASS_PERIODIC,
SCHED_PASS_NON_PERIODIC,
SCHED_PASS_DONE
};
/* Bit numbers for max3421_hcd->todo: */
enum {
ENABLE_IRQ = 0,
RESET_HCD,
RESET_PORT,
CHECK_UNLINK,
IOPIN_UPDATE
};
struct max3421_dma_buf {
u8 data[2];
};
struct max3421_hcd {
spinlock_t lock;
struct task_struct *spi_thread;
enum max3421_rh_state rh_state;
/* lower 16 bits contain port status, upper 16 bits the change mask: */
u32 port_status;
unsigned active:1;
struct list_head ep_list; /* list of EP's with work */
/*
* The following are owned by spi_thread (may be accessed by
* SPI-thread without acquiring the HCD lock:
*/
u8 rev; /* chip revision */
u16 frame_number;
/*
* kmalloc'd buffers guaranteed to be in separate (DMA)
* cache-lines:
*/
struct max3421_dma_buf *tx;
struct max3421_dma_buf *rx;
/*
* URB we're currently processing. Must not be reset to NULL
* unless MAX3421E chip is idle:
*/
struct urb *curr_urb;
enum scheduling_pass sched_pass;
int urb_done; /* > 0 -> no errors, < 0: errno */
size_t curr_len;
u8 hien;
u8 mode;
u8 iopins[2];
unsigned long todo;
#ifdef DEBUG
unsigned long err_stat[16];
#endif
};
struct max3421_ep {
struct usb_host_endpoint *ep;
struct list_head ep_list;
u32 naks;
u16 last_active; /* frame # this ep was last active */
enum pkt_state pkt_state;
u8 retries;
u8 retransmit; /* packet needs retransmission */
};
#define MAX3421_FIFO_SIZE 64
#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
/* SPI commands: */
#define MAX3421_SPI_DIR_SHIFT 1
#define MAX3421_SPI_REG_SHIFT 3
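/*
 * Illustrative command bytes built from the shifts above (this is what
 * spi_rd8()/spi_wr8() below actually send; register numbers are the
 * MAX3421_REG_* values):
 *
 *	read HRSL (reg 31): (31 << 3) | (MAX3421_SPI_DIR_RD << 1) = 0xf8
 *	write MODE (reg 27): (27 << 3) | (MAX3421_SPI_DIR_WR << 1) = 0xda
 */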
#define MAX3421_REG_RCVFIFO 1
#define MAX3421_REG_SNDFIFO 2
#define MAX3421_REG_SUDFIFO 4
#define MAX3421_REG_RCVBC 6
#define MAX3421_REG_SNDBC 7
#define MAX3421_REG_USBIRQ 13
#define MAX3421_REG_USBIEN 14
#define MAX3421_REG_USBCTL 15
#define MAX3421_REG_CPUCTL 16
#define MAX3421_REG_PINCTL 17
#define MAX3421_REG_REVISION 18
#define MAX3421_REG_IOPINS1 20
#define MAX3421_REG_IOPINS2 21
#define MAX3421_REG_GPINIRQ 22
#define MAX3421_REG_GPINIEN 23
#define MAX3421_REG_GPINPOL 24
#define MAX3421_REG_HIRQ 25
#define MAX3421_REG_HIEN 26
#define MAX3421_REG_MODE 27
#define MAX3421_REG_PERADDR 28
#define MAX3421_REG_HCTL 29
#define MAX3421_REG_HXFR 30
#define MAX3421_REG_HRSL 31
enum {
MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
MAX3421_USBIRQ_VBUSIRQ_BIT
};
enum {
MAX3421_CPUCTL_IE_BIT = 0,
MAX3421_CPUCTL_PULSEWID0_BIT = 6,
MAX3421_CPUCTL_PULSEWID1_BIT
};
enum {
MAX3421_USBCTL_PWRDOWN_BIT = 4,
MAX3421_USBCTL_CHIPRES_BIT
};
enum {
MAX3421_PINCTL_GPXA_BIT = 0,
MAX3421_PINCTL_GPXB_BIT,
MAX3421_PINCTL_POSINT_BIT,
MAX3421_PINCTL_INTLEVEL_BIT,
MAX3421_PINCTL_FDUPSPI_BIT,
MAX3421_PINCTL_EP0INAK_BIT,
MAX3421_PINCTL_EP2INAK_BIT,
MAX3421_PINCTL_EP3INAK_BIT,
};
enum {
MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
MAX3421_HI_RWU_BIT, /* remote wakeup */
MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
MAX3421_HI_SNDBAV_BIT, /* send buffer available */
MAX3421_HI_SUSDN_BIT, /* suspend operation done */
MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
MAX3421_HI_FRAME_BIT, /* frame generator */
MAX3421_HI_HXFRDN_BIT, /* host transfer done */
};
enum {
MAX3421_HCTL_BUSRST_BIT = 0,
MAX3421_HCTL_FRMRST_BIT,
MAX3421_HCTL_SAMPLEBUS_BIT,
MAX3421_HCTL_SIGRSM_BIT,
MAX3421_HCTL_RCVTOG0_BIT,
MAX3421_HCTL_RCVTOG1_BIT,
MAX3421_HCTL_SNDTOG0_BIT,
MAX3421_HCTL_SNDTOG1_BIT
};
enum {
MAX3421_MODE_HOST_BIT = 0,
MAX3421_MODE_LOWSPEED_BIT,
MAX3421_MODE_HUBPRE_BIT,
MAX3421_MODE_SOFKAENAB_BIT,
MAX3421_MODE_SEPIRQ_BIT,
MAX3421_MODE_DELAYISO_BIT,
MAX3421_MODE_DMPULLDN_BIT,
MAX3421_MODE_DPPULLDN_BIT
};
enum {
MAX3421_HRSL_OK = 0,
MAX3421_HRSL_BUSY,
MAX3421_HRSL_BADREQ,
MAX3421_HRSL_UNDEF,
MAX3421_HRSL_NAK,
MAX3421_HRSL_STALL,
MAX3421_HRSL_TOGERR,
MAX3421_HRSL_WRONGPID,
MAX3421_HRSL_BADBC,
MAX3421_HRSL_PIDERR,
MAX3421_HRSL_PKTERR,
MAX3421_HRSL_CRCERR,
MAX3421_HRSL_KERR,
MAX3421_HRSL_JERR,
MAX3421_HRSL_TIMEOUT,
MAX3421_HRSL_BABBLE,
MAX3421_HRSL_RESULT_MASK = 0xf,
MAX3421_HRSL_RCVTOGRD_BIT = 4,
MAX3421_HRSL_SNDTOGRD_BIT,
MAX3421_HRSL_KSTATUS_BIT,
MAX3421_HRSL_JSTATUS_BIT
};
/* Return same error-codes as ohci.h:cc_to_error: */
static const int hrsl_to_error[] = {
[MAX3421_HRSL_OK] = 0,
[MAX3421_HRSL_BUSY] = -EINVAL,
[MAX3421_HRSL_BADREQ] = -EINVAL,
[MAX3421_HRSL_UNDEF] = -EINVAL,
[MAX3421_HRSL_NAK] = -EAGAIN,
[MAX3421_HRSL_STALL] = -EPIPE,
[MAX3421_HRSL_TOGERR] = -EILSEQ,
[MAX3421_HRSL_WRONGPID] = -EPROTO,
[MAX3421_HRSL_BADBC] = -EREMOTEIO,
[MAX3421_HRSL_PIDERR] = -EPROTO,
[MAX3421_HRSL_PKTERR] = -EPROTO,
[MAX3421_HRSL_CRCERR] = -EILSEQ,
[MAX3421_HRSL_KERR] = -EIO,
[MAX3421_HRSL_JERR] = -EIO,
[MAX3421_HRSL_TIMEOUT] = -ETIME,
[MAX3421_HRSL_BABBLE] = -EOVERFLOW
};
/*
* See https://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
* reasonable overview of how control transfers use the IN/OUT
* tokens.
*/
#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
#define MAX3421_HXFR_SETUP 0x10
#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
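/*
 * Worked example (values follow directly from the macros above): a
 * bulk-IN transfer on endpoint 1 writes 0x01 to the HXFR register, the
 * matching bulk-OUT writes 0x21, and a SETUP packet writes 0x10.
 */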
#define field(val, bit) ((val) << (bit))
static inline s16
frame_diff(u16 left, u16 right)
{
return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
}
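/*
 * Worked example (illustrative values): frame_diff(0x003, 0x7fe) == 5,
 * i.e. frame 3 lies five frames after frame 2046 once the 11-bit frame
 * counter has wrapped around.
 */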
static inline struct max3421_hcd *
hcd_to_max3421(struct usb_hcd *hcd)
{
return (struct max3421_hcd *) hcd->hcd_priv;
}
static inline struct usb_hcd *
max3421_to_hcd(struct max3421_hcd *max3421_hcd)
{
return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
}
static u8
spi_rd8(struct usb_hcd *hcd, unsigned int reg)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct spi_transfer transfer;
struct spi_message msg;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
max3421_hcd->tx->data[0] =
(field(reg, MAX3421_SPI_REG_SHIFT) |
field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
transfer.tx_buf = max3421_hcd->tx->data;
transfer.rx_buf = max3421_hcd->rx->data;
transfer.len = 2;
spi_message_add_tail(&transfer, &msg);
spi_sync(spi, &msg);
return max3421_hcd->rx->data[1];
}
static void
spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct spi_transfer transfer;
struct spi_message msg;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
max3421_hcd->tx->data[0] =
(field(reg, MAX3421_SPI_REG_SHIFT) |
field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
max3421_hcd->tx->data[1] = val;
transfer.tx_buf = max3421_hcd->tx->data;
transfer.len = 2;
spi_message_add_tail(&transfer, &msg);
spi_sync(spi, &msg);
}
static void
spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct spi_transfer transfer[2];
struct spi_message msg;
memset(transfer, 0, sizeof(transfer));
spi_message_init(&msg);
max3421_hcd->tx->data[0] =
(field(reg, MAX3421_SPI_REG_SHIFT) |
field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
transfer[0].tx_buf = max3421_hcd->tx->data;
transfer[0].len = 1;
transfer[1].rx_buf = buf;
transfer[1].len = len;
spi_message_add_tail(&transfer[0], &msg);
spi_message_add_tail(&transfer[1], &msg);
spi_sync(spi, &msg);
}
static void
spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct spi_transfer transfer[2];
struct spi_message msg;
memset(transfer, 0, sizeof(transfer));
spi_message_init(&msg);
max3421_hcd->tx->data[0] =
(field(reg, MAX3421_SPI_REG_SHIFT) |
field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
transfer[0].tx_buf = max3421_hcd->tx->data;
transfer[0].len = 1;
transfer[1].tx_buf = buf;
transfer[1].len = len;
spi_message_add_tail(&transfer[0], &msg);
spi_message_add_tail(&transfer[1], &msg);
spi_sync(spi, &msg);
}
/*
* Figure out the correct setting for the LOWSPEED and HUBPRE mode
* bits. The HUBPRE bit needs to be set when MAX3421E operates at
* full speed, but it's talking to a low-speed device (i.e., through a
* hub). Setting that bit ensures that every low-speed packet is
* preceded by a full-speed PRE PID. Possible configurations:
*
* Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
* FULL FULL => 0 0
* FULL LOW => 1 1
* LOW LOW => 1 0
* LOW FULL => 1 0
*/
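/*
 * Worked example for the second row of the table above: a low-speed
 * device behind a full-speed hub leaves USB_PORT_STAT_LOW_SPEED clear
 * in port_status (the root port itself runs at full speed) while
 * dev->speed is USB_SPEED_LOW, so the function below sets both the
 * LOWSPEED and HUBPRE bits.
 */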
static void
max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
mode |= mode_lowspeed;
mode &= ~mode_hubpre;
} else if (dev->speed == USB_SPEED_LOW) {
mode |= mode_lowspeed | mode_hubpre;
} else {
mode &= ~(mode_lowspeed | mode_hubpre);
}
if (mode != max3421_hcd->mode) {
max3421_hcd->mode = mode;
spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
}
}
/*
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
{
int rcvtog, sndtog;
u8 hctl;
/* setup new endpoint's toggle bits: */
rcvtog = usb_gettoggle(dev, epnum, 0);
sndtog = usb_gettoggle(dev, epnum, 1);
hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
/*
* Note: devnum for one and the same device can change during
* address-assignment so it's best to just always load the
* address whenever the end-point changed/was forced.
*/
spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
}
static int
max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
{
spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
return MAX3421_HXFR_SETUP;
}
static int
max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int epnum = usb_pipeendpoint(urb->pipe);
max3421_hcd->curr_len = 0;
max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
return MAX3421_HXFR_BULK_IN(epnum);
}
static int
max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int epnum = usb_pipeendpoint(urb->pipe);
u32 max_packet;
void *src;
src = urb->transfer_buffer + urb->actual_length;
if (fast_retransmit) {
if (max3421_hcd->rev == 0x12) {
/* work around rev 0x12 bug: */
spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
}
return MAX3421_HXFR_BULK_OUT(epnum);
}
max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
* We do not support isochronous transfers at this
* time.
*/
dev_err(&spi->dev,
"%s: packet-size of %u too big (limit is %u bytes)",
__func__, max_packet, MAX3421_FIFO_SIZE);
max3421_hcd->urb_done = -EMSGSIZE;
return -EMSGSIZE;
}
max3421_hcd->curr_len = min((urb->transfer_buffer_length -
urb->actual_length), max_packet);
spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
return MAX3421_HXFR_BULK_OUT(epnum);
}
/*
* Issue the next host-transfer command.
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb = max3421_hcd->curr_urb;
struct max3421_ep *max3421_ep;
int cmd = -EINVAL;
if (!urb)
return; /* nothing to do */
max3421_ep = urb->ep->hcpriv;
switch (max3421_ep->pkt_state) {
case PKT_STATE_SETUP:
cmd = max3421_ctrl_setup(hcd, urb);
break;
case PKT_STATE_TRANSFER:
if (usb_urb_dir_in(urb))
cmd = max3421_transfer_in(hcd, urb);
else
cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
break;
case PKT_STATE_TERMINATE:
/*
* IN transfers are terminated with HS_OUT token,
* OUT transfers with HS_IN:
*/
if (usb_urb_dir_in(urb))
cmd = MAX3421_HXFR_HS_OUT;
else
cmd = MAX3421_HXFR_HS_IN;
break;
}
if (cmd < 0)
return;
/* issue the command and wait for host-xfer-done interrupt: */
spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
}
/*
* Find the next URB to process and start its execution.
*
* At this time, we do not anticipate ever connecting a USB hub to the
 * MAX3421 chip, so at most one USB device can be connected and we can use
* a simplistic scheduler: at the start of a frame, schedule all
* periodic transfers. Once that is done, use the remainder of the
* frame to process non-periodic (bulk & control) transfers.
*
* Preconditions:
* o Caller must NOT hold HCD spinlock.
* o max3421_hcd->curr_urb MUST BE NULL.
* o MAX3421E chip must be idle.
*/
static int
max3421_select_and_start_urb(struct usb_hcd *hcd)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb, *curr_urb = NULL;
struct max3421_ep *max3421_ep;
int epnum;
struct usb_host_endpoint *ep;
struct list_head *pos;
unsigned long flags;
spin_lock_irqsave(&max3421_hcd->lock, flags);
for (;
max3421_hcd->sched_pass < SCHED_PASS_DONE;
++max3421_hcd->sched_pass)
list_for_each(pos, &max3421_hcd->ep_list) {
urb = NULL;
max3421_ep = container_of(pos, struct max3421_ep,
ep_list);
ep = max3421_ep->ep;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
if (max3421_hcd->sched_pass !=
SCHED_PASS_PERIODIC)
continue;
break;
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
if (max3421_hcd->sched_pass !=
SCHED_PASS_NON_PERIODIC)
continue;
break;
}
if (list_empty(&ep->urb_list))
continue; /* nothing to do */
urb = list_first_entry(&ep->urb_list, struct urb,
urb_list);
if (urb->unlinked) {
dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
__func__, urb, urb->unlinked);
max3421_hcd->curr_urb = urb;
max3421_hcd->urb_done = 1;
spin_unlock_irqrestore(&max3421_hcd->lock,
flags);
return 1;
}
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
/*
* Allow one control transaction per
* frame per endpoint:
*/
if (frame_diff(max3421_ep->last_active,
max3421_hcd->frame_number) == 0)
continue;
break;
case USB_ENDPOINT_XFER_BULK:
if (max3421_ep->retransmit
&& (frame_diff(max3421_ep->last_active,
max3421_hcd->frame_number)
== 0))
/*
* We already tried this EP
* during this frame and got a
* NAK or error; wait for next frame
*/
continue;
break;
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
if (frame_diff(max3421_hcd->frame_number,
max3421_ep->last_active)
< urb->interval)
/*
* We already processed this
* end-point in the current
* frame
*/
continue;
break;
}
/* move current ep to tail: */
list_move_tail(pos, &max3421_hcd->ep_list);
curr_urb = urb;
goto done;
}
done:
if (!curr_urb) {
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return 0;
}
urb = max3421_hcd->curr_urb = curr_urb;
epnum = usb_endpoint_num(&urb->ep->desc);
if (max3421_ep->retransmit)
/* restart (part of) a USB transaction: */
max3421_ep->retransmit = 0;
else {
/* start USB transaction: */
if (usb_endpoint_xfer_control(&ep->desc)) {
/*
* See USB 2.0 spec section 8.6.1
* Initialization via SETUP Token:
*/
usb_settoggle(urb->dev, epnum, 0, 1);
usb_settoggle(urb->dev, epnum, 1, 1);
max3421_ep->pkt_state = PKT_STATE_SETUP;
} else
max3421_ep->pkt_state = PKT_STATE_TRANSFER;
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
max3421_ep->last_active = max3421_hcd->frame_number;
max3421_set_address(hcd, urb->dev, epnum);
max3421_set_speed(hcd, urb->dev);
max3421_next_transfer(hcd, 0);
return 1;
}
/*
* Check all endpoints for URBs that got unlinked.
*
* Caller must NOT hold HCD spinlock.
*/
static int
max3421_check_unlink(struct usb_hcd *hcd)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct max3421_ep *max3421_ep;
struct usb_host_endpoint *ep;
struct urb *urb, *next;
unsigned long flags;
int retval = 0;
spin_lock_irqsave(&max3421_hcd->lock, flags);
list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
ep = max3421_ep->ep;
list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
if (urb->unlinked) {
retval = 1;
dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
__func__, urb, urb->unlinked);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&max3421_hcd->lock,
flags);
usb_hcd_giveback_urb(hcd, urb, 0);
spin_lock_irqsave(&max3421_hcd->lock, flags);
}
}
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return retval;
}
/*
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_slow_retransmit(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb = max3421_hcd->curr_urb;
struct max3421_ep *max3421_ep;
max3421_ep = urb->ep->hcpriv;
max3421_ep->retransmit = 1;
max3421_hcd->curr_urb = NULL;
}
/*
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_recv_data_available(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb = max3421_hcd->curr_urb;
size_t remaining, transfer_size;
u8 rcvbc;
rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
if (rcvbc > MAX3421_FIFO_SIZE)
rcvbc = MAX3421_FIFO_SIZE;
if (urb->actual_length >= urb->transfer_buffer_length)
remaining = 0;
else
remaining = urb->transfer_buffer_length - urb->actual_length;
transfer_size = rcvbc;
if (transfer_size > remaining)
transfer_size = remaining;
if (transfer_size > 0) {
void *dst = urb->transfer_buffer + urb->actual_length;
spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
urb->actual_length += transfer_size;
max3421_hcd->curr_len = transfer_size;
}
/* ack the RCVDAV irq now that the FIFO has been read: */
spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
}
static void
max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
struct urb *urb = max3421_hcd->curr_urb;
struct max3421_ep *max3421_ep = urb->ep->hcpriv;
int switch_sndfifo;
/*
* If an OUT command results in any response other than OK
* (i.e., error or NAK), we have to perform a dummy-write to
* SNDBC so the FIFO gets switched back to us. Otherwise, we
* get out of sync with the SNDFIFO double buffer.
*/
switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
usb_urb_dir_out(urb));
switch (result_code) {
case MAX3421_HRSL_OK:
return; /* this shouldn't happen */
case MAX3421_HRSL_WRONGPID: /* received wrong PID */
case MAX3421_HRSL_BUSY: /* SIE busy */
case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
case MAX3421_HRSL_UNDEF: /* reserved */
case MAX3421_HRSL_KERR: /* K-state instead of response */
case MAX3421_HRSL_JERR: /* J-state instead of response */
/*
* packet experienced an error that we cannot recover
* from; report error
*/
max3421_hcd->urb_done = hrsl_to_error[result_code];
dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
__func__, hrsl);
break;
case MAX3421_HRSL_TOGERR:
if (usb_urb_dir_in(urb))
; /* don't do anything (device will switch toggle) */
else {
/* flip the send toggle bit: */
int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
sndtog ^= 1;
spi_wr8(hcd, MAX3421_REG_HCTL,
BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
}
fallthrough;
case MAX3421_HRSL_BADBC: /* bad byte count */
case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
case MAX3421_HRSL_CRCERR: /* CRC error */
case MAX3421_HRSL_BABBLE: /* device talked too long */
case MAX3421_HRSL_TIMEOUT:
if (max3421_ep->retries++ < USB_MAX_RETRIES)
			/* retry the packet in the next frame */
max3421_slow_retransmit(hcd);
else {
/* Based on ohci.h cc_to_err[]: */
max3421_hcd->urb_done = hrsl_to_error[result_code];
dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
__func__, hrsl);
}
break;
case MAX3421_HRSL_STALL:
dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
__func__, hrsl);
max3421_hcd->urb_done = hrsl_to_error[result_code];
break;
case MAX3421_HRSL_NAK:
/*
* Device wasn't ready for data or has no data
		 * available: retry the packet.
*/
max3421_next_transfer(hcd, 1);
switch_sndfifo = 0;
break;
}
if (switch_sndfifo)
spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
}
/*
* Caller must NOT hold HCD spinlock.
*/
static int
max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
u32 max_packet;
if (urb->actual_length >= urb->transfer_buffer_length)
return 1; /* read is complete, so we're done */
/*
* USB 2.0 Section 5.3.2 Pipes: packets must be full size
* except for last one.
*/
max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
* We do not support isochronous transfers at this
* time...
*/
dev_err(&spi->dev,
"%s: packet-size of %u too big (limit is %u bytes)",
__func__, max_packet, MAX3421_FIFO_SIZE);
return -EINVAL;
}
if (max3421_hcd->curr_len < max_packet) {
if (urb->transfer_flags & URB_SHORT_NOT_OK) {
/*
* remaining > 0 and received an
* unexpected partial packet ->
* error
*/
return -EREMOTEIO;
} else {
/* short read, but it's OK */
return 1;
}
}
return 0; /* not done */
}
/*
* Caller must NOT hold HCD spinlock.
*/
static int
max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
urb->actual_length += max3421_hcd->curr_len;
if (urb->actual_length < urb->transfer_buffer_length)
return 0;
if (urb->transfer_flags & URB_ZERO_PACKET) {
/*
* Some hardware needs a zero-size packet at the end
* of a bulk-out transfer if the last transfer was a
* full-sized packet (i.e., such hardware uses a short
* packet of less than max_packet bytes as the indicator
* that the end of the transfer has been reached).
*/
u32 max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max3421_hcd->curr_len == max_packet)
return 0;
}
return 1;
}
/*
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_host_transfer_done(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb = max3421_hcd->curr_urb;
struct max3421_ep *max3421_ep;
u8 result_code, hrsl;
int urb_done = 0;
max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
BIT(MAX3421_HI_RCVDAV_BIT));
hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
#ifdef DEBUG
++max3421_hcd->err_stat[result_code];
#endif
max3421_ep = urb->ep->hcpriv;
if (unlikely(result_code != MAX3421_HRSL_OK)) {
max3421_handle_error(hcd, hrsl);
return;
}
max3421_ep->naks = 0;
max3421_ep->retries = 0;
switch (max3421_ep->pkt_state) {
case PKT_STATE_SETUP:
if (urb->transfer_buffer_length > 0)
max3421_ep->pkt_state = PKT_STATE_TRANSFER;
else
max3421_ep->pkt_state = PKT_STATE_TERMINATE;
break;
case PKT_STATE_TRANSFER:
if (usb_urb_dir_in(urb))
urb_done = max3421_transfer_in_done(hcd, urb);
else
urb_done = max3421_transfer_out_done(hcd, urb);
if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
/*
* We aren't really done - we still need to
* terminate the control transfer:
*/
max3421_hcd->urb_done = urb_done = 0;
max3421_ep->pkt_state = PKT_STATE_TERMINATE;
}
break;
case PKT_STATE_TERMINATE:
urb_done = 1;
break;
}
if (urb_done)
max3421_hcd->urb_done = urb_done;
else
max3421_next_transfer(hcd, 0);
}
/*
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_detect_conn(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
unsigned int jk, have_conn = 0;
u32 old_port_status, chg;
unsigned long flags;
u8 hrsl, mode;
hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
(((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
mode = max3421_hcd->mode;
switch (jk) {
case 0x0: /* SE0: disconnect */
/*
* Turn off SOFKAENAB bit to avoid getting interrupt
* every millisecond:
*/
mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
break;
case 0x1: /* J=1,K=0: device speed matches the current LOWSPEED setting */
case 0x2: /* J=0,K=1: device speed is the opposite of the current setting */
if (jk == 0x2)
/* need to switch to the other speed: */
mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
/* turn on SOFKAENAB bit: */
mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
have_conn = 1;
break;
case 0x3: /* illegal */
break;
}
max3421_hcd->mode = mode;
spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
spin_lock_irqsave(&max3421_hcd->lock, flags);
old_port_status = max3421_hcd->port_status;
if (have_conn)
max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
else
max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
else
max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
chg = (old_port_status ^ max3421_hcd->port_status);
max3421_hcd->port_status |= chg << 16;
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
}
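/*
 * Hard interrupt handler.  The chip is only reachable via SPI, so all
 * real work is deferred to the SPI thread; the level-triggered IRQ is
 * masked here and re-enabled by the thread once it has been serviced.
 */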
static irqreturn_t
max3421_irq_handler(int irq, void *dev_id)
{
struct usb_hcd *hcd = dev_id;
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
if (max3421_hcd->spi_thread)
wake_up_process(max3421_hcd->spi_thread);
if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
disable_irq_nosync(spi->irq);
return IRQ_HANDLED;
}
#ifdef DEBUG
static void
dump_eps(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct max3421_ep *max3421_ep;
struct usb_host_endpoint *ep;
char ubuf[512], *dp, *end;
unsigned long flags;
struct urb *urb;
int epnum, ret;
spin_lock_irqsave(&max3421_hcd->lock, flags);
list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
ep = max3421_ep->ep;
dp = ubuf;
end = dp + sizeof(ubuf);
*dp = '\0';
list_for_each_entry(urb, &ep->urb_list, urb_list) {
ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
usb_pipetype(urb->pipe),
usb_urb_dir_in(urb) ? "IN" : "OUT",
urb->actual_length,
urb->transfer_buffer_length);
if (ret < 0 || ret >= end - dp)
break; /* error or buffer full */
dp += ret;
}
epnum = usb_endpoint_num(&ep->desc);
pr_info("EP%01u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
epnum, max3421_ep->pkt_state, max3421_ep->last_active,
max3421_ep->retries, max3421_ep->naks,
max3421_ep->retransmit, ubuf);
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
}
#endif /* DEBUG */
/* Return zero if no work was performed, 1 otherwise. */
static int
max3421_handle_irqs(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
u32 chg, old_port_status;
unsigned long flags;
u8 hirq;
/*
* Read and ack pending interrupts (CPU must never
* clear SNDBAV directly and RCVDAV must be cleared by
* max3421_recv_data_available()!):
*/
hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
hirq &= max3421_hcd->hien;
if (!hirq)
return 0;
spi_wr8(hcd, MAX3421_REG_HIRQ,
hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
BIT(MAX3421_HI_RCVDAV_BIT)));
if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
& USB_MAX_FRAME_NUMBER);
max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
}
if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
max3421_recv_data_available(hcd);
if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
max3421_host_transfer_done(hcd);
if (hirq & BIT(MAX3421_HI_CONDET_BIT))
max3421_detect_conn(hcd);
/*
* Now process interrupts that may affect HCD state
* other than the end-points:
*/
spin_lock_irqsave(&max3421_hcd->lock, flags);
old_port_status = max3421_hcd->port_status;
if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
/* BUSEVENT due to completion of Bus Reset */
max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
} else {
/* BUSEVENT due to completion of Bus Resume */
pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
}
}
if (hirq & BIT(MAX3421_HI_RWU_BIT))
pr_info("%s: RWU\n", __func__);
if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
pr_info("%s: SUSDN\n", __func__);
chg = (old_port_status ^ max3421_hcd->port_status);
max3421_hcd->port_status |= chg << 16;
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
#ifdef DEBUG
{
static unsigned long last_time;
char sbuf[16 * 16], *dp, *end;
int i;
if (time_after(jiffies, last_time + 5*HZ)) {
dp = sbuf;
end = sbuf + sizeof(sbuf);
*dp = '\0';
for (i = 0; i < 16; ++i) {
int ret = snprintf(dp, end - dp, " %lu",
max3421_hcd->err_stat[i]);
if (ret < 0 || ret >= end - dp)
break; /* error or buffer full */
dp += ret;
}
pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
memset(max3421_hcd->err_stat, 0,
sizeof(max3421_hcd->err_stat));
last_time = jiffies;
dump_eps(hcd);
}
}
#endif
return 1;
}
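/*
 * Chip-reset the MAX3421E and bring it back up in host mode: wait for
 * the oscillator to stabilize, sample the bus to detect an attached
 * device, and re-enable the interrupts the driver depends on.  Always
 * returns 1 so the caller counts this as work performed.
 */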
static int
max3421_reset_hcd(struct usb_hcd *hcd)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int timeout;
/* perform a chip reset and wait for OSCIRQ signal to appear: */
spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
/* clear reset: */
spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
timeout = 1000;
while (1) {
if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
& BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
break;
if (--timeout < 0) {
dev_err(&spi->dev,
"timed out waiting for oscillator OK signal\n");
return 1;
}
cond_resched();
}
/*
* Turn on host mode, automatic generation of SOF packets, and
* enable pull-down resistors on DM/DP:
*/
max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
BIT(MAX3421_MODE_SOFKAENAB_BIT) |
BIT(MAX3421_MODE_DMPULLDN_BIT) |
BIT(MAX3421_MODE_DPPULLDN_BIT));
spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
/* reset frame-number: */
max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
/* sample the state of the D+ and D- lines */
spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
max3421_detect_conn(hcd);
/* enable frame, connection-detected, and bus-event interrupts: */
max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
BIT(MAX3421_HI_CONDET_BIT) |
BIT(MAX3421_HI_BUSEVENT_BIT));
spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
/* enable interrupts: */
spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
return 1;
}
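/*
 * Complete the current URB: save the chip's data-toggle state back
 * into the USB core, unlink the URB from its endpoint, and give it
 * back.  The giveback must happen without the HCD spinlock held.
 */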
static int
max3421_urb_done(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
unsigned long flags;
struct urb *urb;
int status;
status = max3421_hcd->urb_done;
max3421_hcd->urb_done = 0;
if (status > 0)
status = 0;
urb = max3421_hcd->curr_urb;
if (urb) {
/* save the old end-points toggles: */
u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
int epnum = usb_endpoint_num(&urb->ep->desc);
/* no locking: HCD (i.e., we) own toggles, don't we? */
usb_settoggle(urb->dev, epnum, 0, rcvtog);
usb_settoggle(urb->dev, epnum, 1, sndtog);
max3421_hcd->curr_urb = NULL;
spin_lock_irqsave(&max3421_hcd->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
/* must be called without the HCD spinlock: */
usb_hcd_giveback_urb(hcd, urb, status);
}
return 1;
}
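/*
 * Main worker thread.  All SPI traffic to the chip is issued from this
 * thread; the interrupt handler and the HCD entry points merely set
 * bits in ->todo and wake it up.
 */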
static int
max3421_spi_thread(void *dev_id)
{
struct usb_hcd *hcd = dev_id;
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int i, i_worked = 1;
/* set full-duplex SPI mode, low-active interrupt pin: */
spi_wr8(hcd, MAX3421_REG_PINCTL,
(BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
while (!kthread_should_stop()) {
max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
break;
dev_err(&spi->dev, "bad rev 0x%02x\n", max3421_hcd->rev);
msleep(10000);
}
dev_info(&spi->dev, "rev 0x%x, SPI clk %uHz, bpw %u, irq %d\n",
max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
spi->irq);
while (!kthread_should_stop()) {
if (!i_worked) {
/*
* We'll be waiting for wakeups from the hard
* interrupt handler, so now is a good time to
* sync our hien with the chip:
*/
spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
set_current_state(TASK_INTERRUPTIBLE);
if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo))
enable_irq(spi->irq);
schedule();
__set_current_state(TASK_RUNNING);
}
i_worked = 0;
if (max3421_hcd->urb_done)
i_worked |= max3421_urb_done(hcd);
else if (max3421_handle_irqs(hcd))
i_worked = 1;
else if (!max3421_hcd->curr_urb)
i_worked |= max3421_select_and_start_urb(hcd);
if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo))
/* reset the HCD: */
i_worked |= max3421_reset_hcd(hcd);
if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) {
/* perform a USB bus reset: */
spi_wr8(hcd, MAX3421_REG_HCTL,
BIT(MAX3421_HCTL_BUSRST_BIT));
i_worked = 1;
}
if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo))
i_worked |= max3421_check_unlink(hcd);
if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) {
/*
* IOPINS1/IOPINS2 do not auto-increment, so we can't
* use spi_wr_buf().
*/
for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
val = ((val & 0xf0) |
(max3421_hcd->iopins[i] & 0x0f));
spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
max3421_hcd->iopins[i] = val;
}
i_worked = 1;
}
}
set_current_state(TASK_RUNNING);
dev_info(&spi->dev, "SPI thread exiting\n");
return 0;
}
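/* Request a USB bus reset; the SPI thread performs the actual reset. */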
static int
max3421_reset_port(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED);
max3421_hcd->port_status |= USB_PORT_STAT_RESET;
set_bit(RESET_PORT, &max3421_hcd->todo);
wake_up_process(max3421_hcd->spi_thread);
return 0;
}
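/* ->reset() hc_driver callback: queue a full chip reset for the SPI thread. */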
static int
max3421_reset(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
hcd->self.sg_tablesize = 0;
hcd->speed = HCD_USB2;
hcd->self.root_hub->speed = USB_SPEED_FULL;
set_bit(RESET_HCD, &max3421_hcd->todo);
wake_up_process(max3421_hcd->spi_thread);
return 0;
}
static int
max3421_start(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
spin_lock_init(&max3421_hcd->lock);
max3421_hcd->rh_state = MAX3421_RH_RUNNING;
INIT_LIST_HEAD(&max3421_hcd->ep_list);
hcd->power_budget = POWER_BUDGET;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
return 0;
}
static void
max3421_stop(struct usb_hcd *hcd)
{
}
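/*
 * Queue an URB: allocate per-endpoint state on first use, link the URB
 * to its endpoint, and kick the SPI thread to restart scheduling.
 */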
static int
max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct max3421_ep *max3421_ep;
unsigned long flags;
int retval;
switch (usb_pipetype(urb->pipe)) {
case PIPE_INTERRUPT:
case PIPE_ISOCHRONOUS:
if (urb->interval < 0) {
dev_err(&spi->dev,
"%s: interval=%d for intr-/iso-pipe; expected > 0\n",
__func__, urb->interval);
return -EINVAL;
}
break;
default:
break;
}
spin_lock_irqsave(&max3421_hcd->lock, flags);
max3421_ep = urb->ep->hcpriv;
if (!max3421_ep) {
/* gets freed in max3421_endpoint_disable: */
max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC);
if (!max3421_ep) {
retval = -ENOMEM;
goto out;
}
max3421_ep->ep = urb->ep;
max3421_ep->last_active = max3421_hcd->frame_number;
urb->ep->hcpriv = max3421_ep;
list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval == 0) {
/* Since we added to the queue, restart scheduling: */
max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
wake_up_process(max3421_hcd->spi_thread);
}
out:
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return retval;
}
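/* Mark the URB as unlinked and let the SPI thread drop it. */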
static int
max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
unsigned long flags;
int retval;
spin_lock_irqsave(&max3421_hcd->lock, flags);
/*
* This will set urb->unlinked which in turn causes the entry
* to be dropped at the next opportunity.
*/
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (retval == 0) {
set_bit(CHECK_UNLINK, &max3421_hcd->todo);
wake_up_process(max3421_hcd->spi_thread);
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return retval;
}
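/* Free the per-endpoint state allocated on first use in max3421_urb_enqueue(). */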
static void
max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
unsigned long flags;
spin_lock_irqsave(&max3421_hcd->lock, flags);
if (ep->hcpriv) {
struct max3421_ep *max3421_ep = ep->hcpriv;
/* remove myself from the ep_list: */
if (!list_empty(&max3421_ep->ep_list))
list_del(&max3421_ep->ep_list);
kfree(max3421_ep);
ep->hcpriv = NULL;
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
}
static int
max3421_get_frame_number(struct usb_hcd *hcd)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
return max3421_hcd->frame_number;
}
/*
* Should return a non-zero value when any port is undergoing a resume
* transition while the root hub is suspended.
*/
static int
max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
unsigned long flags;
int retval = 0;
spin_lock_irqsave(&max3421_hcd->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
*buf = 0;
if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
*buf = (1 << 1); /* port 1 (the only port) has a status change */
dev_dbg(hcd->self.controller,
"port status 0x%08x has changes\n",
max3421_hcd->port_status);
retval = 1;
if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
}
done:
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return retval;
}
static inline void
hub_descriptor(struct usb_hub_descriptor *desc)
{
memset(desc, 0, sizeof(*desc));
/*
* See Table 11-13: Hub Descriptor in USB 2.0 spec.
*/
desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
}
/*
* Set the MAX3421E general-purpose output with number PIN_NUMBER to
* VALUE (0 or 1). PIN_NUMBER must be in the range 1..8; for
* any other value, this function acts as a no-op.
*/
static void
max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
u8 mask, idx;
--pin_number;
if (pin_number >= MAX3421_GPOUT_COUNT)
return;
mask = 1u << (pin_number % 4);
idx = pin_number / 4;
if (value)
max3421_hcd->iopins[idx] |= mask;
else
max3421_hcd->iopins[idx] &= ~mask;
set_bit(IOPIN_UPDATE, &max3421_hcd->todo);
wake_up_process(max3421_hcd->spi_thread);
}
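/*
 * Root-hub control requests.  The MAX3421E exposes a single port; port
 * power (VBUS) is switched via one of the chip's general-purpose
 * outputs, as configured in the platform data.
 */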
static int
max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
char *buf, u16 length)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct max3421_hcd_platform_data *pdata;
unsigned long flags;
int retval = 0;
pdata = spi->dev.platform_data;
spin_lock_irqsave(&max3421_hcd->lock, flags);
switch (type_req) {
case ClearHubFeature:
break;
case ClearPortFeature:
switch (value) {
case USB_PORT_FEAT_SUSPEND:
break;
case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller, "power-off\n");
max3421_gpout_set_value(hcd, pdata->vbus_gpout,
!pdata->vbus_active_level);
fallthrough;
default:
max3421_hcd->port_status &= ~(1 << value);
}
break;
case GetHubDescriptor:
hub_descriptor((struct usb_hub_descriptor *) buf);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
case GetPortErrorCount:
case SetHubDepth:
/* USB3 only */
goto error;
case GetHubStatus:
*(__le32 *) buf = cpu_to_le32(0);
break;
case GetPortStatus:
if (index != 1) {
retval = -EPIPE;
goto error;
}
((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
((__le16 *) buf)[1] =
cpu_to_le16(max3421_hcd->port_status >> 16);
break;
case SetHubFeature:
retval = -EPIPE;
break;
case SetPortFeature:
switch (value) {
case USB_PORT_FEAT_LINK_STATE:
case USB_PORT_FEAT_U1_TIMEOUT:
case USB_PORT_FEAT_U2_TIMEOUT:
case USB_PORT_FEAT_BH_PORT_RESET:
goto error;
case USB_PORT_FEAT_SUSPEND:
if (max3421_hcd->active)
max3421_hcd->port_status |=
USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller, "power-on\n");
max3421_hcd->port_status |= USB_PORT_STAT_POWER;
max3421_gpout_set_value(hcd, pdata->vbus_gpout,
pdata->vbus_active_level);
break;
case USB_PORT_FEAT_RESET:
max3421_reset_port(hcd);
fallthrough;
default:
if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
!= 0)
max3421_hcd->port_status |= (1 << value);
}
break;
default:
dev_dbg(hcd->self.controller,
"hub control req%04x v%04x i%04x l%d\n",
type_req, value, index, length);
error: /* "protocol stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
return retval;
}
static int
max3421_bus_suspend(struct usb_hcd *hcd)
{
return -1;
}
static int
max3421_bus_resume(struct usb_hcd *hcd)
{
return -1;
}
static const struct hc_driver max3421_hcd_desc = {
.description = "max3421",
.product_desc = DRIVER_DESC,
.hcd_priv_size = sizeof(struct max3421_hcd),
.flags = HCD_USB11,
.reset = max3421_reset,
.start = max3421_start,
.stop = max3421_stop,
.get_frame_number = max3421_get_frame_number,
.urb_enqueue = max3421_urb_enqueue,
.urb_dequeue = max3421_urb_dequeue,
.endpoint_disable = max3421_endpoint_disable,
.hub_status_data = max3421_hub_status_data,
.hub_control = max3421_hub_control,
.bus_suspend = max3421_bus_suspend,
.bus_resume = max3421_bus_resume,
};
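/* Parse the "maxim,vbus-en-pin" DT property into the platform data. */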
static int
max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
{
int retval;
uint32_t value[2];
if (!pdata)
return -EINVAL;
retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
if (retval) {
dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
return retval;
}
dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);
pdata->vbus_gpout = value[0];
pdata->vbus_active_level = value[1];
return 0;
}
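/*
 * Probe: validate the SPI/DT configuration, allocate the HCD and the
 * SPI transfer buffers, start the worker thread, then register the
 * host controller and its interrupt handler.
 */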
static int
max3421_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct max3421_hcd *max3421_hcd;
struct usb_hcd *hcd = NULL;
struct max3421_hcd_platform_data *pdata = NULL;
int retval;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to set up SPI bus\n");
return -EFAULT;
}
if (!spi->irq) {
dev_err(dev, "Failed to get SPI IRQ\n");
return -EFAULT;
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
retval = -ENOMEM;
goto error;
}
retval = max3421_of_vbus_en_pin(dev, pdata);
if (retval)
goto error;
spi->dev.platform_data = pdata;
}
pdata = spi->dev.platform_data;
if (!pdata) {
dev_err(&spi->dev, "driver configuration data is not provided\n");
retval = -EFAULT;
goto error;
}
if (pdata->vbus_active_level > 1) {
dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
retval = -EINVAL;
goto error;
}
if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
retval = -EINVAL;
goto error;
}
retval = -ENOMEM;
hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
dev_name(&spi->dev));
if (!hcd) {
dev_err(&spi->dev, "failed to create HCD structure\n");
goto error;
}
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
max3421_hcd = hcd_to_max3421(hcd);
INIT_LIST_HEAD(&max3421_hcd->ep_list);
spi_set_drvdata(spi, max3421_hcd);
max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
if (!max3421_hcd->tx)
goto error;
max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
if (!max3421_hcd->rx)
goto error;
max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
"max3421_spi_thread");
if (IS_ERR(max3421_hcd->spi_thread)) {
retval = PTR_ERR(max3421_hcd->spi_thread);
max3421_hcd->spi_thread = NULL;
dev_err(&spi->dev, "failed to create SPI thread\n");
goto error;
}
retval = usb_add_hcd(hcd, 0, 0);
if (retval) {
dev_err(&spi->dev, "failed to add HCD\n");
goto error;
}
retval = request_irq(spi->irq, max3421_irq_handler,
IRQF_TRIGGER_LOW, "max3421", hcd);
if (retval < 0) {
dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
goto error;
}
return 0;
error:
if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
devm_kfree(&spi->dev, pdata);
spi->dev.platform_data = NULL;
}
if (hcd) {
kfree(max3421_hcd->tx);
kfree(max3421_hcd->rx);
if (max3421_hcd->spi_thread)
kthread_stop(max3421_hcd->spi_thread);
usb_put_hcd(hcd);
}
return retval;
}
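/* Undo max3421_probe(): remove the HCD, stop the SPI thread, release the IRQ. */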
static void
max3421_remove(struct spi_device *spi)
{
struct max3421_hcd *max3421_hcd;
struct usb_hcd *hcd;
unsigned long flags;
max3421_hcd = spi_get_drvdata(spi);
hcd = max3421_to_hcd(max3421_hcd);
usb_remove_hcd(hcd);
spin_lock_irqsave(&max3421_hcd->lock, flags);
kthread_stop(max3421_hcd->spi_thread);
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
free_irq(spi->irq, hcd);
usb_put_hcd(hcd);
}
static const struct of_device_id max3421_of_match_table[] = {
{ .compatible = "maxim,max3421", },
{},
};
MODULE_DEVICE_TABLE(of, max3421_of_match_table);
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
.driver = {
.name = "max3421-hcd",
.of_match_table = max3421_of_match_table,
},
};
module_spi_driver(max3421_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Mosberger <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/max3421-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
#include <linux/usb.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
* initializes the segment to zero, and sets the private next pointer to NULL.
*
* Section 4.11.1.1:
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state,
unsigned int max_packet,
gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
if (!seg)
return NULL;
seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
if (max_packet) {
seg->bounce_buf = kzalloc_node(max_packet, flags,
dev_to_node(dev));
if (!seg->bounce_buf) {
dma_pool_free(xhci->segment_pool, seg->trbs, dma);
kfree(seg);
return NULL;
}
}
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
}
seg->dma = dma;
seg->next = NULL;
return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
kfree(seg->bounce_buf);
kfree(seg);
}
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *first)
{
struct xhci_segment *seg;
seg = first->next;
while (seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
xhci_segment_free(xhci, first);
}
/*
* Make the prev segment point to the next segment.
*
* Change the last TRB in the prev segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_segment *prev,
struct xhci_segment *next,
enum xhci_ring_type type, bool chain_links)
{
u32 val;
if (!prev || !next)
return;
prev->next = next;
if (type != TYPE_EVENT) {
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
cpu_to_le64(next->dma);
/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
if (chain_links)
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
struct xhci_segment *next;
bool chain_links;
if (!ring || !first || !last)
return;
/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
chain_links = !!(xhci_link_trb_quirk(xhci) ||
(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)));
next = ring->enq_seg->next;
xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
xhci_link_segments(last, next, ring->type, chain_links);
ring->num_segs += num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT-1].link.control
|= cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 1KB aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I drop the lower 10 bits, the
* key for that segment is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as the key. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *seg,
gfp_t mem_flags)
{
unsigned long key;
int ret;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
/* Skip any segments that were already added. */
if (radix_tree_lookup(trb_address_map, key))
return 0;
ret = radix_tree_maybe_preload(mem_flags);
if (ret)
return ret;
ret = radix_tree_insert(trb_address_map,
key, ring);
radix_tree_preload_end();
return ret;
}
static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_segment *seg)
{
unsigned long key;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
if (radix_tree_lookup(trb_address_map, key))
radix_tree_delete(trb_address_map, key);
}
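/*
 * Insert radix-tree mappings for every segment from first_seg through
 * last_seg; on failure, back out the mappings added so far.
 */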
static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
struct xhci_segment *failed_seg;
int ret;
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
seg = first_seg;
do {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
if (seg == last_seg)
return 0;
seg = seg->next;
} while (seg != first_seg);
return 0;
remove_streams:
failed_seg = seg;
seg = first_seg;
do {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
seg = seg->next;
} while (seg != first_seg);
return ret;
}
static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
struct xhci_segment *seg;
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
seg = ring->first_seg;
do {
xhci_remove_segment_mapping(ring->trb_address_map, seg);
seg = seg->next;
} while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
ring->first_seg, ring->last_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
trace_xhci_ring_free(ring);
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
xhci_free_segments_for_ring(xhci, ring->first_seg);
}
kfree(ring);
}
void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
ring->enq_seg = ring->first_seg;
ring->dequeue = ring->enqueue;
ring->deq_seg = ring->first_seg;
/* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to handover ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
*
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = cycle_state;
/*
* Each segment ends in a link TRB, and we leave one extra TRB
* for software accounting purposes
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_segment *prev;
bool chain_links;
/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
chain_links = !!(xhci_link_trb_quirk(xhci) ||
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)));
prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!prev)
return -ENOMEM;
num_segs--;
*first = prev;
while (num_segs > 0) {
struct xhci_segment *next;
next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!next) {
prev = *first;
while (prev) {
next = prev->next;
xhci_segment_free(xhci, prev);
prev = next;
}
return -ENOMEM;
}
xhci_link_segments(prev, next, type, chain_links);
prev = next;
num_segs--;
}
xhci_link_segments(prev, *first, type, chain_links);
*last = prev;
return 0;
}
/*
* Create a new ring with zero or more segments.
*
* Link each segment together into a ring.
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
if (!ring)
return NULL;
ring->num_segs = num_segs;
ring->bounce_buf_len = max_packet;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
&ring->last_seg, num_segs, cycle_state, type,
max_packet, flags);
if (ret)
goto fail;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
/* See section 4.9.2.1 and 6.4.4.1 */
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
xhci_initialize_ring_info(ring, cycle_state);
trace_xhci_ring_alloc(ring);
return ring;
fail:
kfree(ring);
return NULL;
}
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
virt_dev->eps[ep_index].ring = NULL;
}
/*
* Expand an existing ring.
* Allocate the new segments and link them into the existing ring.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_new_segs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
int ret;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
num_new_segs, ring->cycle_state, ring->type,
ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
if (ring->type == TYPE_STREAM)
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
if (ret) {
struct xhci_segment *next;
do {
next = first->next;
xhci_segment_free(xhci, first);
if (first == last)
break;
first = next;
} while (true);
return ret;
}
xhci_link_rings(xhci, ring, first, last, num_new_segs);
trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeeded, now has %d segments",
ring->num_segs);
return 0;
}
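/*
 * Allocate a device or input context from the device DMA pool.  An
 * input context carries one extra entry (the input control context)
 * ahead of the slot and endpoint contexts.
 */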
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
return NULL;
ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
if (!ctx)
return NULL;
ctx->type = type;
ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
if (!ctx->bytes) {
kfree(ctx);
return NULL;
}
return ctx;
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(
struct xhci_container_ctx *ctx)
{
if (ctx->type != XHCI_CTX_TYPE_INPUT)
return NULL;
return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (ctx->type == XHCI_CTX_TYPE_DEVICE)
return (struct xhci_slot_ctx *)ctx->bytes;
return (struct xhci_slot_ctx *)
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int ep_index)
{
/* increment ep index by offset of start of ep ctx array */
ep_index++;
if (ctx->type == XHCI_CTX_TYPE_INPUT)
ep_index++;
return (struct xhci_ep_ctx *)
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
/***************** Streams structures manipulation *************************/
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (size > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev, size, stream_ctx, dma);
else if (size > SMALL_STREAM_ARRAY_SIZE)
dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
else
dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
}
/*
* The stream context array for each endpoint with bulk streams enabled can
* vary in size, based on:
* - how many streams the endpoint supports,
* - the maximum primary stream array size the host controller supports,
* - and how many streams the device driver asks for.
*
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = size_mul(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
if (size > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev, size, dma, mem_flags);
if (size > SMALL_STREAM_ARRAY_SIZE)
return dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, dma);
else
return dma_pool_zalloc(xhci->small_streams_pool, mem_flags, dma);
}
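/*
 * Map a TRB DMA address back to its transfer ring; for stream
 * endpoints this walks the radix tree keyed on the upper bits of the
 * address.
 */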
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address)
{
if (ep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&ep->stream_info->trb_address_map,
address >> TRB_SEGMENT_SHIFT);
return ep->ring;
}
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
* drivers.
*
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
unsigned int num_streams,
unsigned int max_packet, gfp_t mem_flags)
{
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
u64 addr;
int ret;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
num_streams, num_stream_ctxs);
if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
return NULL;
}
xhci->cmd_ring_reserved_trbs++;
stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
dev_to_node(dev));
if (!stream_info)
goto cleanup_trbs;
stream_info->num_streams = num_streams;
stream_info->num_stream_ctxs = num_stream_ctxs;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kcalloc_node(
num_streams, sizeof(struct xhci_ring *), mem_flags,
dev_to_node(dev));
if (!stream_info->stream_rings)
goto cleanup_info;
/* Initialize the array of DMA addresses for stream rings for the HW. */
stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
goto cleanup_ring_array;
/* Allocate everything needed to free the stream rings later */
stream_info->free_streams_command =
xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!stream_info->free_streams_command)
goto cleanup_ctx;
INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
/* Allocate rings for all the streams that the driver will use,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
stream_info->stream_ctx_array[cur_stream].stream_ring =
cpu_to_le64(addr);
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr);
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
goto cleanup_rings;
}
}
/* Leave the other unused stream ring pointers in the stream context
* array initialized to zero. This will cause the xHC to give us an
* error if the device asks for a stream ID we don't have setup (if it
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
return stream_info;
cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
xhci_free_stream_ctx(xhci,
stream_info->num_stream_ctxs,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
cleanup_trbs:
xhci->cmd_ring_reserved_trbs--;
return NULL;
}
/*
* Sets the MaxPStreams field and the Linear Stream Array field.
* Sets the dequeue pointer to the stream context array.
*/
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_stream_info *stream_info)
{
u32 max_primary_streams;
/* MaxPStreams is the number of stream context array entries, not the
* number we're actually using. Must be in 2^(MaxPstreams + 1) format.
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Setting number of stream ctx array entries to %u",
1 << (max_primary_streams + 1));
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
| EP_HAS_LSA);
ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
/*
* Sets the MaxPStreams field and the Linear Stream Array field to 0.
* Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
* not at the beginning of the ring).
*/
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep)
{
dma_addr_t addr;
ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint.
*
* Caller should fix the endpoint context streams fields.
*/
void xhci_free_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info)
{
int cur_stream;
struct xhci_ring *cur_ring;
if (!stream_info)
return;
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
xhci->cmd_ring_reserved_trbs--;
if (stream_info->stream_ctx_array)
xhci_free_stream_ctx(xhci,
stream_info->num_stream_ctxs,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
kfree(stream_info->stream_rings);
kfree(stream_info);
}
/***************** Device context manipulation *************************/
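/* Remove this slot's TT bandwidth entries from its root port's TT list. */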
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
struct list_head *tt_list_head;
struct xhci_tt_bw_info *tt_info, *next;
bool slot_found = false;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
*/
if (virt_dev->real_port == 0 ||
virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad real port.\n");
return;
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
slot_found = true;
list_del(&tt_info->tt_list);
kfree(tt_info);
} else if (slot_found) {
break;
}
}
}
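/*
 * Allocate one TT bandwidth-tracking structure per TT (one per hub
 * port for multi-TT hubs) and add them to the root port's TT list.
 */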
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_tt_bw_info *tt_info;
unsigned int num_ports;
int i, j;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!tt->multi)
num_ports = 1;
else
num_ports = hdev->maxchild;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
dev_to_node(dev));
if (!tt_info)
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
&xhci->rh_bw[virt_dev->real_port - 1].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
bw_table = &tt_info->bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
return 0;
free_tts:
xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
return -ENOMEM;
}
/* All the xhci_tds in the ring's TD list should be freed at this point.
* Should be called with xhci->lock held if there is any chance the TT lists
* will be manipulated by the configure endpoint, allocate device, or update
* hub functions while this function is removing the TT entries from the list.
*/
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *dev;
int i;
int old_active_eps = 0;
/* Slot ID 0 is reserved */
if (slot_id == 0 || !xhci->devs[slot_id])
return;
dev = xhci->devs[slot_id];
xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
if (!dev)
return;
trace_xhci_free_virt_device(dev);
if (dev->tt_info)
old_active_eps = dev->tt_info->active_eps;
for (i = 0; i < 31; i++) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
/*
* Endpoints are normally deleted from the bandwidth list when
* endpoints are dropped, before device is freed.
* If host is dying or being removed then endpoints aren't
* dropped cleanly, so delete the endpoint from list here.
* Only applicable for hosts with software bandwidth checking.
*/
if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
list_del_init(&dev->eps[i].bw_endpoint_list);
xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
slot_id, i);
}
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, dev, old_active_eps);
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
xhci_free_container_ctx(xhci, dev->out_ctx);
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
/*
* Free a virt_device structure.
* If the virt_device added a tt_info (a hub) and has children pointing to
* that tt_info, then free the child first. Recursive.
* We can't rely on udev at this point to find child-parent relationships.
*/
static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct list_head *tt_list_head;
struct xhci_tt_bw_info *tt_info, *next;
int i;
vdev = xhci->devs[slot_id];
if (!vdev)
return;
if (vdev->real_port == 0 ||
vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad vdev->real_port.\n");
goto out;
}
tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list? */
if (tt_info->slot_id == slot_id) {
/* are any devices using this tt_info? */
for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
vdev = xhci->devs[i];
if (vdev && (vdev->tt_info == tt_info))
xhci_free_virt_devices_depth_first(
xhci, i);
}
}
}
out:
/* we are now at a leaf device */
xhci_debugfs_remove_slot(xhci, slot_id);
xhci_free_virt_device(xhci, slot_id);
}
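/*
 * Allocate and initialize a virt_device for the given slot: output and
 * input contexts, per-endpoint lists, and the default control endpoint
 * ring.  Returns 1 on success and 0 on failure.
 */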
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
struct xhci_virt_device *dev;
int i;
/* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) {
xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
return 0;
}
dev = kzalloc(sizeof(*dev), flags);
if (!dev)
return 0;
dev->slot_id = slot_id;
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d output ctx = 0x%pad (dma)\n", slot_id, &dev->out_ctx->dma);
/* Allocate the (input) device context for address device command */
dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
if (!dev->in_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d input ctx = 0x%pad (dma)\n", slot_id, &dev->in_ctx->dma);
/* Initialize the cancellation and bandwidth list for each ep */
for (i = 0; i < 31; i++) {
dev->eps[i].ep_index = i;
dev->eps[i].vdev = dev;
dev->eps[i].xhci = xhci;
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
dev->udev = udev;
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
slot_id,
&xhci->dcbaa->dev_context_ptrs[slot_id],
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
trace_xhci_alloc_virt_device(dev);
xhci->devs[slot_id] = dev;
return 1;
fail:
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
xhci_free_container_ctx(xhci, dev->out_ctx);
kfree(dev);
return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct xhci_virt_device *virt_dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_ring *ep_ring;
virt_dev = xhci->devs[udev->slot_id];
ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
ep_ring = virt_dev->eps[0].ring;
/*
* FIXME we don't keep track of the dequeue pointer very well after a
* Set TR dequeue pointer, so we're setting the dequeue pointer of the
* host to our enqueue pointer. This should only be called after a
* configured device has reset, so all control transfers should have
* been completed or cancelled before the reset.
*/
ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
ep_ring->enqueue)
| ep_ring->cycle_state);
}
/*
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
* The xHCI hardware wants to know the roothub port number that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
* registers. Call xhci_find_raw_port_number() to get real index.
*/
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct usb_device *top_dev;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
hcd = xhci_get_usb3_hcd(xhci);
else
hcd = xhci->main_hcd;
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
u32 port_num;
u32 max_packets;
struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
if (udev->slot_id == 0 || !dev) {
xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
udev->slot_id);
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
case USB_SPEED_SUPER_PLUS:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
max_packets = MAX_PACKET(512);
break;
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
break;
case USB_SPEED_HIGH:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
max_packets = MAX_PACKET(64);
break;
/* USB core guesses at a 64-byte max packet first for FS devices */
case USB_SPEED_FULL:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
max_packets = MAX_PACKET(64);
break;
case USB_SPEED_LOW:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
max_packets = MAX_PACKET(8);
break;
default:
/* Speed was set earlier, this shouldn't happen. */
return -EINVAL;
}
/* Find the root hub port this device is under */
port_num = xhci_find_real_port_number(xhci, udev);
if (!port_num)
return -EINVAL;
slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
/* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
dev->fake_port = top_dev->portnum;
dev->real_port = port_num;
xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
* descendant of one), it counts as a primary bandwidth domain, not a
* secondary bandwidth domain under a TT. An xhci_tt_info structure
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
rh_bw = &xhci->rh_bw[port_num - 1];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
continue;
if (!dev->udev->tt->multi ||
(udev->tt->multi &&
tt_bw->ttport == dev->udev->ttport)) {
dev->bw_table = &tt_bw->bw_table;
dev->tt_info = tt_bw;
break;
}
}
if (!dev->tt_info)
xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
}
/* Is this a LS/FS device under an external HS hub? */
if (udev->tt && udev->tt->hub->parent) {
slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
(udev->ttport << 8));
if (udev->tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
/* Step 4 - ring already allocated */
/* Step 5 */
ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
max_packets);
ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
dev->eps[0].ring->cycle_state);
trace_xhci_setup_addressable_virt_device(dev);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
return 0;
}
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval;
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d %sframes\n",
ep->desc.bEndpointAddress,
1 << interval,
udev->speed == USB_SPEED_FULL ? "" : "micro");
if (udev->speed == USB_SPEED_FULL) {
/*
* Full speed isoc endpoints specify interval in frames,
* not microframes. We are using microframes everywhere,
* so adjust accordingly.
*/
interval += 3; /* 1 frame = 2^3 uframes */
}
return interval;
}
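/*
 * Illustrative sketch, not part of the driver: the arithmetic above can be
 * checked in a standalone userspace program. demo_parse_exponent() is a
 * hypothetical stand-in that mirrors only the math (clamp to 1..16,
 * subtract one, add 2^3 microframes per frame for full speed).
 */
#include <stdio.h>

static unsigned int demo_parse_exponent(unsigned int binterval, int fullspeed)
{
	unsigned int interval = binterval;

	if (interval < 1)		/* clamp_val(bInterval, 1, 16) */
		interval = 1;
	if (interval > 16)
		interval = 16;
	interval -= 1;			/* 2^(bInterval - 1) -> 2^interval */
	if (fullspeed)
		interval += 3;		/* 1 frame = 2^3 microframes */
	return interval;
}

int main(void)
{
	/* FS isoc ep, bInterval = 4: 2^3 frames -> 2^6 uframes = 8 ms */
	printf("exponent = %u\n", demo_parse_exponent(4, 1));	/* prints 6 */
	return 0;
}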
/*
* Convert bInterval expressed in microframes (in 1-255 range) to exponent of
* microframes, rounded down to nearest power of 2.
*/
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
struct usb_host_endpoint *ep, unsigned int desc_interval,
unsigned int min_exponent, unsigned int max_exponent)
{
unsigned int interval;
interval = fls(desc_interval) - 1;
interval = clamp_val(interval, min_exponent, max_exponent);
if ((1 << interval) != desc_interval)
dev_dbg(&udev->dev,
"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
desc_interval);
return interval;
}
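/*
 * Illustrative sketch, not part of the driver: fls() returns the 1-based
 * index of the highest set bit, so fls(x) - 1 is floor(log2(x)), i.e. the
 * interval rounded *down* to a power of two. A hypothetical userspace
 * equivalent built on GCC's __builtin_clz():
 */
#include <stdio.h>

static unsigned int demo_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* mirrors kernel fls() */
}

int main(void)
{
	/* a 9-microframe bInterval rounds down to 2^3 = 8 microframes */
	printf("exponent = %u\n", demo_fls(9) - 1);	/* prints 3 */
	return 0;
}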
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (ep->desc.bInterval == 0)
return 0;
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval, 0, 15);
}
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval * 8, 3, 10);
}
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
* is set to N, it will service the endpoint every 2^(Interval)*125us.
*
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
switch (udev->speed) {
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc)) {
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
fallthrough; /* SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
}
break;
case USB_SPEED_FULL:
if (usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
break;
}
/*
* Fall through for interrupt endpoint interval decoding
* since it uses the same rules as low speed interrupt
* endpoints.
*/
fallthrough;
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_frame_interval(udev, ep);
}
break;
default:
BUG();
}
return interval;
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed < USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
}
static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
/* Super speed and Plus have max burst in ep companion desc */
if (udev->speed >= USB_SPEED_SUPER)
return ep->ss_ep_comp.bMaxBurst;
if (udev->speed == USB_SPEED_HIGH &&
(usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)))
return usb_endpoint_maxp_mult(&ep->desc) - 1;
return 0;
}
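/*
 * Illustrative sketch, not part of the driver: for high speed periodic
 * endpoints the "additional transaction opportunities" live in bits 12:11
 * of wMaxPacketSize. demo_decode_maxp() is a hypothetical helper showing
 * how one raw descriptor value splits into max packet size and burst.
 */
#include <stdio.h>
#include <stdint.h>

static void demo_decode_maxp(uint16_t wMaxPacketSize)
{
	unsigned int maxp = wMaxPacketSize & 0x7ff;		/* bits 10:0 */
	unsigned int mult = ((wMaxPacketSize >> 11) & 3) + 1;	/* one-based */

	/* the xHCI Max Burst field is zero-based, hence the "- 1" above */
	printf("maxp=%u transactions/uframe=%u max_burst=%u\n",
	       maxp, mult, mult - 1);
}

int main(void)
{
	demo_decode_maxp(0x1400);	/* 1024 bytes, 3 transactions/uframe */
	return 0;
}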
static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
int in;
in = usb_endpoint_dir_in(&ep->desc);
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
return CTRL_EP;
case USB_ENDPOINT_XFER_BULK:
return in ? BULK_IN_EP : BULK_OUT_EP;
case USB_ENDPOINT_XFER_ISOC:
return in ? ISOC_IN_EP : ISOC_OUT_EP;
case USB_ENDPOINT_XFER_INT:
return in ? INT_IN_EP : INT_OUT_EP;
}
return 0;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int max_burst;
int max_packet;
/* Only applies for interrupt or isochronous endpoints */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
/* SuperSpeedPlus Isoc ep sending over 48k per esit */
if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = usb_endpoint_maxp(&ep->desc);
max_burst = usb_endpoint_maxp_mult(&ep->desc);
	/* usb_endpoint_maxp_mult() is one-based, so this is at least one transfer per ESIT */
return max_packet * max_burst;
}
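/*
 * Worked example (a sketch using the decode above): a high speed isoc
 * endpoint advertising wMaxPacketSize = 0x1400 moves 1024 bytes per
 * transaction with 3 transactions per microframe, so its max ESIT payload
 * is 1024 * 3 = 3072 bytes.
 */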
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
* Drivers will have to call usb_alloc_streams() to do that.
*/
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
struct usb_host_endpoint *ep,
gfp_t mem_flags)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
unsigned int max_packet;
enum xhci_ring_type ring_type;
u32 max_esit_payload;
u32 endpoint_type;
unsigned int max_burst;
unsigned int interval;
unsigned int mult;
unsigned int avg_trb_len;
unsigned int err_count = 0;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
endpoint_type = xhci_get_endpoint_type(ep);
if (!endpoint_type)
return -EINVAL;
ring_type = usb_endpoint_type(&ep->desc);
/*
* Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
* have no clue on scatter gather list entry size. For Isoc and Int,
* set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
*/
max_esit_payload = xhci_get_max_esit_payload(udev, ep);
interval = xhci_get_endpoint_interval(udev, ep);
/* Periodic endpoint bInterval limit quirk */
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
udev->speed >= USB_SPEED_HIGH &&
interval >= 7) {
interval = 6;
}
}
mult = xhci_get_endpoint_mult(udev, ep);
max_packet = usb_endpoint_maxp(&ep->desc);
max_burst = xhci_get_endpoint_max_burst(udev, ep);
avg_trb_len = max_esit_payload;
/* FIXME dig Mult and streams info out of ep companion desc */
/* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
err_count = 3;
/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
if (usb_endpoint_xfer_bulk(&ep->desc)) {
if (udev->speed == USB_SPEED_HIGH)
max_packet = 512;
if (udev->speed == USB_SPEED_FULL) {
max_packet = rounddown_pow_of_two(max_packet);
max_packet = clamp_val(max_packet, 8, 64);
}
}
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
mult = 0;
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
/* Fill the endpoint context */
ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
EP_INTERVAL(interval) |
EP_MULT(mult));
ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
MAX_PACKET(max_packet) |
MAX_BURST(max_burst) |
ERROR_COUNT(err_count));
ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
ep_ring->cycle_state);
ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
EP_AVG_TRB_LENGTH(avg_trb_len));
return 0;
}
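/*
 * Illustrative sketch, not part of the driver: how the dword packing in
 * xhci_endpoint_init() comes together. The DEMO_* macros are local
 * stand-ins assuming the endpoint context layout of xHCI spec 6.2.3
 * (interval in ep_info bits 23:16, EP type in ep_info2 bits 5:3, etc.).
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_EP_INTERVAL(p)	(((uint32_t)(p) & 0xff) << 16)
#define DEMO_EP_MULT(p)		(((uint32_t)(p) & 0x3) << 8)
#define DEMO_EP_TYPE(p)		(((uint32_t)(p) & 0x7) << 3)
#define DEMO_MAX_PACKET(p)	(((uint32_t)(p) & 0xffff) << 16)
#define DEMO_MAX_BURST(p)	(((uint32_t)(p) & 0xff) << 8)
#define DEMO_ERROR_COUNT(p)	(((uint32_t)(p) & 0x3) << 1)

int main(void)
{
	/* HS bulk IN (type 6): interval 0, maxp 512, burst 0, CErr 3 */
	uint32_t ep_info  = DEMO_EP_INTERVAL(0) | DEMO_EP_MULT(0);
	uint32_t ep_info2 = DEMO_EP_TYPE(6) | DEMO_MAX_PACKET(512) |
			    DEMO_MAX_BURST(0) | DEMO_ERROR_COUNT(3);

	printf("ep_info=0x%08x ep_info2=0x%08x\n", ep_info, ep_info2);
	return 0;
}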
void xhci_endpoint_zero(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_host_endpoint *ep)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->deq = 0;
ep_ctx->tx_info = 0;
/* Don't free the endpoint ring until the set interface or configuration
* request succeeds.
*/
}
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
bw_info->ep_interval = 0;
bw_info->mult = 0;
bw_info->num_packets = 0;
bw_info->max_packet_size = 0;
bw_info->type = 0;
bw_info->max_esit_payload = 0;
}
void xhci_update_bw_info(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_input_control_ctx *ctrl_ctx,
struct xhci_virt_device *virt_dev)
{
struct xhci_bw_info *bw_info;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_type;
int i;
for (i = 1; i < 31; i++) {
bw_info = &virt_dev->eps[i].bw_info;
/* We can't tell what endpoint type is being dropped, but
* unconditionally clearing the bandwidth info for non-periodic
* endpoints should be harmless because the info will never be
* set in the first place.
*/
if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
/* Dropped endpoint */
xhci_clear_endpoint_bw_info(bw_info);
continue;
}
if (EP_IS_ADDED(ctrl_ctx, i)) {
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
/* Ignore non-periodic endpoints */
if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
ep_type != ISOC_IN_EP &&
ep_type != INT_IN_EP)
continue;
/* Added or changed endpoint */
bw_info->ep_interval = CTX_TO_EP_INTERVAL(
le32_to_cpu(ep_ctx->ep_info));
/* Number of packets and mult are zero-based in the
* input context, but we want one-based for the
* interval table.
*/
bw_info->mult = CTX_TO_EP_MULT(
le32_to_cpu(ep_ctx->ep_info)) + 1;
bw_info->num_packets = CTX_TO_MAX_BURST(
le32_to_cpu(ep_ctx->ep_info2)) + 1;
bw_info->max_packet_size = MAX_PACKET_DECODED(
le32_to_cpu(ep_ctx->ep_info2));
bw_info->type = ep_type;
bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
le32_to_cpu(ep_ctx->tx_info));
}
}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
*/
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
unsigned int ep_index)
{
struct xhci_ep_ctx *out_ep_ctx;
struct xhci_ep_ctx *in_ep_ctx;
out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
in_ep_ctx->ep_info = out_ep_ctx->ep_info;
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
if (xhci->quirks & XHCI_MTK_HOST) {
in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
}
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command. Only the context entries field matters,
* but we'll copy the whole thing anyway.
*/
void xhci_slot_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx)
{
struct xhci_slot_ctx *in_slot_ctx;
struct xhci_slot_ctx *out_slot_ctx;
in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
in_slot_ctx->dev_info = out_slot_ctx->dev_info;
in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
in_slot_ctx->tt_info = out_slot_ctx->tt_info;
in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
dev_to_node(dev));
if (!xhci->scratchpad)
goto fail_sp;
xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
size_mul(sizeof(u64), num_sp),
&xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
flags, dev_to_node(dev));
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
flags);
if (!buf)
goto fail_sp4;
xhci->scratchpad->sp_array[i] = dma;
xhci->scratchpad->sp_buffers[i] = buf;
}
return 0;
fail_sp4:
while (i--)
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_array[i]);
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
fail_sp2:
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
fail_sp:
return -ENOMEM;
}
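/*
 * Illustrative sketch, not part of the driver: the "while (i--)" unwind in
 * scratchpad_alloc() is a common partial-failure cleanup pattern.
 * demo_alloc_all() is a hypothetical userspace analogue using malloc().
 */
#include <stdlib.h>

static int demo_alloc_all(void **bufs, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(sz);
		if (!bufs[i])
			goto unwind;
	}
	return 0;
unwind:
	while (i--)		/* frees only what was actually allocated */
		free(bufs[i]);
	return -1;
}

int main(void)
{
	void *bufs[4];
	int i, ret = demo_alloc_all(bufs, 4, 4096);

	if (!ret)
		for (i = 0; i < 4; i++)
			free(bufs[i]);
	return ret ? 1 : 0;
}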
static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!xhci->scratchpad)
return;
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_array[i]);
}
kfree(xhci->scratchpad->sp_buffers);
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags)
{
struct xhci_command *command;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
if (!command)
return NULL;
if (allocate_completion) {
command->completion =
kzalloc_node(sizeof(struct completion), mem_flags,
dev_to_node(dev));
if (!command->completion) {
kfree(command);
return NULL;
}
init_completion(command->completion);
}
command->status = 0;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags)
{
struct xhci_command *command;
command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
if (!command)
return NULL;
command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
mem_flags);
if (!command->in_ctx) {
kfree(command->completion);
kfree(command);
return NULL;
}
return command;
}
void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
kfree(urb_priv);
}
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
xhci_free_container_ctx(xhci,
command->in_ctx);
kfree(command->completion);
kfree(command);
}
int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_ring *evt_ring,
struct xhci_erst *erst,
gfp_t flags)
{
size_t size;
unsigned int val;
struct xhci_segment *seg;
struct xhci_erst_entry *entry;
size = size_mul(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
size, &erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
erst->num_entries = evt_ring->num_segs;
seg = evt_ring->first_seg;
for (val = 0; val < evt_ring->num_segs; val++) {
entry = &erst->entries[val];
entry->seg_addr = cpu_to_le64(seg->dma);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
seg = seg->next;
}
return 0;
}
static void
xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t erst_size;
u64 tmp64;
u32 tmp;
if (!ir)
return;
erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
if (ir->erst.entries)
dma_free_coherent(dev, erst_size,
ir->erst.entries,
ir->erst.erst_dma_addr);
ir->erst.entries = NULL;
/*
* Clean out interrupter registers except ERSTBA. Clearing either the
* low or high 32 bits of ERSTBA immediately causes the controller to
	 * dereference the partially cleared 64-bit address, causing an IOMMU error.
*/
if (ir->ir_set) {
tmp = readl(&ir->ir_set->erst_size);
tmp &= ERST_SIZE_MASK;
writel(tmp, &ir->ir_set->erst_size);
tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
tmp64 &= (u64) ERST_PTR_MASK;
xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
}
	/* free interrupter event ring */
if (ir->event_ring)
xhci_ring_free(xhci, ir->event_ring);
ir->event_ring = NULL;
kfree(ir);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int i, j, num_ports;
cancel_delayed_work_sync(&xhci->cmd_timer);
xhci_free_interrupter(xhci, xhci->interrupter);
xhci->interrupter = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
xhci_cleanup_command_queue(xhci);
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
for (i = 0; i < num_ports && xhci->rh_bw; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
struct list_head *ep = &bwt->interval_bw[j].endpoints;
while (!list_empty(ep))
list_del_init(ep->next);
}
}
for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
xhci_free_virt_devices_depth_first(xhci, i);
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
if (xhci->dcbaa)
dma_free_coherent(dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
scratchpad_free(xhci);
if (!xhci->rh_bw)
goto no_bw;
for (i = 0; i < num_ports; i++) {
struct xhci_tt_bw_info *tt, *n;
list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
list_del(&tt->tt_list);
kfree(tt);
}
}
no_bw:
xhci->cmd_ring_reserved_trbs = 0;
xhci->usb2_rhub.num_ports = 0;
xhci->usb3_rhub.num_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_rhub.ports);
kfree(xhci->usb3_rhub.ports);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
for (i = 0; i < xhci->num_port_caps; i++)
kfree(xhci->port_caps[i].psi);
kfree(xhci->port_caps);
xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
xhci->usb3_rhub.ports = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
xhci->port_caps = NULL;
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->usb2_rhub.bus_state.bus_suspended = 0;
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
ir->event_ring->dequeue);
if (!deq)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&ir->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
__le32 __iomem *addr, int max_caps)
{
u32 temp, port_offset, port_count;
int i;
u8 major_revision, minor_revision, tmp_minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_port_cap *port_cap;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
minor_revision = XHCI_EXT_PORT_MINOR(temp);
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
/*
* Some hosts incorrectly use sub-minor version for minor
* version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
* for bcdUSB 0x310). Since there is no USB release with sub
* minor version 0x301 to 0x309, we can assume that they are
* incorrect and fix it here.
*/
if (minor_revision > 0x00 && minor_revision < 0x10)
minor_revision <<= 4;
		/*
		 * Some Zhaoxin xHCI controllers follow the USB 3.1 spec
		 * but only support Gen1.
		 */
if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
tmp_minor_revision = minor_revision;
minor_revision = 0;
}
} else if (major_revision <= 0x02) {
rhub = &xhci->usb2_rhub;
} else {
xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
}
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
port_cap = &xhci->port_caps[xhci->num_port_caps++];
if (xhci->num_port_caps > max_caps)
return;
port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
if (port_cap->psi_count) {
port_cap->psi = kcalloc_node(port_cap->psi_count,
sizeof(*port_cap->psi),
GFP_KERNEL, dev_to_node(dev));
if (!port_cap->psi)
port_cap->psi_count = 0;
port_cap->psi_uid_count++;
for (i = 0; i < port_cap->psi_count; i++) {
port_cap->psi[i] = readl(addr + 4 + i);
			/* count unique ID values; two consecutive entries can
			 * have the same ID if the link is asymmetric
			 */
if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
port_cap->psi_uid_count++;
if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
major_revision == 0x03 &&
XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
minor_revision = tmp_minor_revision;
xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
XHCI_EXT_PORT_PLT(port_cap->psi[i]),
XHCI_EXT_PORT_PFD(port_cap->psi[i]),
XHCI_EXT_PORT_LP(port_cap->psi[i]),
XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
}
}
rhub->maj_rev = major_revision;
if (rhub->min_rev < minor_revision)
rhub->min_rev = minor_revision;
port_cap->maj_rev = major_revision;
port_cap->min_rev = minor_revision;
/* cache usb2 port capabilities */
if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
xhci->ext_caps[xhci->num_ext_caps++] = temp;
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
(temp & XHCI_HLC)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI 1.0: support USB2 hardware lpm");
xhci->hw_lpm_support = 1;
}
port_offset--;
for (i = port_offset; i < (port_offset + port_count); i++) {
struct xhci_port *hw_port = &xhci->hw_ports[i];
/* Duplicate entry. Ignore the port if the revisions differ. */
if (hw_port->rhub) {
xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i);
xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
hw_port->rhub->maj_rev, major_revision);
/* Only adjust the roothub port counts if we haven't
* found a similar duplicate.
*/
if (hw_port->rhub != rhub &&
hw_port->hcd_portnum != DUPLICATE_ENTRY) {
hw_port->rhub->num_ports--;
hw_port->hcd_portnum = DUPLICATE_ENTRY;
}
continue;
}
hw_port->rhub = rhub;
hw_port->port_cap = port_cap;
rhub->num_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
struct xhci_hub *rhub, gfp_t flags)
{
int port_index = 0;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!rhub->num_ports)
return;
rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
flags, dev_to_node(dev));
if (!rhub->ports)
return;
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
continue;
xhci->hw_ports[i].hcd_portnum = port_index;
rhub->ports[port_index] = &xhci->hw_ports[i];
port_index++;
if (port_index == rhub->num_ports)
break;
}
}
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to be. We can't count on the port
* speed bits in the PORTSC register being correct until a device is connected,
* but we need to set up the two fake roothubs with the correct number of USB
* 3.0 and USB 2.0 ports at host controller initialization time.
*/
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
void __iomem *base;
u32 offset;
unsigned int num_ports;
int i, j;
int cap_count = 0;
u32 cap_start;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
flags, dev_to_node(dev));
if (!xhci->hw_ports)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
NUM_PORT_REGS * i;
xhci->hw_ports[i].hw_portnum = i;
init_completion(&xhci->hw_ports[i].rexit_done);
init_completion(&xhci->hw_ports[i].u3exit_done);
}
xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
dev_to_node(dev));
if (!xhci->rh_bw)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
bw_table = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
base = &xhci->cap_regs->hc_capbase;
cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
if (!cap_start) {
xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
return -ENODEV;
}
offset = cap_start;
/* count extended protocol capability entries for later caching */
while (offset) {
cap_count++;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
}
xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
flags, dev_to_node(dev));
if (!xhci->ext_caps)
return -ENOMEM;
xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
flags, dev_to_node(dev));
if (!xhci->port_caps)
return -ENOMEM;
offset = cap_start;
while (offset) {
xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
num_ports)
break;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
}
if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 3.0 roothub ports to %u.",
USB_SS_MAXPORTS);
xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
}
if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
if (!xhci->usb2_rhub.num_ports)
xhci_info(xhci, "USB2 root hub has no ports\n");
if (!xhci->usb3_rhub.num_ports)
xhci_info(xhci, "USB3 root hub has no ports\n");
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
return 0;
}
static struct xhci_interrupter *
xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
int ret;
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
return NULL;
}
ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
if (ret) {
xhci_warn(xhci, "Failed to allocate interrupter erst\n");
xhci_ring_free(xhci, ir->event_ring);
kfree(ir);
return NULL;
}
return ir;
}
static int
xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
unsigned int intr_num)
{
u64 erst_base;
u32 erst_size;
if (intr_num > xhci->max_interrupters) {
xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
intr_num, xhci->max_interrupters);
return -EINVAL;
}
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
erst_size &= ERST_SIZE_MASK;
erst_size |= ERST_NUM_SEGS;
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
erst_base &= ERST_PTR_MASK;
erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
/* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir);
return 0;
}
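/*
 * Illustrative sketch, not part of the driver: registers with "reserved,
 * preserve" (RsvdP) fields must be updated read-modify-write, as
 * xhci_add_interrupter() does for ERSTSZ. demo_rmw_field() is a
 * hypothetical helper showing the pattern on a plain variable.
 */
#include <stdio.h>
#include <stdint.h>

/* replace the bits covered by field_mask, preserve everything else */
static uint32_t demo_rmw_field(uint32_t reg, uint32_t field_mask,
			       uint32_t new_field)
{
	return (reg & ~field_mask) | (new_field & field_mask);
}

int main(void)
{
	uint32_t reg = 0xabcd0007;	/* RsvdP upper bits, old size 7 */

	reg = demo_rmw_field(reg, 0x0000ffff, 1);
	printf("reg=0x%08x\n", reg);	/* prints 0xabcd0001 */
	return 0;
}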
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
int i;
INIT_LIST_HEAD(&xhci->cmd_list);
/* init command timeout work */
INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
init_completion(&xhci->cmd_ring_stop_completion);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
	i = ffs(page_size);	/* ffs() is 1-based: bit n set -> returns n + 1 */
	if (i && i < 17)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i + 11)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// xHC can handle at most %d device slots.", val);
val2 = readl(&xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting Max device slots reg = 0x%x.", val);
writel(val, &xhci->op_regs->config_reg);
/*
* xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
flags);
if (!xhci->dcbaa)
goto fail;
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%pad (DMA), %p (virt)",
&xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2112, 64, xhci->page_size);
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
/* Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
xhci->small_streams_pool =
dma_pool_create("xHCI 256 byte stream ctx arrays",
dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
xhci->medium_streams_pool =
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
* will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
	/* Set up the command ring to have one segment for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocated command ring at %p", xhci->cmd_ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
&xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%016llx", val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
/* Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
val = readl(&xhci->cap_regs->db_off);
val &= DBOFF_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Doorbell array is located at offset 0x%x from cap regs base addr",
val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
/* Allocate and set up primary interrupter 0 with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating primary event ring");
xhci->interrupter = xhci_alloc_interrupter(xhci, flags);
if (!xhci->interrupter)
goto fail;
if (xhci_add_interrupter(xhci, xhci->interrupter, 0))
goto fail;
xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
for (i = 0; i < MAX_HC_SLOTS; i++)
xhci->devs[i] = NULL;
if (scratchpad_alloc(xhci, flags))
goto fail;
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
*/
temp = readl(&xhci->op_regs->dev_notification);
temp &= ~DEV_NOTE_MASK;
temp |= DEV_NOTE_FWAKE;
writel(temp, &xhci->op_regs->dev_notification);
return 0;
fail:
xhci_halt(xhci);
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
/* end of drivers/usb/host/xhci-mem.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Alan Stern <[email protected]>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, [email protected]
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, [email protected]
* (C) Copyright 1999 Deti Fliegl, [email protected]
* (C) Copyright 1999 Thomas Sailer, [email protected]
* (C) Copyright 2004 Alan Stern, [email protected]
*/
static const __u8 root_hub_hub_des[] =
{
0x09, /* __u8 bLength; */
USB_DT_HUB, /* __u8 bDescriptorType; Hub-descriptor */
0x02, /* __u8 bNbrPorts; */
HUB_CHAR_NO_LPSM | /* __u16 wHubCharacteristics; */
HUB_CHAR_INDV_PORT_OCPM, /* (per-port OC, no power switching) */
0x00,
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
0x00, /* __u8 DeviceRemovable; *** 7 Ports max */
0xff /* __u8 PortPwrCtrlMask; *** 7 ports max */
};
#define UHCI_RH_MAXCHILD 7
/* must write as zeroes */
#define WZ_BITS (USBPORTSC_RES2 | USBPORTSC_RES3 | USBPORTSC_RES4)
/* status change bits: nonzero writes will clear */
#define RWC_BITS (USBPORTSC_OCC | USBPORTSC_PEC | USBPORTSC_CSC)
/* suspend/resume bits: port suspended or port resuming */
#define SUSPEND_BITS (USBPORTSC_SUSP | USBPORTSC_RD)
/* A port that either is connected or has a changed-bit set will prevent
* us from AUTO_STOPPING.
*/
static int any_ports_active(struct uhci_hcd *uhci)
{
int port;
for (port = 0; port < uhci->rh_numports; ++port) {
if ((uhci_readw(uhci, USBPORTSC1 + port * 2) &
(USBPORTSC_CCS | RWC_BITS)) ||
test_bit(port, &uhci->port_c_suspend))
return 1;
}
return 0;
}
static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
{
int port;
int mask = RWC_BITS;
/* Some boards (both VIA and Intel apparently) report bogus
* overcurrent indications, causing massive log spam unless
* we completely ignore them. This doesn't seem to be a problem
* with the chipset so much as with the way it is connected on
* the motherboard; if the overcurrent input is left to float
* then it may constantly register false positives. */
if (ignore_oc)
mask &= ~USBPORTSC_OCC;
*buf = 0;
for (port = 0; port < uhci->rh_numports; ++port) {
if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & mask) ||
test_bit(port, &uhci->port_c_suspend))
*buf |= (1 << (port + 1));
}
return !!*buf;
}
#define CLR_RH_PORTSTAT(x) \
status = uhci_readw(uhci, port_addr); \
status &= ~(RWC_BITS|WZ_BITS); \
status &= ~(x); \
status |= RWC_BITS & (x); \
uhci_writew(uhci, status, port_addr)
#define SET_RH_PORTSTAT(x) \
status = uhci_readw(uhci, port_addr); \
status |= (x); \
status &= ~(RWC_BITS|WZ_BITS); \
uhci_writew(uhci, status, port_addr)
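/*
 * Illustrative sketch, not part of the driver: the masking in the
 * CLR_/SET_RH_PORTSTAT macros exists because the status-change bits are
 * "write 1 to clear" (RW1C). demo_portsc_write() is a hypothetical model
 * of such a register: change bits written as 1 clear to 0, all other bits
 * are stored as written.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_RWC_BITS	0x000a	/* pretend bits 1 and 3 are RW1C */

static uint16_t demo_portsc_write(uint16_t reg, uint16_t val)
{
	uint16_t rwc = reg & DEMO_RWC_BITS;

	rwc &= ~(val & DEMO_RWC_BITS);		/* writing 1 clears */
	return (val & ~DEMO_RWC_BITS) | rwc;
}

int main(void)
{
	uint16_t reg = 0x000b;	/* both change bits set, plus bit 0 */

	/* writing back the raw read value would wipe the change bits: */
	printf("0x%04x\n", demo_portsc_write(reg, reg));		  /* 0x0001 */
	/* masking the RWC bits first, as the macros do, preserves them: */
	printf("0x%04x\n", demo_portsc_write(reg, reg & ~DEMO_RWC_BITS)); /* 0x000b */
	return 0;
}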
/* UHCI controllers don't automatically stop resume signalling after 20 msec,
* so we have to poll and check timeouts in order to take care of it.
*/
static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
unsigned long port_addr)
{
int status;
int i;
if (uhci_readw(uhci, port_addr) & SUSPEND_BITS) {
CLR_RH_PORTSTAT(SUSPEND_BITS);
if (test_bit(port, &uhci->resuming_ports))
set_bit(port, &uhci->port_c_suspend);
/* The controller won't actually turn off the RD bit until
* it has had a chance to send a low-speed EOP sequence,
* which is supposed to take 3 bit times (= 2 microseconds).
* Experiments show that some controllers take longer, so
* we'll poll for completion. */
for (i = 0; i < 10; ++i) {
if (!(uhci_readw(uhci, port_addr) & SUSPEND_BITS))
break;
udelay(1);
}
}
clear_bit(port, &uhci->resuming_ports);
usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
}
/* Wait for the UHCI controller in HP's iLO2 server management chip.
* It can take up to 250 us to finish a reset and set the CSC bit.
*/
static void wait_for_HP(struct uhci_hcd *uhci, unsigned long port_addr)
{
int i;
for (i = 10; i < 250; i += 10) {
if (uhci_readw(uhci, port_addr) & USBPORTSC_CSC)
return;
udelay(10);
}
/* Log a warning? */
}
static void uhci_check_ports(struct uhci_hcd *uhci)
{
unsigned int port;
unsigned long port_addr;
int status;
for (port = 0; port < uhci->rh_numports; ++port) {
port_addr = USBPORTSC1 + 2 * port;
status = uhci_readw(uhci, port_addr);
if (unlikely(status & USBPORTSC_PR)) {
if (time_after_eq(jiffies, uhci->ports_timeout)) {
CLR_RH_PORTSTAT(USBPORTSC_PR);
udelay(10);
/* HP's server management chip requires
* a longer delay. */
if (uhci->wait_for_hp)
wait_for_HP(uhci, port_addr);
/* If the port was enabled before, turning
* reset on caused a port enable change.
* Turning reset off causes a port connect
* status change. Clear these changes. */
CLR_RH_PORTSTAT(USBPORTSC_CSC | USBPORTSC_PEC);
SET_RH_PORTSTAT(USBPORTSC_PE);
}
}
if (unlikely(status & USBPORTSC_RD)) {
if (!test_bit(port, &uhci->resuming_ports)) {
/* Port received a wakeup request */
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
usb_hcd_start_port_resume(
&uhci_to_hcd(uhci)->self, port);
/* Make sure we see the port again
* after the resuming period is over. */
mod_timer(&uhci_to_hcd(uhci)->rh_timer,
uhci->ports_timeout);
} else if (time_after_eq(jiffies,
uhci->ports_timeout)) {
uhci_finish_suspend(uhci, port, port_addr);
}
}
}
}
static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
int status = 0;
spin_lock_irqsave(&uhci->lock, flags);
uhci_scan_schedule(uhci);
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done;
uhci_check_ports(uhci);
status = get_hub_status_data(uhci, buf);
switch (uhci->rh_state) {
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
if (status || uhci->resuming_ports) {
status = 1;
usb_hcd_resume_root_hub(hcd);
}
break;
case UHCI_RH_AUTO_STOPPED:
/* if port change, auto start */
if (status)
wakeup_rh(uhci);
break;
case UHCI_RH_RUNNING:
/* are any devices attached? */
if (!any_ports_active(uhci)) {
uhci->rh_state = UHCI_RH_RUNNING_NODEVS;
uhci->auto_stop_time = jiffies + HZ;
}
break;
case UHCI_RH_RUNNING_NODEVS:
/* auto-stop if nothing connected for 1 second */
if (any_ports_active(uhci))
uhci->rh_state = UHCI_RH_RUNNING;
else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
!uhci->wait_for_hp)
suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
break;
default:
break;
}
done:
spin_unlock_irqrestore(&uhci->lock, flags);
return status;
}
/* size of returned buffer is part of USB spec */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int status, lstatus, retval = 0;
unsigned int port = wIndex - 1;
unsigned long port_addr = USBPORTSC1 + 2 * port;
u16 wPortChange, wPortStatus;
unsigned long flags;
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
return -ETIMEDOUT;
spin_lock_irqsave(&uhci->lock, flags);
switch (typeReq) {
case GetHubStatus:
*(__le32 *)buf = cpu_to_le32(0);
retval = 4; /* hub power */
break;
case GetPortStatus:
if (port >= uhci->rh_numports)
goto err;
uhci_check_ports(uhci);
status = uhci_readw(uhci, port_addr);
		/* Intel controllers report the OverCurrent bit active-high.
		 * VIA controllers report it active-low, so we'll adjust the
		 * bit value.  (It's not standardized in the UHCI spec.)
		 */
if (uhci->oc_low)
status ^= USBPORTSC_OC;
/* UHCI doesn't support C_RESET (always false) */
wPortChange = lstatus = 0;
if (status & USBPORTSC_CSC)
wPortChange |= USB_PORT_STAT_C_CONNECTION;
if (status & USBPORTSC_PEC)
wPortChange |= USB_PORT_STAT_C_ENABLE;
if ((status & USBPORTSC_OCC) && !ignore_oc)
wPortChange |= USB_PORT_STAT_C_OVERCURRENT;
if (test_bit(port, &uhci->port_c_suspend)) {
wPortChange |= USB_PORT_STAT_C_SUSPEND;
lstatus |= 1;
}
if (test_bit(port, &uhci->resuming_ports))
lstatus |= 4;
/* UHCI has no power switching (always on) */
wPortStatus = USB_PORT_STAT_POWER;
if (status & USBPORTSC_CCS)
wPortStatus |= USB_PORT_STAT_CONNECTION;
if (status & USBPORTSC_PE) {
wPortStatus |= USB_PORT_STAT_ENABLE;
if (status & SUSPEND_BITS)
wPortStatus |= USB_PORT_STAT_SUSPEND;
}
if (status & USBPORTSC_OC)
wPortStatus |= USB_PORT_STAT_OVERCURRENT;
if (status & USBPORTSC_PR)
wPortStatus |= USB_PORT_STAT_RESET;
if (status & USBPORTSC_LSDA)
wPortStatus |= USB_PORT_STAT_LOW_SPEED;
if (wPortChange)
dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
wIndex, status, lstatus);
*(__le16 *)buf = cpu_to_le16(wPortStatus);
*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
retval = 4;
break;
case SetHubFeature: /* We don't implement these */
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
break;
default:
goto err;
}
break;
case SetPortFeature:
if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
SET_RH_PORTSTAT(USBPORTSC_SUSP);
break;
case USB_PORT_FEAT_RESET:
SET_RH_PORTSTAT(USBPORTSC_PR);
/* Reset terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
/* USB v2.0 7.1.7.5 */
uhci->ports_timeout = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
break;
default:
goto err;
}
break;
case ClearPortFeature:
if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PE);
/* Disable terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
break;
case USB_PORT_FEAT_C_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PEC);
break;
case USB_PORT_FEAT_SUSPEND:
if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {
/* Make certain the port isn't suspended */
uhci_finish_suspend(uhci, port, port_addr);
} else if (!test_and_set_bit(port,
&uhci->resuming_ports)) {
SET_RH_PORTSTAT(USBPORTSC_RD);
/* The controller won't allow RD to be set
* if the port is disabled. When this happens
* just skip the Resume signalling.
*/
if (!(uhci_readw(uhci, port_addr) &
USBPORTSC_RD))
uhci_finish_suspend(uhci, port,
port_addr);
else
/* USB v2.0 7.1.7.7 */
uhci->ports_timeout = jiffies +
msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(port, &uhci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
goto err;
case USB_PORT_FEAT_C_CONNECTION:
CLR_RH_PORTSTAT(USBPORTSC_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
CLR_RH_PORTSTAT(USBPORTSC_OCC);
break;
case USB_PORT_FEAT_C_RESET:
/* this driver won't report these */
break;
default:
goto err;
}
break;
case GetHubDescriptor:
retval = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
memcpy(buf, root_hub_hub_des, retval);
if (retval > 2)
buf[2] = uhci->rh_numports;
break;
default:
err:
retval = -EPIPE;
}
spin_unlock_irqrestore(&uhci->lock, flags);
return retval;
}
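/*
 * Illustrative sketch, not part of the driver: GetPortStatus returns four
 * bytes, wPortStatus followed by wPortChange, both little endian, which is
 * exactly what the two cpu_to_le16() stores above produce. A hypothetical
 * host-side decode:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* connected + enabled + powered, with a connect change pending */
	uint8_t buf[4] = { 0x03, 0x01, 0x01, 0x00 };
	uint16_t wPortStatus = buf[0] | (buf[1] << 8);	/* 0x0103 */
	uint16_t wPortChange = buf[2] | (buf[3] << 8);	/* 0x0001 */

	printf("status=0x%04x change=0x%04x\n", wPortStatus, wPortChange);
	return 0;
}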
/* end of drivers/usb/host/uhci-hub.c (linux-master) */
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* Copyright (C) 2004 SAN People (Pty) Ltd.
* Copyright (C) 2005 Thibaut VARENE <[email protected]>
*
* AT91 Bus Glue
*
* Based on fragments of 2.4 driver by Rick Bronson.
* Based on ohci-omap.c
*
* This file is licenced under the GPL.
*/
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <soc/at91/atmel-sfr.h>
#include "ohci.h"
#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
#define at91_for_each_port(index) \
for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
/* interface, function and usb clocks; sometimes also an AHB clock */
#define hcd_to_ohci_at91_priv(h) \
((struct ohci_at91_priv *)hcd_to_ohci(h)->priv)
#define AT91_MAX_USBH_PORTS 3
struct at91_usbh_data {
struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS];
struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
u8 ports; /* number of ports on root hub */
u8 overcurrent_supported;
u8 overcurrent_status[AT91_MAX_USBH_PORTS];
u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
};
struct ohci_at91_priv {
struct clk *iclk;
struct clk *fclk;
struct clk *hclk;
bool clocked;
bool wakeup; /* Saved wake-up state for resume */
struct regmap *sfr_regmap;
u32 suspend_smc_id;
};
/* interface and function clocks; sometimes also an AHB clock */
#define DRIVER_DESC "OHCI Atmel driver"
static struct hc_driver __read_mostly ohci_at91_hc_driver;
static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = {
.extra_priv_size = sizeof(struct ohci_at91_priv),
};
/*-------------------------------------------------------------------------*/
static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
{
if (ohci_at91->clocked)
return;
clk_set_rate(ohci_at91->fclk, 48000000);
clk_prepare_enable(ohci_at91->hclk);
clk_prepare_enable(ohci_at91->iclk);
clk_prepare_enable(ohci_at91->fclk);
ohci_at91->clocked = true;
}
static void at91_stop_clock(struct ohci_at91_priv *ohci_at91)
{
if (!ohci_at91->clocked)
return;
clk_disable_unprepare(ohci_at91->fclk);
clk_disable_unprepare(ohci_at91->iclk);
clk_disable_unprepare(ohci_at91->hclk);
ohci_at91->clocked = false;
}
static void at91_start_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_regs __iomem *regs = hcd->regs;
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "start\n");
/*
* Start the USB clocks.
*/
at91_start_clock(ohci_at91);
/*
* The USB host controller must remain in reset.
*/
	writel(0, &regs->control);
}
static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
/*
* Put the USB host controller into reset.
*/
usb_hcd_platform_shutdown(pdev);
/*
* Stop the USB clocks.
*/
at91_stop_clock(ohci_at91);
}
/*-------------------------------------------------------------------------*/
static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
static u32 at91_dt_suspend_smc(struct device *dev)
{
u32 suspend_smc_id;
if (!dev->of_node)
return 0;
if (of_property_read_u32(dev->of_node, "microchip,suspend-smc-id", &suspend_smc_id))
return 0;
return suspend_smc_id;
}
static struct regmap *at91_dt_syscon_sfr(void)
{
struct regmap *regmap;
regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
if (IS_ERR(regmap)) {
regmap = syscon_regmap_lookup_by_compatible("microchip,sam9x60-sfr");
if (IS_ERR(regmap))
regmap = NULL;
}
return regmap;
}
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/*
* usb_hcd_at91_probe - initialize AT91-based HCDs
* @driver: Pointer to hc driver instance
* @pdev: USB controller to probe
*
* Context: task context, might sleep
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
struct at91_usbh_data *board;
struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd;
struct ohci_at91_priv *ohci_at91;
struct device *dev = &pdev->dev;
struct resource *res;
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
ohci_at91 = hcd_to_ohci_at91_priv(hcd);
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
ohci_at91->iclk = devm_clk_get(dev, "ohci_clk");
if (IS_ERR(ohci_at91->iclk)) {
dev_err(dev, "failed to get ohci_clk\n");
retval = PTR_ERR(ohci_at91->iclk);
goto err;
}
ohci_at91->fclk = devm_clk_get(dev, "uhpck");
if (IS_ERR(ohci_at91->fclk)) {
dev_err(dev, "failed to get uhpck\n");
retval = PTR_ERR(ohci_at91->fclk);
goto err;
}
ohci_at91->hclk = devm_clk_get(dev, "hclk");
if (IS_ERR(ohci_at91->hclk)) {
dev_err(dev, "failed to get hclk\n");
retval = PTR_ERR(ohci_at91->hclk);
goto err;
}
ohci_at91->suspend_smc_id = at91_dt_suspend_smc(dev);
if (!ohci_at91->suspend_smc_id) {
dev_dbg(dev, "failed to find sfr suspend smc id, using regmap\n");
ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
if (!ohci_at91->sfr_regmap)
dev_dbg(dev, "failed to find sfr node\n");
}
board = hcd->self.controller->platform_data;
ohci = hcd_to_ohci(hcd);
ohci->num_ports = board->ports;
at91_start_hc(pdev);
/*
* The RemoteWakeupConnected bit has to be set explicitly
* before calling ohci_run. The reset value of this bit is 0.
*/
ohci->hc_control = OHCI_CTRL_RWC;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
return retval;
}
/* Error handling */
at91_stop_hc(pdev);
err:
usb_put_hcd(hcd);
return retval;
}
/* may be called with controller, bus, and devices active */
/*
* usb_hcd_at91_remove - shutdown processing for AT91-based HCDs
* @hcd: USB controller to remove
* @pdev: Platform device required for cleanup
*
* Context: task context, might sleep
*
* Reverses the effect of usb_hcd_at91_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, "rmmod" or something similar.
*/
static void usb_hcd_at91_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
usb_remove_hcd(hcd);
at91_stop_hc(pdev);
usb_put_hcd(hcd);
}
/*-------------------------------------------------------------------------*/
static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
{
if (!valid_port(port))
return;
gpiod_set_value(pdata->vbus_pin[port], enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
{
if (!valid_port(port))
return -EINVAL;
return gpiod_get_value(pdata->vbus_pin[port]);
}
/*
* Update the status data from the hub with the over-current indicator change.
*/
static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
int length = ohci_hub_status_data(hcd, buf);
int port;
at91_for_each_port(port) {
if (pdata->overcurrent_changed[port]) {
if (!length)
length = 1;
buf[0] |= 1 << (port + 1);
}
}
return length;
}
static int ohci_at91_port_suspend(struct ohci_at91_priv *ohci_at91, u8 set)
{
struct regmap *regmap = ohci_at91->sfr_regmap;
u32 regval;
int ret;
if (ohci_at91->suspend_smc_id) {
struct arm_smccc_res res;
arm_smccc_smc(ohci_at91->suspend_smc_id, set, 0, 0, 0, 0, 0, 0, &res);
if (res.a0)
return -EINVAL;
} else if (regmap) {
		ret = regmap_read(regmap, AT91_SFR_OHCIICR, &regval);
if (ret)
return ret;
if (set)
regval |= AT91_OHCIICR_USB_SUSPEND;
else
regval &= ~AT91_OHCIICR_USB_SUSPEND;
regmap_write(regmap, AT91_SFR_OHCIICR, regval);
}
return 0;
}
/*
* Look at the control requests to the root hub and see if we need to override.
*/
static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
struct usb_hub_descriptor *desc;
int ret = -EINVAL;
u32 *data = (u32 *)buf;
dev_dbg(hcd->self.controller,
"ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
hcd, typeReq, wValue, wIndex, buf, wLength);
wIndex--;
switch (typeReq) {
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
if (valid_port(wIndex)) {
ohci_at91_usb_set_power(pdata, wIndex, 1);
ret = 0;
}
goto out;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
if (valid_port(wIndex)) {
ohci_at91_port_suspend(ohci_at91, 1);
return 0;
}
break;
}
break;
case ClearPortFeature:
switch (wValue) {
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(hcd->self.controller,
"ClearPortFeature: C_OVER_CURRENT\n");
if (valid_port(wIndex)) {
pdata->overcurrent_changed[wIndex] = 0;
pdata->overcurrent_status[wIndex] = 0;
}
goto out;
case USB_PORT_FEAT_OVER_CURRENT:
dev_dbg(hcd->self.controller,
"ClearPortFeature: OVER_CURRENT\n");
if (valid_port(wIndex))
pdata->overcurrent_status[wIndex] = 0;
goto out;
case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller,
"ClearPortFeature: POWER\n");
if (valid_port(wIndex)) {
ohci_at91_usb_set_power(pdata, wIndex, 0);
return 0;
}
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
if (valid_port(wIndex)) {
ohci_at91_port_suspend(ohci_at91, 0);
return 0;
}
break;
}
break;
}
ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
if (ret)
goto out;
switch (typeReq) {
case GetHubDescriptor:
/* update the hub's descriptor */
desc = (struct usb_hub_descriptor *)buf;
dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
desc->wHubCharacteristics);
/* remove the old configurations for power-switching, and
* over-current protection, and insert our new configuration
*/
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
desc->wHubCharacteristics |=
cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM);
if (pdata->overcurrent_supported) {
desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
desc->wHubCharacteristics |=
cpu_to_le16(HUB_CHAR_INDV_PORT_OCPM);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
desc->wHubCharacteristics);
return ret;
case GetPortStatus:
/* check port status */
dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
if (valid_port(wIndex)) {
if (!ohci_at91_usb_get_power(pdata, wIndex))
*data &= ~cpu_to_le32(RH_PS_PPS);
if (pdata->overcurrent_changed[wIndex])
*data |= cpu_to_le32(RH_PS_OCIC);
if (pdata->overcurrent_status[wIndex])
*data |= cpu_to_le32(RH_PS_POCI);
}
}
out:
return ret;
}
/*-------------------------------------------------------------------------*/
static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
{
struct platform_device *pdev = data;
struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int val, port;
/* From the GPIO notifying the over-current situation, find
 * out the corresponding port */
at91_for_each_port(port) {
if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq)
break;
}
if (port == AT91_MAX_USBH_PORTS) {
dev_err(&pdev->dev, "overcurrent interrupt from unknown GPIO\n");
return IRQ_HANDLED;
}
val = gpiod_get_value(pdata->overcurrent_pin[port]);
/* When notified of an over-current situation, disable power
 * on the corresponding port, and mark this port in
 * over-current. */
if (!val) {
ohci_at91_usb_set_power(pdata, port, 0);
pdata->overcurrent_status[port] = 1;
pdata->overcurrent_changed[port] = 1;
}
dev_dbg(&pdev->dev, "overcurrent situation %s\n",
val ? "exited" : "notified");
return IRQ_HANDLED;
}
static const struct of_device_id at91_ohci_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-ohci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
/*-------------------------------------------------------------------------*/
static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct at91_usbh_data *pdata;
int i;
int ret;
int err;
u32 ports;
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdev->dev.platform_data = pdata;
if (!of_property_read_u32(np, "num-ports", &ports))
pdata->ports = ports;
at91_for_each_port(i) {
if (i >= pdata->ports)
break;
pdata->vbus_pin[i] =
devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
i, GPIOD_OUT_HIGH);
if (IS_ERR(pdata->vbus_pin[i])) {
err = PTR_ERR(pdata->vbus_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
continue;
}
}
at91_for_each_port(i) {
if (i >= pdata->ports)
break;
pdata->overcurrent_pin[i] =
devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
i, GPIOD_IN);
if (!pdata->overcurrent_pin[i])
continue;
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
continue;
}
ret = devm_request_irq(&pdev->dev,
gpiod_to_irq(pdata->overcurrent_pin[i]),
ohci_hcd_at91_overcurrent_irq,
IRQF_SHARED,
"ohci_overcurrent", pdev);
if (ret)
dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n");
}
device_init_wakeup(&pdev->dev, 1);
return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
}
static void ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int i;
if (pdata) {
at91_for_each_port(i)
ohci_at91_usb_set_power(pdata, i, 0);
}
device_init_wakeup(&pdev->dev, 0);
usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev);
}
static int __maybe_unused
ohci_hcd_at91_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
int ret;
/*
* Disable wakeup if we are going to sleep with slow clock mode
* enabled.
*/
ohci_at91->wakeup = device_may_wakeup(dev)
&& !at91_suspend_entering_slow_clock();
if (ohci_at91->wakeup)
enable_irq_wake(hcd->irq);
ret = ohci_suspend(hcd, ohci_at91->wakeup);
if (ret) {
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
return ret;
}
/*
* The integrated transceivers seem unable to notice disconnect,
* reconnect, or wakeup without the 48 MHz clock active. So for
* correctness, always discard connection state (using reset).
*
* REVISIT: some boards will be able to turn VBUS off...
*/
if (!ohci_at91->wakeup) {
ohci->rh_state = OHCI_RH_HALTED;
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(1);
ohci_at91_port_suspend(ohci_at91, 1);
at91_stop_clock(ohci_at91);
} else {
ohci_at91_port_suspend(ohci_at91, 1);
}
return ret;
}
static int __maybe_unused
ohci_hcd_at91_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
ohci_at91_port_suspend(ohci_at91, 0);
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
else
at91_start_clock(ohci_at91);
/*
* According to the comment in ohci_hcd_at91_drv_suspend()
* we need to do a reset if the 48 MHz clock was stopped,
* that is, if ohci_at91->wakeup is clear. Tell ohci_resume()
* to reset in this case by setting its "hibernated" flag.
*/
ohci_resume(hcd, !ohci_at91->wakeup);
return 0;
}
static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
ohci_hcd_at91_drv_resume);
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
.remove_new = ohci_hcd_at91_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "at91_ohci",
.pm = &ohci_hcd_at91_pm_ops,
.of_match_table = at91_ohci_dt_ids,
},
};
static int __init ohci_at91_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_at91_hc_driver, &ohci_at91_drv_overrides);
/*
* The Atmel HW has some unusual quirks, which require Atmel-specific
* workarounds. We override certain hc_driver functions here to
* achieve that. We explicitly do not enhance ohci_driver_overrides to
* allow this more easily, since this is an unusual case, and we don't
* want to encourage others to override these functions by making it
* too easy.
*/
ohci_at91_hc_driver.hub_status_data = ohci_at91_hub_status_data;
ohci_at91_hc_driver.hub_control = ohci_at91_hub_control;
return platform_driver_register(&ohci_hcd_at91_driver);
}
module_init(ohci_at91_init);
static void __exit ohci_at91_cleanup(void)
{
platform_driver_unregister(&ohci_hcd_at91_driver);
}
module_exit(ohci_at91_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_ohci");
| linux-master | drivers/usb/host/ohci-at91.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
* Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "fhci.h"
void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
{
int i;
if (usb_er == -1) {
fhci->usb_irq_stat[12]++;
return;
}
for (i = 0; i < 12; ++i) {
if (usb_er & (1 << i))
fhci->usb_irq_stat[i]++;
}
}
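/*
 * Index 12 -- the slot bumped for usb_er == -1 above -- is reported as
 * "IDLE_ONLY" by fhci_dfs_irq_stat_show() below; indices 0..11 count
 * the individual event bits of the USB event register.
 */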
static int fhci_dfs_regs_show(struct seq_file *s, void *v)
{
struct fhci_hcd *fhci = s->private;
struct qe_usb_ctlr __iomem *regs = fhci->regs;
seq_printf(s,
"mode: 0x%x\n" "addr: 0x%x\n"
"command: 0x%x\n" "ep0: 0x%x\n"
"event: 0x%x\n" "mask: 0x%x\n"
"status: 0x%x\n" "SOF timer: %d\n"
"frame number: %d\n"
"lines status: 0x%x\n",
in_8(&regs->usb_usmod), in_8(&regs->usb_usadr),
in_8(&regs->usb_uscom), in_be16(&regs->usb_usep[0]),
in_be16(&regs->usb_usber), in_be16(&regs->usb_usbmr),
in_8(&regs->usb_usbs), in_be16(&regs->usb_ussft),
in_be16(&regs->usb_usfrn),
fhci_ioports_check_bus_state(fhci));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fhci_dfs_regs);
static int fhci_dfs_irq_stat_show(struct seq_file *s, void *v)
{
struct fhci_hcd *fhci = s->private;
int *usb_irq_stat = fhci->usb_irq_stat;
seq_printf(s,
"RXB: %d\n" "TXB: %d\n" "BSY: %d\n"
"SOF: %d\n" "TXE0: %d\n" "TXE1: %d\n"
"TXE2: %d\n" "TXE3: %d\n" "IDLE: %d\n"
"RESET: %d\n" "SFT: %d\n" "MSF: %d\n"
"IDLE_ONLY: %d\n",
usb_irq_stat[0], usb_irq_stat[1], usb_irq_stat[2],
usb_irq_stat[3], usb_irq_stat[4], usb_irq_stat[5],
usb_irq_stat[6], usb_irq_stat[7], usb_irq_stat[8],
usb_irq_stat[9], usb_irq_stat[10], usb_irq_stat[11],
usb_irq_stat[12]);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fhci_dfs_irq_stat);
void fhci_dfs_create(struct fhci_hcd *fhci)
{
struct device *dev = fhci_to_hcd(fhci)->self.controller;
fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
debugfs_create_file("regs", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
&fhci_dfs_regs_fops);
debugfs_create_file("irq_stat", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
&fhci_dfs_irq_stat_fops);
}
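/*
 * The two files created above land under the shared USB debugfs root,
 * e.g. (illustrative paths, assuming debugfs is mounted in the usual
 * place and the controller device is named "e0000000.usb"):
 *
 *	/sys/kernel/debug/usb/e0000000.usb/regs
 *	/sys/kernel/debug/usb/e0000000.usb/irq_stat
 */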
void fhci_dfs_destroy(struct fhci_hcd *fhci)
{
debugfs_remove_recursive(fhci->dfs_root);
}
| linux-master | drivers/usb/host/fhci-dbg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver PCI Bus Glue.
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/reset.h>
#include <linux/suspend.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-pci.h"
#define SSIC_PORT_NUM 2
#define SSIC_PORT_CFG2 0x880c
#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
#define SPARSE_DISABLE_BIT 17
#define SPARSE_CNTL_ENABLE 0xC12C
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
#define PCI_VENDOR_ID_ETRON 0x1b6f
#define PCI_DEVICE_ID_EJ168 0x7023
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI 0x15c1
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
static int xhci_pci_setup(struct usb_hcd *hcd);
static int xhci_pci_run(struct usb_hcd *hcd);
static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.reset = xhci_pci_setup,
.start = xhci_pci_run,
.update_hub_device = xhci_pci_update_hub_device,
};
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
if (hcd->msix_enabled) {
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int i;
for (i = 0; i < xhci->msix_count; i++)
synchronize_irq(pci_irq_vector(pdev, i));
}
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
/* return if using legacy interrupt */
if (hcd->irq > 0)
return;
if (hcd->msix_enabled) {
int i;
for (i = 0; i < xhci->msix_count; i++)
free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
} else {
free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
}
pci_free_irq_vectors(pdev);
hcd->msix_enabled = 0;
}
/*
* Set up MSI
*/
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
int ret;
/*
* TODO: Check with MSI SoC for sysdev
*/
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret < 0) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"failed to allocate MSI entry");
return ret;
}
ret = request_irq(pdev->irq, xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"disable MSI interrupt");
pci_free_irq_vectors(pdev);
}
return ret;
}
/*
* Set up MSI-X
*/
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
int i, ret;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
/*
 * Calculate the number of MSI-X vectors supported:
 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
 *   handle, based on the number of interrupters in xHCI HCSPARAMS1.
 * - num_online_cpus(): one MSI-X vector per online CPU core.
 * One extra vector is added so that an interrupt is always available.
 */
xhci->msix_count = min(num_online_cpus() + 1,
HCS_MAX_INTRS(xhci->hcs_params1));
ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
PCI_IRQ_MSIX);
if (ret < 0) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Failed to enable MSI-X");
return ret;
}
for (i = 0; i < xhci->msix_count; i++) {
ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
"xhci_hcd", xhci_to_hcd(xhci));
if (ret)
goto disable_msix;
}
hcd->msix_enabled = 1;
return ret;
disable_msix:
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
pci_free_irq_vectors(pdev);
return ret;
}
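/*
 * Worked example for the msix_count calculation in xhci_setup_msix()
 * (illustrative numbers): on a 4-CPU system whose HCSPARAMS1 advertises
 * 8 interrupters, msix_count = min(4 + 1, 8) = 5 vectors.
 */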
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev;
int ret;
pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
/*
* Some Fresco Logic host controllers advertise MSI, but fail to
* generate interrupts. Don't even try to enable MSI.
*/
if (xhci->quirks & XHCI_BROKEN_MSI)
goto legacy_irq;
/* unregister the legacy interrupt */
if (hcd->irq)
free_irq(hcd->irq, hcd);
hcd->irq = 0;
ret = xhci_setup_msix(xhci);
if (ret)
/* fall back to MSI */
ret = xhci_setup_msi(xhci);
if (!ret) {
hcd->msi_enabled = 1;
return 0;
}
if (!pdev->irq) {
xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
return -EINVAL;
}
legacy_irq:
if (!strlen(hcd->irq_descr))
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
/* fall back to the legacy interrupt */
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
if (ret) {
xhci_err(xhci, "request interrupt %d failed\n",
pdev->irq);
return ret;
}
hcd->irq = pdev->irq;
return 0;
}
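/*
 * To summarize xhci_try_enable_msi() above: MSI-X is tried first, then
 * plain MSI, and finally the shared legacy PCI IRQ; hosts flagged with
 * XHCI_BROKEN_MSI skip straight to the legacy path.
 */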
static int xhci_pci_run(struct usb_hcd *hcd)
{
int ret;
if (usb_hcd_is_primary_hcd(hcd)) {
ret = xhci_try_enable_msi(hcd);
if (ret)
return ret;
}
return xhci_run(hcd);
}
static void xhci_pci_stop(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
xhci_stop(hcd);
if (usb_hcd_is_primary_hcd(hcd))
xhci_cleanup_msix(xhci);
}
/* called after powerup, by probe or system-pm "wakeup" */
static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
{
/*
* TODO: Implement finding debug ports later.
* TODO: see if there are any quirks that need to be added to handle
* new extended capabilities.
*/
/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
if (!pci_set_mwi(pdev))
xhci_dbg(xhci, "MWI active\n");
xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
return 0;
}
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct xhci_driver_data *driver_data;
const struct pci_device_id *id;
id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
if (id && id->driver_data) {
driver_data = (struct xhci_driver_data *)id->driver_data;
xhci->quirks |= driver_data->quirks;
}
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"XHCI_RESET_EP_QUIRK for this evaluation HW is deprecated");
}
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x4) {
xhci->quirks |= XHCI_SLOW_SUSPEND;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Fresco Logic xHC revision %u"
"must be suspended extra slowly",
pdev->revision);
}
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
xhci->quirks |= XHCI_BROKEN_STREAMS;
/* Fresco Logic confirms: all revisions of this chip do not
* support MSI, even though some of them claim to in their PCI
* capabilities.
*/
xhci->quirks |= XHCI_BROKEN_MSI;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Fresco Logic revision %u "
"has broken MSI implementation",
pdev->revision);
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
xhci->quirks |= XHCI_AMD_0x96_HOST;
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check())
xhci->quirks |= XHCI_AMD_PLL_FIX;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == 0x145c ||
pdev->device == 0x15e0 ||
pdev->device == 0x15e1 ||
pdev->device == 0x43bb))
xhci->quirks |= XHCI_SUSPEND_DELAY;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
xhci->quirks |= XHCI_DISABLE_SPARSE;
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
xhci->quirks |= XHCI_U2_DISABLE_WAKE;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
xhci->quirks |= XHCI_BROKEN_D3COLD_S2I;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;
/*
* PPT desktop boards DH77EB and DH77DF will power back on after
* a few seconds of being shutdown. The fix for this is to
* switch the ports from xHCI to EHCI on shutdown. We can't use
* DMI information to find those particular boards (since each
* vendor will change the board name), so we have to key off all
* PPT chipsets.
*/
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI))
xhci->quirks |= XHCI_RESET_TO_DEFAULT;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
/* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
if (pdev->vendor == PCI_VENDOR_ID_VIA &&
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
xhci->quirks |= XHCI_LPM_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
/*
* try to tame the ASMedia 1042 controller which reports 0.96
* but appears to behave more like 1.0
*/
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
(pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI ||
pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI))
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
pdev->device == 0x9026)
xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 ||
pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
xhci->quirks |= XHCI_NO_SOFT_RETRY;
if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) {
xhci->quirks |= XHCI_ZHAOXIN_HOST;
xhci->quirks |= XHCI_LPM_SUPPORT;
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
}
if (pdev->device == 0x9203)
xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
}
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Resetting on resume");
}
#ifdef CONFIG_ACPI
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
{
static const guid_t intel_dsm_guid =
GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
union acpi_object *obj;
obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
NULL);
ACPI_FREE(obj);
}
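/*
 * The evaluation above invokes an Intel vendor-specific _DSM (revision
 * 3, function index 1) that, going by the function name, lets ACPI
 * firmware enable RTD3 for hosts with the PME-stuck quirk; the returned
 * object carries nothing we need, so it is freed immediately.
 */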
static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hub *rhub = &xhci->usb3_rhub;
int ret;
int i;
/* This is not the usb3 roothub we are looking for */
if (hcd != rhub->hcd)
return;
if (hdev->maxchild > rhub->num_ports) {
dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
return;
}
for (i = 0; i < hdev->maxchild; i++) {
ret = usb_acpi_port_lpm_incapable(hdev, i);
dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
if (ret >= 0) {
rhub->ports[i]->lpm_incapable = ret;
continue;
}
}
}
#else
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
#endif /* CONFIG_ACPI */
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
xhci = hcd_to_xhci(hcd);
if (!xhci->sbrn)
pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
retval = xhci_gen_setup(hcd, xhci_pci_quirks);
if (retval)
return retval;
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
return xhci_pci_reinit(xhci, pdev);
}
static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
/* Check if acpi claims some USB3 roothub ports are lpm incapable */
if (!hdev->parent)
xhci_find_lpm_incapable_ports(hcd, hdev);
return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
}
/*
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
*/
static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
struct xhci_driver_data *driver_data;
struct reset_control *reset;
driver_data = (struct xhci_driver_data *)id->driver_data;
if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
retval = renesas_xhci_check_request_fw(dev, id);
if (retval)
return retval;
}
reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
reset_control_reset(reset);
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
/* Register the USB 2.0 roothub.
* FIXME: USB core must know to register the USB 2.0 roothub first.
* This is sort of silly, because we could just set the HCD driver flags
* to say USB 2.0, but I'm not sure what the implications would be in
* the other parts of the HCD code.
*/
retval = usb_hcd_pci_probe(dev, &xhci_pci_hc_driver);
if (retval)
goto put_runtime_pm;
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
xhci = hcd_to_xhci(hcd);
xhci->reset = reset;
xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
pci_name(dev), hcd);
if (!xhci->shared_hcd) {
retval = -ENOMEM;
goto dealloc_usb2_hcd;
}
retval = xhci_ext_cap_init(xhci);
if (retval)
goto put_usb3_hcd;
retval = usb_add_hcd(xhci->shared_hcd, dev->irq,
IRQF_SHARED);
if (retval)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
if (!(xhci->quirks & XHCI_BROKEN_STREAMS) &&
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
dma_set_max_seg_size(&dev->dev, UINT_MAX);
return 0;
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_hcd_pci_remove(dev);
put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}
static void xhci_pci_remove(struct pci_dev *dev)
{
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_forbid(&dev->dev);
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
xhci->shared_hcd = NULL;
}
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
usb_hcd_pci_remove(dev);
}
/*
 * On some Intel xHCI controllers, the SSIC ports must be marked as
 * "unused" through a vendor-specific SSIC CONFIG register before the
 * xHC is put into D3 (per the SSIC_PORT_CFG2 definitions above, port 0
 * sits at offset 0x880c and port 1 at 0x883c). After D3 exit, the
 * ports must be marked as "used" again. Without this change, the xHC
 * might not enter the D3 state.
 */
static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 val;
void __iomem *reg;
int i;
for (i = 0; i < SSIC_PORT_NUM; i++) {
reg = (void __iomem *) xhci->cap_regs +
SSIC_PORT_CFG2 +
i * SSIC_PORT_CFG2_OFFSET;
/* Notify SSIC that SSIC profile programming is not done. */
val = readl(reg) & ~PROG_DONE;
writel(val, reg);
/* Mark SSIC port as unused (suspend) or used (resume) */
val = readl(reg);
if (suspend)
val |= SSIC_PORT_UNUSED;
else
val &= ~SSIC_PORT_UNUSED;
writel(val, reg);
/* Notify SSIC that SSIC profile programming is done */
val = readl(reg) | PROG_DONE;
writel(val, reg);
readl(reg);
}
}
/*
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear
* the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
*/
static void xhci_pme_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
void __iomem *reg;
u32 val;
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
val = readl(reg);
writel(val | BIT(28), reg);
readl(reg);
}
static void xhci_sparse_control_quirk(struct usb_hcd *hcd)
{
u32 reg;
reg = readl(hcd->regs + SPARSE_CNTL_ENABLE);
reg &= ~BIT(SPARSE_DISABLE_BIT);
writel(reg, hcd->regs + SPARSE_CNTL_ENABLE);
}
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int ret;
/*
* Systems with the TI redriver that loses port status change events
* need to have the registers polled during D3, so avoid D3cold.
*/
if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
pci_d3cold_disable(pdev);
#ifdef CONFIG_SUSPEND
/* d3cold is broken, but only when s2idle is used */
if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE &&
xhci->quirks & (XHCI_BROKEN_D3COLD_S2I))
pci_d3cold_disable(pdev);
#endif
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
xhci_ssic_port_unused_quirk(hcd, true);
if (xhci->quirks & XHCI_DISABLE_SPARSE)
xhci_sparse_control_quirk(hcd);
ret = xhci_suspend(xhci, do_wakeup);
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
xhci_ssic_port_unused_quirk(hcd, false);
return ret;
}
static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
reset_control_reset(xhci->reset);
/* The BIOS on systems with the Intel Panther Point chipset may or may
* not support xHCI natively. That means that during system resume, it
* may switch the ports back to EHCI so that users can use their
* keyboard to select a kernel from GRUB after resume from hibernate.
*
* The BIOS is supposed to remember whether the OS had xHCI ports
* enabled before resume, and switch the ports back to xHCI when the
* BIOS/OS semaphore is written, but we all know we can't trust BIOS
* writers.
*
* Unconditionally switch the ports back to xHCI after a system resume.
* It should not matter whether the EHCI or xHCI controller is
* resumed first. It's enough to do the switchover in xHCI because
* USB core won't notice anything as the hub driver doesn't start
* running again until after all the devices (including both EHCI and
* xHCI host controllers) have been resumed.
*/
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
usb_enable_intel_xhci_ports(pdev);
if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
xhci_ssic_port_unused_quirk(hcd, false);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
retval = xhci_resume(xhci, msg);
return retval;
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port *port;
struct usb_device *udev;
unsigned int slot_id;
u32 portsc;
int i;
/*
 * Systems with the XHCI_RESET_TO_DEFAULT quirk have boot firmware that
 * causes a significant boot delay if USB ports are left in the
 * suspended U3 state during boot. Some USB devices survive in U3 over
 * an S4 hibernate.
 *
 * Disable ports that are in U3 if remote wakeup is not enabled for
 * either the host controller or the connected device.
 */
if (!(xhci->quirks & XHCI_RESET_TO_DEFAULT))
return 0;
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
port = &xhci->hw_ports[i];
portsc = readl(port->addr);
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci,
port->hcd_portnum + 1);
if (!slot_id || !xhci->devs[slot_id]) {
xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n",
slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
continue;
}
udev = xhci->devs[slot_id]->udev;
/* if wakeup is enabled then don't disable the port */
if (udev->do_remote_wakeup && do_wakeup)
continue;
xhci_dbg(xhci, "port %d-%d in U3 without wakeup, disable it\n",
port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
portsc = xhci_port_state_to_neutral(portsc);
writel(portsc | PORT_PE, port->addr);
}
return 0;
}
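/*
 * A subtlety in the loop above: per the xHCI specification the PORTSC
 * Port Enabled/Disabled (PED) bit is cleared -- i.e. the port is
 * disabled -- by writing 1 to it, so "portsc | PORT_PE" disables the
 * port rather than enabling it.
 */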
static void xhci_pci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
xhci_shutdown(hcd);
xhci_cleanup_msix(xhci);
/* Yet another workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(pdev, PCI_D3hot);
}
/*-------------------------------------------------------------------------*/
static const struct xhci_driver_data renesas_data = {
.quirks = XHCI_RENESAS_FW_QUIRK,
.firmware = "renesas_usb_fw.mem",
};
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(0x1912, 0x0014),
.driver_data = (unsigned long)&renesas_data,
},
{ PCI_DEVICE(0x1912, 0x0015),
.driver_data = (unsigned long)&renesas_data,
},
/* handle any USB 3.0 xHCI controller */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
/*
* Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
* load firmware, so don't encumber the xhci-pci driver with it.
*/
#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
MODULE_FIRMWARE("renesas_usb_fw.mem");
#endif
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
.name = hcd_name,
.id_table = pci_ids,
.probe = xhci_pci_probe,
.remove = xhci_pci_remove,
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
.driver = {
.pm = pm_ptr(&usb_hcd_pci_pm_ops),
},
};
static int __init xhci_pci_init(void)
{
xhci_init_driver(&xhci_pci_hc_driver, &xhci_pci_overrides);
xhci_pci_hc_driver.pci_suspend = pm_ptr(xhci_pci_suspend);
xhci_pci_hc_driver.pci_resume = pm_ptr(xhci_pci_resume);
xhci_pci_hc_driver.pci_poweroff_late = pm_ptr(xhci_pci_poweroff_late);
xhci_pci_hc_driver.shutdown = pm_ptr(xhci_pci_shutdown);
xhci_pci_hc_driver.stop = xhci_pci_stop;
return pci_register_driver(&xhci_pci_driver);
}
module_init(xhci_pci_init);
static void __exit xhci_pci_exit(void)
{
pci_unregister_driver(&xhci_pci_driver);
}
module_exit(xhci_pci_exit);
MODULE_DESCRIPTION("xHCI PCI Host Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/xhci-pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Setup platform devices needed by the Freescale multi-port host
* and/or dual-role USB controller modules based on the description
* in flat device tree.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
struct fsl_usb2_dev_data {
char *dr_mode; /* controller mode */
char *drivers[3]; /* drivers to instantiate for this mode */
enum fsl_usb2_operating_modes op_mode; /* operating mode */
};
static struct fsl_usb2_dev_data dr_mode_data[] = {
{
.dr_mode = "host",
.drivers = { "fsl-ehci", NULL, NULL, },
.op_mode = FSL_USB2_DR_HOST,
},
{
.dr_mode = "otg",
.drivers = { "fsl-usb2-otg", "fsl-ehci", "fsl-usb2-udc", },
.op_mode = FSL_USB2_DR_OTG,
},
{
.dr_mode = "peripheral",
.drivers = { "fsl-usb2-udc", NULL, NULL, },
.op_mode = FSL_USB2_DR_DEVICE,
},
};
static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
{
const unsigned char *prop;
int i;
prop = of_get_property(np, "dr_mode", NULL);
if (prop) {
for (i = 0; i < ARRAY_SIZE(dr_mode_data); i++) {
if (!strcmp(prop, dr_mode_data[i].dr_mode))
return &dr_mode_data[i];
}
}
pr_warn("%pOF: Invalid 'dr_mode' property, fallback to host mode\n",
np);
return &dr_mode_data[0]; /* mode not specified, use host */
}
static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
{
if (!phy_type)
return FSL_USB2_PHY_NONE;
if (!strcasecmp(phy_type, "ulpi"))
return FSL_USB2_PHY_ULPI;
if (!strcasecmp(phy_type, "utmi"))
return FSL_USB2_PHY_UTMI;
if (!strcasecmp(phy_type, "utmi_wide"))
return FSL_USB2_PHY_UTMI_WIDE;
if (!strcasecmp(phy_type, "utmi_dual"))
return FSL_USB2_PHY_UTMI_DUAL;
if (!strcasecmp(phy_type, "serial"))
return FSL_USB2_PHY_SERIAL;
return FSL_USB2_PHY_NONE;
}
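/*
 * Illustrative (hypothetical) device-tree fragment exercising the two
 * string properties parsed above; the node name and address are made
 * up, but the property values match what get_dr_mode_data() and
 * determine_usb_phy() accept:
 *
 *	usb@22000 {
 *		compatible = "fsl-usb2-dr", "fsl-usb2-dr-v2.2";
 *		dr_mode = "otg";
 *		phy_type = "ulpi";
 *	};
 */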
static struct platform_device *fsl_usb2_device_register(
struct platform_device *ofdev,
struct fsl_usb2_platform_data *pdata,
const char *name, int id)
{
struct platform_device *pdev;
const struct resource *res = ofdev->resource;
unsigned int num = ofdev->num_resources;
int retval;
pdev = platform_device_alloc(name, id);
if (!pdev) {
retval = -ENOMEM;
goto error;
}
pdev->dev.parent = &ofdev->dev;
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
if (!pdev->dev.dma_mask) {
pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
} else {
retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (retval)
goto error;
}
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
goto error;
if (num) {
retval = platform_device_add_resources(pdev, res, num);
if (retval)
goto error;
}
device_set_of_node_from_dev(&pdev->dev, &ofdev->dev);
retval = platform_device_add(pdev);
if (retval)
goto error;
return pdev;
error:
platform_device_put(pdev);
return ERR_PTR(retval);
}
static const struct of_device_id fsl_usb2_mph_dr_of_match[];
static enum fsl_usb2_controller_ver usb_get_ver_info(struct device_node *np)
{
enum fsl_usb2_controller_ver ver = FSL_USB_VER_NONE;
/*
* returns 1 for usb controller version 1.6
* returns 2 for usb controller version 2.2
* returns 3 for usb controller version 2.4
* returns 4 for usb controller version 2.5
* returns 0 otherwise
*/
if (of_device_is_compatible(np, "fsl-usb2-dr")) {
if (of_device_is_compatible(np, "fsl-usb2-dr-v1.6"))
ver = FSL_USB_VER_1_6;
else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.2"))
ver = FSL_USB_VER_2_2;
else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.4"))
ver = FSL_USB_VER_2_4;
else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.5"))
ver = FSL_USB_VER_2_5;
else /* for previous controller versions */
ver = FSL_USB_VER_OLD;
if (ver > FSL_USB_VER_NONE)
return ver;
}
if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
return FSL_USB_VER_OLD;
if (of_device_is_compatible(np, "fsl-usb2-mph")) {
if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
ver = FSL_USB_VER_1_6;
else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.2"))
ver = FSL_USB_VER_2_2;
else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.4"))
ver = FSL_USB_VER_2_4;
else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.5"))
ver = FSL_USB_VER_2_5;
else /* for previous controller versions */
ver = FSL_USB_VER_OLD;
}
return ver;
}
static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct platform_device *usb_dev;
struct fsl_usb2_platform_data data, *pdata;
struct fsl_usb2_dev_data *dev_data;
const struct of_device_id *match;
const unsigned char *prop;
static unsigned int idx;
int i;
if (!of_device_is_available(np))
return -ENODEV;
match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
if (!match)
return -ENODEV;
pdata = &data;
if (match->data)
memcpy(pdata, match->data, sizeof(data));
else
memset(pdata, 0, sizeof(data));
dev_data = get_dr_mode_data(np);
if (of_device_is_compatible(np, "fsl-usb2-mph")) {
if (of_property_present(np, "port0"))
pdata->port_enables |= FSL_USB2_PORT0_ENABLED;
if (of_property_present(np, "port1"))
pdata->port_enables |= FSL_USB2_PORT1_ENABLED;
pdata->operating_mode = FSL_USB2_MPH_HOST;
} else {
pdata->invert_drvvbus = of_property_read_bool(np, "fsl,invert-drvvbus");
pdata->invert_pwr_fault = of_property_read_bool(np, "fsl,invert-pwr-fault");
/* setup mode selected in the device tree */
pdata->operating_mode = dev_data->op_mode;
}
prop = of_get_property(np, "phy_type", NULL);
pdata->phy_mode = determine_usb_phy(prop);
pdata->controller_ver = usb_get_ver_info(np);
/* Activate erratum workarounds based on properties in the device tree */
pdata->has_fsl_erratum_a007792 =
of_property_read_bool(np, "fsl,usb-erratum-a007792");
pdata->has_fsl_erratum_a005275 =
of_property_read_bool(np, "fsl,usb-erratum-a005275");
pdata->has_fsl_erratum_a005697 =
of_property_read_bool(np, "fsl,usb_erratum-a005697");
pdata->has_fsl_erratum_a006918 =
of_property_read_bool(np, "fsl,usb_erratum-a006918");
pdata->has_fsl_erratum_14 =
of_property_read_bool(np, "fsl,usb_erratum-14");
/*
* Determine whether phy_clk_valid needs to be checked
* by reading property in device tree
*/
pdata->check_phy_clk_valid =
of_property_read_bool(np, "phy-clk-valid");
if (pdata->have_sysif_regs) {
if (pdata->controller_ver == FSL_USB_VER_NONE) {
dev_warn(&ofdev->dev, "Could not get controller version\n");
return -ENODEV;
}
}
for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
if (!dev_data->drivers[i])
continue;
usb_dev = fsl_usb2_device_register(ofdev, pdata,
dev_data->drivers[i], idx);
if (IS_ERR(usb_dev)) {
dev_err(&ofdev->dev, "Can't register usb device\n");
return PTR_ERR(usb_dev);
}
}
idx++;
return 0;
}
static int __unregister_subdev(struct device *dev, void *d)
{
platform_device_unregister(to_platform_device(dev));
return 0;
}
static void fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
{
device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
}
#ifdef CONFIG_PPC_MPC512x
#define USBGENCTRL 0x200 /* NOTE: big endian */
#define GC_WU_INT_CLR (1 << 5) /* Wakeup int clear */
#define GC_ULPI_SEL (1 << 4) /* ULPI i/f select (usb0 only)*/
#define GC_PPP (1 << 3) /* Inv. Port Power Polarity */
#define GC_PFP (1 << 2) /* Inv. Power Fault Polarity */
#define GC_WU_ULPI_EN (1 << 1) /* Wakeup on ULPI event */
#define GC_WU_IE (1 << 1) /* Wakeup interrupt enable */
#define ISIPHYCTRL 0x204 /* NOTE: big endian */
#define PHYCTRL_PHYE (1 << 4) /* On-chip UTMI PHY enable */
#define PHYCTRL_BSENH (1 << 3) /* Bit Stuff Enable High */
#define PHYCTRL_BSEN (1 << 2) /* Bit Stuff Enable */
#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
int err;
clk = devm_clk_get(pdev->dev.parent, "ipg");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
return PTR_ERR(clk);
}
err = clk_prepare_enable(clk);
if (err) {
dev_err(&pdev->dev, "failed to enable clk\n");
return err;
}
pdata->clk = clk;
if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
u32 reg = 0;
if (pdata->invert_drvvbus)
reg |= GC_PPP;
if (pdata->invert_pwr_fault)
reg |= GC_PFP;
out_be32(pdata->regs + ISIPHYCTRL, PHYCTRL_PHYE | PHYCTRL_PXE);
out_be32(pdata->regs + USBGENCTRL, reg);
}
return 0;
}
static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
pdata->regs = NULL;
if (pdata->clk)
clk_disable_unprepare(pdata->clk);
}
static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
.big_endian_desc = 1,
.big_endian_mmio = 1,
.es = 1,
.have_sysif_regs = 0,
.le_setup_buf = 1,
.init = fsl_usb2_mpc5121_init,
.exit = fsl_usb2_mpc5121_exit,
};
#endif /* CONFIG_PPC_MPC512x */
static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
.have_sysif_regs = 1,
};
static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
#endif
{},
};
MODULE_DEVICE_TABLE(of, fsl_usb2_mph_dr_of_match);
static struct platform_driver fsl_usb2_mph_dr_driver = {
.driver = {
.name = "fsl-usb2-mph-dr",
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
.remove_new = fsl_usb2_mph_dr_of_remove,
};
module_platform_driver(fsl_usb2_mph_dr_driver);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/fsl-mph-dr-of.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sonics Silicon Backplane
* Broadcom USB-core driver (SSB bus glue)
*
* Copyright 2011-2012 Hauke Mehrtens <[email protected]>
*
* Based on ssb-ohci driver
* Copyright 2007 Michael Buesch <[email protected]>
*
* Derived from the OHCI-PCI driver
* Copyright 1999 Roman Weissgaerber
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Common USB driver for SSB Bus");
MODULE_LICENSE("GPL");
#define SSB_HCD_TMSLOW_HOSTMODE (1 << 29)
struct ssb_hcd_device {
struct platform_device *ehci_dev;
struct platform_device *ohci_dev;
u32 enable_flags;
};
static void ssb_hcd_5354wa(struct ssb_device *dev)
{
#ifdef CONFIG_SSB_DRIVER_MIPS
/* Work around for 5354 failures */
if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
/* Change syn01 reg */
ssb_write32(dev, 0x894, 0x00fe00fe);
/* Change syn03 reg */
ssb_write32(dev, 0x89c, ssb_read32(dev, 0x89c) | 0x1);
}
#endif
}
static void ssb_hcd_usb20wa(struct ssb_device *dev)
{
if (dev->id.coreid == SSB_DEV_USB20_HOST) {
/*
* USB 2.0 special considerations:
*
* In addition to the standard SSB reset sequence, the Host
* Control Register must be programmed to bring the USB core
* and various phy components out of reset.
*/
ssb_write32(dev, 0x200, 0x7ff);
/* Change Flush control reg */
ssb_write32(dev, 0x400, ssb_read32(dev, 0x400) & ~8);
ssb_read32(dev, 0x400);
/* Change Shim control reg */
ssb_write32(dev, 0x304, ssb_read32(dev, 0x304) & ~0x100);
ssb_read32(dev, 0x304);
udelay(1);
ssb_hcd_5354wa(dev);
}
}
/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
static u32 ssb_hcd_init_chip(struct ssb_device *dev)
{
u32 flags = 0;
if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
/* Put the device into host-mode. */
flags |= SSB_HCD_TMSLOW_HOSTMODE;
ssb_device_enable(dev, flags);
ssb_hcd_usb20wa(dev);
return flags;
}
static const struct usb_ehci_pdata ehci_pdata = {
};
static const struct usb_ohci_pdata ohci_pdata = {
};
static struct platform_device *ssb_hcd_create_pdev(struct ssb_device *dev, bool ohci, u32 addr, u32 len)
{
struct platform_device *hci_dev;
struct resource hci_res[2];
int ret;
memset(hci_res, 0, sizeof(hci_res));
hci_res[0].start = addr;
hci_res[0].end = hci_res[0].start + len - 1;
hci_res[0].flags = IORESOURCE_MEM;
hci_res[1].start = dev->irq;
hci_res[1].flags = IORESOURCE_IRQ;
hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
"ehci-platform" , 0);
if (!hci_dev)
return ERR_PTR(-ENOMEM);
hci_dev->dev.parent = dev->dev;
hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
ret = platform_device_add_resources(hci_dev, hci_res,
ARRAY_SIZE(hci_res));
if (ret)
goto err_alloc;
if (ohci)
ret = platform_device_add_data(hci_dev, &ohci_pdata,
sizeof(ohci_pdata));
else
ret = platform_device_add_data(hci_dev, &ehci_pdata,
sizeof(ehci_pdata));
if (ret)
goto err_alloc;
ret = platform_device_add(hci_dev);
if (ret)
goto err_alloc;
return hci_dev;
err_alloc:
platform_device_put(hci_dev);
return ERR_PTR(ret);
}
static int ssb_hcd_probe(struct ssb_device *dev,
const struct ssb_device_id *id)
{
int err, tmp;
int start, len;
u16 chipid_top;
u16 coreid = dev->id.coreid;
struct ssb_hcd_device *usb_dev;
/* USB cores are only connected on embedded devices. */
chipid_top = (dev->bus->chip_id & 0xFF00);
if (chipid_top != 0x4700 && chipid_top != 0x5300)
return -ENODEV;
/* TODO: Probably need checks here; is the core connected? */
if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = devm_kzalloc(dev->dev, sizeof(struct ssb_hcd_device),
GFP_KERNEL);
if (!usb_dev)
return -ENOMEM;
/* We currently always attach SSB_DEV_USB11_HOSTDEV
* as HOST OHCI. If we want to attach it as Client device,
* we must branch here and call into the (yet to
* be written) Client mode driver. Same for remove(). */
usb_dev->enable_flags = ssb_hcd_init_chip(dev);
tmp = ssb_read32(dev, SSB_ADMATCH0);
start = ssb_admatch_base(tmp);
len = (coreid == SSB_DEV_USB20_HOST) ? 0x800 : ssb_admatch_size(tmp);
usb_dev->ohci_dev = ssb_hcd_create_pdev(dev, true, start, len);
if (IS_ERR(usb_dev->ohci_dev))
return PTR_ERR(usb_dev->ohci_dev);
if (coreid == SSB_DEV_USB20_HOST) {
start = ssb_admatch_base(tmp) + 0x800; /* ehci core offset */
usb_dev->ehci_dev = ssb_hcd_create_pdev(dev, false, start, len);
if (IS_ERR(usb_dev->ehci_dev)) {
err = PTR_ERR(usb_dev->ehci_dev);
goto err_unregister_ohci_dev;
}
}
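/*
 * Memory-map recap of the code above: on a USB 2.0 host core the OHCI
 * registers sit at the ADMATCH0 base and the EHCI registers at
 * base + 0x800, each window being 0x800 bytes long.
 */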
ssb_set_drvdata(dev, usb_dev);
return 0;
err_unregister_ohci_dev:
platform_device_unregister(usb_dev->ohci_dev);
return err;
}
static void ssb_hcd_remove(struct ssb_device *dev)
{
struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
struct platform_device *ohci_dev = usb_dev->ohci_dev;
struct platform_device *ehci_dev = usb_dev->ehci_dev;
if (ohci_dev)
platform_device_unregister(ohci_dev);
if (ehci_dev)
platform_device_unregister(ehci_dev);
ssb_device_disable(dev, 0);
}
static void ssb_hcd_shutdown(struct ssb_device *dev)
{
ssb_device_disable(dev, 0);
}
#ifdef CONFIG_PM
static int ssb_hcd_suspend(struct ssb_device *dev, pm_message_t state)
{
ssb_device_disable(dev, 0);
return 0;
}
static int ssb_hcd_resume(struct ssb_device *dev)
{
struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
ssb_device_enable(dev, usb_dev->enable_flags);
return 0;
}
#else /* !CONFIG_PM */
#define ssb_hcd_suspend NULL
#define ssb_hcd_resume NULL
#endif /* CONFIG_PM */
static const struct ssb_device_id ssb_hcd_table[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
{},
};
MODULE_DEVICE_TABLE(ssb, ssb_hcd_table);
static struct ssb_driver ssb_hcd_driver = {
.name = KBUILD_MODNAME,
.id_table = ssb_hcd_table,
.probe = ssb_hcd_probe,
.remove = ssb_hcd_remove,
.shutdown = ssb_hcd_shutdown,
.suspend = ssb_hcd_suspend,
.resume = ssb_hcd_resume,
};
static int __init ssb_hcd_init(void)
{
return ssb_driver_register(&ssb_hcd_driver);
}
module_init(ssb_hcd_init);
static void __exit ssb_hcd_exit(void)
{
ssb_driver_unregister(&ssb_hcd_driver);
}
module_exit(ssb_hcd_exit);
| linux-master | drivers/usb/host/ssb-hcd.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2005 David Brownell
* (C) Copyright 2002 Hewlett-Packard Company
*
* OMAP Bus Glue
*
* Modified for OMAP by Tony Lindgren <[email protected]>
* Based on the 2.4 OMAP OHCI driver originally done by MontaVista Software Inc.
* and on ohci-sa1111.c by Christopher Hoover <[email protected]>
*
* This file is licenced under the GPL.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap1.h>
#include <linux/soc/ti/omap1-usb.h>
#include <linux/soc/ti/omap1-mux.h>
#include <linux/soc/ti/omap1-soc.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ohci.h"
#include <asm/io.h>
#include <asm/mach-types.h>
#define DRIVER_DESC "OHCI OMAP driver"
struct ohci_omap_priv {
struct clk *usb_host_ck;
struct clk *usb_dc_ck;
struct gpio_desc *power;
struct gpio_desc *overcurrent;
};
static const char hcd_name[] = "ohci-omap";
static struct hc_driver __read_mostly ohci_omap_hc_driver;
#define hcd_to_ohci_omap_priv(h) \
((struct ohci_omap_priv *)hcd_to_ohci(h)->priv)
static void omap_ohci_clock_power(struct ohci_omap_priv *priv, int on)
{
if (on) {
clk_enable(priv->usb_dc_ck);
clk_enable(priv->usb_host_ck);
/* guesstimate for T5 == 1x 32K clock + APLL lock time */
udelay(100);
} else {
clk_disable(priv->usb_host_ck);
clk_disable(priv->usb_dc_ck);
}
}
static void start_hnp(struct ohci_hcd *ohci)
{
struct usb_hcd *hcd = ohci_to_hcd(ohci);
const unsigned port = hcd->self.otg_port - 1;
unsigned long flags;
u32 l;
otg_start_hnp(hcd->usb_phy->otg);
local_irq_save(flags);
hcd->usb_phy->otg->state = OTG_STATE_A_SUSPEND;
writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
l = omap_readl(OTG_CTRL);
l &= ~OTG_A_BUSREQ;
omap_writel(l, OTG_CTRL);
local_irq_restore(flags);
}
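/*
 * start_hnp() above kicks off the OTG Host Negotiation Protocol: it
 * moves the PHY's OTG state machine to A_SUSPEND, suspends the OTG
 * root-hub port by writing RH_PS_PSS, and clears OTG_A_BUSREQ so that
 * the B-device can take over the host role.
 */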
/*-------------------------------------------------------------------------*/
static int ohci_omap_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct omap_usb_config *config = dev_get_platdata(hcd->self.controller);
struct ohci_omap_priv *priv = hcd_to_ohci_omap_priv(hcd);
int need_transceiver = (config->otg != 0);
int ret;
dev_dbg(hcd->self.controller, "starting USB Controller\n");
if (config->otg) {
hcd->self.otg_port = config->otg;
/* default/minimum OTG power budget: 8 mA */
hcd->power_budget = 8;
}
/* XXX OMAP16xx only */
if (config->ocpi_enable)
config->ocpi_enable();
if (IS_ENABLED(CONFIG_USB_OTG) && need_transceiver) {
hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
int status = otg_set_host(hcd->usb_phy->otg,
&ohci_to_hcd(ohci)->self);
dev_dbg(hcd->self.controller, "init %s phy, status %d\n",
hcd->usb_phy->label, status);
if (status) {
usb_put_phy(hcd->usb_phy);
return status;
}
} else {
return -EPROBE_DEFER;
}
hcd->skip_phy_initialization = 1;
ohci->start_hnp = start_hnp;
}
omap_ohci_clock_power(priv, 1);
if (config->lb_reset)
config->lb_reset();
ret = ohci_setup(hcd);
if (ret < 0)
return ret;
if (config->otg || config->rwc) {
ohci->hc_control = OHCI_CTRL_RWC;
writel(OHCI_CTRL_RWC, &ohci->regs->control);
}
/* board-specific power switching and overcurrent support */
if (machine_is_omap_osk()) {
u32 rh = roothub_a (ohci);
/* power switching (ganged by default) */
rh &= ~RH_A_NPS;
/* TPS2045 switch for internal transceiver (port 1) */
if (machine_is_omap_osk()) {
ohci_to_hcd(ohci)->power_budget = 250;
rh &= ~RH_A_NOCP;
			/* gpio9 for overcurrent detection */
omap_cfg_reg(W8_1610_GPIO9);
/* for paranoia's sake: disable USB.PUEN */
omap_cfg_reg(W4_USB_HIGHZ);
}
ohci_writel(ohci, rh, &ohci->regs->roothub.a);
ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
} else if (machine_is_nokia770()) {
/* We require a self-powered hub, which should have
* plenty of power. */
ohci_to_hcd(ohci)->power_budget = 0;
}
/* FIXME hub_wq hub requests should manage power switching */
if (config->transceiver_power)
return config->transceiver_power(1);
if (priv->power)
gpiod_set_value_cansleep(priv->power, 0);
/* board init will have already handled HMC and mux setup.
* any external transceiver should already be initialized
* too, so all configured ports use the right signaling now.
*/
return 0;
}
/*-------------------------------------------------------------------------*/
/**
* ohci_hcd_omap_probe - initialize OMAP-based HCDs
* @pdev: USB controller to probe
*
* Context: task context, might sleep
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int ohci_hcd_omap_probe(struct platform_device *pdev)
{
int retval, irq;
	struct usb_hcd *hcd = NULL;
struct ohci_omap_priv *priv;
if (pdev->num_resources != 2) {
dev_err(&pdev->dev, "invalid num_resources: %i\n",
pdev->num_resources);
return -ENODEV;
}
if (pdev->resource[0].flags != IORESOURCE_MEM
|| pdev->resource[1].flags != IORESOURCE_IRQ) {
dev_err(&pdev->dev, "invalid resource type\n");
return -ENODEV;
}
hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = pdev->resource[0].start;
hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
priv = hcd_to_ohci_omap_priv(hcd);
/* Obtain two optional GPIO lines */
priv->power = devm_gpiod_get_optional(&pdev->dev, "power", GPIOD_ASIS);
if (IS_ERR(priv->power)) {
retval = PTR_ERR(priv->power);
goto err_put_hcd;
}
if (priv->power)
gpiod_set_consumer_name(priv->power, "OHCI power");
/*
* This "overcurrent" GPIO line isn't really used in the code,
* but has a designated hardware function.
* TODO: implement proper overcurrent handling.
*/
priv->overcurrent = devm_gpiod_get_optional(&pdev->dev, "overcurrent",
GPIOD_IN);
if (IS_ERR(priv->overcurrent)) {
retval = PTR_ERR(priv->overcurrent);
goto err_put_hcd;
}
if (priv->overcurrent)
gpiod_set_consumer_name(priv->overcurrent, "OHCI overcurrent");
priv->usb_host_ck = clk_get(&pdev->dev, "usb_hhc_ck");
if (IS_ERR(priv->usb_host_ck)) {
retval = PTR_ERR(priv->usb_host_ck);
goto err_put_hcd;
}
retval = clk_prepare(priv->usb_host_ck);
if (retval)
goto err_put_host_ck;
if (!cpu_is_omap15xx())
priv->usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
else
priv->usb_dc_ck = clk_get(&pdev->dev, "lb_ck");
if (IS_ERR(priv->usb_dc_ck)) {
retval = PTR_ERR(priv->usb_dc_ck);
goto err_unprepare_host_ck;
}
retval = clk_prepare(priv->usb_dc_ck);
if (retval)
goto err_put_dc_ck;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err_unprepare_dc_ck;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
dev_err(&pdev->dev, "can't ioremap OHCI HCD\n");
retval = -ENOMEM;
goto err2;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto err3;
}
retval = usb_add_hcd(hcd, irq, 0);
if (retval)
goto err3;
device_wakeup_enable(hcd->self.controller);
return 0;
err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_unprepare_dc_ck:
clk_unprepare(priv->usb_dc_ck);
err_put_dc_ck:
clk_put(priv->usb_dc_ck);
err_unprepare_host_ck:
clk_unprepare(priv->usb_host_ck);
err_put_host_ck:
clk_put(priv->usb_host_ck);
err_put_hcd:
usb_put_hcd(hcd);
return retval;
}
/* may be called with controller, bus, and devices active */
/**
* ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs
* @pdev: USB Host Controller being removed
*
* Context: task context, might sleep
*
* Reverses the effect of ohci_hcd_omap_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
static void ohci_hcd_omap_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_omap_priv *priv = hcd_to_ohci_omap_priv(hcd);
dev_dbg(hcd->self.controller, "stopping USB Controller\n");
usb_remove_hcd(hcd);
omap_ohci_clock_power(priv, 0);
if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
(void) otg_set_host(hcd->usb_phy->otg, 0);
usb_put_phy(hcd->usb_phy);
}
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
clk_unprepare(priv->usb_dc_ck);
clk_put(priv->usb_dc_ck);
clk_unprepare(priv->usb_host_ck);
clk_put(priv->usb_host_ck);
usb_put_hcd(hcd);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
static int ohci_omap_suspend(struct platform_device *pdev, pm_message_t message)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct ohci_omap_priv *priv = hcd_to_ohci_omap_priv(hcd);
bool do_wakeup = device_may_wakeup(&pdev->dev);
int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
omap_ohci_clock_power(priv, 0);
return ret;
}
static int ohci_omap_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct ohci_omap_priv *priv = hcd_to_ohci_omap_priv(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
omap_ohci_clock_power(priv, 1);
ohci_resume(hcd, false);
return 0;
}
#endif
/*-------------------------------------------------------------------------*/
/*
* Driver definition to register with the OMAP bus
*/
static struct platform_driver ohci_hcd_omap_driver = {
.probe = ohci_hcd_omap_probe,
.remove_new = ohci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_omap_suspend,
.resume = ohci_omap_resume,
#endif
.driver = {
.name = "ohci",
},
};
static const struct ohci_driver_overrides omap_overrides __initconst = {
.product_desc = "OMAP OHCI",
.reset = ohci_omap_reset,
.extra_priv_size = sizeof(struct ohci_omap_priv),
};
static int __init ohci_omap_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
return platform_driver_register(&ohci_hcd_omap_driver);
}
module_init(ohci_omap_init);
static void __exit ohci_omap_cleanup(void)
{
platform_driver_unregister(&ohci_hcd_omap_driver);
}
module_exit(ohci_omap_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ohci-omap.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Enhanced Host Controller Interface (EHCI) driver for USB.
*
* Maintainer: Alan Stern <[email protected]>
*
* Copyright (c) 2000-2004 by David Brownell
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#if defined(CONFIG_PPC_PS3)
#include <asm/firmware.h>
#endif
/*-------------------------------------------------------------------------*/
/*
* EHCI hc_driver implementation ... experimental, incomplete.
* Based on the final 1.0 register interface specification.
*
* USB 2.0 shows up in upcoming www.pcmcia.org technology.
* First was PCMCIA, like ISA; then CardBus, which is PCI.
* Next comes "CardBay", using USB 2.0 signals.
*
* Contains additional contributions by Brad Hards, Rory Bolt, and others.
* Special thanks to Intel and VIA for providing host controllers to
* test this driver on, and Cypress (including In-System Design) for
* providing early devices for those host controllers to talk to!
*/
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
static const char hcd_name [] = "ehci_hcd";
#undef EHCI_URB_TRACE
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
/*
* Some drivers think it's safe to schedule isochronous transfers more than
* 256 ms into the future (partly as a result of an old bug in the scheduling
* code). In an attempt to avoid trouble, we will use a minimum scheduling
* length of 512 frames instead of 256.
*/
#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh; // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* initial park setting: slower than hw default */
static unsigned park;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
#include "ehci.h"
#include "pci-quirks.h"
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
struct ehci_tt *tt);
/*
* The MosChip MCS9990 controller updates its microframe counter
* a little before the frame counter, and occasionally we will read
* the invalid intermediate value. Avoid problems by checking the
* microframe number (the low-order 3 bits); if they are 0 then
* re-read the register to get the correct value.
*/
static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
{
unsigned uf;
uf = ehci_readl(ehci, &ehci->regs->frame_index);
if (unlikely((uf & 7) == 0))
uf = ehci_readl(ehci, &ehci->regs->frame_index);
return uf;
}
static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
{
if (ehci->frame_index_bug)
return ehci_moschip_read_frame_index(ehci);
return ehci_readl(ehci, &ehci->regs->frame_index);
}
#include "ehci-dbg.c"
/*-------------------------------------------------------------------------*/
/*
* ehci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
do {
result = ehci_readl(ehci, ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
if (result == done)
return 0;
udelay (1);
usec--;
} while (usec > 0);
return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(ehci_handshake);
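/*
 * Illustrative sketch (not part of the original driver): a typical
 * ehci_handshake() caller flips a command bit and then polls for the
 * matching status bit, e.g. ringing the async-advance doorbell and
 * waiting for STS_IAA.  The 2 ms budget below is an arbitrary example
 * value, not taken from the EHCI spec:
 *
 *	ehci_writel(ehci, ehci->command | CMD_IAAD, &ehci->regs->command);
 *	rc = ehci_handshake(ehci, &ehci->regs->status,
 *			    STS_IAA, STS_IAA, 2 * 1000);
 */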
/* check TDI/ARC silicon is in host mode */
static int tdi_in_host_mode (struct ehci_hcd *ehci)
{
u32 tmp;
tmp = ehci_readl(ehci, &ehci->regs->usbmode);
return (tmp & 3) == USBMODE_CM_HC;
}
/*
* Force HC to halt state from unknown (EHCI spec section 2.3).
* Must be called with interrupts enabled and the lock not held.
*/
static int ehci_halt (struct ehci_hcd *ehci)
{
u32 temp;
spin_lock_irq(&ehci->lock);
/* disable any irqs left enabled by previous code */
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
spin_unlock_irq(&ehci->lock);
return 0;
}
/*
* This routine gets called during probe before ehci->command
* has been initialized, so we can't rely on its value.
*/
ehci->command &= ~CMD_RUN;
temp = ehci_readl(ehci, &ehci->regs->command);
temp &= ~(CMD_RUN | CMD_IAAD);
ehci_writel(ehci, temp, &ehci->regs->command);
spin_unlock_irq(&ehci->lock);
synchronize_irq(ehci_to_hcd(ehci)->irq);
return ehci_handshake(ehci, &ehci->regs->status,
STS_HALT, STS_HALT, 16 * 125);
}
/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
u32 tmp;
tmp = ehci_readl(ehci, &ehci->regs->usbmode);
tmp |= USBMODE_CM_HC;
/* The default byte access to MMR space is LE after
* controller reset. Set the required endian mode
* for transfer buffers to match the host microprocessor
*/
if (ehci_big_endian_mmio(ehci))
tmp |= USBMODE_BE;
ehci_writel(ehci, tmp, &ehci->regs->usbmode);
}
/*
* Reset a non-running (STS_HALT == 1) controller.
* Must be called with interrupts enabled and the lock not held.
*/
int ehci_reset(struct ehci_hcd *ehci)
{
int retval;
u32 command = ehci_readl(ehci, &ehci->regs->command);
/* If the EHCI debug controller is active, special care must be
* taken before and after a host controller reset */
if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
ehci->debug = NULL;
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
ehci->rh_state = EHCI_RH_HALTED;
ehci->next_statechange = jiffies;
retval = ehci_handshake(ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
if (ehci->has_hostpc) {
ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
&ehci->regs->usbmode_ex);
ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
}
if (retval)
return retval;
if (ehci_is_TDI(ehci))
tdi_reset (ehci);
if (ehci->debug)
dbgp_external_startup(ehci_to_hcd(ehci));
ehci->port_c_suspend = ehci->suspended_ports =
ehci->resuming_ports = 0;
return retval;
}
EXPORT_SYMBOL_GPL(ehci_reset);
/*
* Idle the controller (turn off the schedules).
* Must be called with interrupts enabled and the lock not held.
*/
static void ehci_quiesce (struct ehci_hcd *ehci)
{
u32 temp;
if (ehci->rh_state != EHCI_RH_RUNNING)
return;
/* wait for any schedule enables/disables to take effect */
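	/* (CMD_ASE/CMD_PSE live at USBCMD bits 5/4; shifting left by 10
	 * lines them up with STS_ASS/STS_PSS at USBSTS bits 15/14, so
	 * "temp" is the status value expected once the HC catches up.)
	 */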
temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
16 * 125);
/* then disable anything that's still active */
spin_lock_irq(&ehci->lock);
ehci->command &= ~(CMD_ASE | CMD_PSE);
ehci_writel(ehci, ehci->command, &ehci->regs->command);
spin_unlock_irq(&ehci->lock);
/* hardware can take 16 microframes to turn off ... */
ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
16 * 125);
}
/*-------------------------------------------------------------------------*/
static void end_iaa_cycle(struct ehci_hcd *ehci);
static void end_unlink_async(struct ehci_hcd *ehci);
static void unlink_empty_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable);
#include "ehci-timer.c"
#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
#include "ehci-sysfs.c"
/*-------------------------------------------------------------------------*/
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
{
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
ehci_writel(ehci, PORT_RWC_BITS,
&ehci->regs->port_status[port]);
}
}
/*
* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
* Must be called with interrupts enabled and the lock not held.
*/
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
ehci_halt(ehci);
spin_lock_irq(&ehci->lock);
ehci->rh_state = EHCI_RH_HALTED;
ehci_turn_off_all_ports(ehci);
/* make BIOS/etc use companion controller during reboot */
ehci_writel(ehci, 0, &ehci->regs->configured_flag);
/* unblock posted writes */
ehci_readl(ehci, &ehci->regs->configured_flag);
spin_unlock_irq(&ehci->lock);
}
/* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
* This forcibly disables dma and IRQs, helping kexec and other cases
* where the next system software may expect clean state.
*/
static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	/*
	 * Protect the system from crashing at shutdown in cases where the
	 * USB host has not yet been added by the OTG controller driver.
	 * Since ehci_setup() has not run, avoid touching registers or
	 * variables that it initializes.
	 */
if (!ehci->sbrn)
return;
spin_lock_irq(&ehci->lock);
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
ehci_silence_controller(ehci);
hrtimer_cancel(&ehci->hrtimer);
}
/*-------------------------------------------------------------------------*/
/*
* ehci_work is called from some interrupts, timers, and so on.
* it calls driver completion functions, after dropping ehci->lock.
*/
static void ehci_work (struct ehci_hcd *ehci)
{
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
if (ehci->scanning) {
ehci->need_rescan = true;
return;
}
ehci->scanning = true;
rescan:
ehci->need_rescan = false;
if (ehci->async_count)
scan_async(ehci);
if (ehci->intr_count > 0)
scan_intr(ehci);
if (ehci->isoc_count > 0)
scan_isoc(ehci);
if (ehci->need_rescan)
goto rescan;
ehci->scanning = false;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
turn_on_io_watchdog(ehci);
}
/*
* Called when the ehci_hcd module is removed.
*/
static void ehci_stop (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
ehci_dbg (ehci, "stop\n");
/* no more interrupts ... */
spin_lock_irq(&ehci->lock);
ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
ehci_quiesce(ehci);
ehci_silence_controller(ehci);
ehci_reset (ehci);
hrtimer_cancel(&ehci->hrtimer);
remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq (&ehci->lock);
end_free_itds(ehci);
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
if (ehci->amd_pll_fix == 1)
usb_amd_dev_put();
dbg_status (ehci, "ehci_stop completed",
ehci_readl(ehci, &ehci->regs->status));
}
/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 temp;
int retval;
u32 hcc_params;
struct ehci_qh_hw *hw;
spin_lock_init(&ehci->lock);
/*
	 * keep the I/O watchdog by default; well-behaved HCDs can turn it off later
*/
ehci->need_io_watchdog = 1;
hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->hrtimer.function = ehci_hrtimer_func;
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
/*
* by default set standard 80% (== 100 usec/uframe) max periodic
* bandwidth as required by USB 2.0
*/
ehci->uframe_periodic_max = 100;
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->async_unlink);
INIT_LIST_HEAD(&ehci->async_idle);
INIT_LIST_HEAD(&ehci->intr_unlink_wait);
INIT_LIST_HEAD(&ehci->intr_unlink);
INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
INIT_LIST_HEAD(&ehci->tt_list);
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
switch (EHCI_TUNE_FLS) {
case 0: ehci->periodic_size = 1024; break;
case 1: ehci->periodic_size = 512; break;
case 2: ehci->periodic_size = 256; break;
default: BUG();
}
}
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 0;
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
#if defined(CONFIG_PPC_PS3)
hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
ehci->has_ppcd = 1;
ehci_dbg(ehci, "enable per-port change event\n");
temp |= CMD_PPCEE;
}
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
* schedule by avoiding QH fetches between transfers.
*
* With fast usb storage devices and NForce2, "park" seems to
* make problems: throughput reduction (!), data errors...
*/
if (park) {
park = min(park, (unsigned) 3);
temp |= CMD_PARK;
temp |= park << 8;
}
ehci_dbg(ehci, "park %d\n", park);
}
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
}
ehci->command = temp;
/* Accept arbitrarily long scatter-gather lists */
if (!hcd->localmem_pool)
hcd->self.sg_tablesize = ~0;
/* Prepare for unlinking active QHs */
ehci->old_current = ~0;
return 0;
}
/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
int rc;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
/*
* hcc_params controls whether ehci->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* dma_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like dma_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
*/
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params)) {
ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
ehci_info(ehci, "enabled 64bit DMA\n");
#endif
}
// Philips, Intel, and maybe others need CMD_RUN before the
// root hub will detect new devices (why?); NEC doesn't
ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
dbg_cmd (ehci, "init", ehci->command);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
* involved with the root hub. (Except where one is integrated,
* and there's no companion controller unless maybe for USB OTG.)
*
* Turning on the CF flag will transfer ownership of all ports
* from the companions to the EHCI controller. If any of the
* companions are in the middle of a port reset at the time, it
* could cause trouble. Write-locking ehci_cf_port_reset_rwsem
* guarantees that no resets are in progress. After we set CF,
* a short delay lets the hardware catch up; new resets shouldn't
* be started before the port switching actions could complete.
*/
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	/* Wait until the HC becomes operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
/* For Aspeed, STS_HALT also depends on ASS/PSS status.
* Check CMD_RUN instead.
*/
if (ehci->is_aspeed)
rc = ehci_handshake(ehci, &ehci->regs->command, CMD_RUN,
1, 100 * 1000);
else
rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT,
0, 100 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
if (rc) {
ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
return rc;
}
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci_info (ehci,
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
(ignore_oc || ehci->spurious_oc) ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in init()
* since the class device isn't created that early.
*/
create_debug_files(ehci);
create_sysfs_files(ehci);
return 0;
}
int ehci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->regs = (void __iomem *)ehci->caps +
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
ehci->sbrn = HCD_USB2;
/* data structure init */
retval = ehci_init(hcd);
if (retval)
return retval;
retval = ehci_halt(ehci);
if (retval) {
ehci_mem_cleanup(ehci);
return retval;
}
ehci_reset(ehci);
return 0;
}
EXPORT_SYMBOL_GPL(ehci_setup);
/*-------------------------------------------------------------------------*/
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, current_status, masked_status, pcd_status = 0;
u32 cmd;
int bh;
spin_lock(&ehci->lock);
status = 0;
current_status = ehci_readl(ehci, &ehci->regs->status);
restart:
/* e.g. cardbus physical eject */
if (current_status == ~(u32) 0) {
ehci_dbg (ehci, "device removed\n");
goto dead;
}
status |= current_status;
/*
* We don't use STS_FLR, but some controllers don't like it to
* remain on, so mask it out along with the other status bits.
*/
masked_status = current_status & (INTR_MASK | STS_FLR);
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
spin_unlock(&ehci->lock);
return IRQ_NONE;
}
/* clear (just) interrupts */
ehci_writel(ehci, masked_status, &ehci->regs->status);
/* For edge interrupts, don't race with an interrupt bit being raised */
current_status = ehci_readl(ehci, &ehci->regs->status);
if (current_status & INTR_MASK)
goto restart;
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0)) {
INCR(ehci->stats.normal);
} else {
/* Force to check port status */
if (ehci->has_ci_pec_bug)
status |= STS_PCD;
INCR(ehci->stats.error);
}
bh = 1;
}
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
/* Turn off the IAA watchdog */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
/*
* Mild optimization: Allow another IAAD to reset the
* hrtimer, if one occurs before the next expiration.
* In theory we could always cancel the hrtimer, but
* tests show that about half the time it will be reset
* for some other event anyway.
*/
if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
++ehci->next_hrtimer_event;
/* guard against (alleged) silicon errata */
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
INCR(ehci->stats.iaa);
end_iaa_cycle(ehci);
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
u32 ppcd = ~0;
/* kick root hub later */
pcd_status = status;
/* resume root hub? */
if (ehci->rh_state == EHCI_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
/* get per-port change detect bits */
if (ehci->has_ppcd)
ppcd = status >> 16;
while (i--) {
int pstatus;
/* leverage per-port change bits feature */
if (!(ppcd & (1 << i)))
continue;
pstatus = ehci_readl(ehci,
&ehci->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
if (!(test_bit(i, &ehci->suspended_ports) &&
((pstatus & PORT_RESUME) ||
!(pstatus & PORT_SUSPEND)) &&
(pstatus & PORT_PE) &&
ehci->reset_done[i] == 0))
continue;
/* start USB_RESUME_TIMEOUT msec resume signaling from
* this port, and make hub_wq collect
* PORT_STAT_C_SUSPEND to stop that signaling.
*/
ehci->reset_done[i] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
usb_hcd_start_port_resume(&hcd->self, i);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
}
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
ehci_err(ehci, "fatal error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
dead:
usb_hc_died(hcd);
/* Don't let the controller do anything more */
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
ehci_handle_controller_death(ehci);
/* Handle completions when the controller stops */
bh = 0;
}
if (bh)
ehci_work (ehci);
spin_unlock(&ehci->lock);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*
* urb + dev is in hcd.self.controller.urb_list
* we're queueing TDs onto software and hardware lists
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct list_head qtd_list;
INIT_LIST_HEAD (&qtd_list);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
/* qh_completions() code doesn't handle all the fault cases
* in multi-TD control transfers. Even 1KB is rare anyway.
*/
if (urb->transfer_buffer_length > (16 * 1024))
return -EMSGSIZE;
fallthrough;
/* case PIPE_BULK: */
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async(ehci, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit(ehci, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit (ehci, urb, mem_flags);
else
return sitd_submit (ehci, urb, mem_flags);
}
}
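/*
 * Illustrative sketch (driver's-eye view, not part of this file): the
 * enqueue above is reached through usbcore when a class driver submits
 * an URB, roughly:
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, epnum),
 *			  buf, len, my_completion, ctx);
 *	rc = usb_submit_urb(urb, GFP_KERNEL);
 *
 * where "epnum", "my_completion" and "ctx" are hypothetical names.
 */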
/* remove from hardware lists
* completions normally happen asynchronously
*/
static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh;
unsigned long flags;
int rc;
spin_lock_irqsave (&ehci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
/*
* We don't expedite dequeue for isochronous URBs.
* Just wait until they complete normally or their
* time slot expires.
*/
} else {
qh = (struct ehci_qh *) urb->hcpriv;
qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
start_unlink_intr(ehci, qh);
else
start_unlink_async(ehci, qh);
break;
case QH_STATE_COMPLETING:
qh->dequeue_during_giveback = 1;
break;
case QH_STATE_UNLINK:
case QH_STATE_UNLINK_WAIT:
/* already started */
break;
case QH_STATE_IDLE:
/* QH might be waiting for a Clear-TT-Buffer */
qh_completions(ehci, qh);
break;
}
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
return rc;
}
/*-------------------------------------------------------------------------*/
// bulk qh holds the data toggle
static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
unsigned long flags;
struct ehci_qh *qh;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
rescan:
spin_lock_irqsave (&ehci->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw == NULL) {
struct ehci_iso_stream *stream = ep->hcpriv;
if (!list_empty(&stream->td_list))
goto idle_timeout;
/* BUG_ON(!list_empty(&stream->free_list)); */
reserve_release_iso_bandwidth(ehci, stream, -1);
kfree(stream);
goto done;
}
qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
if (list_empty(&qh->qtd_list))
qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
else
WARN_ON(1);
if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
start_unlink_async(ehci, qh);
else
start_unlink_intr(ehci, qh);
fallthrough;
case QH_STATE_COMPLETING: /* already in unlinking */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
idle_timeout:
spin_unlock_irqrestore (&ehci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
if (qh->ps.bw_uperiod)
reserve_release_intr_bandwidth(ehci, qh, -1);
qh_destroy(ehci, qh);
break;
}
fallthrough;
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
done:
ep->hcpriv = NULL;
spin_unlock_irqrestore (&ehci->lock, flags);
}
static void
ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_qh *qh;
int eptype = usb_endpoint_type(&ep->desc);
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
unsigned long flags;
if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
return;
spin_lock_irqsave(&ehci->lock, flags);
qh = ep->hcpriv;
/* For Bulk and Interrupt endpoints we maintain the toggle state
* in the hardware; the toggle bits in udev aren't used at all.
* When an endpoint is reset by usb_clear_halt() we must reset
* the toggle bit in the QH.
*/
if (qh) {
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
} else {
/* The toggle value in the QH can't be updated
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
usb_settoggle(qh->ps.udev, epnum, is_out, 0);
qh->unlink_reason |= QH_UNLINK_REQUESTED;
if (eptype == USB_ENDPOINT_XFER_BULK)
start_unlink_async(ehci, qh);
else
start_unlink_intr(ehci, qh);
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
}
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
}
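/*
 * Worked example (illustrative): frame_index counts microframes, so a
 * raw reading of 0x1234 (4660) shifted right by 3 gives frame 582; with
 * the default 1024-entry periodic list, 582 % 1024 == 582 is the frame
 * number reported to usbcore.
 */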
/*-------------------------------------------------------------------------*/
/* Device addition and removal */
static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
spin_lock_irq(&ehci->lock);
drop_tt(udev);
spin_unlock_irq(&ehci->lock);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* Clear the wakeup signal latched on the Zhaoxin platform when a device
 * is plugged in.
 */
static void ehci_zx_wakeup_clear(struct ehci_hcd *ehci)
{
u32 __iomem *reg = &ehci->regs->port_status[4];
u32 t1 = ehci_readl(ehci, reg);
t1 &= (u32)~0xf0000;
t1 |= PORT_TEST_FORCE;
ehci_writel(ehci, t1, reg);
t1 = ehci_readl(ehci, reg);
msleep(1);
t1 &= (u32)~0xf0000;
ehci_writel(ehci, t1, reg);
ehci_readl(ehci, reg);
msleep(1);
t1 = ehci_readl(ehci, reg);
ehci_writel(ehci, t1 | PORT_CSC, reg);
ehci_readl(ehci, reg);
}
/* suspend/resume, section 4.3 */
/* These routines handle the generic parts of controller suspend/resume */
int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (time_before(jiffies, ehci->next_statechange))
msleep(10);
/*
* Root hub was already suspended. Disable IRQ emission and
* mark HW unaccessible. The PM and USB cores make sure that
* the root hub is either suspended or stopped.
*/
ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
spin_lock_irq(&ehci->lock);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void) ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irq(&ehci->lock);
synchronize_irq(hcd->irq);
/* Check for race with a wakeup request */
if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
ehci_resume(hcd, false);
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL_GPL(ehci_suspend);
/* Returns 0 if power was preserved, 1 if power was lost */
int ehci_resume(struct usb_hcd *hcd, bool force_reset)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (time_before(jiffies, ehci->next_statechange))
msleep(100);
/* Mark hardware accessible again as we are back to full power by now */
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (ehci->shutdown)
return 0; /* Controller is dead */
if (ehci->zx_wakeup_clear_needed)
ehci_zx_wakeup_clear(ehci);
/*
* If CF is still set and reset isn't forced
* then we maintained suspend power.
* Just undo the effect of ehci_suspend().
*/
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
!force_reset) {
int mask = INTR_MASK;
ehci_prepare_ports_for_controller_resume(ehci);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto skip;
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
skip:
spin_unlock_irq(&ehci->lock);
return 0;
}
/*
* Else reset, to cope with power loss or resume from hibernation
* having let the firmware kick in during reboot.
*/
usb_root_hub_lost_power(hcd->self.root_hub);
(void) ehci_halt(ehci);
(void) ehci_reset(ehci);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto skip;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
ehci->rh_state = EHCI_RH_SUSPENDED;
spin_unlock_irq(&ehci->lock);
return 1;
}
EXPORT_SYMBOL_GPL(ehci_resume);
#endif
/*-------------------------------------------------------------------------*/
/*
* Generic structure: This gets copied for platform drivers so that
* individual entries can be overridden as needed.
*/
static const struct hc_driver ehci_hc_driver = {
.description = hcd_name,
.product_desc = "EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.get_resuming_ports = ehci_get_resuming_ports,
/*
* device support
*/
.free_dev = ehci_remove_device,
#ifdef CONFIG_USB_HCD_TEST_MODE
/* EH SINGLE_STEP_SET_FEATURE test support */
.submit_single_step_set_feature = ehci_submit_single_step_set_feature,
#endif
};
void ehci_init_driver(struct hc_driver *drv,
const struct ehci_driver_overrides *over)
{
/* Copy the generic table to drv and then apply the overrides */
*drv = ehci_hc_driver;
if (over) {
drv->hcd_priv_size += over->extra_priv_size;
if (over->reset)
drv->reset = over->reset;
if (over->port_power)
drv->port_power = over->port_power;
}
}
EXPORT_SYMBOL_GPL(ehci_init_driver);
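/*
 * Illustrative sketch (hypothetical bus glue, not from the tree): a
 * platform driver typically uses this the same way the OHCI OMAP glue
 * earlier in this document uses ohci_init_driver():
 *
 *	static struct hc_driver __read_mostly ehci_foo_hc_driver;
 *
 *	static const struct ehci_driver_overrides foo_overrides __initconst = {
 *		.reset		 = foo_reset,	// hypothetical callback
 *		.extra_priv_size = sizeof(struct foo_priv),
 *	};
 *
 *	ehci_init_driver(&ehci_foo_hc_driver, &foo_overrides);
 */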
/*-------------------------------------------------------------------------*/
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
#endif
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
#endif
#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
#endif
#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
#endif
#ifdef CONFIG_SPARC_LEON
#include "ehci-grlib.c"
#endif
static struct platform_driver * const platform_drivers[] = {
#ifdef CONFIG_USB_EHCI_SH
&ehci_hcd_sh_driver,
#endif
#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
&ehci_hcd_ppc_of_driver,
#endif
#ifdef CONFIG_XPS_USB_HCD_XILINX
&ehci_hcd_xilinx_of_driver,
#endif
#ifdef CONFIG_SPARC_LEON
&ehci_grlib_driver,
#endif
};
static int __init ehci_hcd_init(void)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
" before uhci_hcd and ohci_hcd, not after\n");
pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd sitd %zd\n",
hcd_name,
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
#ifdef CONFIG_DYNAMIC_DEBUG
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
#endif
retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
if (retval < 0)
goto clean0;
#ifdef CONFIG_PPC_PS3
retval = ps3_ehci_driver_register(&ps3_ehci_driver);
if (retval < 0)
goto clean1;
#endif
return 0;
#ifdef CONFIG_PPC_PS3
clean1:
#endif
platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
clean0:
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;
}
module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
#ifdef CONFIG_PPC_PS3
ps3_ehci_driver_unregister(&ps3_ehci_driver);
#endif
platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);
| linux-master | drivers/usb/host/ehci-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PS3 OHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <asm/firmware.h>
#include <asm/ps3.h>
static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
ohci->flags |= OHCI_QUIRK_BE_MMIO;
ohci_hcd_init(ohci);
return ohci_init(ohci);
}
static int ps3_ohci_hc_start(struct usb_hcd *hcd)
{
int result;
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
/* Handle root hub init quirk in spider south bridge. */
/* Also set PwrOn2PwrGood to 0x7f (254ms). */
ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM,
&ohci->regs->roothub.a);
ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b);
result = ohci_run(ohci);
if (result < 0) {
dev_err(hcd->self.controller, "can't start %s\n",
hcd->self.bus_name);
ohci_stop(hcd);
}
return result;
}
static const struct hc_driver ps3_ohci_hc_driver = {
.description = hcd_name,
.product_desc = "PS3 OHCI Host Controller",
.hcd_priv_size = sizeof(struct ohci_hcd),
.irq = ohci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
.reset = ps3_ohci_hc_reset,
.start = ps3_ohci_hc_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
.start_port_reset = ohci_start_port_reset,
#if defined(CONFIG_PM)
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
};
static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
unsigned int virq;
static u64 dummy_mask;
if (usb_disabled()) {
result = -ENODEV;
goto fail_start;
}
result = ps3_open_hv_device(dev);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EPERM;
goto fail_open;
}
result = ps3_dma_region_create(dev->d_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
"(%d)\n", __func__, __LINE__, result);
BUG_ON("check region type");
goto fail_dma_region;
}
result = ps3_mmio_region_create(dev->m_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
__func__, __LINE__);
result = -EPERM;
goto fail_mmio_region;
}
dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
__LINE__, dev->m_region->lpar_addr);
result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
__func__, __LINE__, virq);
result = -EPERM;
goto fail_irq;
}
dummy_mask = DMA_BIT_MASK(32);
dev->core.dma_mask = &dummy_mask;
dma_set_coherent_mask(&dev->core, dummy_mask);
hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core));
if (!hcd) {
dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
__LINE__);
result = -ENOMEM;
goto fail_create_hcd;
}
hcd->rsrc_start = dev->m_region->lpar_addr;
hcd->rsrc_len = dev->m_region->len;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
__func__, __LINE__);
hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
if (!hcd->regs) {
dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
__LINE__);
result = -EPERM;
goto fail_ioremap;
}
dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
(unsigned long)hcd->rsrc_start);
dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
(unsigned long)hcd->rsrc_len);
dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
(unsigned long)hcd->regs);
dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
(unsigned long)virq);
ps3_system_bus_set_drvdata(dev, hcd);
result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
__func__, __LINE__, result);
goto fail_add_hcd;
}
device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
iounmap(hcd->regs);
fail_ioremap:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
fail_create_hcd:
ps3_io_irq_destroy(virq);
fail_irq:
ps3_free_mmio_region(dev->m_region);
fail_mmio_region:
ps3_dma_region_free(dev->d_region);
fail_dma_region:
ps3_close_hv_device(dev);
fail_open:
fail_start:
return result;
}
static void ps3_ohci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
BUG_ON(!hcd);
dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
tmp = hcd->irq;
ohci_shutdown(hcd);
usb_remove_hcd(hcd);
ps3_system_bus_set_drvdata(dev, NULL);
BUG_ON(!hcd->regs);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
ps3_io_irq_destroy(tmp);
ps3_free_mmio_region(dev->m_region);
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
}
static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
{
return firmware_has_feature(FW_FEATURE_PS3_LV1)
? ps3_system_bus_driver_register(drv)
: 0;
}
static void ps3_ohci_driver_unregister(struct ps3_system_bus_driver *drv)
{
if (firmware_has_feature(FW_FEATURE_PS3_LV1))
ps3_system_bus_driver_unregister(drv);
}
MODULE_ALIAS(PS3_MODULE_ALIAS_OHCI);
static struct ps3_system_bus_driver ps3_ohci_driver = {
.core.name = "ps3-ohci-driver",
.core.owner = THIS_MODULE,
.match_id = PS3_MATCH_ID_OHCI,
.probe = ps3_ohci_probe,
.remove = ps3_ohci_remove,
.shutdown = ps3_ohci_remove,
};
| linux-master | drivers/usb/host/ohci-ps3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
#include "xhci.h"
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
int state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
return xhci_slot_state_string(state);
}
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
xhci_dbg(xhci, "%pV\n", &vaf);
trace(&vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(xhci_dbg_trace);
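/*
 * Illustrative sketch: callers pass a tracepoint stub so one formatted
 * message feeds both dmesg and ftrace, along the lines of (names as
 * used elsewhere in the xHCI driver; treat the exact message as an
 * example, not a quote):
 *
 *	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 *		       "// Doorbell array at offset 0x%x", off);
 */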
| linux-master | drivers/usb/host/xhci-dbg.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2004 by David Brownell
* Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
* with the "asynchronous" transaction support (control/bulk transfers).
* The only real difference is in how interrupt transfers are scheduled.
*
* For ISO, we make an "iso_stream" head to serve the same role as a QH.
* It keeps track of every ITD (or SITD) that's linked, and holds enough
* pre-calculated schedule data to make appending to the queue be quick.
*/
static int ehci_get_frame(struct usb_hcd *hcd);
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
__hc32 tag)
{
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
return &periodic->fstn->fstn_next;
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
/* case Q_TYPE_SITD: */
default:
return &periodic->sitd->sitd_next;
}
}
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
__hc32 tag)
{
switch (hc32_to_cpu(ehci, tag)) {
/* our ehci_shadow.qh is actually software part */
case Q_TYPE_QH:
return &periodic->qh->hw->hw_next;
/* others are hw parts */
default:
return periodic->hw_next;
}
}
/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(ehci, prev_p,
Q_NEXT_TYPE(ehci, *hw_p));
hw_p = shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr)
return;
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
if (!ehci->use_dummy_qh ||
*shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
!= EHCI_LIST_END(ehci))
*hw_p = *shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
/*-------------------------------------------------------------------------*/
/* Bandwidth and TT management */
/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct ehci_tt *tt, **tt_index, **ptt;
unsigned port;
bool allocated_index = false;
if (!utt)
return NULL; /* Not below a TT */
/*
* Find/create our data structure.
* For hubs with a single TT, we get it directly.
* For hubs with multiple TTs, there's an extra level of pointers.
*/
tt_index = NULL;
if (utt->multi) {
tt_index = utt->hcpriv;
if (!tt_index) { /* Create the index array */
tt_index = kcalloc(utt->hub->maxchild,
sizeof(*tt_index),
GFP_ATOMIC);
if (!tt_index)
return ERR_PTR(-ENOMEM);
utt->hcpriv = tt_index;
allocated_index = true;
}
port = udev->ttport - 1;
ptt = &tt_index[port];
} else {
port = 0;
ptt = (struct ehci_tt **) &utt->hcpriv;
}
tt = *ptt;
if (!tt) { /* Create the ehci_tt */
struct ehci_hcd *ehci =
hcd_to_ehci(bus_to_hcd(udev->bus));
tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
if (!tt) {
if (allocated_index) {
utt->hcpriv = NULL;
kfree(tt_index);
}
return ERR_PTR(-ENOMEM);
}
list_add_tail(&tt->tt_list, &ehci->tt_list);
INIT_LIST_HEAD(&tt->ps_list);
tt->usb_tt = utt;
tt->tt_port = port;
*ptt = tt;
}
return tt;
}
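/*
 * Layout sketch of the lookup above (illustrative): for a multi-TT hub,
 * utt->hcpriv points at an array indexed by downstream port; for a
 * single-TT hub, utt->hcpriv *is* the lone struct ehci_tt pointer:
 *
 *	multi:   utt->hcpriv -> tt_index[port] -> struct ehci_tt
 *	single:  utt->hcpriv -----------------> struct ehci_tt
 */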
/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct ehci_tt *tt, **tt_index, **ptt;
int cnt, i;
if (!utt || !utt->hcpriv)
return; /* Not below a TT, or never allocated */
cnt = 0;
if (utt->multi) {
tt_index = utt->hcpriv;
ptt = &tt_index[udev->ttport - 1];
/* How many entries are left in tt_index? */
for (i = 0; i < utt->hub->maxchild; ++i)
cnt += !!tt_index[i];
} else {
tt_index = NULL;
ptt = (struct ehci_tt **) &utt->hcpriv;
}
tt = *ptt;
if (!tt || !list_empty(&tt->ps_list))
return; /* never allocated, or still in use */
list_del(&tt->tt_list);
*ptt = NULL;
kfree(tt);
if (cnt == 1) {
utt->hcpriv = NULL;
kfree(tt_index);
}
}
static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
struct ehci_per_sched *ps)
{
dev_dbg(&ps->udev->dev,
"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
ps->ep->desc.bEndpointAddress,
(sign >= 0 ? "reserve" : "release"), type,
(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
ps->phase, ps->phase_uf, ps->period,
ps->usecs, ps->c_usecs, ps->cs_mask);
}
static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
struct ehci_qh *qh, int sign)
{
unsigned start_uf;
unsigned i, j, m;
int usecs = qh->ps.usecs;
int c_usecs = qh->ps.c_usecs;
int tt_usecs = qh->ps.tt_usecs;
struct ehci_tt *tt;
if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
return;
start_uf = qh->ps.bw_phase << 3;
bandwidth_dbg(ehci, sign, "intr", &qh->ps);
if (sign < 0) { /* Release bandwidth */
usecs = -usecs;
c_usecs = -c_usecs;
tt_usecs = -tt_usecs;
}
/* Entire transaction (high speed) or start-split (full/low speed) */
for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
i += qh->ps.bw_uperiod)
ehci->bandwidth[i] += usecs;
/* Complete-split (full/low speed) */
if (qh->ps.c_usecs) {
/* NOTE: adjustments needed for FSTN */
for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
i += qh->ps.bw_uperiod) {
for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
if (qh->ps.cs_mask & m)
ehci->bandwidth[i+j] += c_usecs;
}
}
}
/* FS/LS bus bandwidth */
if (tt_usecs) {
/*
* find_tt() will not return any error here as we have
* already called find_tt() before calling this function
* and checked for any error return. The previous call
* would have created the data structure.
*/
tt = find_tt(qh->ps.udev);
if (sign > 0)
list_add_tail(&qh->ps.ps_list, &tt->ps_list);
else
list_del(&qh->ps.ps_list);
for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
i += qh->ps.bw_period)
tt->bandwidth[i] += tt_usecs;
}
}
/*-------------------------------------------------------------------------*/
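/*
 * Build the per-uframe budget table for one TT: deposit each endpoint's
 * tt_usecs at its scheduled phase and let any overflow spill forward
 * into later uframes, each of which can absorb at most 125 us.
 */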
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
struct ehci_tt *tt)
{
struct ehci_per_sched *ps;
unsigned uframe, uf, x;
u8 *budget_line;
if (!tt)
return;
memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
/* Add up the contributions from all the endpoints using this TT */
list_for_each_entry(ps, &tt->ps_list, ps_list) {
for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
uframe += ps->bw_uperiod) {
budget_line = &budget_table[uframe];
x = ps->tt_usecs;
/* propagate the time forward */
for (uf = ps->phase_uf; uf < 8; ++uf) {
x += budget_line[uf];
/* Each microframe lasts 125 us */
if (x <= 125) {
budget_line[uf] = x;
break;
}
budget_line[uf] = 125;
x -= 125;
}
}
}
}
static int __maybe_unused same_tt(struct usb_device *dev1,
struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
return 0;
if (dev1->tt != dev2->tt)
return 0;
if (dev1->tt->multi)
return dev1->ttport == dev2->ttport;
else
return 1;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
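/*
 * Per-uframe budget on the TT's downstream full/low-speed bus, in
 * B-frame terms: uframes 0-5 may be filled completely, uframe 6 only
 * partially, and uframe 7 not at all, so that no full/low-speed
 * transaction runs past the end of its frame.
 */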
/* carry over low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
int i;
for (i = 0; i < 7; i++) {
if (max_tt_usecs[i] < tt_usecs[i]) {
tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
tt_usecs[i] = max_tt_usecs[i];
}
}
}
/*
* Return true if the device's tt's downstream bus is available for a
* periodic transfer of the specified length (usecs), starting at the
* specified frame/uframe. Note that (as summarized in section 11.19
* of the usb 2.0 spec) TTs can buffer multiple transactions for each
* uframe.
*
 * The uframe parameter gives, in "B-frame" terms, when the
 * fullspeed/lowspeed transfer should be executed; this is the same as
 * the highspeed ssplit's uframe (which is in "H-frame" terms). For
 * example, an ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
* See the EHCI spec sec 4.5 and fig 4.7.
*
* This checks if the full/lowspeed bus, at the specified starting uframe,
* has the specified bandwidth available, according to rules listed
* in USB 2.0 spec section 11.18.1 fig 11-60.
*
* This does not check if the transfer would exceed the max ssplit
* limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
* since proper scheduling limits ssplits to less than 16 per uframe.
*/
static int tt_available(
struct ehci_hcd *ehci,
struct ehci_per_sched *ps,
struct ehci_tt *tt,
unsigned frame,
unsigned uframe
)
{
unsigned period = ps->bw_period;
unsigned usecs = ps->tt_usecs;
if ((period == 0) || (uframe >= 7)) /* error */
return 0;
for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
frame += period) {
unsigned i, uf;
unsigned short tt_usecs[8];
if (tt->bandwidth[frame] + usecs > 900)
return 0;
uf = frame << 3;
for (i = 0; i < 8; (++i, ++uf))
tt_usecs[i] = ehci->tt_budget[uf];
if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
/* special case for isoc transfers larger than 125us:
* the first and each subsequent fully used uframe
* must be empty, so as to not illegally delay
* already scheduled transactions
*/
if (usecs > 125) {
int ufs = (usecs / 125);
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
if (tt_usecs[i] > 0)
return 0;
}
tt_usecs[uframe] += usecs;
carryover_tt_bandwidth(tt_usecs);
/* fail if the carryover pushed bw past the last uframe's limit */
if (max_tt_usecs[7] < tt_usecs[7])
return 0;
}
return 1;
}
#else
/* return true iff the device's transaction translator is available
* for a periodic transfer starting at the specified frame, using
* all the uframes in the mask.
*/
static int tt_no_collision(
struct ehci_hcd *ehci,
unsigned period,
struct usb_device *dev,
unsigned frame,
u32 uf_mask
)
{
if (period == 0) /* error */
return 0;
/* note bandwidth wastage: split never follows csplit
* (different dev or endpoint) until the next uframe.
* calling convention doesn't make that distinction.
*/
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
__hc32 type;
struct ehci_qh_hw *hw;
here = ehci->pshadow[frame];
type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
while (here.ptr) {
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
hw->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
type = Q_NEXT_TYPE(ehci, hw->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
if (same_tt(dev, here.sitd->urb->dev)) {
u16 mask;
mask = hc32_to_cpu(ehci, here.sitd
->hw_uframe);
/* FIXME assumes no gap for IN! */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
here = here.sitd->sitd_next;
continue;
/* case Q_TYPE_FSTN: */
default:
ehci_dbg(ehci,
"periodic frame %d bogus type %d\n",
frame, type);
}
/* collision or error */
return 0;
}
}
/* no collision */
return 1;
}
#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
/*-------------------------------------------------------------------------*/
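/*
 * Reference-counted gating of the periodic schedule: the first user
 * (count 0 -> 1) triggers the handshake that starts the schedule and
 * the last user going away triggers the handshake that stops it;
 * ehci_poll_PSS() defers flipping the enable bit until the Periodic
 * Schedule Status bit has settled.
 */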
static void enable_periodic(struct ehci_hcd *ehci)
{
if (ehci->periodic_count++)
goto out;
/* Stop waiting to turn off the periodic schedule */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
/* Don't start the schedule until PSS is 0 */
ehci_poll_PSS(ehci);
out:
turn_on_io_watchdog(ehci);
}
static void disable_periodic(struct ehci_hcd *ehci)
{
if (--ehci->periodic_count)
return;
/* Don't turn off the schedule until PSS is 1 */
ehci_poll_PSS(ehci);
}
/*-------------------------------------------------------------------------*/
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; ehci 0.96+)
*/
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->ps.period;
dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
__hc32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
/* sorting each branch by period (slow-->fast)
* enables sharing interior tree nodes
*/
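		/* e.g. a period-1 qh ends up at the tail of every frame's
		 * branch, so all frames share that single node
		 */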
while (here.ptr && qh != here.qh) {
if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(ehci, qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
qh->unlink_reason = 0;
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
: (qh->ps.usecs * 8);
list_add(&qh->intr_node, &ehci->intr_qh_list);
/* maybe enable periodic schedule processing */
++ehci->intr_count;
enable_periodic(ehci);
}
static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
/*
* If qh is for a low/full-speed device, simply unlinking it
* could interfere with an ongoing split transaction. To unlink
* it safely would require setting the QH_INACTIVATE bit and
* waiting at least one frame, as described in EHCI 4.12.2.5.
*
* We won't bother with any of this. Instead, we assume that the
* only reason for unlinking an interrupt QH while the current URB
* is still active is to dequeue all the URBs (flush the whole
* endpoint queue).
*
* If rebalancing the periodic schedule is ever implemented, this
* approach will no longer be valid.
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->ps.period ? : 1;
for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink(ehci, i, qh);
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
: (qh->ps.usecs * 8);
dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
if (ehci->qh_scan_next == qh)
ehci->qh_scan_next = list_entry(qh->intr_node.next,
struct ehci_qh, intr_node);
list_del(&qh->intr_node);
}
static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
if (qh->qh_state != QH_STATE_LINKED ||
list_empty(&qh->unlink_node))
return;
list_del_init(&qh->unlink_node);
	/*
	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to
	 * avoid an unnecessary CPU wakeup
	 */
}
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* If the QH isn't linked then there's nothing we can do. */
if (qh->qh_state != QH_STATE_LINKED)
return;
/* if the qh is waiting for unlink, cancel it now */
cancel_unlink_wait_intr(ehci, qh);
qh_unlink_periodic(ehci, qh);
/* Make sure the unlinks are visible before starting the timer */
wmb();
/*
* The EHCI spec doesn't say how long it takes the controller to
* stop accessing an unlinked interrupt QH. The timer delay is
* 9 uframes; presumably that will be long enough.
*/
qh->unlink_cycle = ehci->intr_unlink_cycle;
/* New entries go at the end of the intr_unlink list */
list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
if (ehci->intr_unlinking)
; /* Avoid recursive calls */
else if (ehci->rh_state < EHCI_RH_RUNNING)
ehci_handle_intr_unlinks(ehci);
else if (ehci->intr_unlink.next == &qh->unlink_node) {
ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
++ehci->intr_unlink_cycle;
}
}
/*
 * Commonly only one intr URB is scheduled on a qh, and since
 * complete() runs in tasklet context, introduce a small delay to
 * avoid unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
struct ehci_qh *qh)
{
qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
/* New entries go at the end of the intr_unlink_wait list */
list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
if (ehci->rh_state < EHCI_RH_RUNNING)
ehci_handle_start_intr_unlinks(ehci);
else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
++ehci->intr_unlink_wait_cycle;
}
}
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
int rc;
qh->qh_state = QH_STATE_IDLE;
hw->hw_next = EHCI_LIST_END(ehci);
if (!list_empty(&qh->qtd_list))
qh_completions(ehci, qh);
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
if (rc == 0) {
qh_refresh(ehci, qh);
qh_link_periodic(ehci, qh);
}
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
* should happen often ...
*
* FIXME kill the now-dysfunctional queued urbs
*/
else {
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
}
}
/* maybe turn off periodic schedule */
--ehci->intr_count;
disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
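/*
 * Check that "usecs" more microseconds fit into each affected uframe.
 * Example: with uframe_periodic_max = 100, an endpoint needing 20 us
 * passes only while every visited slot has claimed at most 80 us.
 */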
static int check_period(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
unsigned uperiod,
unsigned usecs
) {
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/* convert "usecs we need" to "max already claimed" */
usecs = ehci->uframe_periodic_max - usecs;
for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
uframe += uperiod) {
if (ehci->bandwidth[uframe] > usecs)
return 0;
}
/* success! */
return 1;
}
static int check_intr_schedule(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
struct ehci_qh *qh,
unsigned *c_maskp,
struct ehci_tt *tt
)
{
int retval = -ENOSPC;
u8 mask = 0;
if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
goto done;
if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
unsigned i;
		/* TODO: this may need FSTN for SSPLIT in uframe 5. */
for (i = uframe+2; i < 8 && i <= uframe+4; i++)
if (!check_period(ehci, frame, i,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
retval = 0;
*c_maskp = mask;
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
* We pessimize a bit; probably the typical full speed case
* doesn't need the second CSPLIT.
*
* NOTE: both SPLIT and CSPLIT could be checked in just
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
*c_maskp = mask;
mask |= 1 << uframe;
if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
if (!check_period(ehci, frame, uframe + qh->gap_uf,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
retval = 0;
}
#endif
done:
return retval;
}
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status = 0;
unsigned uframe;
unsigned c_mask;
struct ehci_qh_hw *hw = qh->hw;
struct ehci_tt *tt;
hw->hw_next = EHCI_LIST_END(ehci);
/* reuse the previous schedule slots, if we can */
if (qh->ps.phase != NO_FRAME) {
ehci_dbg(ehci, "reused qh %p schedule\n", qh);
return 0;
}
uframe = 0;
c_mask = 0;
tt = find_tt(qh->ps.udev);
if (IS_ERR(tt)) {
status = PTR_ERR(tt);
goto done;
}
compute_tt_budget(ehci->tt_budget, tt);
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
/* "normal" case, uframing flexible except with splits */
if (qh->ps.bw_period) {
int i;
unsigned frame;
for (i = qh->ps.bw_period; i > 0; --i) {
frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule(ehci,
frame, uframe, qh, &c_mask, tt);
if (status == 0)
goto got_it;
}
}
/* qh->ps.bw_period == 0 means every uframe */
} else {
status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
}
if (status)
goto done;
got_it:
qh->ps.phase = (qh->ps.period ? ehci->random_frame &
(qh->ps.period - 1) : 0);
qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
qh->ps.phase_uf = uframe;
qh->ps.cs_mask = qh->ps.period ?
(c_mask << 8) | (1 << uframe) :
QH_SMASK;
/* reset S-frame and (maybe) C-frame masks */
hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
reserve_release_intr_bandwidth(ehci, qh, 1);
done:
return status;
}
static int intr_submit(
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
) {
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD(&empty);
qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
status = qh_schedule(ehci, qh);
if (status)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON(qh == NULL);
/* stuff into the periodic schedule */
if (qh->qh_state == QH_STATE_IDLE) {
qh_refresh(ehci, qh);
qh_link_periodic(ehci, qh);
} else {
/* cancel unlink wait for the qh */
cancel_unlink_wait_intr(ehci, qh);
}
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
done:
if (unlikely(status))
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
if (status)
qtd_list_free(ehci, urb, qtd_list);
return status;
}
static void scan_intr(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
intr_node) {
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)) {
int temp;
/*
* Unlinks could happen here; completion reporting
* drops the lock. That's why ehci->qh_scan_next
* always holds the next qh to scan; if the next qh
* gets unlinked then ehci->qh_scan_next is adjusted
* in qh_unlink_periodic().
*/
temp = qh_completions(ehci, qh);
if (unlikely(temp))
start_unlink_intr(ehci, qh);
else if (unlikely(list_empty(&qh->qtd_list) &&
qh->qh_state == QH_STATE_LINKED))
start_unlink_intr_wait(ehci, qh);
}
}
}
/*-------------------------------------------------------------------------*/
/* ehci_iso_stream ops work with both ITD and SITD */
static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
struct ehci_iso_stream *stream;
stream = kzalloc(sizeof(*stream), mem_flags);
if (likely(stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
stream->next_uframe = NO_FRAME;
stream->ps.phase = NO_FRAME;
}
return stream;
}
static void
iso_stream_init(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
struct usb_device *dev = urb->dev;
u32 buf1;
unsigned epnum, maxp;
int is_input;
unsigned tmp;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
epnum = usb_pipeendpoint(urb->pipe);
is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
buf1 = is_input ? 1 << 11 : 0;
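	/* iTD buffer page 1 dword: bit 11 is the direction flag (set for
	 * IN); for the high-speed case the max packet size is OR'd into
	 * bits 10:0 below
	 */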
/* knows about ITD vs SITD */
if (dev->speed == USB_SPEED_HIGH) {
unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
stream->highspeed = 1;
buf1 |= maxp;
maxp *= multi;
stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
stream->buf1 = cpu_to_hc32(ehci, buf1);
stream->buf2 = cpu_to_hc32(ehci, multi);
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
stream->ps.usecs = HS_USECS_ISO(maxp);
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
stream->uperiod = urb->interval;
stream->ps.period = urb->interval >> 3;
stream->bandwidth = stream->ps.usecs * 8 /
stream->ps.bw_uperiod;
} else {
u32 addr;
int think_time;
int hs_transfers;
addr = dev->ttport << 24;
if (!ehci_is_TDI(ehci)
|| (dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub))
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt->think_time;
stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max(1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
stream->ps.c_usecs = stream->ps.usecs;
stream->ps.usecs = HS_USECS_ISO(1);
stream->ps.cs_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
tmp = (1 << (hs_transfers + 2)) - 1;
stream->ps.cs_mask |= tmp << (8 + 2);
} else
stream->ps.cs_mask = smask_out[hs_transfers - 1];
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
stream->ps.bw_uperiod = stream->ps.bw_period << 3;
stream->ps.period = urb->interval;
stream->uperiod = urb->interval << 3;
stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
stream->ps.bw_period;
/* stream->splits gets created from cs_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
stream->ps.udev = dev;
stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
stream->maxp = maxp;
}
static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
unsigned epnum;
struct ehci_iso_stream *stream;
struct usb_host_endpoint *ep;
unsigned long flags;
	epnum = usb_pipeendpoint(urb->pipe);
if (usb_pipein(urb->pipe))
ep = urb->dev->ep_in[epnum];
else
ep = urb->dev->ep_out[epnum];
spin_lock_irqsave(&ehci->lock, flags);
stream = ep->hcpriv;
if (unlikely(stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely(stream != NULL)) {
ep->hcpriv = stream;
iso_stream_init(ehci, stream, urb);
}
	/* if dev->ep[epnum] is a QH, hw is set */
} else if (unlikely(stream->hw != NULL)) {
ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
urb->dev->devpath, epnum,
usb_pipein(urb->pipe) ? "in" : "out");
stream = NULL;
}
spin_unlock_irqrestore(&ehci->lock, flags);
return stream;
}
/*-------------------------------------------------------------------------*/
/* ehci_iso_sched ops can be ITD-only or SITD-only */
static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
struct ehci_iso_sched *iso_sched;
iso_sched = kzalloc(struct_size(iso_sched, packet, packets), mem_flags);
if (likely(iso_sched != NULL))
INIT_LIST_HEAD(&iso_sched->td_list);
return iso_sched;
}
static inline void
itd_sched_init(
struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
unsigned i;
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
struct ehci_iso_packet *uframe = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
length = urb->iso_frame_desc[i].length;
buf = dma + urb->iso_frame_desc[i].offset;
trans = EHCI_ISOC_ACTIVE;
trans |= buf & 0x0fff;
if (unlikely(((i + 1) == urb->number_of_packets))
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= EHCI_ITD_IOC;
trans |= length << 16;
uframe->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a uframe */
uframe->bufp = (buf & ~(u64)0x0fff);
buf += length;
if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
uframe->cross = 1;
}
}
static void
iso_sched_free(
struct ehci_iso_stream *stream,
struct ehci_iso_sched *iso_sched
)
{
if (!iso_sched)
return;
/* caller must hold ehci->lock! */
list_splice(&iso_sched->td_list, &stream->free_list);
kfree(iso_sched);
}
static int
itd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
{
struct ehci_itd *itd;
dma_addr_t itd_dma;
int i;
unsigned num_itds;
struct ehci_iso_sched *sched;
unsigned long flags;
sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (unlikely(sched == NULL))
return -ENOMEM;
itd_sched_init(ehci, sched, stream, urb);
if (urb->interval < 8)
num_itds = 1 + (sched->span + 7) / 8;
else
num_itds = urb->number_of_packets;
/* allocate/init ITDs */
spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
/*
* Use iTDs from the free list, but not iTDs that may
* still be in use by the hardware.
*/
if (likely(!list_empty(&stream->free_list))) {
itd = list_first_entry(&stream->free_list,
struct ehci_itd, itd_list);
if (itd->frame == ehci->now_frame)
goto alloc_itd;
list_del(&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
alloc_itd:
spin_unlock_irqrestore(&ehci->lock, flags);
itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
&itd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!itd) {
iso_sched_free(stream, sched);
spin_unlock_irqrestore(&ehci->lock, flags);
return -ENOMEM;
}
}
memset(itd, 0, sizeof(*itd));
itd->itd_dma = itd_dma;
itd->frame = NO_FRAME;
list_add(&itd->itd_list, &sched->td_list);
}
spin_unlock_irqrestore(&ehci->lock, flags);
/* temporarily store schedule info in hcpriv */
urb->hcpriv = sched;
urb->error_count = 0;
return 0;
}
/*-------------------------------------------------------------------------*/
static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
struct ehci_iso_stream *stream, int sign)
{
unsigned uframe;
unsigned i, j;
unsigned s_mask, c_mask, m;
int usecs = stream->ps.usecs;
int c_usecs = stream->ps.c_usecs;
int tt_usecs = stream->ps.tt_usecs;
struct ehci_tt *tt;
if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
return;
uframe = stream->ps.bw_phase << 3;
bandwidth_dbg(ehci, sign, "iso", &stream->ps);
if (sign < 0) { /* Release bandwidth */
usecs = -usecs;
c_usecs = -c_usecs;
tt_usecs = -tt_usecs;
}
if (!stream->splits) { /* High speed */
for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
i += stream->ps.bw_uperiod)
ehci->bandwidth[i] += usecs;
} else { /* Full speed */
s_mask = stream->ps.cs_mask;
c_mask = s_mask >> 8;
/* NOTE: adjustment needed for frame overflow */
for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
i += stream->ps.bw_uperiod) {
for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
(++j, m <<= 1)) {
if (s_mask & m)
ehci->bandwidth[i+j] += usecs;
else if (c_mask & m)
ehci->bandwidth[i+j] += c_usecs;
}
}
/*
* find_tt() will not return any error here as we have
* already called find_tt() before calling this function
* and checked for any error return. The previous call
* would have created the data structure.
*/
tt = find_tt(stream->ps.udev);
if (sign > 0)
list_add_tail(&stream->ps.ps_list, &tt->ps_list);
else
list_del(&stream->ps.ps_list);
for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
i += stream->ps.bw_period)
tt->bandwidth[i] += tt_usecs;
}
}
static inline int
itd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe
)
{
unsigned usecs;
/* convert "usecs we need" to "max already claimed" */
usecs = ehci->uframe_periodic_max - stream->ps.usecs;
for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
uframe += stream->ps.bw_uperiod) {
if (ehci->bandwidth[uframe] > usecs)
return 0;
}
return 1;
}
static inline int
sitd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe,
struct ehci_iso_sched *sched,
struct ehci_tt *tt
)
{
unsigned mask, tmp;
unsigned frame, uf;
mask = stream->ps.cs_mask << (uframe & 7);
/* for OUT, don't wrap SSPLIT into H-microframe 7 */
if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
return 0;
/* for IN, don't wrap CSPLIT into the next frame */
if (mask & ~0xffff)
return 0;
/* check bandwidth */
uframe &= stream->ps.bw_uperiod - 1;
frame = uframe >> 3;
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
/* The tt's fullspeed bus bandwidth must be available.
* tt_available scheduling guarantees 10+% for control/bulk.
*/
uf = uframe & 7;
if (!tt_available(ehci, &stream->ps, tt, frame, uf))
return 0;
#else
/* tt must be idle for start(s), any gap, and csplit.
* assume scheduling slop leaves 10+% for control/bulk.
*/
if (!tt_no_collision(ehci, stream->ps.bw_period,
stream->ps.udev, frame, mask))
return 0;
#endif
do {
unsigned max_used;
unsigned i;
/* check starts (OUT uses more than one) */
uf = uframe;
max_used = ehci->uframe_periodic_max - stream->ps.usecs;
for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
if (ehci->bandwidth[uf] > max_used)
return 0;
}
/* for IN, check CSPLIT */
if (stream->ps.c_usecs) {
max_used = ehci->uframe_periodic_max -
stream->ps.c_usecs;
uf = uframe & ~7;
tmp = 1 << (2+8);
for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
if ((stream->ps.cs_mask & tmp) == 0)
continue;
if (ehci->bandwidth[uf+i] > max_used)
return 0;
}
}
uframe += stream->ps.bw_uperiod;
} while (uframe < EHCI_BANDWIDTH_SIZE);
stream->ps.cs_mask <<= uframe & 7;
stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
return 1;
}
/*
* This scheduler plans almost as far into the future as it has actual
* periodic schedule slots. (Affected by TUNE_FLS, which defaults to
* "as small as possible" to be cache-friendlier.) That limits the size
* transfers you can stream reliably; avoid more than 64 msec per urb.
* Also avoid queue depths of less than ehci's worst irq latency (affected
* by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
* and other factors); or more than about 230 msec total (for portability,
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
static int
iso_stream_schedule(
struct ehci_hcd *ehci,
struct urb *urb,
struct ehci_iso_stream *stream
)
{
u32 now, base, next, start, period, span, now2;
u32 wrap = 0, skip = 0;
int status = 0;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
bool empty = list_empty(&stream->td_list);
bool new_stream = false;
period = stream->uperiod;
span = sched->span;
if (!stream->highspeed)
span <<= 3;
/* Start a new isochronous stream? */
if (unlikely(empty && !hcd_periodic_completion_in_progress(
ehci_to_hcd(ehci), urb->ep))) {
/* Schedule the endpoint */
if (stream->ps.phase == NO_FRAME) {
int done = 0;
struct ehci_tt *tt = find_tt(stream->ps.udev);
if (IS_ERR(tt)) {
status = PTR_ERR(tt);
goto fail;
}
compute_tt_budget(ehci->tt_budget, tt);
start = ((-(++ehci->random_frame)) << 3) & (period - 1);
/* find a uframe slot with enough bandwidth.
* Early uframes are more precious because full-speed
* iso IN transfers can't use late uframes,
* and therefore they should be allocated last.
*/
next = start;
start += period;
do {
start--;
/* check schedule: enough space? */
if (stream->highspeed) {
if (itd_slot_ok(ehci, stream, start))
done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, stream, start,
sched, tt))
done = 1;
}
} while (start > next && !done);
/* no room in the schedule */
if (!done) {
ehci_dbg(ehci, "iso sched full %p", urb);
status = -ENOSPC;
goto fail;
}
stream->ps.phase = (start >> 3) &
(stream->ps.period - 1);
stream->ps.bw_phase = stream->ps.phase &
(stream->ps.bw_period - 1);
stream->ps.phase_uf = start & 7;
reserve_release_iso_bandwidth(ehci, stream, 1);
}
/* New stream is already scheduled; use the upcoming slot */
else {
start = (stream->ps.phase << 3) + stream->ps.phase_uf;
}
stream->next_uframe = start;
new_stream = true;
}
now = ehci_read_frame_index(ehci) & (mod - 1);
/* Take the isochronous scheduling threshold into account */
if (ehci->i_thresh)
next = now + ehci->i_thresh; /* uframe cache */
else
next = (now + 2 + 7) & ~0x07; /* full frame cache */
/* If needed, initialize last_iso_frame so that this URB will be seen */
if (ehci->isoc_count == 0)
ehci->last_iso_frame = now >> 3;
/*
* Use ehci->last_iso_frame as the base. There can't be any
* TDs scheduled for earlier than that.
*/
base = ehci->last_iso_frame << 3;
next = (next - base) & (mod - 1);
start = (stream->next_uframe - base) & (mod - 1);
if (unlikely(new_stream))
goto do_ASAP;
/*
* Typical case: reuse current schedule, stream may still be active.
* Hopefully there are no gaps from the host falling behind
* (irq delays etc). If there are, the behavior depends on
* whether URB_ISO_ASAP is set.
*/
now2 = (now - base) & (mod - 1);
/* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
status = -EFBIG;
goto fail;
}
/* Is the next packet scheduled after the base time? */
if (likely(!empty || start <= now2 + period)) {
/* URB_ISO_ASAP: make sure that start >= next */
if (unlikely(start < next &&
(urb->transfer_flags & URB_ISO_ASAP)))
goto do_ASAP;
/* Otherwise use start, if it's not in the past */
if (likely(start >= now2))
goto use_start;
/* Otherwise we got an underrun while the queue was empty */
} else {
if (urb->transfer_flags & URB_ISO_ASAP)
goto do_ASAP;
wrap = mod;
now2 += mod;
}
/* How many uframes and packets do we need to skip? */
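	/* i.e. round (now2 - start) up to a whole number of periods;
	 * period is a power of 2, so "& -period" does the truncation
	 */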
skip = (now2 - start + period - 1) & -period;
if (skip >= span) { /* Entirely in the past? */
ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
urb, start + base, span - period, now2 + base,
base);
/* Try to keep the last TD intact for scanning later */
skip = span - period;
/* Will it come before the current scan position? */
if (empty) {
skip = span; /* Skip the entire URB */
status = 1; /* and give it back immediately */
iso_sched_free(stream, sched);
sched = NULL;
}
}
urb->error_count = skip / period;
if (sched)
sched->first_packet = urb->error_count;
goto use_start;
do_ASAP:
/* Use the first slot after "next" */
start = next + ((start - next) & (period - 1));
use_start:
/* Tried to schedule too far into the future? */
if (unlikely(start + span - period >= mod + wrap)) {
ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
urb, start, span - period, mod + wrap);
status = -EFBIG;
goto fail;
}
start += base;
stream->next_uframe = (start + skip) & (mod - 1);
/* report high speed start in uframes; full speed, in frames */
urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
return status;
fail:
iso_sched_free(stream, sched);
urb->hcpriv = NULL;
return status;
}
/*-------------------------------------------------------------------------*/
static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
struct ehci_itd *itd)
{
int i;
/* it's been recently zeroed */
itd->hw_next = EHCI_LIST_END(ehci);
itd->hw_bufp[0] = stream->buf0;
itd->hw_bufp[1] = stream->buf1;
itd->hw_bufp[2] = stream->buf2;
for (i = 0; i < 8; i++)
itd->index[i] = -1;
/* All other fields are filled when scheduling */
}
static inline void
itd_patch(
struct ehci_hcd *ehci,
struct ehci_itd *itd,
struct ehci_iso_sched *iso_sched,
unsigned index,
u16 uframe
)
{
struct ehci_iso_packet *uf = &iso_sched->packet[index];
unsigned pg = itd->pg;
/* BUG_ON(pg == 6 && uf->cross); */
uframe &= 0x07;
itd->index[uframe] = index;
itd->hw_transaction[uframe] = uf->transaction;
itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
/* iso_frame_desc[].offset must be strictly increasing */
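	/* if this packet crosses a 4 KB page, advance to the next
	 * buffer-pointer slot and point it at the following page
	 */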
if (unlikely(uf->cross)) {
u64 bufp = uf->bufp + 4096;
itd->pg = ++pg;
itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
}
}
static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
union ehci_shadow *prev = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
union ehci_shadow here = *prev;
__hc32 type = 0;
/* skip any iso nodes which might belong to previous microframes */
while (here.ptr) {
type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
itd->itd_next = here;
itd->hw_next = *hw_p;
prev->itd = itd;
itd->frame = frame;
wmb();
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
struct ehci_iso_stream *stream
)
{
int packet;
unsigned next_uframe, uframe, frame;
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
next_uframe = stream->next_uframe & (mod - 1);
if (unlikely(list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
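	/* first isoc stream going live: certain AMD chipsets need a PLL
	 * power-saving feature disabled while isochronous transfers are
	 * in flight; it is re-enabled when the last stream completes
	 */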
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
for (packet = iso_sched->first_packet, itd = NULL;
packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
/* BUG_ON(list_empty(&iso_sched->td_list)); */
/* ASSERT: no itds for this endpoint in this uframe */
itd = list_entry(iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail(&itd->itd_list, &stream->td_list);
itd->stream = stream;
itd->urb = urb;
itd_init(ehci, stream, itd);
}
uframe = next_uframe & 0x07;
frame = next_uframe >> 3;
itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->uperiod;
next_uframe &= mod - 1;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
itd = NULL;
}
}
stream->next_uframe = next_uframe;
/* don't need that schedule data any more */
iso_sched_free(stream, iso_sched);
urb->hcpriv = stream;
++ehci->isoc_count;
enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* Process and recycle a completed ITD. Return true iff its urb completed,
* and hence its completion callback probably added things to the hardware
* schedule.
*
* Note that we carefully avoid recycling this descriptor until after any
* completion callback runs, so that it won't be reused quickly. That is,
* assuming (a) no more than two urbs per frame on this endpoint, and also
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
unsigned uframe;
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
if (likely(itd->index[uframe] == -1))
continue;
urb_index = itd->index[uframe];
desc = &urb->iso_frame_desc[urb_index];
t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
itd->hw_transaction[uframe] = 0;
/* report transfer status */
if (unlikely(t & ISO_ERRS)) {
urb->error_count++;
if (t & EHCI_ISOC_BUF_ERR)
desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & EHCI_ISOC_BABBLE)
desc->status = -EOVERFLOW;
else /* (t & EHCI_ISOC_XACTERR) */
desc->status = -EPROTO;
/* HC need not update length with this error */
if (!(t & EHCI_ISOC_BABBLE)) {
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
} else {
/* URB was too late */
urb->error_count++;
}
}
/* handle completion now? */
if (likely((urb_index + 1) != urb->number_of_packets))
goto done;
/*
* ASSERT: it's really the last itd for this urb
* list_for_each_entry (itd, &stream->td_list, itd_list)
* BUG_ON(itd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
--ehci->isoc_count;
disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
}
if (unlikely(list_is_singular(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
done:
itd->urb = NULL;
/* Add to the end of the free list for later reuse */
list_move_tail(&itd->itd_list, &stream->free_list);
/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
if (list_empty(&stream->td_list)) {
list_splice_tail_init(&stream->free_list,
&ehci->cached_itd_list);
start_free_itds(ehci);
}
return retval;
}
/*-------------------------------------------------------------------------*/
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
struct ehci_iso_stream *stream;
/* Get iso_stream head */
stream = iso_stream_find(ehci, urb);
if (unlikely(stream == NULL)) {
ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->uperiod, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
ehci_dbg(ehci,
"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length,
urb->number_of_packets, urb->interval,
stream);
#endif
/* allocate ITDs w/o locking anything */
status = itd_urb_transaction(stream, ehci, urb, mem_flags);
if (unlikely(status < 0)) {
ehci_dbg(ehci, "can't init itds\n");
goto done;
}
/* schedule ... need to lock */
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
/*-------------------------------------------------------------------------*/
/*
* "Split ISO TDs" ... used for USB 1.1 devices going through the
* TTs in USB 2.0 hubs. These need microframe scheduling.
*/
static inline void
sitd_sched_init(
struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
unsigned i;
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
struct ehci_iso_packet *packet = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
length = urb->iso_frame_desc[i].length & 0x03ff;
buf = dma + urb->iso_frame_desc[i].offset;
trans = SITD_STS_ACTIVE;
if (((i + 1) == urb->number_of_packets)
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= SITD_IOC;
trans |= length << 16;
packet->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a td */
packet->bufp = buf;
packet->buf1 = (buf + length) & ~0x0fff;
if (packet->buf1 != (buf & ~(u64)0x0fff))
packet->cross = 1;
/* OUT uses multiple start-splits */
if (stream->bEndpointAddress & USB_DIR_IN)
continue;
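		/* each 188-byte chunk of an OUT packet needs its own
		 * start-split: bits 2:0 of buf1 carry the T-count, and
		 * setting bit 3 switches the TP field from ALL to BEGIN
		 * when more than one ssplit is needed
		 */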
length = (length + 187) / 188;
if (length > 1) /* BEGIN vs ALL */
length |= 1 << 3;
packet->buf1 |= length;
}
}
static int
sitd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
{
struct ehci_sitd *sitd;
dma_addr_t sitd_dma;
int i;
struct ehci_iso_sched *iso_sched;
unsigned long flags;
iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (iso_sched == NULL)
return -ENOMEM;
sitd_sched_init(ehci, iso_sched, stream, urb);
/* allocate/init sITDs */
spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < urb->number_of_packets; i++) {
/* NOTE: for now, we don't try to handle wraparound cases
* for IN (using sitd->hw_backpointer, like a FSTN), which
* means we never need two sitds for full speed packets.
*/
/*
* Use siTDs from the free list, but not siTDs that may
* still be in use by the hardware.
*/
if (likely(!list_empty(&stream->free_list))) {
sitd = list_first_entry(&stream->free_list,
struct ehci_sitd, sitd_list);
if (sitd->frame == ehci->now_frame)
goto alloc_sitd;
list_del(&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
alloc_sitd:
spin_unlock_irqrestore(&ehci->lock, flags);
sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
&sitd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!sitd) {
iso_sched_free(stream, iso_sched);
spin_unlock_irqrestore(&ehci->lock, flags);
return -ENOMEM;
}
}
memset(sitd, 0, sizeof(*sitd));
sitd->sitd_dma = sitd_dma;
sitd->frame = NO_FRAME;
list_add(&sitd->sitd_list, &iso_sched->td_list);
}
/* temporarily store schedule info in hcpriv */
urb->hcpriv = iso_sched;
urb->error_count = 0;
spin_unlock_irqrestore(&ehci->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static inline void
sitd_patch(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct ehci_sitd *sitd,
struct ehci_iso_sched *iso_sched,
unsigned index
)
{
struct ehci_iso_packet *uf = &iso_sched->packet[index];
u64 bufp;
sitd->hw_next = EHCI_LIST_END(ehci);
sitd->hw_fullspeed_ep = stream->address;
sitd->hw_uframe = stream->splits;
sitd->hw_results = uf->transaction;
sitd->hw_backpointer = EHCI_LIST_END(ehci);
bufp = uf->bufp;
sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
if (uf->cross)
bufp += 4096;
sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
sitd->index = index;
}
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
sitd->sitd_next = ehci->pshadow[frame];
sitd->hw_next = ehci->periodic[frame];
ehci->pshadow[frame].sitd = sitd;
sitd->frame = frame;
wmb();
ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
struct ehci_iso_stream *stream
)
{
int packet;
unsigned next_uframe;
struct ehci_iso_sched *sched = urb->hcpriv;
struct ehci_sitd *sitd;
next_uframe = stream->next_uframe;
if (list_empty(&stream->td_list))
/* usbfs ignores TT bandwidth */
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
for (packet = sched->first_packet, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
/* ASSERT: we have all necessary sitds */
BUG_ON(list_empty(&sched->td_list));
/* ASSERT: no itds for this endpoint in this frame */
sitd = list_entry(sched->td_list.next,
struct ehci_sitd, sitd_list);
list_move_tail(&sitd->sitd_list, &stream->td_list);
sitd->stream = stream;
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
next_uframe += stream->uperiod;
}
stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free(stream, sched);
urb->hcpriv = stream;
++ehci->isoc_count;
enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
| SITD_STS_XACT | SITD_STS_MMF)
/* Process and recycle a completed SITD. Return true iff its urb completed,
* and hence its completion callback probably added things to the hardware
* schedule.
*
* Note that we carefully avoid recycling this descriptor until after any
* completion callback runs, so that it won't be reused quickly. That is,
* assuming (a) no more than two urbs per frame on this endpoint, and also
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
int urb_index;
struct ehci_iso_stream *stream = sitd->stream;
bool retval = false;
urb_index = sitd->index;
desc = &urb->iso_frame_desc[urb_index];
t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
if (unlikely(t & SITD_ERRS)) {
urb->error_count++;
if (t & SITD_STS_DBE)
desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & SITD_STS_BABBLE)
desc->status = -EOVERFLOW;
else /* XACT, MMF, etc */
desc->status = -EPROTO;
} else if (unlikely(t & SITD_STS_ACTIVE)) {
/* URB was too late */
urb->error_count++;
} else {
desc->status = 0;
desc->actual_length = desc->length - SITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
/* handle completion now? */
if ((urb_index + 1) != urb->number_of_packets)
goto done;
/*
* ASSERT: it's really the last sitd for this urb
* list_for_each_entry (sitd, &stream->td_list, sitd_list)
* BUG_ON(sitd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
--ehci->isoc_count;
disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
}
if (list_is_singular(&stream->td_list))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
done:
sitd->urb = NULL;
/* Add to the end of the free list for later reuse */
list_move_tail(&sitd->sitd_list, &stream->free_list);
/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
if (list_empty(&stream->td_list)) {
list_splice_tail_init(&stream->free_list,
&ehci->cached_sitd_list);
start_free_itds(ehci);
}
return retval;
}
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
struct ehci_iso_stream *stream;
/* Get iso_stream head */
stream = iso_stream_find(ehci, urb);
if (stream == NULL) {
ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (urb->interval != stream->ps.period) {
ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->ps.period, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
ehci_dbg(ehci,
"submit %p dev%s ep%d%s-iso len %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length);
#endif
/* allocate SITDs */
status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
if (status < 0) {
ehci_dbg(ehci, "can't init sitds\n");
goto done;
}
/* schedule ... need to lock */
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
/*-------------------------------------------------------------------------*/
static void scan_isoc(struct ehci_hcd *ehci)
{
unsigned uf, now_frame, frame;
unsigned fmask = ehci->periodic_size - 1;
bool modified, live;
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
/*
	 * When running, scan from the last scan point up to "now";
	 * otherwise clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
if (ehci->rh_state >= EHCI_RH_RUNNING) {
uf = ehci_read_frame_index(ehci);
now_frame = (uf >> 3) & fmask;
live = true;
} else {
now_frame = (ehci->last_iso_frame - 1) & fmask;
live = false;
}
ehci->now_frame = now_frame;
frame = ehci->last_iso_frame;
restart:
/* Scan each element in frame's queue for completions */
q_p = &ehci->pshadow[frame];
hw_p = &ehci->periodic[frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(ehci, *hw_p);
modified = false;
while (q.ptr != NULL) {
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
/*
* If this ITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
if (frame == now_frame && live) {
rmb();
for (uf = 0; uf < 8; uf++) {
if (q.itd->hw_transaction[uf] &
ITD_ACTIVE(ehci))
break;
}
if (uf < 8) {
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
q.itd->hw_next);
q = *q_p;
break;
}
}
/*
* Take finished ITDs out of the schedule
* and process them: recycle, maybe report
* URB completion. HC won't cache the
* pointer for much longer, if at all.
*/
*q_p = q.itd->itd_next;
if (!ehci->use_dummy_qh ||
q.itd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.itd->hw_next;
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete(ehci, q.itd);
q = *q_p;
break;
case Q_TYPE_SITD:
/*
* If this SITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
if (((frame == now_frame) ||
(((frame + 1) & fmask) == now_frame))
&& live
&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
q = *q_p;
break;
}
/*
* Take finished SITDs out of the schedule
* and process them: recycle, maybe report
* URB completion.
*/
*q_p = q.sitd->sitd_next;
if (!ehci->use_dummy_qh ||
q.sitd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.sitd->hw_next;
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete(ehci, q.sitd);
q = *q_p;
break;
default:
ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
/* BUG(); */
fallthrough;
case Q_TYPE_QH:
case Q_TYPE_FSTN:
/* End of the iTDs and siTDs */
q.ptr = NULL;
break;
}
/* Assume completion callbacks modify the queue */
if (unlikely(modified && ehci->isoc_count > 0))
goto restart;
}
/* Stop when we have reached the current frame */
if (frame == now_frame)
return;
/* The last frame may still have active siTDs */
ehci->last_iso_frame = frame;
frame = (frame + 1) & fmask;
goto restart;
}
| linux-master | drivers/usb/host/ehci-sched.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007 by Alan Stern
*/
/* this file is part of ehci-hcd.c */
/* Display the ports dedicated to the companion controller */
static ssize_t companion_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ehci_hcd *ehci;
int nports, index, n;
int count = PAGE_SIZE;
char *ptr = buf;
ehci = hcd_to_ehci(dev_get_drvdata(dev));
nports = HCS_N_PORTS(ehci->hcs_params);
for (index = 0; index < nports; ++index) {
if (test_bit(index, &ehci->companion_ports)) {
n = scnprintf(ptr, count, "%d\n", index + 1);
ptr += n;
count -= n;
}
}
return ptr - buf;
}
/*
* Dedicate or undedicate a port to the companion controller.
* Syntax is "[-]portnum", where a leading '-' sign means
* return control of the port to the EHCI controller.
*/
static ssize_t companion_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ehci_hcd *ehci;
int portnum, new_owner;
ehci = hcd_to_ehci(dev_get_drvdata(dev));
new_owner = PORT_OWNER; /* Owned by companion */
if (sscanf(buf, "%d", &portnum) != 1)
return -EINVAL;
if (portnum < 0) {
		portnum = -portnum;
new_owner = 0; /* Owned by EHCI */
}
if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
return -ENOENT;
portnum--;
if (new_owner)
set_bit(portnum, &ehci->companion_ports);
else
clear_bit(portnum, &ehci->companion_ports);
set_owner(ehci, portnum, new_owner);
return count;
}
static DEVICE_ATTR_RW(companion);
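/*
* Hypothetical usage sketch (sysfs path abbreviated, for illustration
* only): echo 2 > .../companion dedicates port 2 to the companion HC,
* and echo -2 > .../companion returns port 2 to the EHCI controller.
*/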
/*
* Display / Set uframe_periodic_max
*/
static ssize_t uframe_periodic_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ehci_hcd *ehci;
int n;
ehci = hcd_to_ehci(dev_get_drvdata(dev));
n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
return n;
}
static ssize_t uframe_periodic_max_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ehci_hcd *ehci;
unsigned uframe_periodic_max;
unsigned uframe;
unsigned long flags;
ssize_t ret;
ehci = hcd_to_ehci(dev_get_drvdata(dev));
if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
return -EINVAL;
if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
ehci_info(ehci, "rejecting invalid request for "
"uframe_periodic_max=%u\n", uframe_periodic_max);
return -EINVAL;
}
ret = -EINVAL;
/*
* lock, so that our checking does not race with possible periodic
* bandwidth allocation through submitting new urbs.
*/
spin_lock_irqsave (&ehci->lock, flags);
/*
* for a request to decrease the max periodic bandwidth, we have to
* check whether the decrease is possible.
*/
if (uframe_periodic_max < ehci->uframe_periodic_max) {
u8 allocated_max = 0;
for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
allocated_max = max(allocated_max,
ehci->bandwidth[uframe]);
if (allocated_max > uframe_periodic_max) {
ehci_info(ehci,
"cannot decrease uframe_periodic_max because "
"periodic bandwidth is already allocated "
"(%u > %u)\n",
allocated_max, uframe_periodic_max);
goto out_unlock;
}
}
/* increasing is always ok */
ehci_info(ehci, "setting max periodic bandwidth to %u%% "
"(== %u usec/uframe)\n",
100*uframe_periodic_max/125, uframe_periodic_max);
if (uframe_periodic_max != 100)
ehci_warn(ehci, "max periodic bandwidth set is non-standard\n");
ehci->uframe_periodic_max = uframe_periodic_max;
ret = count;
out_unlock:
spin_unlock_irqrestore (&ehci->lock, flags);
return ret;
}
static DEVICE_ATTR_RW(uframe_periodic_max);
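/*
* Note: the default value of 100 corresponds to the EHCI spec's 80%
* periodic limit (100 of the 125 microseconds in each uframe).
*/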
static inline int create_sysfs_files(struct ehci_hcd *ehci)
{
struct device *controller = ehci_to_hcd(ehci)->self.controller;
int i = 0;
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
i = device_create_file(controller, &dev_attr_companion);
if (i)
goto out;
i = device_create_file(controller, &dev_attr_uframe_periodic_max);
out:
return i;
}
static inline void remove_sysfs_files(struct ehci_hcd *ehci)
{
struct device *controller = ehci_to_hcd(ehci)->self.controller;
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
device_remove_file(controller, &dev_attr_companion);
device_remove_file(controller, &dev_attr_uframe_periodic_max);
}
| linux-master | drivers/usb/host/ehci-sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
*
* GRUSBHC is typically found on LEON/GRLIB SoCs
*
* (c) Jan Andersson <[email protected]>
*
* Based on ehci-ppc-of.c which is:
* (c) Valentine Barshak <[email protected]>
* and in turn based on "ehci-ppc-soc.c" by Stefan Roese <[email protected]>
* and "ohci-ppc-of.c" by Sylvain Munaut <[email protected]>
*/
#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#define GRUSBHC_HCIVERSION 0x0100 /* Known value of cap. reg. HCIVERSION */
static const struct hc_driver ehci_grlib_hc_driver = {
.description = hcd_name,
.product_desc = "GRLIB GRUSBHC EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ehci_hcd_grlib_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ehci_hcd *ehci = NULL;
struct resource res;
u32 hc_capbase;
int irq;
int rv;
if (usb_disabled())
return -ENODEV;
dev_dbg(&op->dev, "initializing GRUSBHC EHCI USB Controller\n");
rv = of_address_to_resource(dn, 0, &res);
if (rv)
return rv;
/* usb_create_hcd requires dma_mask != NULL */
op->dev.dma_mask = &op->dev.coherent_dma_mask;
hcd = usb_create_hcd(&ehci_grlib_hc_driver, &op->dev,
"GRUSBHC EHCI USB");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
__FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = devm_ioremap_resource(&op->dev, &res);
if (IS_ERR(hcd->regs)) {
rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
/* determine endianness of this implementation */
hc_capbase = ehci_readl(ehci, &ehci->caps->hc_capbase);
if (HC_VERSION(ehci, hc_capbase) != GRUSBHC_HCIVERSION) {
ehci->big_endian_mmio = 1;
ehci->big_endian_desc = 1;
ehci->big_endian_capbase = 1;
}
rv = usb_add_hcd(hcd, irq, 0);
if (rv)
goto err_ioremap;
device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
usb_put_hcd(hcd);
return rv;
}
static void ehci_hcd_grlib_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n");
usb_remove_hcd(hcd);
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
}
static const struct of_device_id ehci_hcd_grlib_of_match[] = {
{
.name = "GAISLER_EHCI",
},
{
.name = "01_026",
},
{},
};
MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
static struct platform_driver ehci_grlib_driver = {
.probe = ehci_hcd_grlib_probe,
.remove_new = ehci_hcd_grlib_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "grlib-ehci",
.of_match_table = ehci_hcd_grlib_of_match,
},
};
| linux-master | drivers/usb/host/ehci-grlib.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
* Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "fhci.h"
static void init_td(struct td *td)
{
memset(td, 0, sizeof(*td));
INIT_LIST_HEAD(&td->node);
INIT_LIST_HEAD(&td->frame_lh);
}
static void init_ed(struct ed *ed)
{
memset(ed, 0, sizeof(*ed));
INIT_LIST_HEAD(&ed->td_list);
INIT_LIST_HEAD(&ed->node);
}
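/*
* TDs and EDs are recycled through per-controller free lists, so most
* allocations in atomic context avoid calling kmalloc(GFP_ATOMIC).
*/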
static struct td *get_empty_td(struct fhci_hcd *fhci)
{
struct td *td;
if (!list_empty(&fhci->empty_tds)) {
td = list_entry(fhci->empty_tds.next, struct td, node);
list_del(fhci->empty_tds.next);
} else {
td = kmalloc(sizeof(*td), GFP_ATOMIC);
if (!td)
fhci_err(fhci, "No memory to allocate to TD\n");
else
init_td(td);
}
return td;
}
void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td)
{
init_td(td);
list_add(&td->node, &fhci->empty_tds);
}
struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci)
{
struct ed *ed;
if (!list_empty(&fhci->empty_eds)) {
ed = list_entry(fhci->empty_eds.next, struct ed, node);
list_del(fhci->empty_eds.next);
} else {
ed = kmalloc(sizeof(*ed), GFP_ATOMIC);
if (!ed)
fhci_err(fhci, "No memory to allocate to ED\n");
else
init_ed(ed);
}
return ed;
}
void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed)
{
init_ed(ed);
list_add(&ed->node, &fhci->empty_eds);
}
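/*
* fill a free TD with the transfer parameters and attach it to the
* URB's private TD array
*/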
struct td *fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb,
struct urb_priv *urb_priv, struct ed *ed, u16 index,
enum fhci_ta_type type, int toggle, u8 *data, u32 len,
u16 interval, u16 start_frame, bool ioc)
{
struct td *td = get_empty_td(fhci);
if (!td)
return NULL;
td->urb = urb;
td->ed = ed;
td->type = type;
td->toggle = toggle;
td->data = data;
td->len = len;
td->iso_index = index;
td->interval = interval;
td->start_frame = start_frame;
td->ioc = ioc;
td->status = USB_TD_OK;
urb_priv->tds[index] = td;
return td;
}
| linux-master | drivers/usb/host/fhci-mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MediaTek xHCI Host Controller Driver
*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Chunfeng Yun <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include "xhci.h"
#include "xhci-mtk.h"
/* ip_pw_ctrl0 register */
#define CTRL0_IP_SW_RST BIT(0)
/* ip_pw_ctrl1 register */
#define CTRL1_IP_HOST_PDN BIT(0)
/* ip_pw_ctrl2 register */
#define CTRL2_IP_DEV_PDN BIT(0)
/* ip_pw_sts1 register */
#define STS1_IP_SLEEP_STS BIT(30)
#define STS1_U3_MAC_RST BIT(16)
#define STS1_XHCI_RST BIT(11)
#define STS1_SYS125_RST BIT(10)
#define STS1_REF_RST BIT(8)
#define STS1_SYSPLL_STABLE BIT(0)
/* ip_xhci_cap register */
#define CAP_U3_PORT_NUM(p) ((p) & 0xff)
#define CAP_U2_PORT_NUM(p) (((p) >> 8) & 0xff)
/* u3_ctrl_p register */
#define CTRL_U3_PORT_HOST_SEL BIT(2)
#define CTRL_U3_PORT_PDN BIT(1)
#define CTRL_U3_PORT_DIS BIT(0)
/* u2_ctrl_p register */
#define CTRL_U2_PORT_HOST_SEL BIT(2)
#define CTRL_U2_PORT_PDN BIT(1)
#define CTRL_U2_PORT_DIS BIT(0)
/* u2_phy_pll register */
#define CTRL_U2_FORCE_PLL_STB BIT(28)
/* xHCI CSR */
#define LS_EOF_CFG 0x930
#define LSEOF_OFFSET 0x89
#define FS_EOF_CFG 0x934
#define FSEOF_OFFSET 0x2e
#define SS_GEN1_EOF_CFG 0x93c
#define SSG1EOF_OFFSET 0x78
#define HFCNTR_CFG 0x944
#define ITP_DELTA_CLK (0xa << 1)
#define ITP_DELTA_CLK_MASK GENMASK(5, 1)
#define FRMCNT_LEV1_RANG (0x12b << 8)
#define FRMCNT_LEV1_RANG_MASK GENMASK(19, 8)
#define SS_GEN2_EOF_CFG 0x990
#define SSG2EOF_OFFSET 0x3c
#define XSEOF_OFFSET_MASK GENMASK(11, 0)
/* usb remote wakeup registers in syscon */
/* mt8173 etc */
#define PERI_WK_CTRL1 0x4
#define WC1_IS_C(x) (((x) & 0xf) << 26) /* cycle debounce */
#define WC1_IS_EN BIT(25)
#define WC1_IS_P BIT(6) /* polarity for ip sleep */
/* mt8183 */
#define PERI_WK_CTRL0 0x0
#define WC0_IS_C(x) ((u32)(((x) & 0xf) << 28)) /* cycle debounce */
#define WC0_IS_P BIT(12) /* polarity */
#define WC0_IS_EN BIT(6)
/* mt8192 */
#define WC0_SSUSB0_CDEN BIT(6)
#define WC0_IS_SPM_EN BIT(1)
/* mt8195 */
#define PERI_WK_CTRL0_8195 0x04
#define WC0_IS_P_95 BIT(30) /* polarity */
#define WC0_IS_C_95(x) ((u32)(((x) & 0x7) << 27))
#define WC0_IS_EN_P3_95 BIT(26)
#define WC0_IS_EN_P2_95 BIT(25)
#define WC0_IS_EN_P1_95 BIT(24)
#define PERI_WK_CTRL1_8195 0x20
#define WC1_IS_C_95(x) ((u32)(((x) & 0xf) << 28))
#define WC1_IS_P_95 BIT(12)
#define WC1_IS_EN_P0_95 BIT(6)
/* mt2712 etc */
#define PERI_SSUSB_SPM_CTRL 0x0
#define SSC_IP_SLEEP_EN BIT(4)
#define SSC_SPM_INT_EN BIT(1)
enum ssusb_uwk_vers {
SSUSB_UWK_V1 = 1,
SSUSB_UWK_V2,
SSUSB_UWK_V1_1 = 101, /* specific revision 1.01 */
SSUSB_UWK_V1_2, /* specific revision 1.2 */
SSUSB_UWK_V1_3, /* mt8195 IP0 */
SSUSB_UWK_V1_4, /* mt8195 IP1 */
SSUSB_UWK_V1_5, /* mt8195 IP2 */
SSUSB_UWK_V1_6, /* mt8195 IP3 */
};
/*
* MT8195 has 4 controllers; the default SOF/ITP interval of controllers
* 1~3 is calculated from a 24M frame counter clock, but the clock actually
* runs at 48M, so apply a workaround for it.
*/
static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
{
struct device *dev = mtk->dev;
struct usb_hcd *hcd = mtk->hcd;
u32 value;
if (!of_device_is_compatible(dev->of_node, "mediatek,mt8195-xhci"))
return;
value = readl(hcd->regs + HFCNTR_CFG);
value &= ~(ITP_DELTA_CLK_MASK | FRMCNT_LEV1_RANG_MASK);
value |= (ITP_DELTA_CLK | FRMCNT_LEV1_RANG);
writel(value, hcd->regs + HFCNTR_CFG);
value = readl(hcd->regs + LS_EOF_CFG);
value &= ~XSEOF_OFFSET_MASK;
value |= LSEOF_OFFSET;
writel(value, hcd->regs + LS_EOF_CFG);
value = readl(hcd->regs + FS_EOF_CFG);
value &= ~XSEOF_OFFSET_MASK;
value |= FSEOF_OFFSET;
writel(value, hcd->regs + FS_EOF_CFG);
value = readl(hcd->regs + SS_GEN1_EOF_CFG);
value &= ~XSEOF_OFFSET_MASK;
value |= SSG1EOF_OFFSET;
writel(value, hcd->regs + SS_GEN1_EOF_CFG);
value = readl(hcd->regs + SS_GEN2_EOF_CFG);
value &= ~XSEOF_OFFSET_MASK;
value |= SSG2EOF_OFFSET;
writel(value, hcd->regs + SS_GEN2_EOF_CFG);
}
static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value, check_val;
int u3_ports_disabled = 0;
int ret;
int i;
if (!mtk->has_ippc)
return 0;
/* power on host ip */
value = readl(&ippc->ip_pw_ctr1);
value &= ~CTRL1_IP_HOST_PDN;
writel(value, &ippc->ip_pw_ctr1);
/* power on and enable u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
if ((0x1 << i) & mtk->u3p_dis_msk) {
u3_ports_disabled++;
continue;
}
value = readl(&ippc->u3_ctrl_p[i]);
value &= ~(CTRL_U3_PORT_PDN | CTRL_U3_PORT_DIS);
value |= CTRL_U3_PORT_HOST_SEL;
writel(value, &ippc->u3_ctrl_p[i]);
}
/* power on and enable all u2 ports except skipped ones */
for (i = 0; i < mtk->num_u2_ports; i++) {
if (BIT(i) & mtk->u2p_dis_msk)
continue;
value = readl(&ippc->u2_ctrl_p[i]);
value &= ~(CTRL_U2_PORT_PDN | CTRL_U2_PORT_DIS);
value |= CTRL_U2_PORT_HOST_SEL;
writel(value, &ippc->u2_ctrl_p[i]);
}
/*
* wait for clocks to be stable, and clock domains reset to
* be inactive after power on and enable ports
*/
check_val = STS1_SYSPLL_STABLE | STS1_REF_RST |
STS1_SYS125_RST | STS1_XHCI_RST;
if (mtk->num_u3_ports > u3_ports_disabled)
check_val |= STS1_U3_MAC_RST;
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(check_val == (value & check_val)), 100, 20000);
if (ret) {
dev_err(mtk->dev, "clocks are not stable (0x%x)\n", value);
return ret;
}
return 0;
}
static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value;
int ret;
int i;
if (!mtk->has_ippc)
return 0;
/* power down u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
if ((0x1 << i) & mtk->u3p_dis_msk)
continue;
value = readl(&ippc->u3_ctrl_p[i]);
value |= CTRL_U3_PORT_PDN;
writel(value, &ippc->u3_ctrl_p[i]);
}
/* power down all u2 ports except skipped ones */
for (i = 0; i < mtk->num_u2_ports; i++) {
if (BIT(i) & mtk->u2p_dis_msk)
continue;
value = readl(&ippc->u2_ctrl_p[i]);
value |= CTRL_U2_PORT_PDN;
writel(value, &ippc->u2_ctrl_p[i]);
}
/* power down host ip */
value = readl(&ippc->ip_pw_ctr1);
value |= CTRL1_IP_HOST_PDN;
writel(value, &ippc->ip_pw_ctr1);
/* wait for host ip to sleep */
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(value & STS1_IP_SLEEP_STS), 100, 100000);
if (ret)
dev_err(mtk->dev, "ip sleep failed!!!\n");
else /* workaround for platforms using a low-level latch */
usleep_range(100, 200);
return ret;
}
static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value;
if (!mtk->has_ippc)
return 0;
/* reset whole ip */
value = readl(&ippc->ip_pw_ctr0);
value |= CTRL0_IP_SW_RST;
writel(value, &ippc->ip_pw_ctr0);
udelay(1);
value = readl(&ippc->ip_pw_ctr0);
value &= ~CTRL0_IP_SW_RST;
writel(value, &ippc->ip_pw_ctr0);
/*
* the device IP is powered on by default; power it down here,
* otherwise ip-sleep will fail
*/
value = readl(&ippc->ip_pw_ctr2);
value |= CTRL2_IP_DEV_PDN;
writel(value, &ippc->ip_pw_ctr2);
value = readl(&ippc->ip_xhci_cap);
mtk->num_u3_ports = CAP_U3_PORT_NUM(value);
mtk->num_u2_ports = CAP_U2_PORT_NUM(value);
dev_dbg(mtk->dev, "%s u2p:%d, u3p:%d\n", __func__,
mtk->num_u2_ports, mtk->num_u3_ports);
return xhci_mtk_host_enable(mtk);
}
/* only clocks can be turned off in ip-sleep wakeup mode */
static void usb_wakeup_ip_sleep_set(struct xhci_hcd_mtk *mtk, bool enable)
{
u32 reg, msk, val;
switch (mtk->uwk_vers) {
case SSUSB_UWK_V1:
reg = mtk->uwk_reg_base + PERI_WK_CTRL1;
msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;
break;
case SSUSB_UWK_V1_1:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_IS_EN | WC0_IS_C(0xf) | WC0_IS_P;
val = enable ? (WC0_IS_EN | WC0_IS_C(0x1)) : 0;
break;
case SSUSB_UWK_V1_2:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_SSUSB0_CDEN | WC0_IS_SPM_EN;
val = enable ? msk : 0;
break;
case SSUSB_UWK_V1_3:
reg = mtk->uwk_reg_base + PERI_WK_CTRL1_8195;
msk = WC1_IS_EN_P0_95 | WC1_IS_C_95(0xf) | WC1_IS_P_95;
val = enable ? (WC1_IS_EN_P0_95 | WC1_IS_C_95(0x1)) : 0;
break;
case SSUSB_UWK_V1_4:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0_8195;
msk = WC0_IS_EN_P1_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
val = enable ? (WC0_IS_EN_P1_95 | WC0_IS_C_95(0x1)) : 0;
break;
case SSUSB_UWK_V1_5:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0_8195;
msk = WC0_IS_EN_P2_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
val = enable ? (WC0_IS_EN_P2_95 | WC0_IS_C_95(0x1)) : 0;
break;
case SSUSB_UWK_V1_6:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0_8195;
msk = WC0_IS_EN_P3_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
val = enable ? (WC0_IS_EN_P3_95 | WC0_IS_C_95(0x1)) : 0;
break;
case SSUSB_UWK_V2:
reg = mtk->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
val = enable ? msk : 0;
break;
default:
return;
}
regmap_update_bits(mtk->uwk, reg, msk, val);
}
static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
struct device_node *dn)
{
struct of_phandle_args args;
int ret;
/* Wakeup function is optional */
mtk->uwk_en = of_property_read_bool(dn, "wakeup-source");
if (!mtk->uwk_en)
return 0;
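/* two fixed args follow the phandle: register base offset and version */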
ret = of_parse_phandle_with_fixed_args(dn,
"mediatek,syscon-wakeup", 2, 0, &args);
if (ret)
return ret;
mtk->uwk_reg_base = args.args[0];
mtk->uwk_vers = args.args[1];
mtk->uwk = syscon_node_to_regmap(args.np);
of_node_put(args.np);
dev_info(mtk->dev, "uwk - reg:0x%x, version:%d\n",
mtk->uwk_reg_base, mtk->uwk_vers);
return PTR_ERR_OR_ZERO(mtk->uwk);
}
static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
{
if (mtk->uwk_en)
usb_wakeup_ip_sleep_set(mtk, enable);
}
static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
{
struct clk_bulk_data *clks = mtk->clks;
clks[0].id = "sys_ck";
clks[1].id = "xhci_ck";
clks[2].id = "ref_ck";
clks[3].id = "mcu_ck";
clks[4].id = "dma_ck";
clks[5].id = "frmcnt_ck";
return devm_clk_bulk_get_optional(mtk->dev, BULK_CLKS_NUM, clks);
}
static int xhci_mtk_vregs_get(struct xhci_hcd_mtk *mtk)
{
struct regulator_bulk_data *supplies = mtk->supplies;
supplies[0].supply = "vbus";
supplies[1].supply = "vusb33";
return devm_regulator_bulk_get(mtk->dev, BULK_VREGS_NUM, supplies);
}
static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
xhci->quirks |= XHCI_MTK_HOST;
/*
* MTK host controller gives a spurious successful event after a
* short transfer. Ignore it.
*/
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
if (mtk->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
if (mtk->u2_lpm_disable)
xhci->quirks |= XHCI_HW_LPM_DISABLE;
/*
* MTK xHCI 0.96: the max-PSA field is 1 by default even when streams
* are not supported (HCC_MAX_PSA() then evaluates to 4), and 3 when
* they are.
*/
if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
/* called during probe() after chip reset completes */
static int xhci_mtk_setup(struct usb_hcd *hcd)
{
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
int ret;
if (usb_hcd_is_primary_hcd(hcd)) {
ret = xhci_mtk_ssusb_config(mtk);
if (ret)
return ret;
/* workaround only for mt8195 */
xhci_mtk_set_frame_interval(mtk);
}
ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
if (ret)
return ret;
if (usb_hcd_is_primary_hcd(hcd))
ret = xhci_mtk_sch_init(mtk);
return ret;
}
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
.add_endpoint = xhci_mtk_add_ep,
.drop_endpoint = xhci_mtk_drop_ep,
.check_bandwidth = xhci_mtk_check_bandwidth,
.reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
static int xhci_mtk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct xhci_hcd_mtk *mtk;
const struct hc_driver *driver;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *usb3_hcd;
struct usb_hcd *hcd;
int ret = -ENODEV;
int wakeup_irq;
int irq;
if (usb_disabled())
return -ENODEV;
driver = &xhci_mtk_hc_driver;
mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
if (!mtk)
return -ENOMEM;
mtk->dev = dev;
ret = xhci_mtk_vregs_get(mtk);
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
ret = xhci_mtk_clks_get(mtk);
if (ret)
return ret;
irq = platform_get_irq_byname_optional(pdev, "host");
if (irq < 0) {
if (irq == -EPROBE_DEFER)
return irq;
/* for backward compatibility */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
}
wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
if (wakeup_irq == -EPROBE_DEFER)
return wakeup_irq;
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
/* optional property, ignore the error if it does not exist */
of_property_read_u32(node, "mediatek,u3p-dis-msk",
&mtk->u3p_dis_msk);
of_property_read_u32(node, "mediatek,u2p-dis-msk",
&mtk->u2p_dis_msk);
ret = usb_wakeup_of_property_parse(mtk, node);
if (ret) {
dev_err(dev, "failed to parse uwk property\n");
return ret;
}
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 4000);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = regulator_bulk_enable(BULK_VREGS_NUM, mtk->supplies);
if (ret)
goto disable_pm;
ret = clk_bulk_prepare_enable(BULK_CLKS_NUM, mtk->clks);
if (ret)
goto disable_ldos;
ret = device_reset_optional(dev);
if (ret) {
dev_err_probe(dev, ret, "failed to reset controller\n");
goto disable_clk;
}
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
goto disable_clk;
}
/*
* USB 2.0 roothub is stored in the platform_device.
* Swap it with mtk HCD.
*/
mtk->hcd = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, mtk);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_usb2_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
if (res) { /* ippc register is optional */
mtk->ippc_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(mtk->ippc_regs)) {
ret = PTR_ERR(mtk->ippc_regs);
goto put_usb2_hcd;
}
mtk->has_ippc = true;
}
device_init_wakeup(dev, true);
dma_set_max_seg_size(dev, UINT_MAX);
xhci = hcd_to_xhci(hcd);
xhci->main_hcd = hcd;
xhci->allow_single_roothub = 1;
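/*
* prefer a single combined roothub; a shared USB3 HCD is created
* below only when the controller cannot combine them
*/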
/*
* imod_interval is the interrupt moderation value in nanoseconds.
* On MTK's controller the increment interval is 8 times the one
* defined in the xHCI spec.
*/
xhci->imod_interval = 5000;
device_property_read_u32(dev, "imod-interval-ns", &xhci->imod_interval);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_device_wakeup;
if (!xhci_has_one_roothub(xhci)) {
xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
dev_name(dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto dealloc_usb2_hcd;
}
}
usb3_hcd = xhci_get_usb3_hcd(xhci);
if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
!(xhci->quirks & XHCI_BROKEN_STREAMS))
usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) {
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
}
if (wakeup_irq > 0) {
ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeup_irq);
if (ret) {
dev_err(dev, "set wakeup irq %d failed\n", wakeup_irq);
goto dealloc_usb3_hcd;
}
dev_info(dev, "wakeup irq %d\n", wakeup_irq);
}
device_enable_async_suspend(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
pm_runtime_forbid(dev);
return 0;
dealloc_usb3_hcd:
usb_remove_hcd(xhci->shared_hcd);
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
xhci_mtk_sch_exit(mtk);
usb_remove_hcd(hcd);
disable_device_wakeup:
device_init_wakeup(dev, false);
put_usb2_hcd:
usb_put_hcd(hcd);
disable_clk:
clk_bulk_disable_unprepare(BULK_CLKS_NUM, mtk->clks);
disable_ldos:
regulator_bulk_disable(BULK_VREGS_NUM, mtk->supplies);
disable_pm:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
static void xhci_mtk_remove(struct platform_device *pdev)
{
struct xhci_hcd_mtk *mtk = platform_get_drvdata(pdev);
struct usb_hcd *hcd = mtk->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
struct device *dev = &pdev->dev;
pm_runtime_get_sync(dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
if (shared_hcd) {
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
}
usb_remove_hcd(hcd);
if (shared_hcd)
usb_put_hcd(shared_hcd);
usb_put_hcd(hcd);
xhci_mtk_sch_exit(mtk);
clk_bulk_disable_unprepare(BULK_CLKS_NUM, mtk->clks);
regulator_bulk_disable(BULK_VREGS_NUM, mtk->supplies);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
}
static int __maybe_unused xhci_mtk_suspend(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct usb_hcd *hcd = mtk->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
int ret;
xhci_dbg(xhci, "%s: stop port polling\n", __func__);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags);
del_timer_sync(&shared_hcd->rh_timer);
}
ret = xhci_mtk_host_disable(mtk);
if (ret)
goto restart_poll_rh;
clk_bulk_disable_unprepare(BULK_CLKS_NUM, mtk->clks);
usb_wakeup_set(mtk, true);
return 0;
restart_poll_rh:
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
if (shared_hcd) {
set_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags);
usb_hcd_poll_rh_status(shared_hcd);
}
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
return ret;
}
static int __maybe_unused xhci_mtk_resume(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct usb_hcd *hcd = mtk->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
int ret;
usb_wakeup_set(mtk, false);
ret = clk_bulk_prepare_enable(BULK_CLKS_NUM, mtk->clks);
if (ret)
goto enable_wakeup;
ret = xhci_mtk_host_enable(mtk);
if (ret)
goto disable_clks;
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
if (shared_hcd) {
set_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags);
usb_hcd_poll_rh_status(shared_hcd);
}
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
return 0;
disable_clks:
clk_bulk_disable_unprepare(BULK_CLKS_NUM, mtk->clks);
enable_wakeup:
usb_wakeup_set(mtk, true);
return ret;
}
static int __maybe_unused xhci_mtk_runtime_suspend(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
int ret = 0;
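/* a non-zero xhc_state means the host is halted, dying or being removed */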
if (xhci->xhc_state)
return -ESHUTDOWN;
if (device_may_wakeup(dev))
ret = xhci_mtk_suspend(dev);
/* -EBUSY: let PM automatically reschedule another autosuspend */
return ret ? -EBUSY : 0;
}
static int __maybe_unused xhci_mtk_runtime_resume(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
int ret = 0;
if (xhci->xhc_state)
return -ESHUTDOWN;
if (device_may_wakeup(dev))
ret = xhci_mtk_resume(dev);
return ret;
}
static const struct dev_pm_ops xhci_mtk_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_mtk_suspend, xhci_mtk_resume)
SET_RUNTIME_PM_OPS(xhci_mtk_runtime_suspend,
xhci_mtk_runtime_resume, NULL)
};
#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &xhci_mtk_pm_ops : NULL)
static const struct of_device_id mtk_xhci_of_match[] = {
{ .compatible = "mediatek,mt8173-xhci"},
{ .compatible = "mediatek,mt8195-xhci"},
{ .compatible = "mediatek,mtk-xhci"},
{ },
};
MODULE_DEVICE_TABLE(of, mtk_xhci_of_match);
static struct platform_driver mtk_xhci_driver = {
.probe = xhci_mtk_probe,
.remove_new = xhci_mtk_remove,
.driver = {
.name = "xhci-mtk",
.pm = DEV_PM_OPS,
.of_match_table = mtk_xhci_of_match,
},
};
static int __init xhci_mtk_init(void)
{
xhci_init_driver(&xhci_mtk_hc_driver, &xhci_mtk_overrides);
return platform_driver_register(&mtk_xhci_driver);
}
module_init(xhci_mtk_init);
static void __exit xhci_mtk_exit(void)
{
platform_driver_unregister(&mtk_xhci_driver);
}
module_exit(xhci_mtk_exit);
MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_DESCRIPTION("MediaTek xHCI Host Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/xhci-mtk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PS3 EHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <asm/firmware.h>
#include <asm/ps3.h>
static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
{
/* PS3 HC internal setup register offsets. */
enum ps3_ehci_hc_insnreg {
ps3_ehci_hc_insnreg01 = 0x084,
ps3_ehci_hc_insnreg02 = 0x088,
ps3_ehci_hc_insnreg03 = 0x08c,
};
/* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
* internal INSNREGXX setup regs back to the chip default values
* on Host Controller Reset (CMD_RESET) or Light Host Controller
* Reset (CMD_LRESET). The work-around for this is for the HC
* driver to re-initialise these regs whenever the HC is reset.
*/
/* Set burst transfer counts to 256 out, 32 in. */
writel_be(0x01000020, (void __iomem *)ehci->regs +
ps3_ehci_hc_insnreg01);
/* Enable burst transfer counts. */
writel_be(0x00000001, (void __iomem *)ehci->regs +
ps3_ehci_hc_insnreg03);
}
static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
{
int result;
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
ehci->big_endian_mmio = 1;
ehci->caps = hcd->regs;
result = ehci_setup(hcd);
if (result)
return result;
ps3_ehci_setup_insnreg(ehci);
return result;
}
static const struct hc_driver ps3_ehci_hc_driver = {
.description = hcd_name,
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#if defined(CONFIG_PM)
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
unsigned int virq;
static u64 dummy_mask;
if (usb_disabled()) {
result = -ENODEV;
goto fail_start;
}
result = ps3_open_hv_device(dev);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
__func__, __LINE__);
goto fail_open;
}
result = ps3_dma_region_create(dev->d_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
"(%d)\n", __func__, __LINE__, result);
BUG_ON("check region type");
goto fail_dma_region;
}
result = ps3_mmio_region_create(dev->m_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
__func__, __LINE__);
result = -EPERM;
goto fail_mmio_region;
}
dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
__LINE__, dev->m_region->lpar_addr);
result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
__func__, __LINE__, virq);
result = -EPERM;
goto fail_irq;
}
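/*
* the PS3 system bus provides no dma_mask, but usb_create_hcd()
* requires one; supply a static 32-bit mask
*/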
dummy_mask = DMA_BIT_MASK(32);
dev->core.dma_mask = &dummy_mask;
dma_set_coherent_mask(&dev->core, dummy_mask);
hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core));
if (!hcd) {
dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
__LINE__);
result = -ENOMEM;
goto fail_create_hcd;
}
hcd->rsrc_start = dev->m_region->lpar_addr;
hcd->rsrc_len = dev->m_region->len;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
__func__, __LINE__);
hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
if (!hcd->regs) {
dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
__LINE__);
result = -EPERM;
goto fail_ioremap;
}
dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
(unsigned long)hcd->rsrc_start);
dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
(unsigned long)hcd->rsrc_len);
dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
(unsigned long)hcd->regs);
dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
(unsigned long)virq);
ps3_system_bus_set_drvdata(dev, hcd);
result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
__func__, __LINE__, result);
goto fail_add_hcd;
}
device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
iounmap(hcd->regs);
fail_ioremap:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
fail_create_hcd:
ps3_io_irq_destroy(virq);
fail_irq:
ps3_free_mmio_region(dev->m_region);
fail_mmio_region:
ps3_dma_region_free(dev->d_region);
fail_dma_region:
ps3_close_hv_device(dev);
fail_open:
fail_start:
return result;
}
static void ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
BUG_ON(!hcd);
dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
tmp = hcd->irq;
usb_remove_hcd(hcd);
ps3_system_bus_set_drvdata(dev, NULL);
BUG_ON(!hcd->regs);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
ps3_io_irq_destroy(tmp);
ps3_free_mmio_region(dev->m_region);
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
}
static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
{
return firmware_has_feature(FW_FEATURE_PS3_LV1)
? ps3_system_bus_driver_register(drv)
: 0;
}
static void ps3_ehci_driver_unregister(struct ps3_system_bus_driver *drv)
{
if (firmware_has_feature(FW_FEATURE_PS3_LV1))
ps3_system_bus_driver_unregister(drv);
}
MODULE_ALIAS(PS3_MODULE_ALIAS_EHCI);
static struct ps3_system_bus_driver ps3_ehci_driver = {
.core.name = "ps3-ehci-driver",
.core.owner = THIS_MODULE,
.match_id = PS3_MATCH_ID_EHCI,
.probe = ps3_ehci_probe,
.remove = ps3_ehci_remove,
.shutdown = ps3_ehci_remove,
};
| linux-master | drivers/usb/host/ehci-ps3.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Open Host Controller Interface (OHCI) driver for USB.
*
* Maintainer: Alan Stern <[email protected]>
*
* (C) Copyright 1999 Roman Weissgaerber <[email protected]>
* (C) Copyright 2000-2004 David Brownell <[email protected]>
*
* [ Initialisation is based on Linus' ]
* [ uhci code and gregs ohci fragments ]
* [ (C) Copyright 1999 Linus Torvalds ]
* [ (C) Copyright 1999 Gregory P. Smith]
*
*
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
* interfaces (though some non-x86 Intel chips use it). It supports
* smarter hardware than UHCI. A download link for the spec is
* available through the https://www.usb.org website.
*
* This file is licenced under the GPL.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/genalloc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
/*-------------------------------------------------------------------------*/
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
(OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE \
| OHCI_INTR_RD | OHCI_INTR_WDH)
#ifdef __hppa__
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#define IR_DISABLE
#endif
#ifdef CONFIG_ARCH_OMAP
/* OMAP doesn't support IR (no SMM; not needed) */
#define IR_DISABLE
#endif
/*-------------------------------------------------------------------------*/
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#define IO_WATCHDOG_OFF 0xffffff00
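/* ohci->prev_frame_no holds IO_WATCHDOG_OFF while the watchdog timer is idle */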
#include "ohci.h"
#include "pci-quirks.h"
static void ohci_dump(struct ohci_hcd *ohci);
static void ohci_stop(struct usb_hcd *hcd);
static void io_watchdog_func(struct timer_list *t);
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
#include "ohci-q.c"
/*
* On architectures with edge-triggered interrupts we must never return
* IRQ_NONE.
*/
#if defined(CONFIG_SA1111) /* ... or other edge-triggered systems */
#define IRQ_NOTMINE IRQ_HANDLED
#else
#define IRQ_NOTMINE IRQ_NONE
#endif
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
static bool no_handshake;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
/*-------------------------------------------------------------------------*/
static int number_of_tds(struct urb *urb)
{
int len, i, num, this_sg_len;
struct scatterlist *sg;
len = urb->transfer_buffer_length;
i = urb->num_mapped_sgs;
if (len > 0 && i > 0) { /* Scatter-gather transfer */
num = 0;
sg = urb->sg;
for (;;) {
this_sg_len = min_t(int, sg_dma_len(sg), len);
num += DIV_ROUND_UP(this_sg_len, 4096);
len -= this_sg_len;
if (--i <= 0 || len <= 0)
break;
sg = sg_next(sg);
}
} else { /* Non-SG transfer */
/* one TD for every 4096 Bytes (could be up to 8K) */
num = DIV_ROUND_UP(len, 4096);
}
return num;
}
/*
* queue up an urb for anything except the root hub
*/
static int ohci_urb_enqueue (
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct ed *ed;
urb_priv_t *urb_priv;
unsigned int pipe = urb->pipe;
int i, size = 0;
unsigned long flags;
int retval = 0;
/* every endpoint has a ed, locate and maybe (re)initialize it */
ed = ed_get(ohci, urb->ep, urb->dev, pipe, urb->interval);
if (! ed)
return -ENOMEM;
/* for the private part of the URB we need the number of TDs (size) */
switch (ed->type) {
case PIPE_CONTROL:
/* td_submit_urb() doesn't yet handle these */
if (urb->transfer_buffer_length > 4096)
return -EMSGSIZE;
/* 1 TD for setup, 1 for ACK, plus ... */
size = 2;
fallthrough;
// case PIPE_INTERRUPT:
// case PIPE_BULK:
default:
size += number_of_tds(urb);
/* maybe a zero-length packet to wrap it up */
if (size == 0)
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
% usb_maxpacket(urb->dev, pipe)) == 0)
size++;
break;
case PIPE_ISOCHRONOUS: /* number of packets from URB */
size = urb->number_of_packets;
break;
}
/* allocate the private part of the URB */
urb_priv = kzalloc(struct_size(urb_priv, td, size), mem_flags);
if (!urb_priv)
return -ENOMEM;
INIT_LIST_HEAD (&urb_priv->pending);
urb_priv->length = size;
urb_priv->ed = ed;
/* allocate the TDs (deferring hash chain updates) */
for (i = 0; i < size; i++) {
urb_priv->td [i] = td_alloc (ohci, mem_flags);
if (!urb_priv->td [i]) {
urb_priv->length = i;
urb_free_priv (ohci, urb_priv);
return -ENOMEM;
}
}
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
if (!HCD_HW_ACCESSIBLE(hcd)) {
retval = -ENODEV;
goto fail;
}
if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto fail;
/* schedule the ed if needed */
if (ed->state == ED_IDLE) {
retval = ed_schedule (ohci, ed);
if (retval < 0) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
goto fail;
}
/* Start up the I/O watchdog timer, if it's not running */
if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
list_empty(&ohci->eds_in_use) &&
!(ohci->flags & OHCI_QUIRK_QEMU)) {
ohci->prev_frame_no = ohci_frame_no(ohci);
mod_timer(&ohci->io_watchdog,
jiffies + IO_WATCHDOG_DELAY);
}
list_add(&ed->in_use_list, &ohci->eds_in_use);
if (ed->type == PIPE_ISOCHRONOUS) {
u16 frame = ohci_frame_no(ohci);
/* delay a few frames before the first TD */
frame += max_t (u16, 8, ed->interval);
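/* align to the ED's period, then select its assigned branch slot */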
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
ed->last_iso = frame + ed->interval * (size - 1);
}
} else if (ed->type == PIPE_ISOCHRONOUS) {
u16 next = ohci_frame_no(ohci) + 1;
u16 frame = ed->last_iso + ed->interval;
u16 length = ed->interval * (size - 1);
/* Behind the scheduling threshold? */
if (unlikely(tick_before(frame, next))) {
/* URB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP) {
frame += (next - frame + ed->interval - 1) &
-ed->interval;
/*
* Not ASAP: Use the next slot in the stream,
* no matter what.
*/
} else {
/*
* Some OHCI hardware doesn't handle late TDs
* correctly. After retiring them it proceeds
* to the next ED instead of the next TD.
* Therefore we have to omit the late TDs
* entirely.
*/
urb_priv->td_cnt = DIV_ROUND_UP(
(u16) (next - frame),
ed->interval);
if (urb_priv->td_cnt >= urb_priv->length) {
++urb_priv->td_cnt; /* Mark it */
ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
urb, frame, length,
next);
}
}
}
urb->start_frame = frame;
ed->last_iso = frame + length;
}
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
* and update count of queued periodic urbs
*/
urb->hcpriv = urb_priv;
td_submit_urb (ohci, urb);
fail:
if (retval)
urb_free_priv (ohci, urb_priv);
spin_unlock_irqrestore (&ohci->lock, flags);
return retval;
}
/*
* decouple the URB from the HC queues (TDs, urb_priv).
* reporting is always done
* asynchronously, and we might be dealing with an urb that's
* partially transferred, or an ED with other urbs being unlinked.
*/
static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
int rc;
urb_priv_t *urb_priv;
spin_lock_irqsave (&ohci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc == 0) {
/* Unless an IRQ completed the unlink while it was being
* handed to us, flag it for unlink and giveback, and force
* some upcoming INTR_SF to call finish_unlinks()
*/
urb_priv = urb->hcpriv;
if (urb_priv->ed->state == ED_OPER)
start_ed_unlink(ohci, urb_priv->ed);
if (ohci->rh_state != OHCI_RH_RUNNING) {
/* With HC dead, we can clean up right away */
ohci_work(ohci);
}
}
spin_unlock_irqrestore (&ohci->lock, flags);
return rc;
}
/*-------------------------------------------------------------------------*/
/* frees config/altsetting state for endpoints,
* including ED memory, dummy TD, and bulk/intr data toggle
*/
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
struct ed *ed = ep->hcpriv;
unsigned limit = 1000;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
if (!ed)
return;
rescan:
spin_lock_irqsave (&ohci->lock, flags);
if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
ohci_work(ohci);
}
switch (ed->state) {
case ED_UNLINK: /* wait for hw to finish? */
/* major IRQ delivery trouble loses INTR_SF too... */
if (limit-- == 0) {
ohci_warn(ohci, "ED unlink timeout\n");
goto sanitize;
}
spin_unlock_irqrestore (&ohci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case ED_IDLE: /* fully unlinked */
if (list_empty (&ed->td_list)) {
td_free (ohci, ed->dummy);
ed_free (ohci, ed);
break;
}
fallthrough;
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. can't recover; must leak ed.
*/
ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
ed, ep->desc.bEndpointAddress, ed->state,
list_empty (&ed->td_list) ? "" : " (has tds)");
td_free (ohci, ed->dummy);
break;
}
ep->hcpriv = NULL;
spin_unlock_irqrestore (&ohci->lock, flags);
}
static int ohci_get_frame (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
return ohci_frame_no(ohci);
}
static void ohci_usb_reset (struct ohci_hcd *ohci)
{
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
static void _ohci_shutdown(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
/* Software reset, after which the controller goes into SUSPEND */
ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
udelay(10);
ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
ohci->rh_state = OHCI_RH_HALTED;
}
static void ohci_shutdown(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
unsigned long flags;
spin_lock_irqsave(&ohci->lock, flags);
_ohci_shutdown(hcd);
spin_unlock_irqrestore(&ohci->lock, flags);
}
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
/* init memory, and kick BIOS/SMM off */
static int ohci_init (struct ohci_hcd *ohci)
{
int ret;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
/* Accept arbitrarily long scatter-gather lists */
if (!hcd->localmem_pool)
hcd->self.sg_tablesize = ~0;
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
* was never needed for most non-PCI systems ... remove the code?
*/
#ifndef IR_DISABLE
/* SMM owns the HC? not for long! */
if (!no_handshake && ohci_readl (ohci,
&ohci->regs->control) & OHCI_CTRL_IR) {
u32 temp;
ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");
/* this timeout is arbitrary. we make it long, so systems
* depending on usb keyboards may be usable even if the
* BIOS/SMM code seems pretty broken.
*/
temp = 500; /* arbitrary: five seconds */
ohci_writel (ohci, OHCI_INTR_OC, &ohci->regs->intrenable);
ohci_writel (ohci, OHCI_OCR, &ohci->regs->cmdstatus);
while (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_IR) {
msleep (10);
if (--temp == 0) {
ohci_err (ohci, "USB HC takeover failed!"
" (BIOS/SMM bug)\n");
return -EBUSY;
}
}
ohci_usb_reset (ohci);
}
#endif
/* Disable HC interrupts */
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
/* flush the writes, and save key bits like RWC */
if (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_RWC)
ohci->hc_control |= OHCI_CTRL_RWC;
/* Read the number of ports unless overridden */
if (ohci->num_ports == 0)
ohci->num_ports = roothub_a(ohci) & RH_A_NDP;
if (ohci->hcca)
return 0;
timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
ohci->prev_frame_no = IO_WATCHDOG_OFF;
if (hcd->localmem_pool)
ohci->hcca = gen_pool_dma_alloc_align(hcd->localmem_pool,
sizeof(*ohci->hcca),
&ohci->hcca_dma, 256);
else
ohci->hcca = dma_alloc_coherent(hcd->self.controller,
sizeof(*ohci->hcca),
&ohci->hcca_dma,
GFP_KERNEL);
if (!ohci->hcca)
return -ENOMEM;
if ((ret = ohci_mem_init (ohci)) < 0)
ohci_stop (hcd);
else {
create_debug_files (ohci);
}
return ret;
}
/*-------------------------------------------------------------------------*/
/* Start an OHCI controller, set the BUS operational
* resets USB and controller
* enable interrupts
*/
static int ohci_run (struct ohci_hcd *ohci)
{
u32 mask, val;
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
val = ohci_readl (ohci, &ohci->regs->fminterval);
ohci->fminterval = val & 0x3fff;
if (ohci->fminterval != FI)
ohci_dbg (ohci, "fminterval delta %d\n",
ohci->fminterval - FI);
ohci->fminterval |= FSMP (ohci->fminterval) << 16;
/* also: power/overcurrent flags in roothub.a */
}
/* Reset USB nearly "by the book". RemoteWakeupConnected has
* to be checked in case boot firmware (BIOS/SMM/...) has set up
* wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
* If the bus glue detected wakeup capability then it should
* already be enabled; if so we'll just enable it again.
*/
if ((ohci->hc_control & OHCI_CTRL_RWC) != 0)
device_set_wakeup_capable(hcd->self.controller, 1);
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
val = 0;
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESUME;
val = 10 /* msec wait */;
break;
// case OHCI_USB_RESET:
default:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESET;
val = 50 /* msec wait */;
break;
}
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(val);
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
/* 2msec timelimit here means no irqs/preempt */
spin_lock_irq (&ohci->lock);
retry:
/* HC Reset requires max 10 us delay */
ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
val = 30; /* ... allow extra time */
while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
if (--val == 0) {
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
}
udelay (1);
}
/* now we're in the SUSPEND state ... must go OPERATIONAL
* within 2msec else HC enters RESUME
*
* ... but some hardware won't init fmInterval "by the book"
* (SiS, OPTi ...), so reset again instead. SiS doesn't need
* this if we write fmInterval after we're OPERATIONAL.
* Unclear about ALi, ServerWorks, and others ... this could
* easily be a longstanding bug in chip init on Linux.
*/
if (ohci->flags & OHCI_QUIRK_INITRESET) {
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
}
/* Tell the controller where the control and bulk lists are
* The lists are empty now. */
ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
/* a reset clears this */
ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
periodic_reinit (ohci);
/* some OHCI implementations are finicky about how they init.
* bogus values here mean not even enumeration could work.
*/
if ((ohci_readl (ohci, &ohci->regs->fminterval) & 0x3fff0000) == 0
|| !ohci_readl (ohci, &ohci->regs->periodicstart)) {
if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
ohci->flags |= OHCI_QUIRK_INITRESET;
ohci_dbg (ohci, "enabling initreset quirk\n");
goto retry;
}
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "init err (%08x %04x)\n",
ohci_readl (ohci, &ohci->regs->fminterval),
ohci_readl (ohci, &ohci->regs->periodicstart));
return -EOVERFLOW;
}
/* use rhsc irqs after hub_wq is allocated */
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->uses_new_polling = 1;
/* start controller operations */
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
/* Choose the interrupts we care about now, others later on demand */
mask = OHCI_INTR_INIT;
ohci_writel (ohci, ~0, &ohci->regs->intrstatus);
ohci_writel (ohci, mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
val = roothub_a (ohci);
/* Configure for per-port over-current protection by default */
val &= ~RH_A_NOCP;
val |= RH_A_OCPM;
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others.
* Ganged power switching, no over-current protection.
*/
val |= RH_A_NOCP;
val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms.
*/
val |= RH_A_NPS;
}
ohci_writel(ohci, val, &ohci->regs->roothub.a);
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
spin_unlock_irq (&ohci->lock);
// POTPGT delay is bits 24-31, in 2 ms units.
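// Shifting by 23 instead of 24 doubles the 2 ms count, yielding ms.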
mdelay ((val >> 23) & 0x1fe);
ohci_dump(ohci);
return 0;
}
/* ohci_setup routine for generic controller initialization */
int ohci_setup(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
return ohci_init(ohci);
}
EXPORT_SYMBOL_GPL(ohci_setup);
/* ohci_start routine for generic controller start of all OHCI bus glue */
static int ohci_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
ret = ohci_run(ohci);
if (ret < 0) {
ohci_err(ohci, "can't start\n");
ohci_stop(hcd);
}
return ret;
}
/*-------------------------------------------------------------------------*/
/*
* Some OHCI controllers are known to lose track of completed TDs. They
* don't add the TDs to the hardware done queue, which means we never see
* them as being completed.
*
* This watchdog routine checks for such problems. Without some way to
* tell when those TDs have completed, we would never take their EDs off
* the unlink list. As a result, URBs could never be dequeued and
* endpoints could never be released.
*/
static void io_watchdog_func(struct timer_list *t)
{
struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
struct ed *ed;
struct td *td, *td_start, *td_next;
unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF;
unsigned long flags;
spin_lock_irqsave(&ohci->lock, flags);
/*
* One way to lose track of completed TDs is if the controller
* never writes back the done queue head. If it hasn't been
* written back since the last time this function ran and if it
* was non-empty at that time, something is badly wrong with the
* hardware.
*/
status = ohci_readl(ohci, &ohci->regs->intrstatus);
if (!(status & OHCI_INTR_WDH) && ohci->wdh_cnt == ohci->prev_wdh_cnt) {
if (ohci->prev_donehead) {
ohci_err(ohci, "HcDoneHead not written back; disabled\n");
died:
usb_hc_died(ohci_to_hcd(ohci));
ohci_dump(ohci);
_ohci_shutdown(ohci_to_hcd(ohci));
goto done;
} else {
/* No write back because the done queue was empty */
takeback_all_pending = true;
}
}
/* Check every ED which might have pending TDs */
list_for_each_entry(ed, &ohci->eds_in_use, in_use_list) {
if (ed->pending_td) {
if (takeback_all_pending ||
OKAY_TO_TAKEBACK(ohci, ed)) {
unsigned tmp = hc32_to_cpu(ohci, ed->hwINFO);
ohci_dbg(ohci, "takeback pending TD for dev %d ep 0x%x\n",
0x007f & tmp,
(0x000f & (tmp >> 7)) +
((tmp & ED_IN) >> 5));
add_to_done_list(ohci, ed->pending_td);
}
}
/* Starting from the latest pending TD, */
td = ed->pending_td;
/* or the last TD on the done list, */
if (!td) {
list_for_each_entry(td_next, &ed->td_list, td_list) {
if (!td_next->next_dl_td)
break;
td = td_next;
}
}
/* find the last TD processed by the controller. */
head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
if (head == (u32) td_next->td_dma)
break;
td = td_next; /* head pointer has passed this TD */
}
if (td != td_start) {
/*
* In case a WDH cycle is in progress, we will wait
* for the next two cycles to complete before assuming
* this TD will never get on the done queue.
*/
ed->takeback_wdh_cnt = ohci->wdh_cnt + 2;
ed->pending_td = td;
}
}
ohci_work(ohci);
if (ohci->rh_state == OHCI_RH_RUNNING) {
/*
* Sometimes a controller just stops working. We can tell
* by checking that the frame counter has advanced since
* the last time we ran.
*
* But be careful: Some controllers violate the spec by
* stopping their frame counter when no ports are active.
*/
frame_no = ohci_frame_no(ohci);
if (frame_no == ohci->prev_frame_no) {
int active_cnt = 0;
int i;
unsigned tmp;
for (i = 0; i < ohci->num_ports; ++i) {
tmp = roothub_portstatus(ohci, i);
/* Enabled and not suspended? */
if ((tmp & RH_PS_PES) && !(tmp & RH_PS_PSS))
++active_cnt;
}
if (active_cnt > 0) {
ohci_err(ohci, "frame counter not updating; disabled\n");
goto died;
}
}
if (!list_empty(&ohci->eds_in_use)) {
prev_frame_no = frame_no;
ohci->prev_wdh_cnt = ohci->wdh_cnt;
ohci->prev_donehead = ohci_readl(ohci,
&ohci->regs->donehead);
mod_timer(&ohci->io_watchdog,
jiffies + IO_WATCHDOG_DELAY);
}
}
done:
ohci->prev_frame_no = prev_frame_no;
spin_unlock_irqrestore(&ohci->lock, flags);
}
/* an interrupt happens */
static irqreturn_t ohci_irq (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct ohci_regs __iomem *regs = ohci->regs;
int ints;
/* Read interrupt status (and flush pending writes). We ignore the
* optimization of checking the LSB of hcca->done_head; it doesn't
* work on all systems (edge triggering for OHCI can be a factor).
*/
ints = ohci_readl(ohci, ®s->intrstatus);
/* Check for an all 1's result which is a typical consequence
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
usb_hc_died(hcd);
return IRQ_HANDLED;
}
/* We only care about interrupts that are enabled */
ints &= ohci_readl(ohci, ®s->intrenable);
/* interrupt for some other device? */
if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
// e.g. due to PCI Master/Target Abort
if (quirk_nec(ohci)) {
/* Workaround for a silicon bug in some NEC chips used
* in Apple's PowerBooks. Adapted from Darwin code.
*/
ohci_err (ohci, "OHCI Unrecoverable Error, scheduling NEC chip restart\n");
ohci_writel (ohci, OHCI_INTR_UE, ®s->intrdisable);
schedule_work (&ohci->nec_work);
} else {
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
ohci->rh_state = OHCI_RH_HALTED;
usb_hc_died(hcd);
}
ohci_dump(ohci);
ohci_usb_reset (ohci);
}
if (ints & OHCI_INTR_RHSC) {
ohci_dbg(ohci, "rhsc\n");
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
®s->intrstatus);
/* NOTE: Vendors didn't always make the same implementation
* choices for RHSC. Many followed the spec; RHSC triggers
* on an edge, like setting and maybe clearing a port status
* change bit. With others it's level-triggered, active
* until hub_wq clears all the port status change bits. We'll
* always disable it here and rely on polling until hub_wq
* re-enables it.
*/
ohci_writel(ohci, OHCI_INTR_RHSC, ®s->intrdisable);
usb_hcd_poll_rh_status(hcd);
}
/* For connect and disconnect events, we expect the controller
* to turn on RHSC along with RD. But for remote wakeup events
* this might not happen.
*/
else if (ints & OHCI_INTR_RD) {
ohci_dbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, ®s->intrstatus);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
spin_lock (&ohci->lock);
ohci_rh_resume (ohci);
spin_unlock (&ohci->lock);
} else
usb_hcd_resume_root_hub(hcd);
}
spin_lock(&ohci->lock);
if (ints & OHCI_INTR_WDH)
update_done_list(ohci);
/* could track INTR_SO to reduce available PCI/... bandwidth */
/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
* when there's still unlinking to be done (next frame).
*/
ohci_work(ohci);
if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
&& ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, ®s->intrdisable);
if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, ®s->intrstatus);
if (ints & OHCI_INTR_WDH)
++ohci->wdh_cnt;
ohci_writel (ohci, OHCI_INTR_MIE, ®s->intrenable);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
}
spin_unlock(&ohci->lock);
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
static void ohci_stop (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci_dump(ohci);
if (quirk_nec(ohci))
flush_work(&ohci->nec_work);
del_timer_sync(&ohci->io_watchdog);
ohci->prev_frame_no = IO_WATCHDOG_OFF;
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
ohci_usb_reset(ohci);
free_irq(hcd->irq, hcd);
hcd->irq = 0;
if (quirk_amdiso(ohci))
usb_amd_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
if (ohci->hcca) {
if (hcd->localmem_pool)
gen_pool_free(hcd->localmem_pool,
(unsigned long)ohci->hcca,
sizeof(*ohci->hcca));
else
dma_free_coherent(hcd->self.controller,
sizeof(*ohci->hcca),
ohci->hcca, ohci->hcca_dma);
ohci->hcca = NULL;
ohci->hcca_dma = 0;
}
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_PM) || defined(CONFIG_USB_PCI)
/* must not be called from interrupt context */
int ohci_restart(struct ohci_hcd *ohci)
{
int temp;
int i;
struct urb_priv *priv;
ohci_init(ohci);
spin_lock_irq(&ohci->lock);
ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
ohci_dbg(ohci, "abort schedule...\n");
list_for_each_entry (priv, &ohci->pending, pending) {
struct urb *urb = priv->td[0]->urb;
struct ed *ed = priv->ed;
switch (ed->state) {
case ED_OPER:
ed->state = ED_UNLINK;
ed->hwINFO |= cpu_to_hc32(ohci, ED_DEQUEUE);
ed_deschedule (ohci, ed);
ed->ed_next = ohci->ed_rm_list;
ed->ed_prev = NULL;
ohci->ed_rm_list = ed;
fallthrough;
case ED_UNLINK:
break;
default:
ohci_dbg(ohci, "bogus ed %p state %d\n",
ed, ed->state);
}
if (!urb->unlinked)
urb->unlinked = -ESHUTDOWN;
}
ohci_work(ohci);
spin_unlock_irq(&ohci->lock);
/* paranoia, in case that didn't work: */
/* empty the interrupt branches */
for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;
/* no EDs to remove */
ohci->ed_rm_list = NULL;
/* empty control and bulk lists */
ohci->ed_controltail = NULL;
ohci->ed_bulktail = NULL;
if ((temp = ohci_run (ohci)) < 0) {
ohci_err (ohci, "can't restart, %d\n", temp);
return temp;
}
ohci_dbg(ohci, "restart complete\n");
return 0;
}
EXPORT_SYMBOL_GPL(ohci_restart);
#endif
#ifdef CONFIG_PM
int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
int rc = 0;
/* Disable irq emission and mark HW unaccessible. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
*/
spin_lock_irqsave (&ohci->lock, flags);
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
(void)ohci_readl(ohci, &ohci->regs->intrdisable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore (&ohci->lock, flags);
synchronize_irq(hcd->irq);
if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
ohci_resume(hcd, false);
rc = -EBUSY;
}
return rc;
}
EXPORT_SYMBOL_GPL(ohci_suspend);
int ohci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int port;
bool need_reinit = false;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* Make sure resume from hibernation re-enumerates everything */
if (hibernated)
ohci_usb_reset(ohci);
/* See if the controller is already running or has been reset */
ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
need_reinit = true;
} else {
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
case OHCI_USB_RESET:
need_reinit = true;
}
}
/* If needed, reinitialize and suspend the root hub */
if (need_reinit) {
spin_lock_irq(&ohci->lock);
ohci_rh_resume(ohci);
ohci_rh_suspend(ohci, 0);
spin_unlock_irq(&ohci->lock);
}
/* Normally just turn on port power and enable interrupts */
else {
ohci_dbg(ohci, "powerup ports\n");
for (port = 0; port < ohci->num_ports; port++)
ohci_writel(ohci, RH_PS_PPS,
&ohci->regs->roothub.portstatus[port]);
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
ohci_readl(ohci, &ohci->regs->intrenable);
msleep(20);
}
usb_hcd_resume_root_hub(hcd);
return 0;
}
EXPORT_SYMBOL_GPL(ohci_resume);
#endif
/*-------------------------------------------------------------------------*/
/*
* Generic structure: This gets copied for platform drivers so that
* individual entries can be overridden as needed.
*/
static const struct hc_driver ohci_hc_driver = {
.description = hcd_name,
.product_desc = "OHCI Host Controller",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/*
* basic lifecycle operations
*/
.reset = ohci_setup,
.start = ohci_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
void ohci_init_driver(struct hc_driver *drv,
const struct ohci_driver_overrides *over)
{
/* Copy the generic table to drv and then apply the overrides */
*drv = ohci_hc_driver;
if (over) {
drv->product_desc = over->product_desc;
drv->hcd_priv_size += over->extra_priv_size;
if (over->reset)
drv->reset = over->reset;
}
}
EXPORT_SYMBOL_GPL(ohci_init_driver);
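/*
 * Usage sketch (illustrative; the "foo" names are hypothetical): a bus-glue
 * driver keeps its own hc_driver plus an overrides table and fills the
 * former from the generic template at module init:
 *
 *	static struct hc_driver __read_mostly foo_ohci_hc_driver;
 *	static const struct ohci_driver_overrides foo_overrides __initconst = {
 *		.product_desc	 = "Foo OHCI controller",
 *		.extra_priv_size = sizeof(struct foo_ohci_priv),
 *	};
 *	...
 *	ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);
 *
 * ohci-platform.c further down follows exactly this pattern.
 */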
/*-------------------------------------------------------------------------*/
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE ("GPL");
#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
#include "ohci-sa1111.c"
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ohci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
#ifdef CONFIG_MFD_SM501
#include "ohci-sm501.c"
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
#endif
static int __init ohci_hcd_mod_init(void)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
pr_debug ("%s: block sizes: ed %zd td %zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
if (retval < 0)
goto error_ps3;
#endif
#ifdef OF_PLATFORM_DRIVER
retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto error_of_platform;
#endif
#ifdef SA1111_DRIVER
retval = sa1111_driver_register(&SA1111_DRIVER);
if (retval < 0)
goto error_sa1111;
#endif
#ifdef SM501_OHCI_DRIVER
retval = platform_driver_register(&SM501_OHCI_DRIVER);
if (retval < 0)
goto error_sm501;
#endif
return retval;
/* Error path */
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;
}
module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
debugfs_remove(ohci_debug_root);
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
| linux-master | drivers/usb/host/ohci-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ohci driver
*
* Copyright 2007 Michael Buesch <[email protected]>
* Copyright 2011-2012 Hauke Mehrtens <[email protected]>
* Copyright 2014 Hans de Goede <[email protected]>
*
* Derived from the OHCI-SSB driver
* Derived from the OHCI-PCI driver
* Copyright 1999 Roman Weissgaerber
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/of.h>
#include "ohci.h"
#define DRIVER_DESC "OHCI generic platform driver"
#define OHCI_MAX_CLKS 4
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
struct reset_control *resets;
};
static int ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk, ret;
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
ret = clk_prepare_enable(priv->clks[clk]);
if (ret)
goto err_disable_clks;
}
return 0;
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(priv->clks[clk]);
return ret;
}
static void ohci_platform_power_off(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk;
for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
if (priv->clks[clk])
clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ohci_platform_hc_driver;
static const struct ohci_driver_overrides platform_overrides __initconst = {
.product_desc = "Generic Platform OHCI controller",
.extra_priv_size = sizeof(struct ohci_platform_priv),
};
static struct usb_ohci_pdata ohci_platform_defaults = {
.power_on = ohci_platform_power_on,
.power_suspend = ohci_platform_power_off,
.power_off = ohci_platform_power_off,
};
static int ohci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
/*
* Use reasonable defaults so platforms don't have to provide these
* with DT probing on ARM.
*/
if (!pdata)
pdata = &ohci_platform_defaults;
err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err)
return err;
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
return -ENOMEM;
platform_set_drvdata(dev, hcd);
dev->dev.platform_data = pdata;
priv = hcd_to_ohci_priv(hcd);
ohci = hcd_to_ohci(hcd);
if (pdata == &ohci_platform_defaults && dev->dev.of_node) {
if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
ohci->flags |= OHCI_QUIRK_BE_MMIO;
if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
ohci->flags |= OHCI_QUIRK_BE_DESC;
if (of_property_read_bool(dev->dev.of_node, "big-endian"))
ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
if (of_property_read_bool(dev->dev.of_node, "no-big-frame-no"))
ohci->flags |= OHCI_QUIRK_FRAME_NO;
if (of_property_read_bool(dev->dev.of_node,
"remote-wakeup-connected"))
ohci->hc_control = OHCI_CTRL_RWC;
of_property_read_u32(dev->dev.of_node, "num-ports",
&ohci->num_ports);
for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
err = PTR_ERR(priv->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
priv->clks[clk] = NULL;
break;
}
}
priv->resets = devm_reset_control_array_get_optional_shared(
&dev->dev);
if (IS_ERR(priv->resets)) {
err = PTR_ERR(priv->resets);
goto err_put_clks;
}
err = reset_control_deassert(priv->resets);
if (err)
goto err_put_clks;
}
if (pdata->big_endian_desc)
ohci->flags |= OHCI_QUIRK_BE_DESC;
if (pdata->big_endian_mmio)
ohci->flags |= OHCI_QUIRK_BE_MMIO;
if (pdata->no_big_frame_no)
ohci->flags |= OHCI_QUIRK_FRAME_NO;
if (pdata->num_ports)
ohci->num_ports = pdata->num_ports;
#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
dev_err(&dev->dev,
"Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
err = -EINVAL;
goto err_reset;
}
#endif
#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
if (ohci->flags & OHCI_QUIRK_BE_DESC) {
dev_err(&dev->dev,
"Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
err = -EINVAL;
goto err_reset;
}
#endif
pm_runtime_set_active(&dev->dev);
pm_runtime_enable(&dev->dev);
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
goto err_reset;
}
hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
hcd->tpl_support = of_usb_host_tpl_support(dev->dev.of_node);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
pm_runtime_disable(&dev->dev);
reset_control_assert(priv->resets);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
usb_put_hcd(hcd);
return err;
}
static void ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
int clk;
pm_runtime_get_sync(&dev->dev);
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
reset_control_assert(priv->resets);
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
usb_put_hcd(hcd);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
}
#ifdef CONFIG_PM_SLEEP
static int ohci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
ret = ohci_suspend(hcd, do_wakeup);
if (ret)
return ret;
if (pdata->power_suspend)
pdata->power_suspend(pdev);
return ret;
}
static int ohci_platform_resume_common(struct device *dev, bool hibernated)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
if (pdata->power_on) {
int err = pdata->power_on(pdev);
if (err < 0)
return err;
}
ohci_resume(hcd, hibernated);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static int ohci_platform_resume(struct device *dev)
{
return ohci_platform_resume_common(dev, false);
}
static int ohci_platform_restore(struct device *dev)
{
return ohci_platform_resume_common(dev, true);
}
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id ohci_platform_ids[] = {
{ .compatible = "generic-ohci", },
{ .compatible = "cavium,octeon-6335-ohci", },
{ .compatible = "ti,ohci-omap3", },
{ }
};
MODULE_DEVICE_TABLE(of, ohci_platform_ids);
static const struct platform_device_id ohci_platform_table[] = {
{ "ohci-platform", 0 },
{ }
};
MODULE_DEVICE_TABLE(platform, ohci_platform_table);
#ifdef CONFIG_PM_SLEEP
static const struct dev_pm_ops ohci_platform_pm_ops = {
.suspend = ohci_platform_suspend,
.resume = ohci_platform_resume,
.freeze = ohci_platform_suspend,
.thaw = ohci_platform_resume,
.poweroff = ohci_platform_suspend,
.restore = ohci_platform_restore,
};
#endif
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
.probe = ohci_platform_probe,
.remove_new = ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-platform",
#ifdef CONFIG_PM_SLEEP
.pm = &ohci_platform_pm_ops,
#endif
.of_match_table = ohci_platform_ids,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
static int __init ohci_platform_init(void)
{
if (usb_disabled())
return -ENODEV;
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
module_init(ohci_platform_init);
static void __exit ohci_platform_cleanup(void)
{
platform_driver_unregister(&ohci_platform_driver);
}
module_exit(ohci_platform_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/ohci-platform.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Samsung Exynos USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <[email protected]>
* Author: Joonyoung Shim <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "ehci.h"
#define DRIVER_DESC "EHCI Exynos driver"
#define EHCI_INSNREG00(base) (base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
#define EHCI_INSNREG00_ENA_INCR8 (0x1 << 24)
#define EHCI_INSNREG00_ENA_INCR4 (0x1 << 23)
#define EHCI_INSNREG00_ENA_INCRX_ALIGN (0x1 << 22)
#define EHCI_INSNREG00_ENABLE_DMA_BURST \
(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
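/*
 * Note: EHCI_INSNREG00 is an implementation-specific register outside the
 * standard EHCI register map (here at base + 0x90).  Setting the INCR4/8/16
 * and alignment bits lets the core issue longer AHB bursts.  The setting is
 * lost when the block powers down, which is why the driver programs it both
 * at probe time and again in the resume path below.
 */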
static struct hc_driver __read_mostly exynos_ehci_hc_driver;
#define PHY_NUMBER 3
struct exynos_ehci_hcd {
struct clk *clk;
struct device_node *of_node;
struct phy *phy[PHY_NUMBER];
bool legacy_phy;
};
#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
static int exynos_ehci_get_phy(struct device *dev,
struct exynos_ehci_hcd *exynos_ehci)
{
struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
/* Get PHYs for the controller */
num_phys = of_count_phandle_with_args(dev->of_node, "phys",
"#phy-cells");
for (phy_number = 0; phy_number < num_phys; phy_number++) {
phy = devm_of_phy_get_by_index(dev, dev->of_node, phy_number);
if (IS_ERR(phy))
return PTR_ERR(phy);
exynos_ehci->phy[phy_number] = phy;
}
if (num_phys > 0)
return 0;
/* Get PHYs using legacy bindings */
for_each_available_child_of_node(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
if (IS_ERR(phy)) {
of_node_put(child);
return PTR_ERR(phy);
}
}
exynos_ehci->legacy_phy = true;
return 0;
}
static int exynos_ehci_phy_enable(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
int i;
int ret = 0;
for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
ret = phy_power_on(exynos_ehci->phy[i]);
if (ret)
for (i--; i >= 0; i--)
phy_power_off(exynos_ehci->phy[i]);
return ret;
}
static void exynos_ehci_phy_disable(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
int i;
for (i = 0; i < PHY_NUMBER; i++)
phy_power_off(exynos_ehci->phy[i]);
}
static void exynos_setup_vbus_gpio(struct device *dev)
{
struct gpio_desc *gpio;
int err;
gpio = devm_gpiod_get_optional(dev, "samsung,vbus", GPIOD_OUT_HIGH);
err = PTR_ERR_OR_ZERO(gpio);
if (err)
dev_err(dev, "can't request ehci vbus gpio: %d\n", err);
}
static int exynos_ehci_probe(struct platform_device *pdev)
{
struct exynos_ehci_hcd *exynos_ehci;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
int irq;
int err;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this can go away.
*/
err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return err;
exynos_setup_vbus_gpio(&pdev->dev);
hcd = usb_create_hcd(&exynos_ehci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
exynos_ehci = to_exynos_ehci(hcd);
err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
if (err)
goto fail_clk;
exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ehci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(exynos_ehci->clk);
goto fail_clk;
}
err = clk_prepare_enable(exynos_ehci->clk);
if (err)
goto fail_clk;
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto fail_io;
}
err = exynos_ehci_phy_enable(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "Failed to enable USB phy\n");
goto fail_io;
}
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
/*
* Workaround: reset of_node pointer to avoid conflict between legacy
* Exynos EHCI port subnodes and generic USB device bindings
*/
exynos_ehci->of_node = pdev->dev.of_node;
if (exynos_ehci->legacy_phy)
pdev->dev.of_node = NULL;
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
return 0;
fail_add_hcd:
exynos_ehci_phy_disable(&pdev->dev);
pdev->dev.of_node = exynos_ehci->of_node;
fail_io:
clk_disable_unprepare(exynos_ehci->clk);
fail_clk:
usb_put_hcd(hcd);
return err;
}
static void exynos_ehci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
pdev->dev.of_node = exynos_ehci->of_node;
usb_remove_hcd(hcd);
exynos_ehci_phy_disable(&pdev->dev);
clk_disable_unprepare(exynos_ehci->clk);
usb_put_hcd(hcd);
}
#ifdef CONFIG_PM
static int exynos_ehci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
bool do_wakeup = device_may_wakeup(dev);
int rc;
rc = ehci_suspend(hcd, do_wakeup);
if (rc)
return rc;
exynos_ehci_phy_disable(dev);
clk_disable_unprepare(exynos_ehci->clk);
return rc;
}
static int exynos_ehci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
int ret;
ret = clk_prepare_enable(exynos_ehci->clk);
if (ret)
return ret;
ret = exynos_ehci_phy_enable(dev);
if (ret) {
dev_err(dev, "Failed to enable USB phy\n");
clk_disable_unprepare(exynos_ehci->clk);
return ret;
}
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
ehci_resume(hcd, false);
return 0;
}
#else
#define exynos_ehci_suspend NULL
#define exynos_ehci_resume NULL
#endif
static const struct dev_pm_ops exynos_ehci_pm_ops = {
.suspend = exynos_ehci_suspend,
.resume = exynos_ehci_resume,
};
#ifdef CONFIG_OF
static const struct of_device_id exynos_ehci_match[] = {
{ .compatible = "samsung,exynos4210-ehci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ehci_match);
#endif
static struct platform_driver exynos_ehci_driver = {
.probe = exynos_ehci_probe,
.remove_new = exynos_ehci_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "exynos-ehci",
.pm = &exynos_ehci_pm_ops,
.of_match_table = of_match_ptr(exynos_ehci_match),
}
};
static const struct ehci_driver_overrides exynos_overrides __initconst = {
.extra_priv_size = sizeof(struct exynos_ehci_hcd),
};
static int __init ehci_exynos_init(void)
{
if (usb_disabled())
return -ENODEV;
ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ehci_driver);
}
module_init(ehci_exynos_init);
static void __exit ehci_exynos_cleanup(void)
{
platform_driver_unregister(&exynos_ehci_driver);
}
module_exit(ehci_exynos_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:exynos-ehci");
MODULE_AUTHOR("Jingoo Han");
MODULE_AUTHOR("Joonyoung Shim");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/host/ehci-exynos.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Zhigang.Wei <[email protected]>
* Chunfeng.Yun <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-mtk.h"
#define SSP_BW_BOUNDARY 130000
#define SS_BW_BOUNDARY 51000
/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
#define HS_BW_BOUNDARY 6144
/* usb2 spec section 11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
#define DBG_BUF_EN 64
/* schedule error type */
#define ESCH_SS_Y6 1001
#define ESCH_SS_OVERLAP 1002
#define ESCH_CS_OVERFLOW 1003
#define ESCH_BW_OVERFLOW 1004
#define ESCH_FIXME 1005
/* mtk scheduler bitmasks */
#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
#define EP_BBM(p) ((p) << 11)
#define EP_BOFFSET(p) ((p) & 0x3fff)
#define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
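/*
 * These fields are packed into the two reserved dwords of the endpoint
 * context: reserved[0] carries BPKTS (bits 6:0), BCSCOUNT (bits 10:8) and
 * BBM (bit 11); reserved[1] carries BOFFSET (bits 13:0) and BREPEAT
 * (bits 30:16).  See xhci_mtk_check_bandwidth() below.
 */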
static char *sch_error_string(int err_num)
{
switch (err_num) {
case ESCH_SS_Y6:
return "Can't schedule Start-Split in Y6";
case ESCH_SS_OVERLAP:
return "Can't find a suitable Start-Split location";
case ESCH_CS_OVERFLOW:
return "The last Complete-Split is greater than 7";
case ESCH_BW_OVERFLOW:
return "Bandwidth exceeds the maximum limit";
case ESCH_FIXME:
return "FIXME, to be resolved";
default:
return "Unknown";
}
}
static int is_fs_or_ls(enum usb_device_speed speed)
{
return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
}
static const char *
decode_ep(struct usb_host_endpoint *ep, enum usb_device_speed speed)
{
static char buf[DBG_BUF_EN];
struct usb_endpoint_descriptor *epd = &ep->desc;
unsigned int interval;
const char *unit;
interval = usb_decode_interval(epd, speed);
if (interval % 1000) {
unit = "us";
} else {
unit = "ms";
interval /= 1000;
}
snprintf(buf, DBG_BUF_EN, "%s ep%d%s %s, mpkt:%d, interval:%d/%d%s",
usb_speed_string(speed), usb_endpoint_num(epd),
usb_endpoint_dir_in(epd) ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(epd)),
usb_endpoint_maxp(epd), epd->bInterval, interval, unit);
return buf;
}
static u32 get_bw_boundary(enum usb_device_speed speed)
{
u32 boundary;
switch (speed) {
case USB_SPEED_SUPER_PLUS:
boundary = SSP_BW_BOUNDARY;
break;
case USB_SPEED_SUPER:
boundary = SS_BW_BOUNDARY;
break;
default:
boundary = HS_BW_BOUNDARY;
break;
}
return boundary;
}
/*
* get the bandwidth domain to which @ep belongs.
*
* the bandwidth domain array is saved to @sch_array of struct xhci_hcd_mtk,
* each HS root port is treated as a single bandwidth domain,
* but each SS root port is treated as two bandwidth domains, one for IN eps,
* one for OUT eps.
* @real_port value is defined as follows according to the xHCI spec:
* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
* so the bandwidth domain array is organized as follows for simplification:
* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
*/
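/*
 * Worked example (illustrative): with two SS root ports, a device on
 * SSport1 has real_port 2, so an OUT ep maps to index (2 - 1) * 2 = 2 and
 * an IN ep to index 3; a device on HSport0 has real_port 3 and maps to
 * index 3 + 2 - 1 = 4, the first entry after the SS OUT/IN pairs.
 */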
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
struct xhci_virt_device *virt_dev;
int bw_index;
virt_dev = xhci->devs[udev->slot_id];
if (!virt_dev->real_port) {
WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
return NULL;
}
if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
bw_index = (virt_dev->real_port - 1) * 2;
else
bw_index = (virt_dev->real_port - 1) * 2 + 1;
} else {
/* add one more for each SS port */
bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
}
return &mtk->sch_array[bw_index];
}
static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
{
u32 esit;
esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
if (esit > XHCI_MTK_MAX_ESIT)
esit = XHCI_MTK_MAX_ESIT;
return esit;
}
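/*
 * The interval field in the endpoint context is a power-of-two exponent,
 * so e.g. a stored value of 3 yields an ESIT of 8 microframes (1 ms);
 * anything beyond XHCI_MTK_MAX_ESIT is clamped.
 */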
static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct mu3h_sch_tt *tt, **tt_index, **ptt;
bool allocated_index = false;
if (!utt)
return NULL; /* Not below a TT */
/*
* Find/create our data structure.
* For hubs with a single TT, we get it directly.
* For hubs with multiple TTs, there's an extra level of pointers.
*/
tt_index = NULL;
if (utt->multi) {
tt_index = utt->hcpriv;
if (!tt_index) { /* Create the index array */
tt_index = kcalloc(utt->hub->maxchild,
sizeof(*tt_index), GFP_KERNEL);
if (!tt_index)
return ERR_PTR(-ENOMEM);
utt->hcpriv = tt_index;
allocated_index = true;
}
ptt = &tt_index[udev->ttport - 1];
} else {
ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
}
tt = *ptt;
if (!tt) { /* Create the mu3h_sch_tt */
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
if (!tt) {
if (allocated_index) {
utt->hcpriv = NULL;
kfree(tt_index);
}
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tt->ep_list);
*ptt = tt;
}
return tt;
}
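/*
 * Note: utt->hcpriv thus either points directly at our mu3h_sch_tt
 * (single-TT hub) or at an array of such pointers indexed by port number
 * (multi-TT hub), which is why drop_tt() below counts the remaining
 * entries before freeing the index array.
 */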
/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct mu3h_sch_tt *tt, **tt_index, **ptt;
int i, cnt;
if (!utt || !utt->hcpriv)
return; /* Not below a TT, or never allocated */
cnt = 0;
if (utt->multi) {
tt_index = utt->hcpriv;
ptt = &tt_index[udev->ttport - 1];
/* How many entries are left in tt_index? */
for (i = 0; i < utt->hub->maxchild; ++i)
cnt += !!tt_index[i];
} else {
tt_index = NULL;
ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
}
tt = *ptt;
if (!tt || !list_empty(&tt->ep_list))
return; /* never allocated, or still in use */
*ptt = NULL;
kfree(tt);
if (cnt == 1) {
utt->hcpriv = NULL;
kfree(tt_index);
}
}
static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct mu3h_sch_ep_info *sch_ep;
struct mu3h_sch_bw_info *bw_info;
struct mu3h_sch_tt *tt = NULL;
bw_info = get_bw_info(mtk, udev, ep);
if (!bw_info)
return ERR_PTR(-ENODEV);
sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
if (!sch_ep)
return ERR_PTR(-ENOMEM);
if (is_fs_or_ls(udev->speed)) {
tt = find_tt(udev);
if (IS_ERR(tt)) {
kfree(sch_ep);
return ERR_PTR(-ENOMEM);
}
}
sch_ep->bw_info = bw_info;
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
sch_ep->speed = udev->speed;
INIT_LIST_HEAD(&sch_ep->endpoint);
INIT_LIST_HEAD(&sch_ep->tt_endpoint);
INIT_HLIST_NODE(&sch_ep->hentry);
return sch_ep;
}
static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
struct mu3h_sch_ep_info *sch_ep)
{
u32 ep_type;
u32 maxpkt;
u32 max_burst;
u32 mult;
u32 esit_pkts;
u32 max_esit_payload;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
max_esit_payload =
(CTX_TO_MAX_ESIT_PAYLOAD_HI(
le32_to_cpu(ep_ctx->ep_info)) << 16) |
CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
sch_ep->esit = get_esit(ep_ctx);
sch_ep->num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
sch_ep->ep_type = ep_type;
sch_ep->maxpkt = maxpkt;
sch_ep->offset = 0;
sch_ep->burst_mode = 0;
sch_ep->repeat = 0;
if (sch_ep->speed == USB_SPEED_HIGH) {
sch_ep->cs_count = 0;
/*
* usb_20 spec section 5.9:
* a single microframe is enough for HS synchronous endpoints
* in an interval
*/
sch_ep->num_budget_microframes = 1;
/*
* xHCI spec section 6.2.3.4:
* @max_burst is the number of additional transaction
* opportunities per microframe
*/
sch_ep->pkts = max_burst + 1;
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (sch_ep->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec sections 4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
sch_ep->burst_mode = 1;
/*
* some devices' (d)wBytesPerInterval is set to 0,
* making max_esit_payload 0, so evaluate esit_pkts from
* mult and burst instead
*/
esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
if (esit_pkts == 0)
esit_pkts = (mult + 1) * (max_burst + 1);
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
if (sch_ep->esit == 1)
sch_ep->pkts = esit_pkts;
else if (esit_pkts <= sch_ep->esit)
sch_ep->pkts = 1;
else
sch_ep->pkts = roundup_pow_of_two(esit_pkts)
/ sch_ep->esit;
sch_ep->num_budget_microframes =
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
}
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (is_fs_or_ls(sch_ep->speed)) {
sch_ep->pkts = 1; /* at most one packet for each microframe */
/*
* num_budget_microframes and cs_count will be updated when
* checking the TT for INT_OUT_EP and ISOC/INT_IN_EP types
*/
sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
sch_ep->num_budget_microframes = sch_ep->cs_count;
sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
}
}
/* Get maximum bandwidth when we schedule at offset slot. */
static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
u32 max_bw = 0;
u32 bw;
int i, j, k;
for (i = 0; i < sch_ep->num_esit; i++) {
u32 base = offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (bw > max_bw)
max_bw = bw;
}
}
return max_bw;
}
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool used)
{
int bw_updated;
u32 base;
int i, j;
bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++)
sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
}
static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 tmp;
int base;
int i, j, k;
for (i = 0; i < sch_ep->num_esit; i++) {
base = offset + i * sch_ep->esit;
/*
* Compared with the HS bus, the hub always delays one
* uframe before sending data, no matter the ep type
*/
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX)
return -ESCH_BW_OVERFLOW;
}
}
return 0;
}
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
u32 start_ss, last_ss;
u32 start_cs, last_cs;
if (!sch_ep->sch_tt)
return 0;
start_ss = offset % 8;
if (sch_ep->ep_type == ISOC_OUT_EP) {
last_ss = start_ss + sch_ep->cs_count - 1;
/*
* usb_20 spec section 11.18:
* must never schedule Start-Split in Y6
*/
if (!(start_ss == 7 || last_ss < 6))
return -ESCH_SS_Y6;
} else {
u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
/*
* usb_20 spec section 11.18:
* must never schedule Start-Split in Y6
*/
if (start_ss == 6)
return -ESCH_SS_Y6;
/* one uframe for ss + one uframe for idle */
start_cs = (start_ss + 2) % 8;
last_cs = start_cs + cs_count - 1;
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
/* ss, idle are ignored */
sch_ep->num_budget_microframes = cs_count;
/*
* if interval == 1 and maxp > 752, num_budget_microframes is
* larger than sch_ep->esit and would overstep the boundary
*/
if (sch_ep->num_budget_microframes > sch_ep->esit)
sch_ep->num_budget_microframes = sch_ep->esit;
}
return check_fs_bus_bw(sch_ep, offset);
}
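/*
 * Example (illustrative): an ISOC OUT ep with cs_count 3 may start in
 * uframe Y0..Y3 (last Start-Split in Y5) or exactly in Y7; a start in Y4
 * would put the last Start-Split in Y6 and is rejected with -ESCH_SS_Y6.
 */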
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
int bw_updated;
u32 base;
int i, j;
bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++)
tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
if (used)
list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
else
list_del(&sch_ep->tt_endpoint);
}
static int load_ep_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool loaded)
{
if (sch_ep->sch_tt)
update_sch_tt(sch_ep, loaded);
/* update bus bandwidth info */
update_bus_bw(sch_bw, sch_ep, loaded);
sch_ep->allocated = loaded;
return 0;
}
static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
{
struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
u32 offset;
u32 worst_bw;
u32 min_bw = ~0;
int min_index = -1;
int ret = 0;
/*
* Search through all possible schedule microframes
* and find the one whose worst-case bandwidth is minimal.
*/
for (offset = 0; offset < sch_ep->esit; offset++) {
ret = check_sch_tt(sch_ep, offset);
if (ret)
continue;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
if (worst_bw > bw_boundary)
continue;
if (min_bw > worst_bw) {
min_bw = worst_bw;
min_index = offset;
}
/* use first-fit for LS/FS */
if (sch_ep->sch_tt && min_index >= 0)
break;
if (min_bw == 0)
break;
}
if (min_index < 0)
return ret ? ret : -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index;
return load_ep_bw(sch_bw, sch_ep, true);
}
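/*
 * Note: this is a best-fit search for HS/SS endpoints (minimize the
 * worst-case bus load over all offsets), but the loop breaks out early
 * for LS/FS endpoints behind a TT (first fit) and whenever a perfectly
 * idle slot (min_bw == 0) is found.
 */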
static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
struct mu3h_sch_ep_info *sch_ep)
{
/* only release bandwidth for eps that passed the check in check_sch_bw() */
if (sch_ep->allocated)
load_ep_bw(sch_ep->bw_info, sch_ep, false);
if (sch_ep->sch_tt)
drop_tt(udev);
list_del(&sch_ep->endpoint);
hlist_del(&sch_ep->hentry);
kfree(sch_ep);
}
static bool need_bw_sch(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
bool has_tt = udev->tt && udev->tt->hub->parent;
/* only for periodic endpoints */
if (usb_endpoint_xfer_control(&ep->desc)
|| usb_endpoint_xfer_bulk(&ep->desc))
return false;
/*
* LS & FS periodic endpoints whose device is not behind
* a TT are also ignored; the root hub schedules them directly,
* but the @bpkts field of the endpoint context must be set to 1.
*/
if (is_fs_or_ls(udev->speed) && !has_tt)
return false;
/* skip endpoint with zero maxpkt */
if (usb_endpoint_maxp(&ep->desc) == 0)
return false;
return true;
}
int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
{
struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
struct mu3h_sch_bw_info *sch_array;
int num_usb_bus;
/* ss IN and OUT are separated */
num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
if (sch_array == NULL)
return -ENOMEM;
mtk->sch_array = sch_array;
INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
hash_init(mtk->sch_ep_hash);
return 0;
}
void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk)
{
kfree(mtk->sch_array);
}
static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_ep_ctx *ep_ctx;
struct xhci_virt_device *virt_dev;
struct mu3h_sch_ep_info *sch_ep;
unsigned int ep_index;
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
if (!need_bw_sch(udev, ep)) {
/*
* set @bpkts to 1 if it is an LS or FS periodic endpoint whose
* device is not connected through an external HS hub
*/
if (usb_endpoint_xfer_int(&ep->desc)
|| usb_endpoint_xfer_isoc(&ep->desc))
ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
return 0;
}
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
sch_ep = create_sch_ep(mtk, udev, ep);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(ep_ctx, sch_ep);
list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
hash_add(mtk->sch_ep_hash, &sch_ep->hentry, (unsigned long)ep);
return 0;
}
static void drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct mu3h_sch_ep_info *sch_ep;
struct hlist_node *hn;
if (!need_bw_sch(udev, ep))
return;
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
hash_for_each_possible_safe(mtk->sch_ep_hash, sch_ep,
hn, hentry, (unsigned long)ep) {
if (sch_ep->ep == ep) {
destroy_sch_ep(mtk, udev, sch_ep);
break;
}
}
}
int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
struct mu3h_sch_ep_info *sch_ep;
int ret;
xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
struct xhci_ep_ctx *ep_ctx;
struct usb_host_endpoint *ep = sch_ep->ep;
unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
ret = check_sch_bw(sch_ep);
if (ret) {
xhci_err(xhci, "Not enough bandwidth! (%s)\n",
sch_error_string(-ret));
return -ENOSPC;
}
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
| EP_BCSCOUNT(sch_ep->cs_count)
| EP_BBM(sch_ep->burst_mode));
ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
| EP_BREPEAT(sch_ep->repeat));
xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
sch_ep->offset, sch_ep->repeat);
}
ret = xhci_check_bandwidth(hcd, udev);
if (!ret)
list_del_init(&mtk->bw_ep_chk_list);
return ret;
}
void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct mu3h_sch_ep_info *sch_ep, *tmp;
xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint)
destroy_sch_ep(mtk, udev, sch_ep);
xhci_reset_bandwidth(hcd, udev);
}
int xhci_mtk_add_ep(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int ret;
ret = xhci_add_endpoint(hcd, udev, ep);
if (ret)
return ret;
if (ep->hcpriv)
ret = add_ep_quirk(hcd, udev, ep);
return ret;
}
int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int ret;
ret = xhci_drop_endpoint(hcd, udev, ep);
if (ret)
return ret;
/* no need to check @ep->hcpriv; xhci_endpoint_disable() sets it to NULL */
drop_ep_quirk(hcd, udev, ep);
return 0;
}
| linux-master | drivers/usb/host/xhci-mtk-sch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DSPS platforms "glue layer"
*
* Copyright (C) 2012, by Texas Instruments
*
* Based on the am35x "glue layer" code.
*
* This file is part of the Inventra Controller Driver for Linux.
*
* musb_dsps.c will be a common file for all the TI DSPS platforms
* such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
* For now only ti81x uses this; in the future davinci.c, am35x.c
* and da8xx.c would be merged into this file after testing.
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/sizes.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/usb/of.h>
#include <linux/debugfs.h>
#include "musb_core.h"
static const struct of_device_id musb_dsps_of_match[];
/*
* DSPS musb wrapper register offset.
* FIXME: This should be expanded to have all the wrapper registers from TI DSPS
* musb IPs.
*/
struct dsps_musb_wrapper {
u16 revision;
u16 control;
u16 status;
u16 epintr_set;
u16 epintr_clear;
u16 epintr_status;
u16 coreintr_set;
u16 coreintr_clear;
u16 coreintr_status;
u16 phy_utmi;
u16 mode;
u16 tx_mode;
u16 rx_mode;
/* bit positions for control */
unsigned reset:5;
/* bit positions for interrupt */
unsigned usb_shift:5;
u32 usb_mask;
u32 usb_bitmap;
unsigned drvvbus:5;
unsigned txep_shift:5;
u32 txep_mask;
u32 txep_bitmap;
unsigned rxep_shift:5;
u32 rxep_mask;
u32 rxep_bitmap;
/* bit positions for phy_utmi */
unsigned otg_disable:5;
/* bit positions for mode */
unsigned iddig:5;
unsigned iddig_mux:5;
/* miscellaneous stuff */
unsigned poll_timeout;
};
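/*
 * The offsets and bit positions above parameterize the glue so a single
 * driver can serve several TI SoC integrations; the per-SoC wrapper
 * instance is presumably selected via the data pointers of the
 * musb_dsps_of_match table declared above.
 */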
/*
* register shadow for suspend
*/
struct dsps_context {
u32 control;
u32 epintr;
u32 coreintr;
u32 phy_utmi;
u32 mode;
u32 tx_mode;
u32 rx_mode;
};
/*
* DSPS glue structure.
*/
struct dsps_glue {
struct device *dev;
struct platform_device *musb; /* child musb pdev */
const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
int vbus_irq; /* optional vbus irq */
unsigned long last_timer; /* last timer data for each instance */
bool sw_babble_enabled;
void __iomem *usbss_base;
struct dsps_context context;
struct debugfs_regset32 regset;
struct dentry *dbgfs_root;
};
static const struct debugfs_reg32 dsps_musb_regs[] = {
{ "revision", 0x00 },
{ "control", 0x14 },
{ "status", 0x18 },
{ "eoi", 0x24 },
{ "intr0_stat", 0x30 },
{ "intr1_stat", 0x34 },
{ "intr0_set", 0x38 },
{ "intr1_set", 0x3c },
{ "txmode", 0x70 },
{ "rxmode", 0x74 },
{ "autoreq", 0xd0 },
{ "srpfixtime", 0xd4 },
{ "tdown", 0xd8 },
{ "phy_utmi", 0xe0 },
{ "mode", 0xe8 },
};
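/*
 * This table only backs the debugfs "regdump" view created in
 * dsps_musb_dbg_init(); runtime accesses go through the per-SoC
 * dsps_musb_wrapper offsets instead.
 */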
static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
{
struct musb *musb = platform_get_drvdata(glue->musb);
int wait;
if (wait_ms < 0)
wait = msecs_to_jiffies(glue->wrp->poll_timeout);
else
wait = msecs_to_jiffies(wait_ms);
mod_timer(&musb->dev_timer, jiffies + wait);
}
/*
* If no vbus irq from the PMIC is configured, we need to poll VBUS status.
*/
static void dsps_mod_timer_optional(struct dsps_glue *glue)
{
if (glue->vbus_irq)
return;
dsps_mod_timer(glue, -1);
}
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS 0x28
#define USBSS_IRQ_ENABLER 0x2c
#define USBSS_IRQ_CLEARR 0x30
#define USBSS_IRQ_PD_COMP (1 << 2)
/*
* dsps_musb_enable - enable interrupts
*/
static void dsps_musb_enable(struct musb *musb)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
u32 epmask, coremask;
/* Workaround: setup IRQs through both register sets. */
epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) |
((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
musb_writel(reg_base, wrp->epintr_set, epmask);
musb_writel(reg_base, wrp->coreintr_set, coremask);
/*
* start polling for runtime PM active and idle,
* and for ID change in dual-role idle mode.
*/
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
dsps_mod_timer(glue, -1);
}
/*
* dsps_musb_disable - disable HDRC and flush interrupts
*/
static void dsps_musb_disable(struct musb *musb)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
del_timer_sync(&musb->dev_timer);
}
/* Caller must take musb->lock */
static int dsps_check_status(struct musb *musb, void *unused)
{
void __iomem *mregs = musb->mregs;
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
u8 devctl;
int skip_session = 0;
if (glue->vbus_irq)
del_timer(&musb->dev_timer);
/*
* We poll because DSPS IPs won't expose several OTG-critical
* status change events (from the transceiver) otherwise.
*/
devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
if (musb->port_mode == MUSB_HOST) {
musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
dsps_mod_timer_optional(glue);
break;
}
fallthrough;
case OTG_STATE_A_WAIT_BCON:
/* keep VBUS on for host-only mode */
if (musb->port_mode == MUSB_HOST) {
dsps_mod_timer_optional(glue);
break;
}
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
fallthrough;
case OTG_STATE_A_IDLE:
case OTG_STATE_B_IDLE:
if (!glue->vbus_irq) {
if (devctl & MUSB_DEVCTL_BDEVICE) {
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
if (musb->port_mode == MUSB_PERIPHERAL)
skip_session = 1;
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
musb_writeb(mregs, MUSB_DEVCTL,
MUSB_DEVCTL_SESSION);
}
dsps_mod_timer_optional(glue);
break;
case OTG_STATE_A_WAIT_VFALL:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, wrp->coreintr_set,
MUSB_INTR_VBUSERROR << wrp->usb_shift);
break;
default:
break;
}
return 0;
}
static void otg_timer(struct timer_list *t)
{
struct musb *musb = from_timer(musb, t, dev_timer);
struct device *dev = musb->controller;
unsigned long flags;
int err;
err = pm_runtime_get(dev);
if ((err != -EINPROGRESS) && err < 0) {
dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
pm_runtime_put_noidle(dev);
return;
}
spin_lock_irqsave(&musb->lock, flags);
err = musb_queue_resume_work(musb, dsps_check_status, NULL);
if (err < 0)
dev_err(dev, "%s resume work: %i\n", __func__, err);
spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
static void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
{
u32 epintr;
struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
/* musb->lock might already been held */
epintr = (1 << epnum) << wrp->rxep_shift;
musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
}
static irqreturn_t dsps_interrupt(int irq, void *hci)
{
struct musb *musb = hci;
void __iomem *reg_base = musb->ctrl_base;
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
u32 epintr, usbintr;
spin_lock_irqsave(&musb->lock, flags);
/* Get endpoint interrupts */
epintr = musb_readl(reg_base, wrp->epintr_status);
musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
if (epintr)
musb_writel(reg_base, wrp->epintr_status, epintr);
/* Get usb core interrupts */
usbintr = musb_readl(reg_base, wrp->coreintr_status);
if (!usbintr && !epintr)
goto out;
musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
if (usbintr)
musb_writel(reg_base, wrp->coreintr_status, usbintr);
dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
usbintr, epintr);
if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
int drvvbus = musb_readl(reg_base, wrp->status);
void __iomem *mregs = musb->mregs;
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err;
err = musb->int_usb & MUSB_INTR_VBUSERROR;
if (err) {
/*
* The Mentor core doesn't debounce VBUS as needed
* to cope with device connect current spikes. This
* means it's not uncommon for bus-powered devices
* to get VBUS errors during enumeration.
*
* This is a workaround, but newer RTL from Mentor
* seems to allow a better one: "re"-starting sessions
* without waiting for VBUS to stop registering in
* devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
dsps_mod_timer_optional(glue);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
dsps_mod_timer_optional(glue);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
/* NOTE: this must complete power-on within 100 ms. */
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
ret = IRQ_HANDLED;
}
if (musb->int_tx || musb->int_rx || musb->int_usb)
ret |= musb_interrupt(musb);
/* Poll for ID change and connect */
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_A_WAIT_BCON:
dsps_mod_timer_optional(glue);
break;
default:
break;
}
out:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
{
struct dentry *root;
char buf[128];
sprintf(buf, "%s.dsps", dev_name(musb->controller));
root = debugfs_create_dir(buf, usb_debug_root);
glue->dbgfs_root = root;
glue->regset.regs = dsps_musb_regs;
glue->regset.nregs = ARRAY_SIZE(dsps_musb_regs);
glue->regset.base = musb->ctrl_base;
debugfs_create_regset32("regdump", S_IRUGO, root, &glue->regset);
return 0;
}
static int dsps_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
struct platform_device *parent = to_platform_device(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base;
struct resource *r;
u32 rev, val;
int ret;
r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
reg_base = devm_ioremap_resource(dev, r);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
musb->ctrl_base = reg_base;
	/* The NOP PHY driver needs changes before dual instances can be supported */
musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "phys", 0);
if (IS_ERR(musb->xceiv))
return PTR_ERR(musb->xceiv);
musb->phy = devm_phy_get(dev->parent, "usb2-phy");
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, wrp->revision);
if (!rev)
return -ENODEV;
if (IS_ERR(musb->phy)) {
musb->phy = NULL;
} else {
ret = phy_init(musb->phy);
if (ret < 0)
return ret;
ret = phy_power_on(musb->phy);
if (ret) {
phy_exit(musb->phy);
return ret;
}
}
timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the musb */
musb_writel(reg_base, wrp->control, (1 << wrp->reset));
musb->isr = dsps_interrupt;
/* reset the otgdisable bit, needed for host mode to work */
val = musb_readl(reg_base, wrp->phy_utmi);
val &= ~(1 << wrp->otg_disable);
musb_writel(musb->ctrl_base, wrp->phy_utmi, val);
	/*
	 * Check whether this dsps version has babble control enabled.
	 * The latest silicon revisions enable the babble control logic;
	 * if MUSB_BABBLE_CTL reads back 0x4, the logic is present.
	 */
val = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
if (val & MUSB_BABBLE_RCV_DISABLE) {
glue->sw_babble_enabled = true;
val |= MUSB_BABBLE_SW_SESSION_CTRL;
musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
}
dsps_mod_timer(glue, -1);
return dsps_musb_dbg_init(musb, glue);
}
static int dsps_musb_exit(struct musb *musb)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
del_timer_sync(&musb->dev_timer);
phy_power_off(musb->phy);
phy_exit(musb->phy);
debugfs_remove_recursive(glue->dbgfs_root);
return 0;
}
static int dsps_musb_set_mode(struct musb *musb, u8 mode)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *ctrl_base = musb->ctrl_base;
u32 reg;
reg = musb_readl(ctrl_base, wrp->mode);
switch (mode) {
case MUSB_HOST:
reg &= ~(1 << wrp->iddig);
/*
* if we're setting mode to host-only or device-only, we're
* going to ignore whatever the PHY sends us and just force
* ID pin status by SW
*/
reg |= (1 << wrp->iddig_mux);
musb_writel(ctrl_base, wrp->mode, reg);
musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
case MUSB_PERIPHERAL:
reg |= (1 << wrp->iddig);
/*
* if we're setting mode to host-only or device-only, we're
* going to ignore whatever the PHY sends us and just force
* ID pin status by SW
*/
reg |= (1 << wrp->iddig_mux);
musb_writel(ctrl_base, wrp->mode, reg);
break;
case MUSB_OTG:
musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
default:
dev_err(glue->dev, "unsupported mode %d\n", mode);
return -EINVAL;
}
return 0;
}
static bool dsps_sw_babble_control(struct musb *musb)
{
u8 babble_ctl;
bool session_restart = false;
babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
dev_dbg(musb->controller, "babble: MUSB_BABBLE_CTL value %x\n",
babble_ctl);
/*
* check line monitor flag to check whether babble is
* due to noise
*/
dev_dbg(musb->controller, "STUCK_J is %s\n",
babble_ctl & MUSB_BABBLE_STUCK_J ? "set" : "reset");
if (babble_ctl & MUSB_BABBLE_STUCK_J) {
int timeout = 10;
		/*
		 * If the babble is due to noise, set transmit idle (the D7
		 * bit) to resume normal operation.
		 */
babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
babble_ctl |= MUSB_BABBLE_FORCE_TXIDLE;
musb_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
/* wait till line monitor flag cleared */
dev_dbg(musb->controller, "Set TXIDLE, wait J to clear\n");
do {
babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
udelay(1);
} while ((babble_ctl & MUSB_BABBLE_STUCK_J) && timeout--);
/* check whether stuck_at_j bit cleared */
if (babble_ctl & MUSB_BABBLE_STUCK_J) {
			/*
			 * A real babble condition has occurred; restart the
			 * controller to start the session again.
			 */
dev_dbg(musb->controller, "J not cleared, misc (%x)\n",
babble_ctl);
session_restart = true;
}
} else {
session_restart = true;
}
return session_restart;
}
static int dsps_musb_recover(struct musb *musb)
{
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
int session_restart = 0;
if (glue->sw_babble_enabled)
session_restart = dsps_sw_babble_control(musb);
else
session_restart = 1;
return session_restart ? 0 : -EPIPE;
}
/* Similar to am35x, dm81xx supports only 32-bit read operations */
static void dsps_read_fifo32(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
if (len >= 4) {
ioread32_rep(fifo, dst, len >> 2);
dst += len & ~0x03;
len &= 0x03;
}
/* Read any remaining 1 to 3 bytes */
if (len > 0) {
u32 val = musb_readl(fifo, 0);
memcpy(dst, &val, len);
}
}
#ifdef CONFIG_USB_TI_CPPI41_DMA
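/* Acknowledge the USBSS packet-descriptor-completion interrupt raised by CPPI 4.1 */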
static void dsps_dma_controller_callback(struct dma_controller *c)
{
struct musb *musb = c->musb;
struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
void __iomem *usbss_base = glue->usbss_base;
u32 status;
status = musb_readl(usbss_base, USBSS_IRQ_STATUS);
if (status & USBSS_IRQ_PD_COMP)
musb_writel(usbss_base, USBSS_IRQ_STATUS, USBSS_IRQ_PD_COMP);
}
static struct dma_controller *
dsps_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct dma_controller *controller;
struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
void __iomem *usbss_base = glue->usbss_base;
controller = cppi41_dma_controller_create(musb, base);
if (IS_ERR_OR_NULL(controller))
return controller;
musb_writel(usbss_base, USBSS_IRQ_ENABLER, USBSS_IRQ_PD_COMP);
controller->dma_callback = dsps_dma_controller_callback;
return controller;
}
#ifdef CONFIG_PM_SLEEP
static void dsps_dma_controller_suspend(struct dsps_glue *glue)
{
void __iomem *usbss_base = glue->usbss_base;
musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
}
static void dsps_dma_controller_resume(struct dsps_glue *glue)
{
void __iomem *usbss_base = glue->usbss_base;
musb_writel(usbss_base, USBSS_IRQ_ENABLER, USBSS_IRQ_PD_COMP);
}
#endif
#else /* CONFIG_USB_TI_CPPI41_DMA */
#ifdef CONFIG_PM_SLEEP
static void dsps_dma_controller_suspend(struct dsps_glue *glue) {}
static void dsps_dma_controller_resume(struct dsps_glue *glue) {}
#endif
#endif /* CONFIG_USB_TI_CPPI41_DMA */
static struct musb_platform_ops dsps_ops = {
.quirks = MUSB_DMA_CPPI41 | MUSB_INDEXED_EP,
.init = dsps_musb_init,
.exit = dsps_musb_exit,
#ifdef CONFIG_USB_TI_CPPI41_DMA
.dma_init = dsps_dma_controller_create,
.dma_exit = cppi41_dma_controller_destroy,
#endif
.enable = dsps_musb_enable,
.disable = dsps_musb_disable,
.set_mode = dsps_musb_set_mode,
.recover = dsps_musb_recover,
.clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
};
static u64 musb_dmamask = DMA_BIT_MASK(32);
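/* Read an optional u32 DT property; a missing property reads as 0 */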
static int get_int_prop(struct device_node *dn, const char *s)
{
int ret;
u32 val;
ret = of_property_read_u32(dn, s, &val);
if (ret)
return 0;
return val;
}
static int dsps_create_musb_pdev(struct dsps_glue *glue,
struct platform_device *parent)
{
struct musb_hdrc_platform_data pdata;
struct resource resources[2];
struct resource *res;
struct device *dev = &parent->dev;
struct musb_hdrc_config *config;
struct platform_device *musb;
struct device_node *dn = parent->dev.of_node;
int ret, val;
memset(resources, 0, sizeof(resources));
res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
if (!res) {
dev_err(dev, "failed to get memory.\n");
return -EINVAL;
}
resources[0] = *res;
ret = platform_get_irq_byname(parent, "mc");
if (ret < 0)
return ret;
resources[1].start = ret;
resources[1].end = ret;
resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
resources[1].name = "mc";
/* allocate the child platform device */
musb = platform_device_alloc("musb-hdrc",
(resources[0].start & 0xFFF) == 0x400 ? 0 : 1);
if (!musb) {
dev_err(dev, "failed to allocate musb device\n");
return -ENOMEM;
}
musb->dev.parent = dev;
musb->dev.dma_mask = &musb_dmamask;
musb->dev.coherent_dma_mask = musb_dmamask;
device_set_of_node_from_dev(&musb->dev, &parent->dev);
glue->musb = musb;
ret = platform_device_add_resources(musb, resources,
ARRAY_SIZE(resources));
if (ret) {
dev_err(dev, "failed to add resources\n");
goto err;
}
config = devm_kzalloc(&parent->dev, sizeof(*config), GFP_KERNEL);
if (!config) {
ret = -ENOMEM;
goto err;
}
pdata.config = config;
pdata.platform_ops = &dsps_ops;
config->num_eps = get_int_prop(dn, "mentor,num-eps");
config->ram_bits = get_int_prop(dn, "mentor,ram-bits");
config->host_port_deassert_reset_at_resume = 1;
pdata.mode = musb_get_mode(dev);
/* DT keeps this entry in mA, musb expects it as per USB spec */
pdata.power = get_int_prop(dn, "mentor,power") / 2;
ret = of_property_read_u32(dn, "mentor,multipoint", &val);
if (!ret && val)
config->multipoint = true;
config->maximum_speed = usb_get_maximum_speed(&parent->dev);
switch (config->maximum_speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
break;
case USB_SPEED_SUPER:
dev_warn(dev, "ignore incorrect maximum_speed "
"(super-speed) setting in dts");
fallthrough;
default:
config->maximum_speed = USB_SPEED_HIGH;
}
ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
if (ret) {
dev_err(dev, "failed to add platform_data\n");
goto err;
}
ret = platform_device_add(musb);
if (ret) {
dev_err(dev, "failed to register musb device\n");
goto err;
}
return 0;
err:
platform_device_put(musb);
return ret;
}
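/* Threaded VBUS IRQ handler: kick the OTG state-machine poll timer */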
static irqreturn_t dsps_vbus_threaded_irq(int irq, void *priv)
{
struct dsps_glue *glue = priv;
struct musb *musb = platform_get_drvdata(glue->musb);
if (!musb)
return IRQ_NONE;
dev_dbg(glue->dev, "VBUS interrupt\n");
dsps_mod_timer(glue, 0);
return IRQ_HANDLED;
}
static int dsps_setup_optional_vbus_irq(struct platform_device *pdev,
struct dsps_glue *glue)
{
int error;
glue->vbus_irq = platform_get_irq_byname(pdev, "vbus");
if (glue->vbus_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (glue->vbus_irq <= 0) {
glue->vbus_irq = 0;
return 0;
}
error = devm_request_threaded_irq(glue->dev, glue->vbus_irq,
NULL, dsps_vbus_threaded_irq,
IRQF_ONESHOT,
"vbus", glue);
if (error) {
glue->vbus_irq = 0;
return error;
}
dev_dbg(glue->dev, "VBUS irq %i configured\n", glue->vbus_irq);
return 0;
}
static int dsps_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct dsps_musb_wrapper *wrp;
struct dsps_glue *glue;
int ret;
if (!strcmp(pdev->name, "musb-hdrc"))
return -ENODEV;
match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
if (!match) {
dev_err(&pdev->dev, "fail to get matching of_match struct\n");
return -EINVAL;
}
wrp = match->data;
if (of_device_is_compatible(pdev->dev.of_node, "ti,musb-dm816"))
dsps_ops.read_fifo = dsps_read_fifo32;
/* allocate glue */
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
glue->dev = &pdev->dev;
glue->wrp = wrp;
glue->usbss_base = of_iomap(pdev->dev.parent->of_node, 0);
if (!glue->usbss_base)
return -ENXIO;
platform_set_drvdata(pdev, glue);
pm_runtime_enable(&pdev->dev);
ret = dsps_create_musb_pdev(glue, pdev);
if (ret)
goto err;
if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
ret = dsps_setup_optional_vbus_irq(pdev, glue);
if (ret)
goto unregister_pdev;
}
return 0;
unregister_pdev:
platform_device_unregister(glue->musb);
err:
pm_runtime_disable(&pdev->dev);
iounmap(glue->usbss_base);
return ret;
}
static void dsps_remove(struct platform_device *pdev)
{
struct dsps_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
pm_runtime_disable(&pdev->dev);
iounmap(glue->usbss_base);
}
static const struct dsps_musb_wrapper am33xx_driver_data = {
.revision = 0x00,
.control = 0x14,
.status = 0x18,
.epintr_set = 0x38,
.epintr_clear = 0x40,
.epintr_status = 0x30,
.coreintr_set = 0x3c,
.coreintr_clear = 0x44,
.coreintr_status = 0x34,
.phy_utmi = 0xe0,
.mode = 0xe8,
.tx_mode = 0x70,
.rx_mode = 0x74,
.reset = 0,
.otg_disable = 21,
.iddig = 8,
.iddig_mux = 7,
.usb_shift = 0,
.usb_mask = 0x1ff,
.usb_bitmap = (0x1ff << 0),
.drvvbus = 8,
.txep_shift = 0,
.txep_mask = 0xffff,
.txep_bitmap = (0xffff << 0),
.rxep_shift = 16,
.rxep_mask = 0xfffe,
.rxep_bitmap = (0xfffe << 16),
.poll_timeout = 2000, /* ms */
};
static const struct of_device_id musb_dsps_of_match[] = {
{ .compatible = "ti,musb-am33xx",
.data = &am33xx_driver_data, },
{ .compatible = "ti,musb-dm816",
.data = &am33xx_driver_data, },
{ },
};
MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
#ifdef CONFIG_PM_SLEEP
static int dsps_suspend(struct device *dev)
{
struct dsps_glue *glue = dev_get_drvdata(dev);
const struct dsps_musb_wrapper *wrp = glue->wrp;
struct musb *musb = platform_get_drvdata(glue->musb);
void __iomem *mbase;
int ret;
if (!musb)
		/* This can happen if the musb device probe was deferred (-EPROBE_DEFER) */
return 0;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
del_timer_sync(&musb->dev_timer);
mbase = musb->ctrl_base;
glue->context.control = musb_readl(mbase, wrp->control);
glue->context.epintr = musb_readl(mbase, wrp->epintr_set);
glue->context.coreintr = musb_readl(mbase, wrp->coreintr_set);
glue->context.phy_utmi = musb_readl(mbase, wrp->phy_utmi);
glue->context.mode = musb_readl(mbase, wrp->mode);
glue->context.tx_mode = musb_readl(mbase, wrp->tx_mode);
glue->context.rx_mode = musb_readl(mbase, wrp->rx_mode);
dsps_dma_controller_suspend(glue);
return 0;
}
static int dsps_resume(struct device *dev)
{
struct dsps_glue *glue = dev_get_drvdata(dev);
const struct dsps_musb_wrapper *wrp = glue->wrp;
struct musb *musb = platform_get_drvdata(glue->musb);
void __iomem *mbase;
if (!musb)
return 0;
dsps_dma_controller_resume(glue);
mbase = musb->ctrl_base;
musb_writel(mbase, wrp->control, glue->context.control);
musb_writel(mbase, wrp->epintr_set, glue->context.epintr);
musb_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
musb_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
musb_writel(mbase, wrp->mode, glue->context.mode);
musb_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
musb_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_OTG)
dsps_mod_timer(glue, -1);
pm_runtime_put(dev);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
static struct platform_driver dsps_usbss_driver = {
.probe = dsps_probe,
.remove_new = dsps_remove,
.driver = {
.name = "musb-dsps",
.pm = &dsps_pm_ops,
.of_match_table = musb_dsps_of_match,
},
};
MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer");
MODULE_AUTHOR("Ravi B <[email protected]>");
MODULE_AUTHOR("Ajay Kumar Gupta <[email protected]>");
MODULE_LICENSE("GPL v2");
module_platform_driver(dsps_usbss_driver);
| linux-master | drivers/usb/musb/musb_dsps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DA8xx/OMAP-L1x "glue layer"
*
* Copyright (c) 2008-2009 MontaVista Software, Inc. <[email protected]>
*
* Based on the DaVinci "glue layer" code.
* Copyright (C) 2005-2006 by Texas Instruments
*
* DT support
* Copyright (c) 2016 Petr Kulhavy <[email protected]>
*
* This file is part of the Inventra Controller Driver for Linux.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
/*
* DA8XX specific definitions
*/
/* USB 2.0 OTG module registers */
#define DA8XX_USB_REVISION_REG 0x00
#define DA8XX_USB_CTRL_REG 0x04
#define DA8XX_USB_STAT_REG 0x08
#define DA8XX_USB_EMULATION_REG 0x0c
#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
#define DA8XX_USB_INTR_SRC_REG 0x20
#define DA8XX_USB_INTR_SRC_SET_REG 0x24
#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
#define DA8XX_USB_INTR_MASK_REG 0x2c
#define DA8XX_USB_INTR_MASK_SET_REG 0x30
#define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34
#define DA8XX_USB_INTR_SRC_MASKED_REG 0x38
#define DA8XX_USB_END_OF_INTR_REG 0x3c
#define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2))
/* Control register bits */
#define DA8XX_SOFT_RESET_MASK 1
#define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */
#define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */
/* USB interrupt register bits */
#define DA8XX_INTR_USB_SHIFT 16
#define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */
/* interrupts and DRVVBUS interrupt */
#define DA8XX_INTR_DRVVBUS 0x100
#define DA8XX_INTR_RX_SHIFT 8
#define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT)
#define DA8XX_INTR_TX_SHIFT 0
#define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT)
#define DA8XX_MENTOR_CORE_OFFSET 0x400
struct da8xx_glue {
struct device *dev;
struct platform_device *musb;
struct platform_device *usb_phy;
struct clk *clk;
struct phy *phy;
};
/*
* Because we don't set CTRL.UINT, it's "important" to:
* - not read/write INTRUSB/INTRUSBE (except during
* initial setup, as a workaround);
* - use INTSET/INTCLR instead.
*/
/**
 * da8xx_musb_enable - enable interrupts
 * @musb: the MUSB controller instance
 */
static void da8xx_musb_enable(struct musb *musb)
{
void __iomem *reg_base = musb->ctrl_base;
u32 mask;
/* Workaround: setup IRQs through both register sets. */
mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) |
((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) |
DA8XX_INTR_USB_MASK;
musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask);
/* Force the DRVVBUS IRQ so we can start polling for ID change. */
musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG,
DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
}
/**
 * da8xx_musb_disable - disable HDRC and flush interrupts
 * @musb: the MUSB controller instance
 */
static void da8xx_musb_disable(struct musb *musb)
{
void __iomem *reg_base = musb->ctrl_base;
musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG,
DA8XX_INTR_USB_MASK |
DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK);
musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
}
#define portstate(stmt) stmt
static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
{
WARN_ON(is_on && is_peripheral_active(musb));
}
#define POLL_SECONDS 2
static void otg_timer(struct timer_list *t)
{
struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
	/*
	 * We poll because DaVinci parts won't otherwise expose several
	 * OTG-critical status change events (from the transceiver).
	 */
devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
case OTG_STATE_A_WAIT_VFALL:
		/*
		 * Wait until VBUS falls below SessionEnd (~0.2 V); the 1.3
		 * RTL otherwise seems to mis-handle session "start" (or, in
		 * our case, "recover") in routine "VBUS was valid by the time
		 * VBUSERR got reported during enumeration" cases.
		 */
if (devctl & MUSB_DEVCTL_VBUS) {
mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
break;
}
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
break;
case OTG_STATE_B_IDLE:
/*
* There's no ID-changed IRQ, so we have no good way to tell
* when to switch to the A-Default state machine (by setting
* the DEVCTL.Session bit).
*
* Workaround: whenever we're in B_IDLE, try setting the
* session flag every few seconds. If it works, ID was
* grounded and we're now in the A-Default state machine.
*
* NOTE: setting the session flag is _supposed_ to trigger
* SRP but clearly it doesn't.
*/
musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
}
spin_unlock_irqrestore(&musb->lock, flags);
}
static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
{
static unsigned long last_timer;
if (timeout == 0)
timeout = jiffies + msecs_to_jiffies(3);
	/* Never idle while active, or as a host when no VBUS timeout is set */
if (musb->is_active || (musb->a_wait_bcon == 0 &&
musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
return;
}
last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
mod_timer(&musb->dev_timer, timeout);
}
static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
{
struct musb *musb = hci;
void __iomem *reg_base = musb->ctrl_base;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
u32 status;
spin_lock_irqsave(&musb->lock, flags);
/*
* NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through
* the Mentor registers (except for setup), use the TI ones and EOI.
*/
/* Acknowledge and handle non-CPPI interrupts */
status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG);
if (!status)
goto eoi;
musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status);
dev_dbg(musb->controller, "USB IRQ %08x\n", status);
musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
/*
* DRVVBUS IRQs are the only proxy we have (a very poor one!) for
* DA8xx's missing ID change IRQ. We need an ID change IRQ to
* switch appropriately between halves of the OTG state machine.
* Managing DEVCTL.Session per Mentor docs requires that we know its
* value but DEVCTL.BDevice is invalid without DEVCTL.Session set.
* Also, DRVVBUS pulses for SRP (but not at 5 V)...
*/
if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) {
int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG);
void __iomem *mregs = musb->mregs;
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err;
err = musb->int_usb & MUSB_INTR_VBUSERROR;
if (err) {
/*
* The Mentor core doesn't debounce VBUS as needed
* to cope with device connect current spikes. This
* means it's not uncommon for bus-powered devices
* to get VBUS errors during enumeration.
*
* This is a workaround, but newer RTL from Mentor
* seems to allow a better one: "re"-starting sessions
* without waiting for VBUS to stop registering in
* devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&musb->dev_timer);
} else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
			/*
			 * When a babble condition happens, a drvvbus interrupt
			 * is also generated. Ignore this drvvbus interrupt
			 * and let the babble interrupt handler recover the
			 * controller; otherwise, the host-mode flag is lost
			 * due to the MUSB_DEV_MODE() call below and the
			 * babble recovery logic will not be called.
			 */
musb->is_active = 0;
MUSB_DEV_MODE(musb);
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
ret = IRQ_HANDLED;
}
if (musb->int_tx || musb->int_rx || musb->int_usb)
ret |= musb_interrupt(musb);
eoi:
/* EOI needs to be written for the IRQ to be re-asserted. */
if (ret == IRQ_HANDLED || status)
musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
/* Poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
{
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
enum phy_mode phy_mode;
	/*
	 * The PHY has some issues when it is forced into device or host mode.
	 * Unless the user requests another mode, configure the PHY in OTG mode.
	 */
if (!musb->is_initialized)
return phy_set_mode(glue->phy, PHY_MODE_USB_OTG);
switch (musb_mode) {
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
phy_mode = PHY_MODE_USB_HOST;
break;
case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
phy_mode = PHY_MODE_USB_DEVICE;
break;
case MUSB_OTG: /* Don't override the VBUS/ID comparators */
phy_mode = PHY_MODE_USB_OTG;
break;
default:
return -EINVAL;
}
return phy_set_mode(glue->phy, phy_mode);
}
static int da8xx_musb_init(struct musb *musb)
{
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
void __iomem *reg_base = musb->ctrl_base;
u32 rev;
int ret = -ENODEV;
musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
ret = clk_prepare_enable(glue->clk);
if (ret) {
dev_err(glue->dev, "failed to enable clock\n");
return ret;
}
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
if (!rev) {
ret = -ENODEV;
goto fail;
}
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
ret = -EPROBE_DEFER;
goto fail;
}
timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the controller */
musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
/* Start the on-chip PHY and its PLL. */
ret = phy_init(glue->phy);
if (ret) {
dev_err(glue->dev, "Failed to init phy.\n");
goto fail;
}
ret = phy_power_on(glue->phy);
if (ret) {
dev_err(glue->dev, "Failed to power on phy.\n");
goto err_phy_power_on;
}
msleep(5);
/* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
pr_debug("DA8xx OTG revision %08x, control %02x\n", rev,
musb_readb(reg_base, DA8XX_USB_CTRL_REG));
musb->isr = da8xx_musb_interrupt;
return 0;
err_phy_power_on:
phy_exit(glue->phy);
fail:
clk_disable_unprepare(glue->clk);
return ret;
}
static int da8xx_musb_exit(struct musb *musb)
{
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
del_timer_sync(&musb->dev_timer);
phy_power_off(glue->phy);
phy_exit(glue->phy);
clk_disable_unprepare(glue->clk);
usb_put_phy(musb->xceiv);
return 0;
}
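/*
 * Derive pdata->power (in 2 mA units, per the USB spec) from the optional
 * "vbus" regulator's current limit; default to 255 when it is unknown or
 * out of range.
 */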
static inline u8 get_vbus_power(struct device *dev)
{
struct regulator *vbus_supply;
int current_uA;
vbus_supply = regulator_get_optional(dev, "vbus");
if (IS_ERR(vbus_supply))
return 255;
current_uA = regulator_get_current_limit(vbus_supply);
regulator_put(vbus_supply);
if (current_uA <= 0 || current_uA > 510000)
return 255;
return current_uA / 1000 / 2;
}
#ifdef CONFIG_USB_TI_CPPI41_DMA
static void da8xx_dma_controller_callback(struct dma_controller *c)
{
struct musb *musb = c->musb;
void __iomem *reg_base = musb->ctrl_base;
musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
}
static struct dma_controller *
da8xx_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct dma_controller *controller;
controller = cppi41_dma_controller_create(musb, base);
if (IS_ERR_OR_NULL(controller))
return controller;
controller->dma_callback = da8xx_dma_controller_callback;
return controller;
}
#endif
static const struct musb_platform_ops da8xx_ops = {
.quirks = MUSB_INDEXED_EP | MUSB_PRESERVE_SESSION |
MUSB_DMA_CPPI41 | MUSB_DA8XX,
.init = da8xx_musb_init,
.exit = da8xx_musb_exit,
.fifo_mode = 2,
#ifdef CONFIG_USB_TI_CPPI41_DMA
.dma_init = da8xx_dma_controller_create,
.dma_exit = cppi41_dma_controller_destroy,
#endif
.enable = da8xx_musb_enable,
.disable = da8xx_musb_disable,
.set_mode = da8xx_musb_set_mode,
.try_idle = da8xx_musb_try_idle,
.set_vbus = da8xx_musb_set_vbus,
};
static const struct platform_device_info da8xx_dev_info = {
.name = "musb-hdrc",
.id = PLATFORM_DEVID_AUTO,
.dma_mask = DMA_BIT_MASK(32),
};
static const struct musb_hdrc_config da8xx_config = {
.ram_bits = 10,
.num_eps = 5,
.multipoint = 1,
};
static struct of_dev_auxdata da8xx_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,da830-cppi41", 0x01e01000, "cppi41-dmaengine",
NULL),
{}
};
static int da8xx_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct da8xx_glue *glue;
struct platform_device_info pinfo;
struct clk *clk;
struct device_node *np = pdev->dev.of_node;
int ret;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
if (IS_ERR(glue->phy))
return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
"failed to get phy\n");
glue->dev = &pdev->dev;
glue->clk = clk;
if (IS_ENABLED(CONFIG_OF) && np) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->config = &da8xx_config;
pdata->mode = musb_get_mode(&pdev->dev);
pdata->power = get_vbus_power(&pdev->dev);
}
pdata->platform_ops = &da8xx_ops;
glue->usb_phy = usb_phy_generic_register();
ret = PTR_ERR_OR_ZERO(glue->usb_phy);
if (ret) {
dev_err(&pdev->dev, "failed to register usb_phy\n");
return ret;
}
platform_set_drvdata(pdev, glue);
ret = of_platform_populate(pdev->dev.of_node, NULL,
da8xx_auxdata_lookup, &pdev->dev);
if (ret)
return ret;
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
pinfo.fwnode = of_fwnode_handle(np);
pinfo.of_node_reused = true;
glue->musb = platform_device_register_full(&pinfo);
ret = PTR_ERR_OR_ZERO(glue->musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
usb_phy_generic_unregister(glue->usb_phy);
}
return ret;
}
static void da8xx_remove(struct platform_device *pdev)
{
struct da8xx_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
usb_phy_generic_unregister(glue->usb_phy);
}
#ifdef CONFIG_PM_SLEEP
static int da8xx_suspend(struct device *dev)
{
int ret;
struct da8xx_glue *glue = dev_get_drvdata(dev);
ret = phy_power_off(glue->phy);
if (ret)
return ret;
clk_disable_unprepare(glue->clk);
return 0;
}
static int da8xx_resume(struct device *dev)
{
int ret;
struct da8xx_glue *glue = dev_get_drvdata(dev);
ret = clk_prepare_enable(glue->clk);
if (ret)
return ret;
return phy_power_on(glue->phy);
}
#endif
static SIMPLE_DEV_PM_OPS(da8xx_pm_ops, da8xx_suspend, da8xx_resume);
#ifdef CONFIG_OF
static const struct of_device_id da8xx_id_table[] = {
{
.compatible = "ti,da830-musb",
},
{},
};
MODULE_DEVICE_TABLE(of, da8xx_id_table);
#endif
static struct platform_driver da8xx_driver = {
.probe = da8xx_probe,
.remove_new = da8xx_remove,
.driver = {
.name = "musb-da8xx",
.pm = &da8xx_pm_ops,
.of_match_table = of_match_ptr(da8xx_id_table),
},
};
MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
MODULE_AUTHOR("Sergei Shtylyov <[email protected]>");
MODULE_LICENSE("GPL v2");
module_platform_driver(da8xx_driver);
| linux-master | drivers/usb/musb/da8xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PolarFire SoC (MPFS) MUSB Glue Layer
*
* Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
* Based on {omap2430,tusb6010,ux500}.c
*
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
#include "musb_dma.h"
#define MPFS_MUSB_MAX_EP_NUM 8
#define MPFS_MUSB_RAM_BITS 12
struct mpfs_glue {
struct device *dev;
struct platform_device *musb;
struct platform_device *phy;
struct clk *clk;
};
static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 1024, },
{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 4096, },
};
static const struct musb_hdrc_config mpfs_musb_hdrc_config = {
.fifo_cfg = mpfs_musb_mode_cfg,
.fifo_cfg_size = ARRAY_SIZE(mpfs_musb_mode_cfg),
.multipoint = true,
.dyn_fifo = true,
.num_eps = MPFS_MUSB_MAX_EP_NUM,
.ram_bits = MPFS_MUSB_RAM_BITS,
};
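/* Read, acknowledge and dispatch all pending core and endpoint interrupts */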
static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
{
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
if (musb->int_usb || musb->int_tx || musb->int_rx) {
musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
ret = musb_interrupt(musb);
}
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
{
u8 devctl;
/*
* HDRC controls CPEN, but beware current surges during device
* connect. They can trigger transient overcurrent conditions
* that must be ignored.
*/
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (is_on) {
musb->is_active = 1;
musb->xceiv->otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
MUSB_HST_MODE(musb);
} else {
musb->is_active = 0;
/*
* NOTE: skipping A_WAIT_VFALL -> A_IDLE and
* jumping right to B_IDLE...
*/
musb->xceiv->otg->default_a = 0;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
devctl &= ~MUSB_DEVCTL_SESSION;
MUSB_DEV_MODE(musb);
}
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
static int mpfs_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(musb->xceiv)) {
dev_err(dev, "HS UDC: no transceiver configured\n");
return PTR_ERR(musb->xceiv);
}
musb->dyn_fifo = true;
musb->isr = mpfs_musb_interrupt;
musb_platform_set_vbus(musb, 1);
return 0;
}
static const struct musb_platform_ops mpfs_ops = {
.quirks = MUSB_DMA_INVENTRA,
.init = mpfs_musb_init,
.fifo_mode = 2,
#ifdef CONFIG_USB_INVENTRA_DMA
.dma_init = musbhs_dma_controller_create,
.dma_exit = musbhs_dma_controller_destroy,
#endif
.set_vbus = mpfs_musb_set_vbus
};
static int mpfs_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mpfs_glue *glue;
struct platform_device *musb_pdev;
struct device *dev = &pdev->dev;
struct clk *clk;
int ret;
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
musb_pdev = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb_pdev) {
dev_err(dev, "failed to allocate musb device\n");
return -ENOMEM;
}
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(clk);
goto err_phy_release;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable clock\n");
goto err_phy_release;
}
musb_pdev->dev.parent = dev;
musb_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(39);
musb_pdev->dev.dma_mask = &musb_pdev->dev.coherent_dma_mask;
device_set_of_node_from_dev(&musb_pdev->dev, dev);
glue->dev = dev;
glue->musb = musb_pdev;
glue->clk = clk;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
ret = -ENOMEM;
goto err_clk_disable;
}
pdata->config = &mpfs_musb_hdrc_config;
pdata->platform_ops = &mpfs_ops;
pdata->mode = usb_get_dr_mode(dev);
if (pdata->mode == USB_DR_MODE_UNKNOWN) {
dev_info(dev, "No dr_mode property found, defaulting to otg\n");
pdata->mode = USB_DR_MODE_OTG;
}
glue->phy = usb_phy_generic_register();
if (IS_ERR(glue->phy)) {
dev_err(dev, "failed to register usb-phy %ld\n",
PTR_ERR(glue->phy));
ret = PTR_ERR(glue->phy);
goto err_clk_disable;
}
platform_set_drvdata(pdev, glue);
ret = platform_device_add_resources(musb_pdev, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(dev, "failed to add resources\n");
goto err_clk_disable;
}
ret = platform_device_add_data(musb_pdev, pdata, sizeof(*pdata));
if (ret) {
dev_err(dev, "failed to add platform_data\n");
goto err_clk_disable;
}
ret = platform_device_add(musb_pdev);
if (ret) {
dev_err(dev, "failed to register musb device\n");
goto err_clk_disable;
}
dev_info(&pdev->dev, "Registered MPFS MUSB driver\n");
return 0;
err_clk_disable:
clk_disable_unprepare(clk);
err_phy_release:
usb_phy_generic_unregister(glue->phy);
platform_device_put(musb_pdev);
return ret;
}
static void mpfs_remove(struct platform_device *pdev)
{
struct mpfs_glue *glue = platform_get_drvdata(pdev);
clk_disable_unprepare(glue->clk);
platform_device_unregister(glue->musb);
	usb_phy_generic_unregister(glue->phy);
}
#ifdef CONFIG_OF
static const struct of_device_id mpfs_id_table[] = {
{ .compatible = "microchip,mpfs-musb" },
{ }
};
MODULE_DEVICE_TABLE(of, mpfs_id_table);
#endif
static struct platform_driver mpfs_musb_driver = {
.probe = mpfs_probe,
.remove_new = mpfs_remove,
.driver = {
.name = "mpfs-musb",
.of_match_table = of_match_ptr(mpfs_id_table)
},
};
module_platform_driver(mpfs_musb_driver);
MODULE_DESCRIPTION("PolarFire SoC MUSB Glue Layer");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/musb/mpfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
#include "musb_dma.h"
#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
#define musb_read_hsdma_addr(mbase, bchannel) \
musb_readl(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS))
#define musb_write_hsdma_addr(mbase, bchannel, addr) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
addr)
#define musb_read_hsdma_count(mbase, bchannel) \
musb_readl(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
#define musb_write_hsdma_count(mbase, bchannel, len) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
len)
/* control register (16-bit): */
#define MUSB_HSDMA_ENABLE_SHIFT 0
#define MUSB_HSDMA_TRANSMIT_SHIFT 1
#define MUSB_HSDMA_MODE1_SHIFT 2
#define MUSB_HSDMA_IRQENABLE_SHIFT 3
#define MUSB_HSDMA_ENDPOINT_SHIFT 4
#define MUSB_HSDMA_BUSERROR_SHIFT 8
#define MUSB_HSDMA_BURSTMODE_SHIFT 9
#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
#define MUSB_HSDMA_BURSTMODE_INCR4 1
#define MUSB_HSDMA_BURSTMODE_INCR8 2
#define MUSB_HSDMA_BURSTMODE_INCR16 3
#define MUSB_HSDMA_CHANNELS 8
struct musb_dma_controller;
struct musb_dma_channel {
struct dma_channel channel;
struct musb_dma_controller *controller;
u32 start_addr;
u32 len;
u16 max_packet_sz;
u8 idx;
u8 epnum;
u8 transmit;
};
struct musb_dma_controller {
struct dma_controller controller;
struct musb_dma_channel channel[MUSB_HSDMA_CHANNELS];
void *private_data;
void __iomem *base;
u8 channel_count;
u8 used_channels;
int irq;
};
static void dma_channel_release(struct dma_channel *channel);
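/* Force-release any channels still marked in use before teardown */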
static void dma_controller_stop(struct musb_dma_controller *controller)
{
struct musb *musb = controller->private_data;
struct dma_channel *channel;
u8 bit;
if (controller->used_channels != 0) {
dev_err(musb->controller,
"Stopping DMA controller while channel active\n");
for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
if (controller->used_channels & (1 << bit)) {
channel = &controller->channel[bit].channel;
dma_channel_release(channel);
if (!controller->used_channels)
break;
}
}
}
}
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 transmit)
{
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
struct musb_dma_channel *musb_channel = NULL;
struct dma_channel *channel = NULL;
u8 bit;
for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
if (!(controller->used_channels & (1 << bit))) {
controller->used_channels |= (1 << bit);
musb_channel = &(controller->channel[bit]);
musb_channel->controller = controller;
musb_channel->idx = bit;
musb_channel->epnum = hw_ep->epnum;
musb_channel->transmit = transmit;
channel = &(musb_channel->channel);
channel->private_data = musb_channel;
channel->status = MUSB_DMA_STATUS_FREE;
channel->max_len = 0x100000;
/* Tx => mode 1; Rx => mode 0 */
channel->desired_mode = transmit;
channel->actual_len = 0;
break;
}
}
return channel;
}
static void dma_channel_release(struct dma_channel *channel)
{
struct musb_dma_channel *musb_channel = channel->private_data;
channel->actual_len = 0;
musb_channel->start_addr = 0;
musb_channel->len = 0;
musb_channel->controller->used_channels &=
~(1 << musb_channel->idx);
channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
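/*
 * Program address, count and the control word for one Mentor DMA channel;
 * writing the control word with the ENABLE bit set starts the transfer.
 */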
static void configure_channel(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
void __iomem *mbase = controller->base;
u8 bchannel = musb_channel->idx;
u16 csr = 0;
musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
channel, packet_sz, &dma_addr, len, mode);
if (mode) {
csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
BUG_ON(len < packet_sz);
}
csr |= MUSB_HSDMA_BURSTMODE_INCR16
<< MUSB_HSDMA_BURSTMODE_SHIFT;
csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
| (1 << MUSB_HSDMA_ENABLE_SHIFT)
| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
| (musb_channel->transmit
? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
: 0);
/* address/count */
musb_write_hsdma_addr(mbase, bchannel, dma_addr);
musb_write_hsdma_count(mbase, bchannel, len);
/* control (this should start things) */
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
csr);
}
static int dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
packet_sz, &dma_addr, len, mode);
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
	/*
	 * The DMA engine in RTL 1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4-byte boundary.
	 * It ends up masking the last two bits of the address
	 * programmed in DMA_ADDR.
	 *
	 * Fail such DMA transfers, so that the backup PIO mode
	 * can carry out the transfer.
	 */
if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
return false;
channel->actual_len = 0;
musb_channel->start_addr = dma_addr;
musb_channel->len = len;
musb_channel->max_packet_sz = packet_sz;
channel->status = MUSB_DMA_STATUS_BUSY;
configure_channel(channel, packet_sz, mode, dma_addr, len);
return true;
}
static int dma_channel_abort(struct dma_channel *channel)
{
struct musb_dma_channel *musb_channel = channel->private_data;
void __iomem *mbase = musb_channel->controller->base;
struct musb *musb = musb_channel->controller->private_data;
u8 bchannel = musb_channel->idx;
int offset;
u16 csr;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (musb_channel->transmit) {
offset = musb->io.ep_offset(musb_channel->epnum,
MUSB_TXCSR);
/*
* The programming guide says that we must clear
* the DMAENAB bit before the DMAMODE bit...
*/
csr = musb_readw(mbase, offset);
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
musb_writew(mbase, offset, csr);
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(mbase, offset, csr);
} else {
offset = musb->io.ep_offset(musb_channel->epnum,
MUSB_RXCSR);
csr = musb_readw(mbase, offset);
csr &= ~(MUSB_RXCSR_AUTOCLEAR |
MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_DMAMODE);
musb_writew(mbase, offset, csr);
}
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
0);
musb_write_hsdma_addr(mbase, bchannel, 0);
musb_write_hsdma_count(mbase, bchannel, 0);
channel->status = MUSB_DMA_STATUS_FREE;
}
return 0;
}
irqreturn_t dma_controller_irq(int irq, void *private_data)
{
struct musb_dma_controller *controller = private_data;
struct musb *musb = controller->private_data;
struct musb_dma_channel *musb_channel;
struct dma_channel *channel;
void __iomem *mbase = controller->base;
irqreturn_t retval = IRQ_NONE;
unsigned long flags;
u8 bchannel;
u8 int_hsdma;
u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
if (!int_hsdma) {
musb_dbg(musb, "spurious DMA irq");
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
musb_channel = (struct musb_dma_channel *)
&(controller->channel[bchannel]);
channel = &musb_channel->channel;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
count = musb_read_hsdma_count(mbase, bchannel);
if (count == 0)
int_hsdma |= (1 << bchannel);
}
}
musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);
if (!int_hsdma)
goto done;
}
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
&(controller->channel[bchannel]);
channel = &musb_channel->channel;
csr = musb_readw(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
MUSB_HSDMA_CONTROL));
if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
musb_channel->channel.status =
MUSB_DMA_STATUS_BUS_ABORT;
} else {
addr = musb_read_hsdma_addr(mbase,
bchannel);
channel->actual_len = addr
- musb_channel->start_addr;
musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
(channel->actual_len
< musb_channel->len) ?
"=> reconfig 0" : "=> complete");
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
if (musb_channel->transmit &&
(!channel->desired_mode ||
(channel->actual_len %
musb_channel->max_packet_sz))) {
u8 epnum = musb_channel->epnum;
int offset = musb->io.ep_offset(epnum,
MUSB_TXCSR);
u16 txcsr;
/*
* The programming guide says that we
* must clear DMAENAB before DMAMODE.
*/
musb_ep_select(mbase, epnum);
txcsr = musb_readw(mbase, offset);
if (channel->desired_mode == 1) {
txcsr &= ~(MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_AUTOSET);
musb_writew(mbase, offset, txcsr);
/* Send out the packet */
txcsr &= ~MUSB_TXCSR_DMAMODE;
txcsr |= MUSB_TXCSR_DMAENAB;
}
txcsr |= MUSB_TXCSR_TXPKTRDY;
musb_writew(mbase, offset, txcsr);
}
musb_dma_completion(musb, musb_channel->epnum,
musb_channel->transmit);
}
}
}
retval = IRQ_HANDLED;
done:
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(dma_controller_irq);
void musbhs_dma_controller_destroy(struct dma_controller *c)
{
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
dma_controller_stop(controller);
if (controller->irq)
free_irq(controller->irq, c);
kfree(controller);
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
static struct musb_dma_controller *
dma_controller_alloc(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
return NULL;
controller->channel_count = MUSB_HSDMA_CHANNELS;
controller->private_data = musb;
controller->base = base;
controller->controller.channel_alloc = dma_channel_allocate;
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
return controller;
}
struct dma_controller *
musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq_byname(pdev, "dma");
if (irq <= 0) {
dev_err(dev, "No DMA interrupt line!\n");
return NULL;
}
controller = dma_controller_alloc(musb, base);
if (!controller)
return NULL;
if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
return NULL;
}
controller->irq = irq;
return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
struct dma_controller *
musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
controller = dma_controller_alloc(musb, base);
if (!controller)
return NULL;
return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
| linux-master | drivers/usb/musb/musbhsdma.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
#define EP_MODE_AUTOREQ_NONE 0
#define EP_MODE_AUTOREQ_ALL_NEOP 1
#define EP_MODE_AUTOREQ_ALWAYS 3
#define EP_MODE_DMA_TRANSPARENT 0
#define EP_MODE_DMA_RNDIS 1
#define EP_MODE_DMA_GEN_RNDIS 3
#define USB_CTRL_TX_MODE 0x70
#define USB_CTRL_RX_MODE 0x74
#define USB_CTRL_AUTOREQ 0xd0
#define USB_TDOWN 0xd8
#define MUSB_DMA_NUM_CHANNELS 15
#define DA8XX_USB_MODE 0x10
#define DA8XX_USB_AUTOREQ 0x14
#define DA8XX_USB_TEARDOWN 0x1c
#define DA8XX_DMA_NUM_CHANNELS 4
struct cppi41_dma_controller {
struct dma_controller controller;
struct cppi41_dma_channel *rx_channel;
struct cppi41_dma_channel *tx_channel;
struct hrtimer early_tx;
struct list_head early_tx_list;
u32 rx_mode;
u32 tx_mode;
u32 auto_req;
u32 tdown_reg;
u32 autoreq_reg;
void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
unsigned int mode);
u8 num_channels;
};
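/*
 * Snapshot the host-side RX data toggle so update_rx_toggle() can restore
 * it after DMA (see the AM335x advisory note there).
 */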
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
u16 csr;
u8 toggle;
if (cppi41_channel->is_tx)
return;
if (!is_host_active(cppi41_channel->controller->controller.musb))
return;
csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
cppi41_channel->usb_toggle = toggle;
}
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct musb *musb = hw_ep->musb;
u16 csr;
u8 toggle;
if (cppi41_channel->is_tx)
return;
if (!is_host_active(musb))
return;
musb_ep_select(musb->mregs, hw_ep->epnum);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
if (!toggle && toggle == cppi41_channel->usb_toggle) {
csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
musb_dbg(musb, "Restoring DATA1 toggle.");
}
cppi41_channel->usb_toggle = toggle;
}
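/* Return true once TXPKTRDY is clear, i.e. no packet is pending in the FIFO */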
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
u8 epnum = hw_ep->epnum;
struct musb *musb = hw_ep->musb;
void __iomem *epio = musb->endpoints[epnum].regs;
u16 csr;
musb_ep_select(musb->mregs, hw_ep->epnum);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY)
return false;
return true;
}
static void cppi41_dma_callback(void *private_data,
const struct dmaengine_result *result);
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct musb *musb = hw_ep->musb;
void __iomem *epio = hw_ep->regs;
u16 csr;
if (!cppi41_channel->prog_len ||
(cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
/* done, complete */
cppi41_channel->channel.actual_len =
cppi41_channel->transferred;
cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
cppi41_channel->channel.rx_packet_done = true;
		/*
		 * Transmit a ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
cppi41_channel->packet_sz) == 0) {
musb_ep_select(musb->mregs, hw_ep->epnum);
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
}
trace_musb_cppi41_done(cppi41_channel);
musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
} else {
/* next iteration, reload */
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
u32 remain_bytes;
cppi41_channel->buf_addr += cppi41_channel->packet_sz;
remain_bytes = cppi41_channel->total_len;
remain_bytes -= cppi41_channel->transferred;
remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
cppi41_channel->prog_len = remain_bytes;
direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
: DMA_DEV_TO_MEM;
dma_desc = dmaengine_prep_slave_single(dc,
cppi41_channel->buf_addr,
remain_bytes,
direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (WARN_ON(!dma_desc))
return;
dma_desc->callback_result = cppi41_dma_callback;
dma_desc->callback_param = &cppi41_channel->channel;
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
trace_musb_cppi41_cont(cppi41_channel);
dma_async_issue_pending(dc);
if (!cppi41_channel->is_tx) {
musb_ep_select(musb->mregs, hw_ep->epnum);
csr = musb_readw(epio, MUSB_RXCSR);
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, csr);
}
}
}
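/*
 * hrtimer callback for the early-TX workaround: complete transfers whose
 * TX FIFO has drained since DMA completion, re-arming while any remain.
 */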
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
struct cppi41_dma_controller *controller;
struct cppi41_dma_channel *cppi41_channel, *n;
struct musb *musb;
unsigned long flags;
enum hrtimer_restart ret = HRTIMER_NORESTART;
controller = container_of(timer, struct cppi41_dma_controller,
early_tx);
musb = controller->controller.musb;
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
tx_check) {
bool empty;
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
empty = musb_is_tx_fifo_empty(hw_ep);
if (empty) {
list_del_init(&cppi41_channel->tx_check);
cppi41_trans_done(cppi41_channel);
}
}
if (!list_empty(&controller->early_tx_list) &&
!hrtimer_is_queued(&controller->early_tx)) {
ret = HRTIMER_RESTART;
hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
}
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
static void cppi41_dma_callback(void *private_data,
const struct dmaengine_result *result)
{
struct dma_channel *channel = private_data;
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct cppi41_dma_controller *controller;
struct musb *musb = hw_ep->musb;
unsigned long flags;
struct dma_tx_state txstate;
u32 transferred;
int is_hs = 0;
bool empty;
controller = cppi41_channel->controller;
if (controller->controller.dma_callback)
controller->controller.dma_callback(&controller->controller);
if (result->result == DMA_TRANS_ABORTED)
return;
spin_lock_irqsave(&musb->lock, flags);
dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
&txstate);
transferred = cppi41_channel->prog_len - txstate.residue;
cppi41_channel->transferred += transferred;
trace_musb_cppi41_gb(cppi41_channel);
update_rx_toggle(cppi41_channel);
if (cppi41_channel->transferred == cppi41_channel->total_len ||
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
if (cppi41_channel->is_tx) {
u8 type;
if (is_host_active(musb))
type = hw_ep->out_qh->type;
else
type = hw_ep->ep_in.type;
if (type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Don't use the early-TX-interrupt workaround below
			 * for isochronous transfers. Since isochronous
			 * transfers are periodic, by the time the next
			 * transfer is scheduled the current one should
			 * already be done.
			 *
			 * This avoids an audio playback underrun issue.
			 */
empty = true;
else
empty = musb_is_tx_fifo_empty(hw_ep);
}
if (!cppi41_channel->is_tx || empty) {
cppi41_trans_done(cppi41_channel);
goto out;
}
	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early, i.e. the TXFIFO is not yet empty although the DMA
	 * engine reports that it is done with the transfer. We don't
	 * receive a FIFO-empty interrupt, so the only thing we can do is
	 * poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and set up a timer on
	 * FS to check for the bit and complete the transfer.
	 */
if (is_host_active(musb)) {
if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
is_hs = 1;
} else {
if (musb->g.speed == USB_SPEED_HIGH)
is_hs = 1;
}
if (is_hs) {
unsigned wait = 25;
do {
empty = musb_is_tx_fifo_empty(hw_ep);
if (empty) {
cppi41_trans_done(cppi41_channel);
goto out;
}
wait--;
if (!wait)
break;
cpu_relax();
} while (1);
}
list_add_tail(&cppi41_channel->tx_check,
&controller->early_tx_list);
if (!hrtimer_is_queued(&controller->early_tx)) {
unsigned long usecs = cppi41_channel->total_len / 10;
hrtimer_start_range_ns(&controller->early_tx,
usecs * NSEC_PER_USEC,
20 * NSEC_PER_USEC,
HRTIMER_MODE_REL);
}
out:
spin_unlock_irqrestore(&musb->lock, flags);
}
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
unsigned shift;
shift = (ep - 1) * 2;
old &= ~(3 << shift);
old |= mode << shift;
return old;
}
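/*
 * Illustration: each endpoint owns a 2-bit field in the mode word, so
 * update_ep_mode(3, EP_MODE_DMA_GEN_RNDIS, old) computes shift =
 * (3 - 1) * 2 = 4 and touches only bits 5:4, leaving the other
 * endpoints' modes untouched.
 */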
static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned mode)
{
struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct musb *musb = controller->controller.musb;
u32 port;
u32 new_mode;
u32 old_mode;
if (cppi41_channel->is_tx)
old_mode = controller->tx_mode;
else
old_mode = controller->rx_mode;
port = cppi41_channel->port_num;
new_mode = update_ep_mode(port, mode, old_mode);
if (new_mode == old_mode)
return;
if (cppi41_channel->is_tx) {
controller->tx_mode = new_mode;
musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
} else {
controller->rx_mode = new_mode;
musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
}
}
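/*
 * DA8xx variant: unlike the layout handled above, TX and RX modes share
 * a single register (DA8XX_USB_MODE) with 4 bits per port and the RX
 * fields starting at bit 16; controller->tx_mode caches the combined
 * value for both directions.
 */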
static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned int mode)
{
struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct musb *musb = controller->controller.musb;
unsigned int shift;
u32 port;
u32 new_mode;
u32 old_mode;
old_mode = controller->tx_mode;
port = cppi41_channel->port_num;
shift = (port - 1) * 4;
if (!cppi41_channel->is_tx)
shift += 16;
new_mode = old_mode & ~(3 << shift);
new_mode |= mode << shift;
if (new_mode == old_mode)
return;
controller->tx_mode = new_mode;
musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned mode)
{
struct cppi41_dma_controller *controller = cppi41_channel->controller;
u32 port;
u32 new_mode;
u32 old_mode;
old_mode = controller->auto_req;
port = cppi41_channel->port_num;
new_mode = update_ep_mode(port, mode, old_mode);
if (new_mode == old_mode)
return;
controller->auto_req = new_mode;
musb_writel(controller->controller.musb->ctrl_base,
controller->autoreq_reg, new_mode);
}
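/*
 * Program one transfer on the dmaengine channel. TX transfers larger
 * than one packet run in generic RNDIS mode, with the expected length
 * latched in the per-port RNDIS register; everything else falls back
 * to transparent mode, limited to one packet at a time (see the
 * advisory note below).
 */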
static bool cppi41_configure_channel(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
struct musb *musb = cppi41_channel->controller->controller.musb;
unsigned use_gen_rndis = 0;
cppi41_channel->buf_addr = dma_addr;
cppi41_channel->total_len = len;
cppi41_channel->transferred = 0;
cppi41_channel->packet_sz = packet_sz;
cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
	/*
	 * Due to AM335x's Advisory 1.0.13 we are not allowed to transfer
	 * more than the max packet size at a time.
	 */
if (cppi41_channel->is_tx)
use_gen_rndis = 1;
if (use_gen_rndis) {
/* RNDIS mode */
if (len > packet_sz) {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), len);
/* gen rndis */
controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_GEN_RNDIS);
/* auto req */
cppi41_set_autoreq_mode(cppi41_channel,
EP_MODE_AUTOREQ_ALL_NEOP);
} else {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), 0);
controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel,
EP_MODE_AUTOREQ_NONE);
}
} else {
/* fallback mode */
controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
len = min_t(u32, packet_sz, len);
}
cppi41_channel->prog_len = len;
direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma_desc)
return false;
dma_desc->callback_result = cppi41_dma_callback;
dma_desc->callback_param = channel;
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
cppi41_channel->channel.rx_packet_done = false;
trace_musb_cppi41_config(cppi41_channel);
save_rx_toggle(cppi41_channel);
dma_async_issue_pending(dc);
return true;
}
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 is_tx)
{
struct cppi41_dma_controller *controller = container_of(c,
struct cppi41_dma_controller, controller);
struct cppi41_dma_channel *cppi41_channel = NULL;
u8 ch_num = hw_ep->epnum - 1;
if (ch_num >= controller->num_channels)
return NULL;
if (is_tx)
cppi41_channel = &controller->tx_channel[ch_num];
else
cppi41_channel = &controller->rx_channel[ch_num];
if (!cppi41_channel->dc)
return NULL;
if (cppi41_channel->is_allocated)
return NULL;
cppi41_channel->hw_ep = hw_ep;
cppi41_channel->is_allocated = 1;
trace_musb_cppi41_alloc(cppi41_channel);
return &cppi41_channel->channel;
}
static void cppi41_dma_channel_release(struct dma_channel *channel)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
trace_musb_cppi41_free(cppi41_channel);
if (cppi41_channel->is_allocated) {
cppi41_channel->is_allocated = 0;
channel->status = MUSB_DMA_STATUS_FREE;
channel->actual_len = 0;
}
}
static int cppi41_dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
int ret;
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
int hb_mult = 0;
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
if (is_host_active(cppi41_channel->controller->controller.musb)) {
if (cppi41_channel->is_tx)
hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
else
hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
}
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
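	/*
	 * For high-bandwidth endpoints the USB 2.0 wMaxPacketSize value
	 * carries the per-microframe multiplier in bits 12:11, while
	 * bits 10:0 hold the base packet size; e.g. hb_mult 3 with a
	 * base size of 1024 yields a 3072 byte "packet".
	 */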
if (hb_mult)
packet_sz = hb_mult * (packet_sz & 0x7FF);
ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
if (!ret)
channel->status = MUSB_DMA_STATUS_FREE;
return ret;
}
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
void *buf, u32 length)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct musb *musb = controller->controller.musb;
if (is_host_active(musb)) {
WARN_ON(1);
return 1;
}
if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
return 0;
if (cppi41_channel->is_tx)
return 1;
/* AM335x Advisory 1.0.13. No workaround for device RX mode */
return 0;
}
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct musb *musb = controller->controller.musb;
void __iomem *epio = cppi41_channel->hw_ep->regs;
int tdbit;
int ret;
unsigned is_tx;
u16 csr;
is_tx = cppi41_channel->is_tx;
trace_musb_cppi41_abort(cppi41_channel);
if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
return 0;
list_del_init(&cppi41_channel->tx_check);
if (is_tx) {
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
} else {
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		/* delay to drain the cppi dma pipeline for isoch */
udelay(250);
csr = musb_readw(epio, MUSB_RXCSR);
csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
musb_writew(epio, MUSB_RXCSR, csr);
		/* wait to drain the cppi dma pipeline */
udelay(50);
csr = musb_readw(epio, MUSB_RXCSR);
if (csr & MUSB_RXCSR_RXPKTRDY) {
csr |= MUSB_RXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_RXCSR, csr);
musb_writew(epio, MUSB_RXCSR, csr);
}
}
	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
if (musb->ops->quirks & MUSB_DA8XX)
mdelay(250);
tdbit = 1 << cppi41_channel->port_num;
if (is_tx)
tdbit <<= 16;
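	/*
	 * Note (assumption based on the cppi41 dmaengine driver): the
	 * terminate call is expected to return -EAGAIN until the teardown
	 * descriptor has drained through the queue, so keep re-asserting
	 * the teardown bit (TX only) and retrying.
	 */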
do {
if (is_tx)
musb_writel(musb->ctrl_base, controller->tdown_reg,
tdbit);
ret = dmaengine_terminate_all(cppi41_channel->dc);
} while (ret == -EAGAIN);
if (is_tx) {
musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY) {
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
}
}
cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
return 0;
}
static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
struct dma_chan *dc;
int i;
for (i = 0; i < ctrl->num_channels; i++) {
dc = ctrl->tx_channel[i].dc;
if (dc)
dma_release_channel(dc);
dc = ctrl->rx_channel[i].dc;
if (dc)
dma_release_channel(dc);
}
}
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
cppi41_release_all_dma_chans(controller);
}
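/*
 * The DT "dma-names" entries are expected to follow the "tx1".."txN" /
 * "rx1".."rxN" convention parsed below, e.g.:
 *
 *	dma-names = "tx1", "rx1", "tx2", "rx2";
 *
 * where the number is the 1-based MUSB endpoint/port.
 */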
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
struct musb *musb = controller->controller.musb;
struct device *dev = musb->controller;
struct device_node *np = dev->parent->of_node;
struct cppi41_dma_channel *cppi41_channel;
int count;
int i;
int ret;
count = of_property_count_strings(np, "dma-names");
if (count < 0)
return count;
for (i = 0; i < count; i++) {
struct dma_chan *dc;
struct dma_channel *musb_dma;
const char *str;
unsigned is_tx;
unsigned int port;
ret = of_property_read_string_index(np, "dma-names", i, &str);
if (ret)
goto err;
if (strstarts(str, "tx"))
is_tx = 1;
else if (strstarts(str, "rx"))
is_tx = 0;
else {
dev_err(dev, "Wrong dmatype %s\n", str);
goto err;
}
ret = kstrtouint(str + 2, 0, &port);
if (ret)
goto err;
ret = -EINVAL;
if (port > controller->num_channels || !port)
goto err;
if (is_tx)
cppi41_channel = &controller->tx_channel[port - 1];
else
cppi41_channel = &controller->rx_channel[port - 1];
cppi41_channel->controller = controller;
cppi41_channel->port_num = port;
cppi41_channel->is_tx = is_tx;
INIT_LIST_HEAD(&cppi41_channel->tx_check);
musb_dma = &cppi41_channel->channel;
musb_dma->private_data = cppi41_channel;
musb_dma->status = MUSB_DMA_STATUS_FREE;
musb_dma->max_len = SZ_4M;
dc = dma_request_chan(dev->parent, str);
if (IS_ERR(dc)) {
ret = dev_err_probe(dev, PTR_ERR(dc),
"Failed to request %s.\n", str);
goto err;
}
cppi41_channel->dc = dc;
}
return 0;
err:
cppi41_release_all_dma_chans(controller);
return ret;
}
void cppi41_dma_controller_destroy(struct dma_controller *c)
{
struct cppi41_dma_controller *controller = container_of(c,
struct cppi41_dma_controller, controller);
hrtimer_cancel(&controller->early_tx);
cppi41_dma_controller_stop(controller);
kfree(controller->rx_channel);
kfree(controller->tx_channel);
kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct cppi41_dma_controller *controller;
int channel_size;
int ret = 0;
if (!musb->controller->parent->of_node) {
dev_err(musb->controller, "Need DT for the DMA engine.\n");
return NULL;
}
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
goto kzalloc_fail;
hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
controller->early_tx.function = cppi41_recheck_tx_req;
INIT_LIST_HEAD(&controller->early_tx_list);
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
controller->controller.channel_release = cppi41_dma_channel_release;
controller->controller.channel_program = cppi41_dma_channel_program;
controller->controller.channel_abort = cppi41_dma_channel_abort;
controller->controller.is_compatible = cppi41_is_compatible;
controller->controller.musb = musb;
if (musb->ops->quirks & MUSB_DA8XX) {
controller->tdown_reg = DA8XX_USB_TEARDOWN;
controller->autoreq_reg = DA8XX_USB_AUTOREQ;
controller->set_dma_mode = da8xx_set_dma_mode;
controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
} else {
controller->tdown_reg = USB_TDOWN;
controller->autoreq_reg = USB_CTRL_AUTOREQ;
controller->set_dma_mode = cppi41_set_dma_mode;
controller->num_channels = MUSB_DMA_NUM_CHANNELS;
}
channel_size = controller->num_channels *
sizeof(struct cppi41_dma_channel);
controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
if (!controller->rx_channel)
goto rx_channel_alloc_fail;
controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
if (!controller->tx_channel)
goto tx_channel_alloc_fail;
ret = cppi41_dma_controller_start(controller);
if (ret)
goto plat_get_fail;
return &controller->controller;
plat_get_fail:
kfree(controller->tx_channel);
tx_channel_alloc_fail:
kfree(controller->rx_channel);
rx_channel_alloc_fail:
kfree(controller);
kzalloc_fail:
if (ret == -EPROBE_DEFER)
return ERR_PTR(ret);
return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
| linux-master | drivers/usb/musb/musb_cppi41.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 MediaTek Inc.
*
* Author:
* Min Guo <[email protected]>
* Yonglong Wu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
#include "musb_dma.h"
#define USB_L1INTS 0x00a0
#define USB_L1INTM 0x00a4
#define MTK_MUSB_TXFUNCADDR 0x0480
/* MediaTek controller toggle enable and status reg */
#define MUSB_RXTOG 0x80
#define MUSB_RXTOGEN 0x82
#define MUSB_TXTOG 0x84
#define MUSB_TXTOGEN 0x86
#define MTK_TOGGLE_EN GENMASK(15, 0)
#define TX_INT_STATUS BIT(0)
#define RX_INT_STATUS BIT(1)
#define USBCOM_INT_STATUS BIT(2)
#define DMA_INT_STATUS BIT(3)
#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
#define MTK_MUSB_CLKS_NUM 3
struct mtk_glue {
struct device *dev;
struct musb *musb;
struct platform_device *musb_pdev;
struct platform_device *usb_phy;
struct phy *phy;
struct usb_phy *xceiv;
enum phy_mode phy_mode;
struct clk_bulk_data clks[MTK_MUSB_CLKS_NUM];
enum usb_role role;
struct usb_role_switch *role_sw;
};
static int mtk_musb_clks_get(struct mtk_glue *glue)
{
struct device *dev = glue->dev;
glue->clks[0].id = "main";
glue->clks[1].id = "mcu";
glue->clks[2].id = "univpll";
return devm_clk_bulk_get(dev, MTK_MUSB_CLKS_NUM, glue->clks);
}
static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
{
struct musb *musb = glue->musb;
u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
enum usb_role new_role;
if (role == glue->role)
return 0;
switch (role) {
case USB_ROLE_HOST:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
glue->phy_mode = PHY_MODE_USB_HOST;
new_role = USB_ROLE_HOST;
if (glue->role == USB_ROLE_NONE)
phy_power_on(glue->phy);
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
MUSB_HST_MODE(musb);
break;
case USB_ROLE_DEVICE:
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
glue->phy_mode = PHY_MODE_USB_DEVICE;
new_role = USB_ROLE_DEVICE;
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
if (glue->role == USB_ROLE_NONE)
phy_power_on(glue->phy);
MUSB_DEV_MODE(musb);
break;
case USB_ROLE_NONE:
glue->phy_mode = PHY_MODE_USB_OTG;
new_role = USB_ROLE_NONE;
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
if (glue->role != USB_ROLE_NONE)
phy_power_off(glue->phy);
break;
default:
dev_err(glue->dev, "Invalid State\n");
return -EINVAL;
}
glue->role = new_role;
phy_set_mode(glue->phy, glue->phy_mode);
return 0;
}
static int musb_usb_role_sx_set(struct usb_role_switch *sw, enum usb_role role)
{
return mtk_otg_switch_set(usb_role_switch_get_drvdata(sw), role);
}
static enum usb_role musb_usb_role_sx_get(struct usb_role_switch *sw)
{
struct mtk_glue *glue = usb_role_switch_get_drvdata(sw);
return glue->role;
}
static int mtk_otg_switch_init(struct mtk_glue *glue)
{
struct usb_role_switch_desc role_sx_desc = { 0 };
role_sx_desc.set = musb_usb_role_sx_set;
role_sx_desc.get = musb_usb_role_sx_get;
role_sx_desc.allow_userspace_control = true;
role_sx_desc.fwnode = dev_fwnode(glue->dev);
role_sx_desc.driver_data = glue;
glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
return PTR_ERR_OR_ZERO(glue->role_sw);
}
static void mtk_otg_switch_exit(struct mtk_glue *glue)
{
return usb_role_switch_unregister(glue->role_sw);
}
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
/* ep0 FADDR must be 0 when (re)entering peripheral mode */
musb_ep_select(musb->mregs, 0);
musb_writeb(musb->mregs, MUSB_FADDR, 0);
}
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
{
irqreturn_t retval = IRQ_NONE;
struct musb *musb = (struct musb *)dev_id;
u32 l1_ints;
l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
musb_readl(musb->mregs, USB_L1INTM);
if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
retval = generic_interrupt(irq, musb);
#if defined(CONFIG_USB_INVENTRA_DMA)
if (l1_ints & DMA_INT_STATUS)
retval = dma_controller_irq(irq, musb->dma_controller);
#endif
return retval;
}
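/*
 * MediaTek moves the multipoint busctl registers to a dedicated block
 * at 0x0480, 8 bytes per endpoint; e.g. the ep1 registers start at
 * 0x0480 + 8 * 1 = 0x0488.
 */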
static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
{
return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
}
static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
{
u8 data;
/* W1C */
data = musb_readb(addr, offset);
musb_writeb(addr, offset, data);
return data;
}
static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
{
u16 data;
/* W1C */
data = musb_readw(addr, offset);
musb_writew(addr, offset, data);
return data;
}
static int mtk_musb_set_mode(struct musb *musb, u8 mode)
{
struct device *dev = musb->controller;
struct mtk_glue *glue = dev_get_drvdata(dev->parent);
enum phy_mode new_mode;
enum usb_role new_role;
switch (mode) {
case MUSB_HOST:
new_mode = PHY_MODE_USB_HOST;
new_role = USB_ROLE_HOST;
break;
case MUSB_PERIPHERAL:
new_mode = PHY_MODE_USB_DEVICE;
new_role = USB_ROLE_DEVICE;
break;
case MUSB_OTG:
new_mode = PHY_MODE_USB_OTG;
new_role = USB_ROLE_NONE;
break;
default:
dev_err(glue->dev, "Invalid mode request\n");
return -EINVAL;
}
if (glue->phy_mode == new_mode)
return 0;
if (musb->port_mode != MUSB_OTG) {
dev_err(glue->dev, "Does not support changing modes\n");
return -EINVAL;
}
mtk_otg_switch_set(glue, new_role);
return 0;
}
static int mtk_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
struct mtk_glue *glue = dev_get_drvdata(dev->parent);
int ret;
glue->musb = musb;
musb->phy = glue->phy;
musb->xceiv = glue->xceiv;
musb->is_host = false;
musb->isr = mtk_musb_interrupt;
/* Set TX/RX toggle enable */
musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
if (musb->port_mode == MUSB_OTG) {
ret = mtk_otg_switch_init(glue);
if (ret)
return ret;
}
ret = phy_init(glue->phy);
if (ret)
goto err_phy_init;
ret = phy_power_on(glue->phy);
if (ret)
goto err_phy_power_on;
phy_set_mode(glue->phy, glue->phy_mode);
#if defined(CONFIG_USB_INVENTRA_DMA)
musb_writel(musb->mregs, MUSB_HSDMA_INTR,
DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
#endif
musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
USBCOM_INT_STATUS | DMA_INT_STATUS);
return 0;
err_phy_power_on:
phy_exit(glue->phy);
err_phy_init:
if (musb->port_mode == MUSB_OTG)
mtk_otg_switch_exit(glue);
return ret;
}
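/*
 * Unlike the generic CSR-based toggle handling in musb_core, this
 * controller keeps the data toggle state in the dedicated MUSB_TXTOG/
 * MUSB_RXTOG registers (one bit per endpoint), enabled via the
 * MUSB_TXTOGEN/MUSB_RXTOGEN writes in mtk_musb_init() above.
 */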
static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
{
struct musb *musb = qh->hw_ep->musb;
u8 epnum = qh->hw_ep->epnum;
u16 toggle;
toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
return toggle & (1 << epnum);
}
static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
{
struct musb *musb = qh->hw_ep->musb;
u8 epnum = qh->hw_ep->epnum;
u16 value, toggle;
toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
if (is_out) {
value = musb_readw(musb->mregs, MUSB_TXTOG);
value |= toggle << epnum;
musb_writew(musb->mregs, MUSB_TXTOG, value);
} else {
value = musb_readw(musb->mregs, MUSB_RXTOG);
value |= toggle << epnum;
musb_writew(musb->mregs, MUSB_RXTOG, value);
}
return 0;
}
static int mtk_musb_exit(struct musb *musb)
{
struct device *dev = musb->controller;
struct mtk_glue *glue = dev_get_drvdata(dev->parent);
mtk_otg_switch_exit(glue);
phy_power_off(glue->phy);
phy_exit(glue->phy);
clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return 0;
}
static const struct musb_platform_ops mtk_musb_ops = {
.quirks = MUSB_DMA_INVENTRA,
.init = mtk_musb_init,
.get_toggle = mtk_musb_get_toggle,
.set_toggle = mtk_musb_set_toggle,
.exit = mtk_musb_exit,
#ifdef CONFIG_USB_INVENTRA_DMA
.dma_init = musbhs_dma_controller_create_noirq,
.dma_exit = musbhs_dma_controller_destroy,
#endif
.clearb = mtk_musb_clearb,
.clearw = mtk_musb_clearw,
.busctl_offset = mtk_musb_busctl_offset,
.set_mode = mtk_musb_set_mode,
};
#define MTK_MUSB_MAX_EP_NUM 8
#define MTK_MUSB_RAM_BITS 11
static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
};
static const struct musb_hdrc_config mtk_musb_hdrc_config = {
.fifo_cfg = mtk_musb_mode_cfg,
.fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
.multipoint = true,
.dyn_fifo = true,
.num_eps = MTK_MUSB_MAX_EP_NUM,
.ram_bits = MTK_MUSB_RAM_BITS,
};
static const struct platform_device_info mtk_dev_info = {
.name = "musb-hdrc",
.id = PLATFORM_DEVID_AUTO,
.dma_mask = DMA_BIT_MASK(32),
};
static int mtk_musb_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata;
struct mtk_glue *glue;
struct platform_device_info pinfo;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
glue->dev = dev;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to create child devices at %p\n", np);
return ret;
}
ret = mtk_musb_clks_get(glue);
if (ret)
return ret;
pdata->config = &mtk_musb_hdrc_config;
pdata->platform_ops = &mtk_musb_ops;
pdata->mode = usb_get_dr_mode(dev);
if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
pdata->mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
pdata->mode = USB_DR_MODE_PERIPHERAL;
switch (pdata->mode) {
case USB_DR_MODE_HOST:
glue->phy_mode = PHY_MODE_USB_HOST;
glue->role = USB_ROLE_HOST;
break;
case USB_DR_MODE_PERIPHERAL:
glue->phy_mode = PHY_MODE_USB_DEVICE;
glue->role = USB_ROLE_DEVICE;
break;
case USB_DR_MODE_OTG:
glue->phy_mode = PHY_MODE_USB_OTG;
glue->role = USB_ROLE_NONE;
break;
default:
		dev_err(&pdev->dev, "invalid 'dr_mode' property\n");
return -EINVAL;
}
glue->phy = devm_of_phy_get_by_index(dev, np, 0);
if (IS_ERR(glue->phy)) {
dev_err(dev, "fail to getting phy %ld\n",
PTR_ERR(glue->phy));
return PTR_ERR(glue->phy);
}
glue->usb_phy = usb_phy_generic_register();
if (IS_ERR(glue->usb_phy)) {
dev_err(dev, "fail to registering usb-phy %ld\n",
PTR_ERR(glue->usb_phy));
return PTR_ERR(glue->usb_phy);
}
glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
ret = PTR_ERR(glue->xceiv);
dev_err(dev, "fail to getting usb-phy %d\n", ret);
goto err_unregister_usb_phy;
}
platform_set_drvdata(pdev, glue);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = clk_bulk_prepare_enable(MTK_MUSB_CLKS_NUM, glue->clks);
if (ret)
goto err_enable_clk;
pinfo = mtk_dev_info;
pinfo.parent = dev;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
pinfo.fwnode = of_fwnode_handle(np);
pinfo.of_node_reused = true;
glue->musb_pdev = platform_device_register_full(&pinfo);
if (IS_ERR(glue->musb_pdev)) {
ret = PTR_ERR(glue->musb_pdev);
dev_err(dev, "failed to register musb device: %d\n", ret);
goto err_device_register;
}
return 0;
err_device_register:
clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
err_enable_clk:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
err_unregister_usb_phy:
usb_phy_generic_unregister(glue->usb_phy);
return ret;
}
static void mtk_musb_remove(struct platform_device *pdev)
{
struct mtk_glue *glue = platform_get_drvdata(pdev);
struct platform_device *usb_phy = glue->usb_phy;
platform_device_unregister(glue->musb_pdev);
usb_phy_generic_unregister(usb_phy);
}
#ifdef CONFIG_OF
static const struct of_device_id mtk_musb_match[] = {
{.compatible = "mediatek,mtk-musb",},
{},
};
MODULE_DEVICE_TABLE(of, mtk_musb_match);
#endif
static struct platform_driver mtk_musb_driver = {
.probe = mtk_musb_probe,
.remove_new = mtk_musb_remove,
.driver = {
.name = "musb-mtk",
.of_match_table = of_match_ptr(mtk_musb_match),
},
};
module_platform_driver(mtk_musb_driver);
MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
MODULE_AUTHOR("Min Guo <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/musb/mediatek.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver core code
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
*/
/*
* Inventra (Multipoint) Dual-Role Controller Driver for Linux.
*
* This consists of a Host Controller Driver (HCD) and a peripheral
* controller driver implementing the "Gadget" API; OTG support is
* in the works. These are normal Linux-USB controller drivers which
* use IRQs and have no dedicated thread.
*
* This version of the driver has only been used with products from
* Texas Instruments. Those products integrate the Inventra logic
* with other DMA, IRQ, and bus modules, as well as other logic that
* needs to be reflected in this driver.
*
*
* NOTE: the original Mentor code here was pretty much a collection
* of mechanisms that don't seem to have been fully integrated/working
 * for *any* Linux kernel version. This version aims at Linux 2.6.now.
* Key open issues include:
*
* - Lack of host-side transaction scheduling, for all transfer types.
* The hardware doesn't do it; instead, software must.
*
* This is not an issue for OTG devices that don't support external
* hubs, but for more "normal" USB hosts it's a user issue that the
* "multipoint" support doesn't scale in the expected ways. That
* includes DaVinci EVM in a common non-OTG mode.
*
* * Control and bulk use dedicated endpoints, and there's as
* yet no mechanism to either (a) reclaim the hardware when
* peripherals are NAKing, which gets complicated with bulk
* endpoints, or (b) use more than a single bulk endpoint in
* each direction.
*
* RESULT: one device may be perceived as blocking another one.
*
* * Interrupt and isochronous will dynamically allocate endpoint
* hardware, but (a) there's no record keeping for bandwidth;
* (b) in the common case that few endpoints are available, there
* is no mechanism to reuse endpoints to talk to multiple devices.
*
* RESULT: At one extreme, bandwidth can be overcommitted in
* some hardware configurations, no faults will be reported.
* At the other extreme, the bandwidth capabilities which do
* exist tend to be severely undercommitted. You can't yet hook
* up both a keyboard and a mouse to an external USB hub.
*/
/*
* This gets many kinds of configuration information:
* - Kconfig for everything user-configurable
* - platform_device for addressing, irq, and platform_data
* - platform_data is mostly for board-specific information
 * (plus recently, SoC or family details)
*
* Most of the conditional compilation will (someday) vanish.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/of.h>
#include "musb_core.h"
#include "musb_trace.h"
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
#define MUSB_VERSION "6.0"
#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
/*-------------------------------------------------------------------------*/
static inline struct musb *dev_to_musb(struct device *dev)
{
return dev_get_drvdata(dev);
}
enum musb_mode musb_get_mode(struct device *dev)
{
enum usb_dr_mode mode;
mode = usb_get_dr_mode(dev);
switch (mode) {
case USB_DR_MODE_HOST:
return MUSB_HOST;
case USB_DR_MODE_PERIPHERAL:
return MUSB_PERIPHERAL;
case USB_DR_MODE_OTG:
case USB_DR_MODE_UNKNOWN:
default:
return MUSB_OTG;
}
}
EXPORT_SYMBOL_GPL(musb_get_mode);
/*-------------------------------------------------------------------------*/
static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
{
void __iomem *addr = phy->io_priv;
int i = 0;
u8 r;
u8 power;
int ret;
pm_runtime_get_sync(phy->io_dev);
/* Make sure the transceiver is not in low power mode */
power = musb_readb(addr, MUSB_POWER);
power &= ~MUSB_POWER_SUSPENDM;
musb_writeb(addr, MUSB_POWER, power);
/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
* ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
*/
musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
& MUSB_ULPI_REG_CMPLT)) {
i++;
if (i == 10000) {
ret = -ETIMEDOUT;
goto out;
}
}
r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
r &= ~MUSB_ULPI_REG_CMPLT;
musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
out:
pm_runtime_put(phy->io_dev);
return ret;
}
static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
{
void __iomem *addr = phy->io_priv;
int i = 0;
u8 r = 0;
u8 power;
int ret = 0;
pm_runtime_get_sync(phy->io_dev);
/* Make sure the transceiver is not in low power mode */
power = musb_readb(addr, MUSB_POWER);
power &= ~MUSB_POWER_SUSPENDM;
musb_writeb(addr, MUSB_POWER, power);
musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
& MUSB_ULPI_REG_CMPLT)) {
i++;
if (i == 10000) {
ret = -ETIMEDOUT;
goto out;
}
}
r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
r &= ~MUSB_ULPI_REG_CMPLT;
musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
out:
pm_runtime_put(phy->io_dev);
return ret;
}
static struct usb_phy_io_ops musb_ulpi_access = {
.read = musb_ulpi_read,
.write = musb_ulpi_write,
};
/*-------------------------------------------------------------------------*/
static u32 musb_default_fifo_offset(u8 epnum)
{
return 0x20 + (epnum * 4);
}
/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
}
static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
{
return 0x100 + (0x10 * epnum) + offset;
}
/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
musb_writeb(mbase, MUSB_INDEX, epnum);
}
static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
{
return 0x10 + offset;
}
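/*
 * Example of the two mappings: the ep3 CSRs live at 0x100 + 0x10 * 3 in
 * the flat layout, while the indexed layout first writes 3 to
 * MUSB_INDEX and then accesses the shared bank at 0x10 + offset.
 */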
static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
{
return 0x80 + (0x08 * epnum) + offset;
}
static u8 musb_default_readb(void __iomem *addr, u32 offset)
{
u8 data = __raw_readb(addr + offset);
trace_musb_readb(__builtin_return_address(0), addr, offset, data);
return data;
}
static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
{
trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
static u16 musb_default_readw(void __iomem *addr, u32 offset)
{
u16 data = __raw_readw(addr + offset);
trace_musb_readw(__builtin_return_address(0), addr, offset, data);
return data;
}
static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
{
trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
{
void __iomem *epio = qh->hw_ep->regs;
u16 csr;
if (is_out)
csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
else
csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
return csr;
}
static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
struct urb *urb)
{
u16 csr;
u16 toggle;
toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
if (is_out)
csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
| MUSB_TXCSR_H_DATATOGGLE)
: MUSB_TXCSR_CLRDATATOG;
else
csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
| MUSB_RXCSR_H_DATATOGGLE) : 0;
return csr;
}
/*
* Load an endpoint's FIFO
*/
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
const u8 *src)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
if (unlikely(len == 0))
return;
prefetch((u8 *)src);
dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'T', hw_ep->epnum, fifo, len, src);
/* we can't assume unaligned reads work */
if (likely((0x01 & (unsigned long) src) == 0)) {
u16 index = 0;
/* best case is 32bit-aligned source address */
if ((0x02 & (unsigned long) src) == 0) {
if (len >= 4) {
iowrite32_rep(fifo, src + index, len >> 2);
index += len & ~0x03;
}
if (len & 0x02) {
__raw_writew(*(u16 *)&src[index], fifo);
index += 2;
}
} else {
if (len >= 2) {
iowrite16_rep(fifo, src + index, len >> 1);
index += len & ~0x01;
}
}
if (len & 0x01)
__raw_writeb(src[index], fifo);
} else {
/* byte aligned */
iowrite8_rep(fifo, src, len);
}
}
/*
* Unload an endpoint's FIFO
*/
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
if (unlikely(len == 0))
return;
dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'R', hw_ep->epnum, fifo, len, dst);
/* we can't assume unaligned writes work */
if (likely((0x01 & (unsigned long) dst) == 0)) {
u16 index = 0;
/* best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) dst) == 0) {
if (len >= 4) {
ioread32_rep(fifo, dst, len >> 2);
index = len & ~0x03;
}
if (len & 0x02) {
*(u16 *)&dst[index] = __raw_readw(fifo);
index += 2;
}
} else {
if (len >= 2) {
ioread16_rep(fifo, dst, len >> 1);
index = len & ~0x01;
}
}
if (len & 0x01)
dst[index] = __raw_readb(fifo);
} else {
/* byte aligned */
ioread8_rep(fifo, dst, len);
}
}
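/*
 * Both FIFO helpers above pick the widest access the buffer alignment
 * allows: e.g. a 32-bit aligned 7 byte transfer is done as one 32-bit
 * access, one 16-bit access and one final byte access.
 */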
/*
* Old style IO functions
*/
u8 (*musb_readb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readb);
void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);
u8 (*musb_clearb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_clearb);
u16 (*musb_readw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readw);
void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);
u16 (*musb_clearw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_clearw);
u32 musb_readl(void __iomem *addr, u32 offset)
{
u32 data = __raw_readl(addr + offset);
trace_musb_readl(__builtin_return_address(0), addr, offset, data);
return data;
}
EXPORT_SYMBOL_GPL(musb_readl);
void musb_writel(void __iomem *addr, u32 offset, u32 data)
{
trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
}
EXPORT_SYMBOL_GPL(musb_writel);
#ifndef CONFIG_MUSB_PIO_ONLY
struct dma_controller *
(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
EXPORT_SYMBOL(musb_dma_controller_create);
void (*musb_dma_controller_destroy)(struct dma_controller *c);
EXPORT_SYMBOL(musb_dma_controller_destroy);
#endif
/*
* New style IO functions
*/
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
}
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}
static u8 musb_read_devctl(struct musb *musb)
{
return musb_readb(musb->mregs, MUSB_DEVCTL);
}
/**
* musb_set_host - set and initialize host mode
* @musb: musb controller driver data
*
* At least some musb revisions need to enable devctl session bit in
* peripheral mode to switch to host mode. Initializes things to host
* mode and sets A_IDLE. SoC glue needs to advance state further
* based on phy provided VBUS state.
*
* Note that the SoC glue code may need to wait for musb to settle
* on enable before calling this to avoid babble.
*/
int musb_set_host(struct musb *musb)
{
int error = 0;
u8 devctl;
if (!musb)
return -EINVAL;
devctl = musb_read_devctl(musb);
if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
trace_musb_state(musb, devctl, "Already in host mode");
goto init_data;
}
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
error = readx_poll_timeout(musb_read_devctl, musb, devctl,
!(devctl & MUSB_DEVCTL_BDEVICE), 5000,
1000000);
if (error) {
dev_err(musb->controller, "%s: could not set host: %02x\n",
__func__, devctl);
return error;
}
devctl = musb_read_devctl(musb);
trace_musb_state(musb, devctl, "Host mode set");
init_data:
musb->is_active = 1;
musb_set_state(musb, OTG_STATE_A_IDLE);
MUSB_HST_MODE(musb);
return error;
}
EXPORT_SYMBOL_GPL(musb_set_host);
/**
* musb_set_peripheral - set and initialize peripheral mode
* @musb: musb controller driver data
*
* Clears devctl session bit and initializes things for peripheral
* mode and sets B_IDLE. SoC glue needs to advance state further
* based on phy provided VBUS state.
*/
int musb_set_peripheral(struct musb *musb)
{
int error = 0;
u8 devctl;
if (!musb)
return -EINVAL;
devctl = musb_read_devctl(musb);
if (devctl & MUSB_DEVCTL_BDEVICE) {
trace_musb_state(musb, devctl, "Already in peripheral mode");
goto init_data;
}
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
error = readx_poll_timeout(musb_read_devctl, musb, devctl,
devctl & MUSB_DEVCTL_BDEVICE, 5000,
1000000);
if (error) {
dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
__func__, devctl);
return error;
}
devctl = musb_read_devctl(musb);
trace_musb_state(musb, devctl, "Peripheral mode set");
init_data:
musb->is_active = 0;
musb_set_state(musb, OTG_STATE_B_IDLE);
MUSB_DEV_MODE(musb);
return error;
}
EXPORT_SYMBOL_GPL(musb_set_peripheral);
/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = {
/* implicit SYNC then DATA0 to start */
/* JKJKJKJK x9 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* JJKKJJKK x8 */
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
/* JJJJKKKK x8 */
0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
/* JJJJJJJKKKKKKK x8 */
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
/* JJJJJJJK x8 */
0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
/* JKKKKKKK x10, JK */
0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
/* implicit CRC16 then EOP to end */
};
void musb_load_testpacket(struct musb *musb)
{
void __iomem *regs = musb->endpoints[0].regs;
musb_ep_select(musb->mregs, 0);
musb_write_fifo(musb->control_ep,
sizeof(musb_test_packet), musb_test_packet);
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}
/*-------------------------------------------------------------------------*/
/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
static void musb_otg_timer_func(struct timer_list *t)
{
struct musb *musb = from_timer(musb, t, otg_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
switch (musb_get_state(musb)) {
case OTG_STATE_B_WAIT_ACON:
musb_dbg(musb,
"HNP: b_wait_acon timeout; back to b_peripheral");
musb_g_disconnect(musb);
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
musb->is_active = 0;
break;
case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
musb_dbg(musb, "HNP: %s timeout",
musb_otg_state_string(musb));
musb_platform_set_vbus(musb, 0);
musb_set_state(musb, OTG_STATE_A_WAIT_VFALL);
break;
default:
musb_dbg(musb, "HNP: Unhandled mode %s",
musb_otg_state_string(musb));
}
spin_unlock_irqrestore(&musb->lock, flags);
}
/*
* Stops the HNP transition. Caller must take care of locking.
*/
void musb_hnp_stop(struct musb *musb)
{
struct usb_hcd *hcd = musb->hcd;
void __iomem *mbase = musb->mregs;
u8 reg;
musb_dbg(musb, "HNP: stop from %s", musb_otg_state_string(musb));
switch (musb_get_state(musb)) {
case OTG_STATE_A_PERIPHERAL:
musb_g_disconnect(musb);
musb_dbg(musb, "HNP: back to %s", musb_otg_state_string(musb));
break;
case OTG_STATE_B_HOST:
musb_dbg(musb, "HNP: Disabling HR");
if (hcd)
hcd->self.is_b_host = 0;
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
MUSB_DEV_MODE(musb);
reg = musb_readb(mbase, MUSB_POWER);
reg |= MUSB_POWER_SUSPENDM;
musb_writeb(mbase, MUSB_POWER, reg);
/* REVISIT: Start SESSION_REQUEST here? */
break;
default:
musb_dbg(musb, "HNP: Stopping in unknown state %s",
musb_otg_state_string(musb));
}
/*
* When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after connect"
* errors.
*/
musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
static void musb_recover_from_babble(struct musb *musb);
static void musb_handle_intr_resume(struct musb *musb, u8 devctl)
{
musb_dbg(musb, "RESUME (%s)", musb_otg_state_string(musb));
if (devctl & MUSB_DEVCTL_HM) {
switch (musb_get_state(musb)) {
case OTG_STATE_A_SUSPEND:
/* remote wakeup? */
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
musb_set_state(musb, OTG_STATE_A_HOST);
musb->is_active = 1;
musb_host_resume_root_hub(musb);
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case OTG_STATE_B_WAIT_ACON:
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
musb->is_active = 1;
MUSB_DEV_MODE(musb);
break;
default:
WARNING("bogus %s RESUME (%s)\n",
"host",
musb_otg_state_string(musb));
}
} else {
switch (musb_get_state(musb)) {
case OTG_STATE_A_SUSPEND:
/* possibly DISCONNECT is upcoming */
musb_set_state(musb, OTG_STATE_A_HOST);
musb_host_resume_root_hub(musb);
break;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_PERIPHERAL:
/* disconnect while suspended? we may
* not get a disconnect irq...
*/
if ((devctl & MUSB_DEVCTL_VBUS)
!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
) {
musb->int_usb |= MUSB_INTR_DISCONNECT;
musb->int_usb &= ~MUSB_INTR_SUSPEND;
break;
}
musb_g_resume(musb);
break;
case OTG_STATE_B_IDLE:
musb->int_usb &= ~MUSB_INTR_SUSPEND;
break;
default:
WARNING("bogus %s RESUME (%s)\n",
"peripheral",
musb_otg_state_string(musb));
}
}
}
/* return IRQ_HANDLED to tell the caller to return immediately */
static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl)
{
void __iomem *mbase = musb->mregs;
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
&& (devctl & MUSB_DEVCTL_BDEVICE)) {
musb_dbg(musb, "SessReq while on B state");
return IRQ_HANDLED;
}
musb_dbg(musb, "SESSION_REQUEST (%s)", musb_otg_state_string(musb));
/* IRQ arrives from ID pin sense or (later, if VBUS power
	 * is removed) SRP. Responses are time critical:
* - turn on VBUS (with silicon-specific mechanism)
* - go through A_WAIT_VRISE
* - ... to A_WAIT_BCON.
* a_wait_vrise_tmout triggers VBUS_ERROR transitions
*/
musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
musb->ep0_stage = MUSB_EP0_START;
musb_set_state(musb, OTG_STATE_A_IDLE);
MUSB_HST_MODE(musb);
musb_platform_set_vbus(musb, 1);
return IRQ_NONE;
}
static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl)
{
int ignore = 0;
	/* During connection as an A-Device, we may see short
	 * current spikes causing voltage drop, because of cable
* and peripheral capacitance combined with vbus draw.
* (So: less common with truly self-powered devices, where
* vbus doesn't act like a power supply.)
*
* Such spikes are short; usually less than ~500 usec, max
* of ~2 msec. That is, they're not sustained overcurrent
* errors, though they're reported using VBUSERROR irqs.
*
* Workarounds: (a) hardware: use self powered devices.
* (b) software: ignore non-repeated VBUS errors.
*
* REVISIT: do delays from lots of DEBUG_KERNEL checks
	 * make trouble here, keeping VBUS < 4.4V?
*/
switch (musb_get_state(musb)) {
case OTG_STATE_A_HOST:
/* recovery is dicey once we've gotten past the
* initial stages of enumeration, but if VBUS
* stayed ok at the other end of the link, and
* another reset is due (at least for high speed,
* to redo the chirp etc), it might work OK...
*/
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_WAIT_VRISE:
if (musb->vbuserr_retry) {
void __iomem *mbase = musb->mregs;
musb->vbuserr_retry--;
ignore = 1;
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(mbase, MUSB_DEVCTL, devctl);
} else {
musb->port1_status |=
USB_PORT_STAT_OVERCURRENT
| (USB_PORT_STAT_C_OVERCURRENT << 16);
}
break;
default:
break;
}
dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
musb_otg_state_string(musb),
devctl,
({ char *s;
switch (devctl & MUSB_DEVCTL_VBUS) {
case 0 << MUSB_DEVCTL_VBUS_SHIFT:
s = "<SessEnd"; break;
case 1 << MUSB_DEVCTL_VBUS_SHIFT:
s = "<AValid"; break;
case 2 << MUSB_DEVCTL_VBUS_SHIFT:
s = "<VBusValid"; break;
/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
default:
s = "VALID"; break;
} s; }),
VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
musb->port1_status);
/* go through A_WAIT_VFALL then start a new session */
if (!ignore)
musb_platform_set_vbus(musb, 0);
}
static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
{
musb_dbg(musb, "SUSPEND (%s) devctl %02x",
musb_otg_state_string(musb), devctl);
switch (musb_get_state(musb)) {
case OTG_STATE_A_PERIPHERAL:
/* We also come here if the cable is removed, since
* this silicon doesn't report ID-no-longer-grounded.
*
* We depend on T(a_wait_bcon) to shut us down, and
* hope users don't do anything dicey during this
* undesired detour through A_WAIT_BCON.
*/
musb_hnp_stop(musb);
musb_host_resume_root_hub(musb);
musb_root_disconnect(musb);
musb_platform_try_idle(musb, jiffies
+ msecs_to_jiffies(musb->a_wait_bcon
? : OTG_TIME_A_WAIT_BCON));
break;
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
musb_set_state(musb, OTG_STATE_B_WAIT_ACON);
musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(
OTG_TIME_B_ASE0_BRST));
}
break;
case OTG_STATE_A_WAIT_BCON:
if (musb->a_wait_bcon != 0)
musb_platform_try_idle(musb, jiffies
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
case OTG_STATE_A_HOST:
musb_set_state(musb, OTG_STATE_A_SUSPEND);
musb->is_active = musb->hcd->self.b_hnp_enable;
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
break;
default:
/* "should not happen" */
musb->is_active = 0;
break;
}
}
static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb)
{
struct usb_hcd *hcd = musb->hcd;
musb->is_active = 1;
musb->ep0_stage = MUSB_EP0_START;
musb->intrtxe = musb->epmask;
musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
musb->intrrxe = musb->epmask & 0xfffe;
musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
|USB_PORT_STAT_HIGH_SPEED
|USB_PORT_STAT_ENABLE
);
musb->port1_status |= USB_PORT_STAT_CONNECTION
|(USB_PORT_STAT_C_CONNECTION << 16);
/* high vs full speed is just a guess until after reset */
if (devctl & MUSB_DEVCTL_LSDEV)
musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
/* indicate new connection to OTG machine */
switch (musb_get_state(musb)) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
int_usb &= ~MUSB_INTR_SUSPEND;
goto b_host;
} else
musb_dbg(musb, "CONNECT as b_peripheral???");
break;
case OTG_STATE_B_WAIT_ACON:
musb_dbg(musb, "HNP: CONNECT, now b_host");
b_host:
musb_set_state(musb, OTG_STATE_B_HOST);
if (musb->hcd)
musb->hcd->self.is_b_host = 1;
del_timer(&musb->otg_timer);
break;
default:
if ((devctl & MUSB_DEVCTL_VBUS)
== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
musb_set_state(musb, OTG_STATE_A_HOST);
if (hcd)
hcd->self.is_b_host = 0;
}
break;
}
musb_host_poke_root_hub(musb);
musb_dbg(musb, "CONNECT (%s) devctl %02x",
musb_otg_state_string(musb), devctl);
}
static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
{
musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
musb_otg_state_string(musb),
MUSB_MODE(musb), devctl);
switch (musb_get_state(musb)) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_SUSPEND:
musb_host_resume_root_hub(musb);
musb_root_disconnect(musb);
if (musb->a_wait_bcon != 0)
musb_platform_try_idle(musb, jiffies
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
case OTG_STATE_B_HOST:
/* REVISIT this behaves for "real disconnect"
		 * cases; make sure the other transitions from
		 * B_HOST act right too. The B_HOST code
* in hnp_stop() is currently not used...
*/
musb_root_disconnect(musb);
if (musb->hcd)
musb->hcd->self.is_b_host = 0;
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
MUSB_DEV_MODE(musb);
musb_g_disconnect(musb);
break;
case OTG_STATE_A_PERIPHERAL:
musb_hnp_stop(musb);
musb_root_disconnect(musb);
fallthrough;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_g_disconnect(musb);
break;
default:
WARNING("unhandled DISCONNECT transition (%s)\n",
musb_otg_state_string(musb));
break;
}
}
/*
* mentor saves a bit: bus reset and babble share the same irq.
* only host sees babble; only peripheral sees bus reset.
*/
static void musb_handle_intr_reset(struct musb *musb)
{
if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
* implemented proprietary means for 'recovering' from
* Babble conditions. One such platform is AM335x. In
* most cases, however, the only thing we can do is
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
musb_recover_from_babble(musb);
} else {
musb_dbg(musb, "BUS RESET as %s", musb_otg_state_string(musb));
switch (musb_get_state(musb)) {
case OTG_STATE_A_SUSPEND:
musb_g_reset(musb);
fallthrough;
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
musb_dbg(musb, "HNP: in %s, %d msec timeout",
musb_otg_state_string(musb),
TA_WAIT_BCON(musb));
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
break;
case OTG_STATE_A_PERIPHERAL:
del_timer(&musb->otg_timer);
musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
musb_otg_state_string(musb));
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
musb_g_reset(musb);
break;
case OTG_STATE_B_IDLE:
musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
break;
default:
musb_dbg(musb, "Unhandled BUS RESET as %s",
musb_otg_state_string(musb));
}
}
}
/*
* Interrupt Service Routine to record USB "global" interrupts.
* Since these do not happen often and signify things of
* paramount importance, it seems OK to check them individually;
* the order of the tests is specified in the manual
*
* @param musb instance pointer
* @param int_usb register contents
* @param devctl
*/
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
u8 devctl)
{
irqreturn_t handled = IRQ_NONE;
musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
/* in host mode, the peripheral may issue remote wakeup.
* in peripheral mode, the host may resume the link.
* spurious RESUME irqs happen too, paired with SUSPEND.
*/
if (int_usb & MUSB_INTR_RESUME) {
musb_handle_intr_resume(musb, devctl);
handled = IRQ_HANDLED;
}
/* see manual for the order of the tests */
if (int_usb & MUSB_INTR_SESSREQ) {
if (musb_handle_intr_sessreq(musb, devctl))
return IRQ_HANDLED;
handled = IRQ_HANDLED;
}
if (int_usb & MUSB_INTR_VBUSERROR) {
musb_handle_intr_vbuserr(musb, devctl);
handled = IRQ_HANDLED;
}
if (int_usb & MUSB_INTR_SUSPEND) {
musb_handle_intr_suspend(musb, devctl);
handled = IRQ_HANDLED;
}
if (int_usb & MUSB_INTR_CONNECT) {
musb_handle_intr_connect(musb, devctl, int_usb);
handled = IRQ_HANDLED;
}
if (int_usb & MUSB_INTR_DISCONNECT) {
musb_handle_intr_disconnect(musb, devctl);
handled = IRQ_HANDLED;
}
if (int_usb & MUSB_INTR_RESET) {
musb_handle_intr_reset(musb);
handled = IRQ_HANDLED;
}
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
* supporting transfer phasing to prevent exceeding ISO bandwidth
* limits of a given frame or microframe.
*
* It's not needed for peripheral side, which dedicates endpoints;
* though it _might_ use SOF irqs for other purposes.
*
* And it's not currently needed for host side, which also dedicates
* endpoints, relies on TX/RX interval registers, and isn't claimed
* to support ISO transfers yet.
*/
if (int_usb & MUSB_INTR_SOF) {
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *ep;
u8 epnum;
u16 frame;
dev_dbg(musb->controller, "START_OF_FRAME\n");
handled = IRQ_HANDLED;
/* start any periodic Tx transfers waiting for current frame */
frame = musb_readw(mbase, MUSB_FRAME);
ep = musb->endpoints;
for (epnum = 1; (epnum < musb->nr_endpoints)
&& (musb->epmask >= (1 << epnum));
epnum++, ep++) {
/*
* FIXME handle framecounter wraps (12 bits)
* eliminate duplicated StartUrb logic
*/
if (ep->dwWaitFrame >= frame) {
ep->dwWaitFrame = 0;
pr_debug("SOF --> periodic TX%s on %d\n",
ep->tx_channel ? " DMA" : "",
epnum);
if (!ep->tx_channel)
musb_h_tx_start(musb, epnum);
else
cppi_hostdma_start(musb, epnum);
}
} /* end of for loop */
}
#endif
schedule_delayed_work(&musb->irq_work, 0);
return handled;
}
/*-------------------------------------------------------------------------*/
static void musb_disable_interrupts(struct musb *musb)
{
void __iomem *mbase = musb->mregs;
/* disable interrupts */
musb_writeb(mbase, MUSB_INTRUSBE, 0);
musb->intrtxe = 0;
musb_writew(mbase, MUSB_INTRTXE, 0);
musb->intrrxe = 0;
musb_writew(mbase, MUSB_INTRRXE, 0);
/* flush pending interrupts; these status registers are clear-on-read */
musb_clearb(mbase, MUSB_INTRUSB);
musb_clearw(mbase, MUSB_INTRTX);
musb_clearw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
{
void __iomem *regs = musb->mregs;
/* Set INT enable registers, enable interrupts */
musb->intrtxe = musb->epmask;
musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
musb->intrrxe = musb->epmask & 0xfffe;
musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
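/*
 * 0xf7 enables every usb-level interrupt except SOF (MUSB_INTR_SOF,
 * bit 3), which this driver does not use; see musb_stage0_irq().
 */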
musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
}
/*
* Program the HDRC to start (enable interrupts, dma, etc.).
*/
void musb_start(struct musb *musb)
{
void __iomem *regs = musb->mregs;
u8 devctl = musb_readb(regs, MUSB_DEVCTL);
u8 power;
musb_dbg(musb, "<== devctl %02x", devctl);
musb_enable_interrupts(musb);
musb_writeb(regs, MUSB_TESTMODE, 0);
power = MUSB_POWER_ISOUPDATE;
/*
* treating UNKNOWN as unspecified maximum speed, in which case
* we will default to high-speed.
*/
if (musb->config->maximum_speed == USB_SPEED_HIGH ||
musb->config->maximum_speed == USB_SPEED_UNKNOWN)
power |= MUSB_POWER_HSENAB;
musb_writeb(regs, MUSB_POWER, power);
musb->is_active = 0;
devctl = musb_readb(regs, MUSB_DEVCTL);
devctl &= ~MUSB_DEVCTL_SESSION;
/* session started after:
* (a) ID-grounded irq, host mode;
* (b) vbus present/connect IRQ, peripheral mode;
* (c) peripheral initiates, using SRP
*/
if (musb->port_mode != MUSB_HOST &&
musb_get_state(musb) != OTG_STATE_A_WAIT_BCON &&
(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
musb->is_active = 1;
} else {
devctl |= MUSB_DEVCTL_SESSION;
}
musb_platform_enable(musb);
musb_writeb(regs, MUSB_DEVCTL, devctl);
}
/*
* Make the HDRC stop (disable interrupts, etc.);
* reversible by musb_start
* called on gadget driver unregister
* with controller locked, irqs blocked
* acts as a NOP unless some role activated the hardware
*/
void musb_stop(struct musb *musb)
{
/* stop IRQs, timers, ... */
musb_platform_disable(musb);
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
/* FIXME
* - mark host and/or peripheral drivers unusable/inactive
* - disable DMA (and enable it in HdrcStart)
* - make sure we can musb_start() after musb_stop(); with
* OTG mode, gadget driver module rmmod/modprobe cycles that
* - ...
*/
musb_platform_try_idle(musb, 0);
}
/*-------------------------------------------------------------------------*/
/*
* The silicon either has hard-wired endpoint configurations, or else
* "dynamic fifo" sizing. The driver has support for both, though at this
* writing only the dynamic sizing is very well tested. Since we switched
* away from compile-time hardware parameters, we can no longer rely on
* dead code elimination to leave only the relevant one in the object file.
*
* We don't currently use dynamic fifo setup capability to do anything
* more than selecting one of a bunch of predefined configurations.
*/
static ushort fifo_mode;
/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
/*
* tables defining fifo_mode values. define more if you like.
* for host side, make sure both halves of ep1 are set up.
*/
/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
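/*
 * A board can bypass fifo_mode entirely by supplying its own table
 * through musb_hdrc_config. Illustrative sketch only; the names below
 * are hypothetical and not part of this driver:
 */
#if 0
static struct musb_fifo_cfg my_board_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 64, },
};

static struct musb_hdrc_config my_board_config = {
.fifo_cfg = my_board_fifo_cfg,
.fifo_cfg_size = ARRAY_SIZE(my_board_fifo_cfg),
.num_eps = 16,
.ram_bits = 12,
};
#endif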
/*
* configure a fifo; for non-shared endpoints, this may be called
* once for a tx fifo and once for an rx fifo.
*
* returns negative errno or offset for next fifo.
*/
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
const struct musb_fifo_cfg *cfg, u16 offset)
{
void __iomem *mbase = musb->mregs;
int size = 0;
u16 maxpacket = cfg->maxpacket;
u16 c_off = offset >> 3;
u8 c_size;
/* expect hw_ep has already been zero-initialized */
size = ffs(max(maxpacket, (u16) 8)) - 1;
maxpacket = 1 << size;
c_size = size - 3;
if (cfg->mode == BUF_DOUBLE) {
if ((offset + (maxpacket << 1)) >
(1 << (musb->config->ram_bits + 2)))
return -EMSGSIZE;
c_size |= MUSB_FIFOSZ_DPB;
} else {
if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
return -EMSGSIZE;
}
/* configure the FIFO */
musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
/* EP0 reserved endpoint for control, bidirectional;
* EP1 reserved for bulk, two unidirectional halves.
*/
if (hw_ep->epnum == 1)
musb->bulk_ep = hw_ep;
/* REVISIT error check: be sure ep0 can both rx and tx ... */
switch (cfg->style) {
case FIFO_TX:
musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
musb_writew(mbase, MUSB_TXFIFOADD, c_off);
hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_tx = maxpacket;
break;
case FIFO_RX:
musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_rx = maxpacket;
break;
case FIFO_RXTX:
musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
musb_writew(mbase, MUSB_TXFIFOADD, c_off);
hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_rx = maxpacket;
musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
hw_ep->max_packet_sz_tx = maxpacket;
hw_ep->is_shared_fifo = true;
break;
}
/* NOTE rx and tx endpoint irqs aren't managed separately,
* which happens to be ok
*/
musb->epmask |= (1 << hw_ep->epnum);
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
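/*
 * Worked example of the encoding above: a 512-byte double-buffered
 * fifo placed at offset 512 gets c_size = (ffs(512) - 1) - 3 = 6 with
 * MUSB_FIFOSZ_DPB or'd in, and c_off = 512 >> 3 = 64; it occupies
 * 2 * 512 = 1024 bytes, so the next fifo starts at offset 1536.
 */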
static struct musb_fifo_cfg ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
static int ep_config_from_table(struct musb *musb)
{
const struct musb_fifo_cfg *cfg;
unsigned i, n;
int offset;
struct musb_hw_ep *hw_ep = musb->endpoints;
if (musb->config->fifo_cfg) {
cfg = musb->config->fifo_cfg;
n = musb->config->fifo_cfg_size;
goto done;
}
switch (fifo_mode) {
default:
fifo_mode = 0;
fallthrough;
case 0:
cfg = mode_0_cfg;
n = ARRAY_SIZE(mode_0_cfg);
break;
case 1:
cfg = mode_1_cfg;
n = ARRAY_SIZE(mode_1_cfg);
break;
case 2:
cfg = mode_2_cfg;
n = ARRAY_SIZE(mode_2_cfg);
break;
case 3:
cfg = mode_3_cfg;
n = ARRAY_SIZE(mode_3_cfg);
break;
case 4:
cfg = mode_4_cfg;
n = ARRAY_SIZE(mode_4_cfg);
break;
case 5:
cfg = mode_5_cfg;
n = ARRAY_SIZE(mode_5_cfg);
break;
}
pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
done:
offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
/* assert(offset > 0) */
/* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
* be better than static musb->config->num_eps and DYN_FIFO_SIZE...
*/
for (i = 0; i < n; i++) {
u8 epn = cfg->hw_ep_num;
if (epn >= musb->config->num_eps) {
pr_debug("%s: invalid ep %d\n",
musb_driver_name, epn);
return -EINVAL;
}
offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
if (offset < 0) {
pr_debug("%s: mem overrun, ep %d\n",
musb_driver_name, epn);
return offset;
}
epn++;
musb->nr_endpoints = max(epn, musb->nr_endpoints);
}
pr_debug("%s: %d/%d max ep, %d/%d memory\n",
musb_driver_name,
n + 1, musb->config->num_eps * 2 - 1,
offset, (1 << (musb->config->ram_bits + 2)));
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
return 0;
}
/*
* ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
* @param musb the controller
*/
static int ep_config_from_hw(struct musb *musb)
{
u8 epnum = 0;
struct musb_hw_ep *hw_ep;
void __iomem *mbase = musb->mregs;
int ret = 0;
musb_dbg(musb, "<== static silicon ep config");
/* FIXME pick up ep0 maxpacket size */
for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
musb_ep_select(mbase, epnum);
hw_ep = musb->endpoints + epnum;
ret = musb_read_fifosize(musb, hw_ep, epnum);
if (ret < 0)
break;
/* FIXME set up hw_ep->{rx,tx}_double_buffered */
/* pick an RX/TX endpoint for bulk */
if (hw_ep->max_packet_sz_tx < 512
|| hw_ep->max_packet_sz_rx < 512)
continue;
/* REVISIT: this algorithm is lazy, we should at least
* try to pick a double buffered endpoint.
*/
if (musb->bulk_ep)
continue;
musb->bulk_ep = hw_ep;
}
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
return 0;
}
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
* configure endpoints, or take their config from silicon
*/
static int musb_core_init(u16 musb_type, struct musb *musb)
{
u8 reg;
char *type;
char aInfo[90];
void __iomem *mbase = musb->mregs;
int status = 0;
int i;
/* log core options (read using indexed model) */
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
if (reg & MUSB_CONFIGDATA_DYNFIFO) {
strcat(aInfo, ", dyn FIFOs");
musb->dyn_fifo = true;
}
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
musb->bulk_combine = true;
}
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
musb->bulk_split = true;
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
musb->hb_iso_rx = true;
}
if (reg & MUSB_CONFIGDATA_HBTXE) {
strcat(aInfo, ", HB-ISO Tx");
musb->hb_iso_tx = true;
}
if (reg & MUSB_CONFIGDATA_SOFTCONE)
strcat(aInfo, ", SoftConn");
pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
} else {
musb->is_multipoint = 0;
type = "";
if (IS_ENABLED(CONFIG_USB) &&
!IS_ENABLED(CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB)) {
pr_err("%s: kernel must disable external hubs, please fix the configuration\n",
musb_driver_name);
}
}
/* log release info */
musb->hwvers = musb_readw(mbase, MUSB_HWVERS);
pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
MUSB_HWVERS_MINOR(musb->hwvers),
(musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
/* configure ep0 */
musb_configure_ep0(musb);
/* discover endpoint configuration */
musb->nr_endpoints = 1;
musb->epmask = 1;
if (musb->dyn_fifo)
status = ep_config_from_table(musb);
else
status = ep_config_from_hw(musb);
if (status < 0)
return status;
/* finish init, and print endpoint config */
for (i = 0; i < musb->nr_endpoints; i++) {
struct musb_hw_ep *hw_ep = musb->endpoints + i;
hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
if (musb->ops->quirks & MUSB_IN_TUSB) {
hw_ep->fifo_async = musb->async + 0x400 +
musb->io.fifo_offset(i);
hw_ep->fifo_sync = musb->sync + 0x400 +
musb->io.fifo_offset(i);
hw_ep->fifo_sync_va =
musb->sync_va + 0x400 + musb->io.fifo_offset(i);
if (i == 0)
hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
else
hw_ep->conf = mbase + 0x400 +
(((i - 1) & 0xf) << 2);
}
#endif
hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
if (hw_ep->max_packet_sz_tx) {
musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
hw_ep->is_shared_fifo ? "shared" : "tx",
hw_ep->tx_double_buffered
? "doublebuffer, " : "",
hw_ep->max_packet_sz_tx);
}
if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
"rx",
hw_ep->rx_double_buffered
? "doublebuffer, " : "",
hw_ep->max_packet_sz_rx);
}
if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
musb_dbg(musb, "hw_ep %d not configured", i);
}
return 0;
}
/*-------------------------------------------------------------------------*/
/*
* handle all the irqs defined by the HDRC core. for now we expect: other
* irq sources (phy, dma, etc) will be handled first, musb->int_* values
* will be assigned, and the irq will already have been acked.
*
* called in irq context with spinlock held, irqs blocked
*/
irqreturn_t musb_interrupt(struct musb *musb)
{
irqreturn_t retval = IRQ_NONE;
unsigned long status;
unsigned long epnum;
u8 devctl;
if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
return IRQ_NONE;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
trace_musb_isr(musb);
/*
* According to Mentor Graphics' documentation, flowchart on page 98,
* IRQ should be handled as follows:
*
* . Resume IRQ
* . Session Request IRQ
* . VBUS Error IRQ
* . Suspend IRQ
* . Connect IRQ
* . Disconnect IRQ
* . Reset/Babble IRQ
* . SOF IRQ (we're not using this one)
* . Endpoint 0 IRQ
* . TX Endpoints
* . RX Endpoints
*
* We will be following that flowchart in order to avoid any problems
* that might arise with internal Finite State Machine.
*/
if (musb->int_usb)
retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
if (musb->int_tx & 1) {
if (is_host_active(musb))
retval |= musb_h_ep0_irq(musb);
else
retval |= musb_g_ep0_irq(musb);
/* we have just handled endpoint 0 IRQ, clear it */
musb->int_tx &= ~BIT(0);
}
status = musb->int_tx;
for_each_set_bit(epnum, &status, 16) {
retval = IRQ_HANDLED;
if (is_host_active(musb))
musb_host_tx(musb, epnum);
else
musb_g_tx(musb, epnum);
}
status = musb->int_rx;
for_each_set_bit(epnum, &status, 16) {
retval = IRQ_HANDLED;
if (is_host_active(musb))
musb_host_rx(musb, epnum);
else
musb_g_rx(musb, epnum);
}
return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
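/*
 * Illustrative sketch of that calling contract; a glue layer ISR
 * (hypothetical, but compare omap2430_musb_interrupt()) fills in
 * musb->int_* under musb->lock and then dispatches here:
 */
#if 0
static irqreturn_t my_glue_interrupt(int irq, void *__hci)
{
struct musb *musb = __hci;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
spin_lock_irqsave(&musb->lock, flags);
/* reading these registers also acks the pending events */
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
if (musb->int_usb || musb->int_tx || musb->int_rx)
ret = musb_interrupt(musb);
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
#endif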
#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = true;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
/* called with controller lock already held */
if (!epnum) {
if (!is_cppi_enabled(musb)) {
/* endpoint 0 */
if (is_host_active(musb))
musb_h_ep0_irq(musb);
else
musb_g_ep0_irq(musb);
}
} else {
/* endpoints 1..15 */
if (transmit) {
if (is_host_active(musb))
musb_host_tx(musb, epnum);
else
musb_g_tx(musb, epnum);
} else {
/* receive */
if (is_host_active(musb))
musb_host_rx(musb, epnum);
else
musb_g_rx(musb, epnum);
}
}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);
#else
#define use_dma 0
#endif
static int (*musb_phy_callback)(enum musb_vbus_id_status status);
/*
 * musb_mailbox - optional phy notifier function
 * @status: phy state change
 *
 * Optionally gets called from the USB PHY. Note that the USB PHY must be
 * disabled at the point the phy_callback is registered or unregistered.
 */
int musb_mailbox(enum musb_vbus_id_status status)
{
if (musb_phy_callback)
return musb_phy_callback(status);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(musb_mailbox);
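/*
 * Illustrative sketch of the producer side (hypothetical names; see
 * omap2430_musb_mailbox() for a phy_callback that consumes these
 * events): a PHY driver reports VBUS/ID transitions from its own
 * event handler.
 */
#if 0
static void my_phy_report_vbus(bool vbus)
{
int error = musb_mailbox(vbus ? MUSB_VBUS_VALID : MUSB_VBUS_OFF);
if (error)
pr_debug("musb not ready for vbus event: %i\n", error);
}
#endif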
/*-------------------------------------------------------------------------*/
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
int ret;
spin_lock_irqsave(&musb->lock, flags);
ret = sprintf(buf, "%s\n", musb_otg_state_string(musb));
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
int status;
spin_lock_irqsave(&musb->lock, flags);
if (sysfs_streq(buf, "host"))
status = musb_platform_set_mode(musb, MUSB_HOST);
else if (sysfs_streq(buf, "peripheral"))
status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
else if (sysfs_streq(buf, "otg"))
status = musb_platform_set_mode(musb, MUSB_OTG);
else
status = -EINVAL;
spin_unlock_irqrestore(&musb->lock, flags);
return (status == 0) ? n : status;
}
static DEVICE_ATTR_RW(mode);
static ssize_t
vbus_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
unsigned long val;
if (sscanf(buf, "%lu", &val) < 1) {
dev_err(dev, "Invalid VBUS timeout ms value\n");
return -EINVAL;
}
spin_lock_irqsave(&musb->lock, flags);
/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
if (musb_get_state(musb) == OTG_STATE_A_WAIT_BCON)
musb->is_active = 0;
musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
spin_unlock_irqrestore(&musb->lock, flags);
return n;
}
static ssize_t
vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
unsigned long val;
int vbus;
u8 devctl;
pm_runtime_get_sync(dev);
spin_lock_irqsave(&musb->lock, flags);
val = musb->a_wait_bcon;
vbus = musb_platform_get_vbus_status(musb);
if (vbus < 0) {
/* Use default MUSB method by means of DEVCTL register */
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if ((devctl & MUSB_DEVCTL_VBUS)
== (3 << MUSB_DEVCTL_VBUS_SHIFT))
vbus = 1;
else
vbus = 0;
}
spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_put_sync(dev);
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
}
static DEVICE_ATTR_RW(vbus);
/* Gadget drivers can't know that a host is connected so they might want
* to start SRP, but users can. This allows userspace to trigger SRP.
*/
static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
unsigned short srp;
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
dev_err(dev, "SRP: Value must be 1\n");
return -EINVAL;
}
if (srp == 1)
musb_g_wakeup(musb);
return n;
}
static DEVICE_ATTR_WO(srp);
static struct attribute *musb_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_vbus.attr,
&dev_attr_srp.attr,
NULL
};
ATTRIBUTE_GROUPS(musb);
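/*
 * These attributes appear in the controller's sysfs directory.
 * Illustrative shell usage; the exact device path varies by platform:
 *
 *	echo host > /sys/bus/platform/devices/musb-hdrc.0.auto/mode
 *	cat /sys/bus/platform/devices/musb-hdrc.0.auto/vbus
 *	echo 1 > /sys/bus/platform/devices/musb-hdrc.0.auto/srp
 */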
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
(3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
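/*
 * The numeric suffixes above encode the matched DEVCTL value, e.g.
 * MUSB_DEVCTL_BDEVICE (0x80) | 2 << MUSB_DEVCTL_VBUS_SHIFT (0x10) |
 * MUSB_DEVCTL_SESSION (0x01) == 0x91.
 */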
static bool musb_state_needs_recheck(struct musb *musb, u8 devctl,
const char *desc)
{
if (musb->quirk_retries && !musb->flush_irq_work) {
trace_musb_state(musb, devctl, desc);
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
return true;
}
return false;
}
/*
* Check the musb devctl session bit to determine if we want to
* allow PM runtime for the device. In general, we want to keep things
* active when the session bit is set except after host disconnect.
*
* Only called from musb_irq_work. If this ever needs to get called
* elsewhere, proper locking must be implemented for musb->session.
*/
static void musb_pm_runtime_check_session(struct musb *musb)
{
u8 devctl, s;
int error;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
/* Handle session status quirks first */
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_DISCONNECT_99:
musb_state_needs_recheck(musb, devctl,
"Poll devctl in case of suspend after disconnect");
break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb_state_needs_recheck(musb, devctl,
"Poll devctl on invalid vbus, assume no session"))
return;
fallthrough;
case MUSB_QUIRK_A_DISCONNECT_19:
if (musb_state_needs_recheck(musb, devctl,
"Poll devctl on possible host mode disconnect"))
return;
if (!musb->session)
break;
trace_musb_state(musb, devctl, "Allow PM on possible host mode disconnect");
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
musb->session = false;
return;
default:
break;
}
/* No need to do anything if session has not changed */
s = devctl & MUSB_DEVCTL_SESSION;
if (s == musb->session)
return;
/* Block PM or allow PM? */
if (s) {
trace_musb_state(musb, devctl, "Block PM on active session");
error = pm_runtime_get_sync(musb->controller);
if (error < 0)
dev_err(musb->controller, "Could not enable: %i\n",
error);
musb->quirk_retries = 3;
/*
* We can get a spurious MUSB_INTR_SESSREQ interrupt on start-up
* in B-peripheral mode with nothing connected and the session
* bit clears silently. Check status again in 3 seconds.
*/
if (devctl & MUSB_DEVCTL_BDEVICE)
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(3000));
} else {
trace_musb_state(musb, devctl, "Allow PM with no session");
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
musb->session = s;
}
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work.work);
int error;
error = pm_runtime_resume_and_get(musb->controller);
if (error < 0) {
dev_err(musb->controller, "Could not enable: %i\n", error);
return;
}
musb_pm_runtime_check_session(musb);
if (musb_get_state(musb) != musb->xceiv_old_state) {
musb->xceiv_old_state = musb_get_state(musb);
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
static void musb_recover_from_babble(struct musb *musb)
{
int ret;
u8 devctl;
musb_disable_interrupts(musb);
/*
* wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
* it some slack and wait for 10us.
*/
udelay(10);
ret = musb_platform_recover(musb);
if (ret) {
musb_enable_interrupts(musb);
return;
}
/* drop session bit */
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
/* tell usbcore about it */
musb_root_disconnect(musb);
/*
* When a babble condition occurs, the musb controller
* removes the session bit and the endpoint config is lost.
*/
if (musb->dyn_fifo)
ret = ep_config_from_table(musb);
else
ret = ep_config_from_hw(musb);
/* restart session */
if (ret == 0)
musb_start(musb);
}
/* --------------------------------------------------------------------------
* Init support
*/
static struct musb *allocate_instance(struct device *dev,
const struct musb_hdrc_config *config, void __iomem *mbase)
{
struct musb *musb;
struct musb_hw_ep *ep;
int epnum;
int ret;
musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
if (!musb)
return NULL;
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
INIT_LIST_HEAD(&musb->pending_list);
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
musb->mregs = mbase;
musb->ctrl_base = mbase;
musb->nIrq = -ENODEV;
musb->config = config;
BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
for (epnum = 0, ep = musb->endpoints;
epnum < musb->config->num_eps;
epnum++, ep++) {
ep->musb = musb;
ep->epnum = epnum;
}
musb->controller = dev;
ret = musb_host_alloc(musb);
if (ret < 0)
goto err_free;
dev_set_drvdata(dev, musb);
return musb;
err_free:
return NULL;
}
static void musb_free(struct musb *musb)
{
/* this has multiple entry modes. it handles fault cleanup after
* probe(), where things may be partially set up, as well as rmmod
* cleanup after everything's been de-activated.
*/
if (musb->nIrq >= 0) {
if (musb->irq_wake)
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
musb_host_free(musb);
}
struct musb_pending_work {
int (*callback)(struct musb *musb, void *data);
void *data;
struct list_head node;
};
#ifdef CONFIG_PM
/*
 * Called from musb_runtime_resume() and musb_resume() to run any
 * callbacks queued by musb_queue_resume_work(). Callers must take
 * musb->lock.
 */
static int musb_run_resume_work(struct musb *musb)
{
struct musb_pending_work *w, *_w;
unsigned long flags;
int error = 0;
spin_lock_irqsave(&musb->list_lock, flags);
list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
if (w->callback) {
error = w->callback(musb, w->data);
if (error < 0) {
dev_err(musb->controller,
"resume callback %p failed: %i\n",
w->callback, error);
}
}
list_del(&w->node);
devm_kfree(musb->controller, w);
}
spin_unlock_irqrestore(&musb->list_lock, flags);
return error;
}
#endif
/*
 * Called to run work if device is active or else queue the work to happen
 * on resume. Caller must take musb->lock and must hold an RPM reference.
 *
 * Note that if the device is not runtime suspended, the callback runs
 * immediately in the caller's context instead of being queued.
 */
int musb_queue_resume_work(struct musb *musb,
int (*callback)(struct musb *musb, void *data),
void *data)
{
struct musb_pending_work *w;
unsigned long flags;
bool is_suspended;
int error;
if (WARN_ON(!callback))
return -EINVAL;
spin_lock_irqsave(&musb->list_lock, flags);
is_suspended = musb->is_runtime_suspended;
if (is_suspended) {
w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
if (!w) {
error = -ENOMEM;
goto out_unlock;
}
w->callback = callback;
w->data = data;
list_add_tail(&w->node, &musb->pending_list);
error = 0;
}
out_unlock:
spin_unlock_irqrestore(&musb->list_lock, flags);
if (!is_suspended)
error = callback(musb, data);
return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
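/*
 * Illustrative glue-layer usage (hypothetical names): per the rules
 * above, the caller holds musb->lock and an RPM reference, and the
 * callback runs either right away or on the next resume.
 */
#if 0
static int my_glue_vbus_cb(struct musb *musb, void *data)
{
/* the core is guaranteed to be resumed when this runs */
return 0;
}

static void my_glue_event(struct musb *musb)
{
int error = musb_queue_resume_work(musb, my_glue_vbus_cb, NULL);
if (error < 0)
dev_err(musb->controller, "queue_resume_work: %i\n", error);
}
#endif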
static void musb_deassert_reset(struct work_struct *work)
{
struct musb *musb;
unsigned long flags;
musb = container_of(work, struct musb, deassert_reset_work.work);
spin_lock_irqsave(&musb->lock, flags);
if (musb->port1_status & USB_PORT_STAT_RESET)
musb_port_reset(musb, false);
spin_unlock_irqrestore(&musb->lock, flags);
}
/*
* Perform generic per-controller initialization.
*
* @dev: the controller (already clocked, etc)
* @nIrq: IRQ number
* @ctrl: virtual address of controller registers,
* not yet corrected for platform-specific offsets
*/
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
int status;
struct musb *musb;
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
/* The driver might handle more features than the board; OK.
* Fail when the board needs a feature that's not enabled.
*/
if (!plat) {
dev_err(dev, "no platform_data?\n");
status = -ENODEV;
goto fail0;
}
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
if (!musb) {
status = -ENOMEM;
goto fail0;
}
spin_lock_init(&musb->lock);
spin_lock_init(&musb->list_lock);
musb->min_power = plat->min_power;
musb->ops = plat->platform_ops;
musb->port_mode = plat->mode;
/*
* Initialize the default IO functions. At least omap2430 needs
* these early. We initialize the platform specific IO functions
* later on.
*/
musb_readb = musb_default_readb;
musb_writeb = musb_default_writeb;
musb_readw = musb_default_readw;
musb_writew = musb_default_writew;
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
* - may initialize an integrated transceiver
* - initializes musb->xceiv, usually by otg_get_phy()
* - stops powering VBUS
*
* There are various transceiver configurations.
* DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
* external/discrete ones in various flavors (twl4030 family,
* isp1504, non-OTG, etc) mostly hooking up through ULPI.
*/
status = musb_platform_init(musb);
if (status < 0)
goto fail1;
if (!musb->isr) {
status = -ENODEV;
goto fail2;
}
/* Most devices use indexed offset or flat offset */
if (musb->ops->quirks & MUSB_INDEXED_EP) {
musb->io.ep_offset = musb_indexed_ep_offset;
musb->io.ep_select = musb_indexed_ep_select;
} else {
musb->io.ep_offset = musb_flat_ep_offset;
musb->io.ep_select = musb_flat_ep_select;
}
if (musb->ops->quirks & MUSB_G_NO_SKB_RESERVE)
musb->g.quirk_avoids_skb_reserve = 1;
/* At least tusb6010 has its own offsets */
if (musb->ops->ep_offset)
musb->io.ep_offset = musb->ops->ep_offset;
if (musb->ops->ep_select)
musb->io.ep_select = musb->ops->ep_select;
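/* note: a glue-provided fifo_mode, or the default of 4, overrides the module parameter */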
if (musb->ops->fifo_mode)
fifo_mode = musb->ops->fifo_mode;
else
fifo_mode = 4;
if (musb->ops->fifo_offset)
musb->io.fifo_offset = musb->ops->fifo_offset;
else
musb->io.fifo_offset = musb_default_fifo_offset;
if (musb->ops->busctl_offset)
musb->io.busctl_offset = musb->ops->busctl_offset;
else
musb->io.busctl_offset = musb_default_busctl_offset;
if (musb->ops->readb)
musb_readb = musb->ops->readb;
if (musb->ops->writeb)
musb_writeb = musb->ops->writeb;
if (musb->ops->clearb)
musb_clearb = musb->ops->clearb;
else
musb_clearb = musb_readb;
if (musb->ops->readw)
musb_readw = musb->ops->readw;
if (musb->ops->writew)
musb_writew = musb->ops->writew;
if (musb->ops->clearw)
musb_clearw = musb->ops->clearw;
else
musb_clearw = musb_readw;
#ifndef CONFIG_MUSB_PIO_ONLY
if (!musb->ops->dma_init || !musb->ops->dma_exit) {
dev_err(dev, "DMA controller not set\n");
status = -ENODEV;
goto fail2;
}
musb_dma_controller_create = musb->ops->dma_init;
musb_dma_controller_destroy = musb->ops->dma_exit;
#endif
if (musb->ops->read_fifo)
musb->io.read_fifo = musb->ops->read_fifo;
else
musb->io.read_fifo = musb_default_read_fifo;
if (musb->ops->write_fifo)
musb->io.write_fifo = musb->ops->write_fifo;
else
musb->io.write_fifo = musb_default_write_fifo;
if (musb->ops->get_toggle)
musb->io.get_toggle = musb->ops->get_toggle;
else
musb->io.get_toggle = musb_default_get_toggle;
if (musb->ops->set_toggle)
musb->io.set_toggle = musb->ops->set_toggle;
else
musb->io.set_toggle = musb_default_set_toggle;
if (IS_ENABLED(CONFIG_USB_PHY) && musb->xceiv && !musb->xceiv->io_ops) {
musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
musb->xceiv->io_ops = &musb_ulpi_access;
}
if (musb->ops->phy_callback)
musb_phy_callback = musb->ops->phy_callback;
/*
* We need musb_read/write functions initialized for PM.
* Note that at least 2430 glue needs autosuspend delay
* somewhere above 300 ms for the hardware to idle properly
* after disconnecting the cable in host mode. Let's use
* 500 ms for some margin.
*/
pm_runtime_use_autosuspend(musb->controller);
pm_runtime_set_autosuspend_delay(musb->controller, 500);
pm_runtime_enable(musb->controller);
pm_runtime_get_sync(musb->controller);
status = usb_phy_init(musb->xceiv);
if (status < 0)
goto err_usb_phy_init;
if (use_dma && dev->dma_mask) {
musb->dma_controller =
musb_dma_controller_create(musb, musb->mregs);
if (IS_ERR(musb->dma_controller)) {
status = PTR_ERR(musb->dma_controller);
goto fail2_5;
}
}
/* be sure interrupts are disabled before connecting ISR */
musb_platform_disable(musb);
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
/* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
musb_writeb(musb->mregs, MUSB_POWER, 0);
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
/* setup musb parts of the core (especially endpoints) */
status = musb_core_init(plat->config->multipoint
? MUSB_CONTROLLER_MHDRC
: MUSB_CONTROLLER_HDRC, musb);
if (status < 0)
goto fail3;
timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
/* attach to the IRQ */
if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
dev_err(dev, "request_irq %d failed!\n", nIrq);
status = -ENODEV;
goto fail3;
}
musb->nIrq = nIrq;
/* FIXME this handles wakeup irqs wrong */
if (enable_irq_wake(nIrq) == 0) {
musb->irq_wake = 1;
device_init_wakeup(dev, 1);
} else {
musb->irq_wake = 0;
}
/* program PHY to use external vBus if required */
if (plat->extvbus) {
u8 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
busctl |= MUSB_ULPI_USE_EXTVBUS;
musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
}
MUSB_DEV_MODE(musb);
musb_set_state(musb, OTG_STATE_B_IDLE);
switch (musb->port_mode) {
case MUSB_HOST:
status = musb_host_setup(musb, plat->power);
if (status < 0)
goto fail3;
status = musb_platform_set_mode(musb, MUSB_HOST);
break;
case MUSB_PERIPHERAL:
status = musb_gadget_setup(musb);
if (status < 0)
goto fail3;
status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
break;
case MUSB_OTG:
status = musb_host_setup(musb, plat->power);
if (status < 0)
goto fail3;
status = musb_gadget_setup(musb);
if (status) {
musb_host_cleanup(musb);
goto fail3;
}
status = musb_platform_set_mode(musb, MUSB_OTG);
break;
default:
dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
break;
}
if (status < 0)
goto fail3;
musb_init_debugfs(musb);
musb->is_initialized = 1;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
fail3:
cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
if (musb->dma_controller)
musb_dma_controller_destroy(musb->dma_controller);
fail2_5:
usb_phy_shutdown(musb->xceiv);
err_usb_phy_init:
pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
pm_runtime_disable(musb->controller);
fail2:
if (musb->irq_wake)
device_init_wakeup(dev, 0);
musb_platform_exit(musb);
fail1:
dev_err_probe(musb->controller, status, "%s failed\n", __func__);
musb_free(musb);
fail0:
return status;
}
/*-------------------------------------------------------------------------*/
/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
* bridge to a platform device; this driver then suffices.
*/
static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq_byname(pdev, "mc");
void __iomem *base;
if (irq < 0)
return irq;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
return musb_init_controller(dev, irq, base);
}
static void musb_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
/* this gets called on rmmod.
* - Host mode: host may still be active
* - Peripheral mode: peripheral is deactivated (or never-activated)
* - OTG mode: both roles are deactivated (or never-activated)
*/
musb_exit_debugfs(musb);
cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
pm_runtime_get_sync(musb->controller);
musb_host_cleanup(musb);
musb_gadget_cleanup(musb);
musb_platform_disable(musb);
spin_lock_irqsave(&musb->lock, flags);
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
spin_unlock_irqrestore(&musb->lock, flags);
musb_platform_exit(musb);
pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
pm_runtime_disable(musb->controller);
musb_phy_callback = NULL;
if (musb->dma_controller)
musb_dma_controller_destroy(musb->dma_controller);
usb_phy_shutdown(musb->xceiv);
musb_free(musb);
device_init_wakeup(dev, 0);
}
#ifdef CONFIG_PM
static void musb_save_context(struct musb *musb)
{
int i;
void __iomem *musb_base = musb->mregs;
void __iomem *epio;
musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
musb->context.busctl = musb_readb(musb_base, MUSB_ULPI_BUSCONTROL);
musb->context.power = musb_readb(musb_base, MUSB_POWER);
musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
musb->context.index = musb_readb(musb_base, MUSB_INDEX);
musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
for (i = 0; i < musb->config->num_eps; ++i) {
epio = musb->endpoints[i].regs;
if (!epio)
continue;
musb_writeb(musb_base, MUSB_INDEX, i);
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
musb->context.index_regs[i].txcsr =
musb_readw(epio, MUSB_TXCSR);
musb->context.index_regs[i].rxmaxp =
musb_readw(epio, MUSB_RXMAXP);
musb->context.index_regs[i].rxcsr =
musb_readw(epio, MUSB_RXCSR);
if (musb->dyn_fifo) {
musb->context.index_regs[i].txfifoadd =
musb_readw(musb_base, MUSB_TXFIFOADD);
musb->context.index_regs[i].rxfifoadd =
musb_readw(musb_base, MUSB_RXFIFOADD);
musb->context.index_regs[i].txfifosz =
musb_readb(musb_base, MUSB_TXFIFOSZ);
musb->context.index_regs[i].rxfifosz =
musb_readb(musb_base, MUSB_RXFIFOSZ);
}
musb->context.index_regs[i].txtype =
musb_readb(epio, MUSB_TXTYPE);
musb->context.index_regs[i].txinterval =
musb_readb(epio, MUSB_TXINTERVAL);
musb->context.index_regs[i].rxtype =
musb_readb(epio, MUSB_RXTYPE);
musb->context.index_regs[i].rxinterval =
musb_readb(epio, MUSB_RXINTERVAL);
musb->context.index_regs[i].txfunaddr =
musb_read_txfunaddr(musb, i);
musb->context.index_regs[i].txhubaddr =
musb_read_txhubaddr(musb, i);
musb->context.index_regs[i].txhubport =
musb_read_txhubport(musb, i);
musb->context.index_regs[i].rxfunaddr =
musb_read_rxfunaddr(musb, i);
musb->context.index_regs[i].rxhubaddr =
musb_read_rxhubaddr(musb, i);
musb->context.index_regs[i].rxhubport =
musb_read_rxhubport(musb, i);
}
}
static void musb_restore_context(struct musb *musb)
{
int i;
void __iomem *musb_base = musb->mregs;
void __iomem *epio;
u8 power;
musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
musb_writeb(musb_base, MUSB_ULPI_BUSCONTROL, musb->context.busctl);
/* Don't affect SUSPENDM/RESUME bits in POWER reg */
power = musb_readb(musb_base, MUSB_POWER);
power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
power |= musb->context.power;
musb_writeb(musb_base, MUSB_POWER, power);
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
if (musb->context.devctl & MUSB_DEVCTL_SESSION)
musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
epio = musb->endpoints[i].regs;
if (!epio)
continue;
musb_writeb(musb_base, MUSB_INDEX, i);
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
musb_writew(epio, MUSB_TXCSR,
musb->context.index_regs[i].txcsr);
musb_writew(epio, MUSB_RXMAXP,
musb->context.index_regs[i].rxmaxp);
musb_writew(epio, MUSB_RXCSR,
musb->context.index_regs[i].rxcsr);
if (musb->dyn_fifo) {
musb_writeb(musb_base, MUSB_TXFIFOSZ,
musb->context.index_regs[i].txfifosz);
musb_writeb(musb_base, MUSB_RXFIFOSZ,
musb->context.index_regs[i].rxfifosz);
musb_writew(musb_base, MUSB_TXFIFOADD,
musb->context.index_regs[i].txfifoadd);
musb_writew(musb_base, MUSB_RXFIFOADD,
musb->context.index_regs[i].rxfifoadd);
}
musb_writeb(epio, MUSB_TXTYPE,
musb->context.index_regs[i].txtype);
musb_writeb(epio, MUSB_TXINTERVAL,
musb->context.index_regs[i].txinterval);
musb_writeb(epio, MUSB_RXTYPE,
musb->context.index_regs[i].rxtype);
musb_writeb(epio, MUSB_RXINTERVAL,
musb->context.index_regs[i].rxinterval);
musb_write_txfunaddr(musb, i,
musb->context.index_regs[i].txfunaddr);
musb_write_txhubaddr(musb, i,
musb->context.index_regs[i].txhubaddr);
musb_write_txhubport(musb, i,
musb->context.index_regs[i].txhubport);
musb_write_rxfunaddr(musb, i,
musb->context.index_regs[i].rxfunaddr);
musb_write_rxhubaddr(musb, i,
musb->context.index_regs[i].rxhubaddr);
musb_write_rxhubport(musb, i,
musb->context.index_regs[i].rxhubport);
}
musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
static int musb_suspend(struct device *dev)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
musb_platform_disable(musb);
musb_disable_interrupts(musb);
musb->flush_irq_work = true;
while (flush_delayed_work(&musb->irq_work))
;
musb->flush_irq_work = false;
if (!(musb->ops->quirks & MUSB_PRESERVE_SESSION))
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
if (is_peripheral_active(musb)) {
/* FIXME force disconnect unless we know USB will wake
* the system up quickly enough to respond ...
*/
} else if (is_host_active(musb)) {
/* we know all the children are suspended; sometimes
* they will even be wakeup-enabled.
*/
}
musb_save_context(musb);
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume(struct device *dev)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
int error;
u8 devctl;
u8 mask;
/*
* For static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
*
* For the DSPS glue layer though, a full register restore has to
* be done. As it shouldn't harm other platforms, we do it
* unconditionally.
*/
musb_restore_context(musb);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
musb_enable_interrupts(musb);
musb_platform_enable(musb);
/* session might be disabled in suspend */
if (musb->port_mode == MUSB_HOST &&
!(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
}
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
dev_err(musb->controller, "resume work failed with %i\n",
error);
spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static int musb_runtime_suspend(struct device *dev)
{
struct musb *musb = dev_to_musb(dev);
musb_save_context(musb);
musb->is_runtime_suspended = 1;
return 0;
}
static int musb_runtime_resume(struct device *dev)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
int error;
/*
 * When pm_runtime_get_sync() is called for the first time during
 * driver init, some of the structures used by the restore function
 * are not yet initialized. The clock still needs to be enabled
 * before any register access, so pm_runtime_get_sync() has to be
 * called anyway. Also, restoring a context that was never saved
 * makes no sense.
 */
if (!musb->is_initialized)
return 0;
musb_restore_context(musb);
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
dev_err(musb->controller, "resume work failed with %i\n",
error);
musb->is_runtime_suspended = 0;
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
.resume = musb_resume,
.runtime_suspend = musb_runtime_suspend,
.runtime_resume = musb_runtime_resume,
};
#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
#define MUSB_DEV_PM_OPS NULL
#endif
static struct platform_driver musb_driver = {
.driver = {
.name = musb_driver_name,
.bus = &platform_bus_type,
.pm = MUSB_DEV_PM_OPS,
.dev_groups = musb_groups,
},
.probe = musb_probe,
.remove_new = musb_remove,
};
module_platform_driver(musb_driver);
| linux-master | drivers/usb/musb/musb_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2007 by Texas Instruments
* Some code has been taken from tusb6010.c
* Copyrights for that are attributable to:
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <[email protected]>
*
* This file is part of the Inventra Controller Driver for Linux.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/usb/musb.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
#include "musb_core.h"
#include "omap2430.h"
struct omap2430_glue {
struct device *dev;
struct platform_device *musb;
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
unsigned int is_runtime_suspended:1;
unsigned int needs_resume:1;
unsigned int phy_suspended:1;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
static struct omap2430_glue *_glue;
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
/* in any role */
l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l |= ENABLEFORCE; /* enable MSTANDBY */
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
static inline void omap2430_low_level_init(struct musb *musb)
{
u32 l;
l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l &= ~ENABLEFORCE; /* disable MSTANDBY */
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
if (!glue) {
pr_err("%s: musb core is not yet initialized\n", __func__);
return -EPROBE_DEFER;
}
glue->status = status;
if (!glue_to_musb(glue)) {
pr_err("%s: musb core is not yet ready\n", __func__);
return -EPROBE_DEFER;
}
schedule_work(&glue->omap_musb_mailbox_work);
return 0;
}
/*
* HDRC controls CPEN, but beware current surges during device connect.
* They can trigger transient overcurrent conditions that must be ignored.
*
* Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
* as set by musb_set_peripheral().
*/
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
int error;
pm_runtime_get_sync(musb->controller);
dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
switch (glue->status) {
case MUSB_ID_GROUND:
dev_dbg(musb->controller, "ID GND\n");
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_IDLE:
error = musb_set_host(musb);
if (error)
break;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
fallthrough;
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_HOST:
/*
* On multiple ID ground interrupts just keep enabling
* VBUS. At least cpcap VBUS shuts down otherwise.
*/
otg_set_vbus(musb->xceiv->otg, 1);
break;
default:
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_HOST);
otg_set_vbus(musb->xceiv->otg, 1);
}
break;
}
break;
case MUSB_VBUS_VALID:
dev_dbg(musb->controller, "VBUS Connect\n");
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->xceiv->last_event = USB_EVENT_VBUS;
omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
case MUSB_ID_FLOAT:
case MUSB_VBUS_OFF:
dev_dbg(musb->controller, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
musb_set_peripheral(musb);
otg_set_vbus(musb->xceiv->otg, 0);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
break;
default:
dev_dbg(musb->controller, "ID float\n");
}
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
}
static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
omap_musb_set_mailbox(glue);
}
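/*
 * The MUSB interrupt status registers are clear-on-read: reading them
 * below both latches and acks the pending events before they are
 * dispatched to musb_interrupt(), as musb_core expects.
 */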
static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
{
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
static int omap2430_musb_init(struct musb *musb)
{
u32 l;
int status = 0;
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
/* We require some kind of external transceiver, hooked
 * up through ULPI. TWL4030-family PMICs include one, which
 * needs its own driver; other transceivers may not need a
 * driver at all.
 */
musb->phy = devm_phy_get(dev->parent, "usb2-phy");
/* We can't totally remove musb->xceiv as of now because
* musb core uses xceiv.state and xceiv.otg. Once we have
* a separate state machine to handle otg, these can be moved
* out of xceiv and then we can start using the generic PHY
* framework
*/
musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "usb-phy", 0);
if (IS_ERR(musb->xceiv)) {
status = PTR_ERR(musb->xceiv);
if (status == -ENXIO)
return status;
dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
return -EPROBE_DEFER;
}
if (IS_ERR(musb->phy)) {
dev_err(dev, "HS USB OTG: no PHY configured\n");
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
if (data->interface_type == MUSB_INTERFACE_UTMI) {
/* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
l &= ~ULPI_12PIN; /* Disable ULPI */
l |= UTMI_8BIT; /* Enable UTMI */
} else {
l |= ULPI_12PIN;
}
musb_writel(musb->mregs, OTG_INTERFSEL, l);
dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
musb_readl(musb->mregs, OTG_REVISION),
musb_readl(musb->mregs, OTG_SYSCONFIG),
musb_readl(musb->mregs, OTG_SYSSTATUS),
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
return 0;
}
static void omap2430_musb_enable(struct musb *musb)
{
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
if (glue->status == MUSB_UNKNOWN)
glue->status = MUSB_VBUS_OFF;
omap_musb_set_mailbox(glue);
}
static void omap2430_musb_disable(struct musb *musb)
{
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
}
static int omap2430_musb_exit(struct musb *musb)
{
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
return 0;
}
static const struct musb_platform_ops omap2430_ops = {
.quirks = MUSB_DMA_INVENTRA,
#ifdef CONFIG_USB_INVENTRA_DMA
.dma_init = musbhs_dma_controller_create,
.dma_exit = musbhs_dma_controller_destroy,
#endif
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
.enable = omap2430_musb_enable,
.disable = omap2430_musb_disable,
.phy_callback = omap2430_musb_mailbox,
};
static u64 omap2430_dmamask = DMA_BIT_MASK(32);
static int omap2430_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_musb_board_data *data;
struct platform_device *musb;
struct omap2430_glue *glue;
struct device_node *np = pdev->dev.of_node;
struct musb_hdrc_config *config;
struct device_node *control_node;
struct platform_device *control_pdev;
int ret = -ENOMEM, val;
bool populate_irqs = false;
if (!np)
return -ENODEV;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
goto err0;
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
dev_err(&pdev->dev, "failed to allocate musb device\n");
goto err0;
}
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
/*
* Legacy SoCs using omap_device get confused if node is moved
* because of interconnect properties mixed into the node.
*/
if (of_property_present(np, "ti,hwmods")) {
dev_warn(&pdev->dev, "please update to probe with ti-sysc\n");
populate_irqs = true;
} else {
device_set_of_node_from_dev(&musb->dev, &pdev->dev);
}
of_node_put(np);
glue->dev = &pdev->dev;
glue->musb = musb;
glue->status = MUSB_UNKNOWN;
glue->control_otghs = ERR_PTR(-ENODEV);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto err2;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
goto err2;
config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
if (!config)
goto err2;
of_property_read_u32(np, "mode", (u32 *)&pdata->mode);
of_property_read_u32(np, "interface-type",
(u32 *)&data->interface_type);
of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
of_property_read_u32(np, "power", (u32 *)&pdata->power);
ret = of_property_read_u32(np, "multipoint", &val);
if (!ret && val)
config->multipoint = true;
pdata->board_data = data;
pdata->config = config;
control_node = of_parse_phandle(np, "ctrl-module", 0);
if (control_node) {
control_pdev = of_find_device_by_node(control_node);
of_node_put(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
ret = -EINVAL;
goto err2;
}
glue->control_otghs = &control_pdev->dev;
}
pdata->platform_ops = &omap2430_ops;
platform_set_drvdata(pdev, glue);
/*
* REVISIT if we ever have two instances of the wrapper, we will be
* in big trouble
*/
_glue = glue;
INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
goto err2;
}
if (populate_irqs) {
struct resource musb_res[3];
struct resource *res;
int i = 0;
memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -EINVAL;
goto err2;
}
musb_res[i].start = res->start;
musb_res[i].end = res->end;
musb_res[i].flags = res->flags;
musb_res[i].name = res->name;
i++;
ret = of_irq_get_byname(np, "mc");
if (ret > 0) {
musb_res[i].start = ret;
musb_res[i].flags = IORESOURCE_IRQ;
musb_res[i].name = "mc";
i++;
}
ret = of_irq_get_byname(np, "dma");
if (ret > 0) {
musb_res[i].start = ret;
musb_res[i].flags = IORESOURCE_IRQ;
musb_res[i].name = "dma";
i++;
}
ret = platform_device_add_resources(musb, musb_res, i);
if (ret) {
dev_err(&pdev->dev, "failed to add IRQ resources\n");
goto err2;
}
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
dev_err(&pdev->dev, "failed to add platform_data\n");
goto err2;
}
pm_runtime_enable(glue->dev);
ret = platform_device_add(musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device\n");
goto err3;
}
return 0;
err3:
pm_runtime_disable(glue->dev);
err2:
platform_device_put(musb);
err0:
return ret;
}
static void omap2430_remove(struct platform_device *pdev)
{
struct omap2430_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
pm_runtime_disable(glue->dev);
}
#ifdef CONFIG_PM
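/*
 * Runtime PM: save OTG_INTERFSEL across the low-level exit and power
 * the PHY down, unless the system-suspend path has already done so
 * (tracked via glue->phy_suspended). Resume reverses the sequence.
 */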
static int omap2430_runtime_suspend(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
if (!musb)
return 0;
musb->context.otg_interfsel = musb_readl(musb->mregs,
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
if (!glue->phy_suspended) {
phy_power_off(musb->phy);
phy_exit(musb->phy);
}
glue->is_runtime_suspended = 1;
return 0;
}
static int omap2430_runtime_resume(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
if (!musb)
return 0;
if (!glue->phy_suspended) {
phy_init(musb->phy);
phy_power_on(musb->phy);
}
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
/* Wait for musb to get oriented. Otherwise we can get babble */
usleep_range(200000, 250000);
glue->is_runtime_suspended = 0;
return 0;
}
/* I2C and SPI PHYs need to be suspended before the glue layer */
static int omap2430_suspend(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
phy_power_off(musb->phy);
phy_exit(musb->phy);
glue->phy_suspended = 1;
return 0;
}
/* Glue layer needs to be suspended after musb_suspend() */
static int omap2430_suspend_late(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
if (glue->is_runtime_suspended)
return 0;
glue->needs_resume = 1;
return omap2430_runtime_suspend(dev);
}
static int omap2430_resume_early(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
if (!glue->needs_resume)
return 0;
glue->needs_resume = 0;
return omap2430_runtime_resume(dev);
}
static int omap2430_resume(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
phy_init(musb->phy);
phy_power_on(musb->phy);
glue->phy_suspended = 0;
return 0;
}
static const struct dev_pm_ops omap2430_pm_ops = {
.runtime_suspend = omap2430_runtime_suspend,
.runtime_resume = omap2430_runtime_resume,
.suspend = omap2430_suspend,
.suspend_late = omap2430_suspend_late,
.resume_early = omap2430_resume_early,
.resume = omap2430_resume,
};
#define DEV_PM_OPS (&omap2430_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id omap2430_id_table[] = {
{
.compatible = "ti,omap4-musb"
},
{
.compatible = "ti,omap3-musb"
},
{},
};
MODULE_DEVICE_TABLE(of, omap2430_id_table);
#endif
static struct platform_driver omap2430_driver = {
.probe = omap2430_probe,
.remove_new = omap2430_remove,
.driver = {
.name = "musb-omap2430",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(omap2430_id_table),
},
};
module_platform_driver(omap2430_driver);
MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/musb/omap2430.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver debugfs support
*
* Copyright 2010 Nokia Corporation
* Contact: Felipe Balbi <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "musb_core.h"
#include "musb_debug.h"
struct musb_register_map {
char *name;
unsigned offset;
unsigned size;
};
static const struct musb_register_map musb_regmap[] = {
{ "FAddr", MUSB_FADDR, 8 },
{ "Power", MUSB_POWER, 8 },
{ "Frame", MUSB_FRAME, 16 },
{ "Index", MUSB_INDEX, 8 },
{ "Testmode", MUSB_TESTMODE, 8 },
{ "TxMaxPp", MUSB_TXMAXP, 16 },
{ "TxCSRp", MUSB_TXCSR, 16 },
{ "RxMaxPp", MUSB_RXMAXP, 16 },
{ "RxCSR", MUSB_RXCSR, 16 },
{ "RxCount", MUSB_RXCOUNT, 16 },
{ "IntrRxE", MUSB_INTRRXE, 16 },
{ "IntrTxE", MUSB_INTRTXE, 16 },
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x69, 16 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
{ "FS_EOF1", MUSB_FS_EOF1, 8 },
{ "LS_EOF1", MUSB_LS_EOF1, 8 },
{ "SOFT_RST", 0x7F, 8 },
{ "DMA_CNTLch0", 0x204, 16 },
{ "DMA_ADDRch0", 0x208, 32 },
{ "DMA_COUNTch0", 0x20C, 32 },
{ "DMA_CNTLch1", 0x214, 16 },
{ "DMA_ADDRch1", 0x218, 32 },
{ "DMA_COUNTch1", 0x21C, 32 },
{ "DMA_CNTLch2", 0x224, 16 },
{ "DMA_ADDRch2", 0x228, 32 },
{ "DMA_COUNTch2", 0x22C, 32 },
{ "DMA_CNTLch3", 0x234, 16 },
{ "DMA_ADDRch3", 0x238, 32 },
{ "DMA_COUNTch3", 0x23C, 32 },
{ "DMA_CNTLch4", 0x244, 16 },
{ "DMA_ADDRch4", 0x248, 32 },
{ "DMA_COUNTch4", 0x24C, 32 },
{ "DMA_CNTLch5", 0x254, 16 },
{ "DMA_ADDRch5", 0x258, 32 },
{ "DMA_COUNTch5", 0x25C, 32 },
{ "DMA_CNTLch6", 0x264, 16 },
{ "DMA_ADDRch6", 0x268, 32 },
{ "DMA_COUNTch6", 0x26C, 32 },
{ "DMA_CNTLch7", 0x274, 16 },
{ "DMA_ADDRch7", 0x278, 32 },
{ "DMA_COUNTch7", 0x27C, 32 },
{ "ConfigData", MUSB_CONFIGDATA,8 },
{ "BabbleCtl", MUSB_BABBLE_CTL,8 },
{ "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
{ "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
{ "TxFIFOadd", MUSB_TXFIFOADD, 16 },
{ "RxFIFOadd", MUSB_RXFIFOADD, 16 },
{ "EPInfo", MUSB_EPINFO, 8 },
{ "RAMInfo", MUSB_RAMINFO, 8 },
{ } /* Terminating Entry */
};
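/*
 * Dump every register in musb_regmap at its declared width, with the
 * controller held active via runtime PM. Readable via debugfs, e.g.
 * (the exact directory name depends on dev_name() of the controller):
 *
 *   cat /sys/kernel/debug/usb/<dev_name>/regdump
 */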
static int musb_regdump_show(struct seq_file *s, void *unused)
{
struct musb *musb = s->private;
unsigned i;
seq_printf(s, "MUSB (M)HDRC Register Dump\n");
pm_runtime_get_sync(musb->controller);
for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
switch (musb_regmap[i].size) {
case 8:
seq_printf(s, "%-12s: %02x\n", musb_regmap[i].name,
musb_readb(musb->mregs, musb_regmap[i].offset));
break;
case 16:
seq_printf(s, "%-12s: %04x\n", musb_regmap[i].name,
musb_readw(musb->mregs, musb_regmap[i].offset));
break;
case 32:
seq_printf(s, "%-12s: %08x\n", musb_regmap[i].name,
musb_readl(musb->mregs, musb_regmap[i].offset));
break;
}
}
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(musb_regdump);
static int musb_test_mode_show(struct seq_file *s, void *unused)
{
struct musb *musb = s->private;
unsigned test;
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
if (test == (MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS))
seq_printf(s, "force host full-speed\n");
else if (test == (MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS))
seq_printf(s, "force host high-speed\n");
else if (test == MUSB_TEST_FORCE_HOST)
seq_printf(s, "force host\n");
else if (test == MUSB_TEST_FIFO_ACCESS)
seq_printf(s, "fifo access\n");
else if (test == MUSB_TEST_FORCE_FS)
seq_printf(s, "force full-speed\n");
else if (test == MUSB_TEST_FORCE_HS)
seq_printf(s, "force high-speed\n");
else if (test == MUSB_TEST_PACKET)
seq_printf(s, "test packet\n");
else if (test == MUSB_TEST_K)
seq_printf(s, "test K\n");
else if (test == MUSB_TEST_J)
seq_printf(s, "test J\n");
else if (test == MUSB_TEST_SE0_NAK)
seq_printf(s, "test SE0 NAK\n");
return 0;
}
static int musb_test_mode_open(struct inode *inode, struct file *file)
{
return single_open(file, musb_test_mode_show, inode->i_private);
}
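/*
 * Arm a test mode by writing one of the strings recognized below, for
 * example (the directory name depends on dev_name() of the controller):
 *
 *   echo "test packet" > /sys/kernel/debug/usb/<dev_name>/testmode
 *
 * A test mode that is already set can only be cleared by a USB bus
 * reset, so a new test cannot be started until then.
 */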
static ssize_t musb_test_mode_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct musb *musb = s->private;
u8 test;
char buf[24];
memset(buf, 0x00, sizeof(buf));
if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
dev_err(musb->controller, "Error: test mode is already set. "
"Please do USB Bus Reset to start a new test.\n");
goto ret;
}
if (strstarts(buf, "force host full-speed"))
test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS;
else if (strstarts(buf, "force host high-speed"))
test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS;
else if (strstarts(buf, "force host"))
test = MUSB_TEST_FORCE_HOST;
else if (strstarts(buf, "fifo access"))
test = MUSB_TEST_FIFO_ACCESS;
else if (strstarts(buf, "force full-speed"))
test = MUSB_TEST_FORCE_FS;
else if (strstarts(buf, "force high-speed"))
test = MUSB_TEST_FORCE_HS;
else if (strstarts(buf, "test packet")) {
test = MUSB_TEST_PACKET;
musb_load_testpacket(musb);
}
else if (strstarts(buf, "test K"))
test = MUSB_TEST_K;
else if (strstarts(buf, "test J"))
test = MUSB_TEST_J;
else if (strstarts(buf, "test SE0 NAK"))
test = MUSB_TEST_SE0_NAK;
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
ret:
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return count;
}
static const struct file_operations musb_test_mode_fops = {
.open = musb_test_mode_open,
.write = musb_test_mode_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int musb_softconnect_show(struct seq_file *s, void *unused)
{
struct musb *musb = s->private;
u8 reg;
int connect;
switch (musb_get_state(musb)) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_WAIT_BCON:
pm_runtime_get_sync(musb->controller);
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
break;
default:
connect = -1;
}
seq_printf(s, "%d\n", connect);
return 0;
}
static int musb_softconnect_open(struct inode *inode, struct file *file)
{
return single_open(file, musb_softconnect_show, inode->i_private);
}
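/*
 * Writing "0" drops the session (soft disconnect) when operating as
 * A_HOST; writing "1" re-arms the SESSION bit from A_WAIT_BCON, e.g.:
 *
 *   echo 0 > /sys/kernel/debug/usb/<dev_name>/softconnect
 */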
static ssize_t musb_softconnect_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct musb *musb = s->private;
char buf[2];
u8 reg;
memset(buf, 0x00, sizeof(buf));
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
pm_runtime_get_sync(musb->controller);
if (!strncmp(buf, "0", 1)) {
switch (musb_get_state(musb)) {
case OTG_STATE_A_HOST:
musb_root_disconnect(musb);
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
reg &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, reg);
break;
default:
break;
}
} else if (!strncmp(buf, "1", 1)) {
switch (musb_get_state(musb)) {
case OTG_STATE_A_WAIT_BCON:
/*
* musb_save_context() called in musb_runtime_suspend()
* might cache devctl with SESSION bit cleared during
* soft-disconnect, so specifically set SESSION bit
* here to preserve it for musb_runtime_resume().
*/
musb->context.devctl |= MUSB_DEVCTL_SESSION;
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
reg |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, reg);
break;
default:
break;
}
}
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return count;
}
/*
 * In host mode, connect/disconnect the bus without physically
 * removing the devices.
 */
static const struct file_operations musb_softconnect_fops = {
.open = musb_softconnect_open,
.write = musb_softconnect_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void musb_init_debugfs(struct musb *musb)
{
struct dentry *root;
root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
musb->debugfs_root = root;
debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, musb,
&musb_test_mode_fops);
debugfs_create_file("softconnect", S_IRUGO | S_IWUSR, root, musb,
&musb_softconnect_fops);
}
void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
{
debugfs_remove_recursive(musb->debugfs_root);
}
| linux-master | drivers/usb/musb/musb_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver host support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
* they aren't handled as consistently as they need to be.
*
* - PIO mostly behaved when last tested.
* + including ep0, with all usbtest cases 9, 10
* + usbtest 14 (ep0out) doesn't seem to run at all
* + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
* configurations, but otherwise double buffering passes basic tests.
* + for 2.6.N, for N > ~10, needs API changes for hcd framework.
*
* - DMA (CPPI) ... partially behaves, not currently recommended
* + about 1/15 the speed of typical EHCI implementations (PCI)
* + RX, all too often reqpkt seems to misbehave after tx
* + TX, no known issues (other than evident silicon issue)
*
* - DMA (Mentor/OMAP) ...has at least toggle update problems
*
* - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
* starvation ... nothing yet for TX, interrupt, or bulk.
*
* - Not tested with HNP, but some SRP paths seem to behave.
*
* NOTE 24-August-2006:
*
* - Bulk traffic finally uses both sides of hardware ep1, freeing up an
* extra endpoint for periodic use enabling hub + keybd + mouse. That
* mostly works, except that with "usbnet" it's easy to trigger cases
* with "ping" where RX loses. (a) ping to davinci, even "ping -f",
* fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
* although ARP RX wins. (That test was done with a full speed link.)
*/
/*
* NOTE on endpoint usage:
*
* CONTROL transfers all go through ep0. BULK ones go through dedicated IN
* and OUT endpoints ... hardware is dedicated for those "async" queue(s).
* (Yes, bulk _could_ use more of the endpoints than that, and would even
* benefit from it.)
*
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
* So far that scheduling is both dumb and optimistic: the endpoint will be
* "claimed" until its software queue is no longer refilled. No multiplexing
* of transfers between endpoints, or anything clever.
*/
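/* The hcd_priv area holds a pointer back to the owning struct musb */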
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
return *(struct musb **) hcd->hcd_priv;
}
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
/*
* Clear TX fifo. Needed to avoid BABBLE errors.
*/
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
u16 csr;
int retries = 1000;
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
/*
 * FIXME: sometimes the tx fifo flush fails; it has been
 * observed during device disconnect on AM335x.
 *
 * To reproduce the issue, ensure tx urb(s) are queued when
 * unplugging a usb device that is connected to the AM335x
 * usb host port.
 *
 * Using a usb-ethernet device and running iperf (client on
 * AM335x) has a very high chance of triggering it.
 *
 * Better to turn on musb_dbg() in musb_cleanup_urb() with
 * CPPI enabled to see the issue when aborting the tx channel.
 */
if (dev_WARN_ONCE(musb->controller, retries-- < 1,
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
mdelay(1);
}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
void __iomem *epio = ep->regs;
u16 csr;
int retries = 5;
/* scrub any data left in the fifo */
do {
csr = musb_readw(epio, MUSB_TXCSR);
if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
break;
musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
csr = musb_readw(epio, MUSB_TXCSR);
udelay(10);
} while (--retries);
WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr);
/* and reset for the next transfer */
musb_writew(epio, MUSB_TXCSR, 0);
}
/*
* Start transmit. Caller is responsible for locking shared resources.
* musb must be locked.
*/
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
if (ep->epnum) {
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
} else {
txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
musb_writew(ep->regs, MUSB_CSR0, txcsr);
}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
if (is_cppi_enabled(ep->musb))
txcsr |= MUSB_TXCSR_DMAMODE;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
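/*
 * Bind a qh to one side of a hardware endpoint; shared-FIFO endpoints
 * use the same qh for both directions.
 */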
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
if (is_in != 0 || ep->is_shared_fifo)
ep->in_qh = qh;
if (is_in == 0 || ep->is_shared_fifo)
ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
return is_in ? ep->in_qh : ep->out_qh;
}
/*
* Start the URB at the front of an endpoint's queue
* end must be claimed from the caller.
*
* Context: controller locked, irqs blocked
*/
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
u32 len;
void __iomem *mbase = musb->mregs;
struct urb *urb = next_urb(qh);
void *buf = urb->transfer_buffer;
u32 offset = 0;
struct musb_hw_ep *hw_ep = qh->hw_ep;
int epnum = hw_ep->epnum;
/* initialize software qh state */
qh->offset = 0;
qh->segsize = 0;
/* gather right source of data */
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
/* control transfers always start with SETUP */
is_in = 0;
musb->ep0_stage = MUSB_EP0_START;
buf = urb->setup_packet;
len = 8;
break;
case USB_ENDPOINT_XFER_ISOC:
qh->iso_idx = 0;
qh->frame = 0;
offset = urb->iso_frame_desc[0].offset;
len = urb->iso_frame_desc[0].length;
break;
default: /* bulk, interrupt */
/* actual_length may be nonzero on retry paths */
buf = urb->transfer_buffer + urb->actual_length;
len = urb->transfer_buffer_length - urb->actual_length;
}
trace_musb_urb_start(musb, urb);
/* Configure endpoint */
musb_ep_set_qh(hw_ep, is_in, qh);
musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
/* transmit may have more work: start it when it is time */
if (is_in)
return;
/* determine if the time is right for a periodic transfer */
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
musb_dbg(musb, "check whether there's still time for periodic Tx");
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
*/
if (1) { /* Always assume URB_ISO_ASAP */
/* REVISIT the SOF irq handler shouldn't duplicate
* this code; and we don't init urb->start_frame...
*/
qh->frame = 0;
goto start;
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
}
break;
default:
start:
musb_dbg(musb, "Start TX%d %s", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
musb_h_tx_dma_start(hw_ep);
}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
trace_musb_urb_gb(musb, urb);
usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
spin_unlock(&musb->lock);
usb_hcd_giveback_urb(musb->hcd, urb, status);
spin_lock(&musb->lock);
}
/*
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
* that qh and advancing to the next qh scheduled after the current one.
*
* Context: caller owns controller lock, IRQs are blocked
*/
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *hw_ep, int is_in)
{
struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
int status;
u16 toggle;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
/* save toggle eagerly, for paranoia */
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
toggle = musb->io.get_toggle(qh, !is_in);
usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
status = -EXDEV;
break;
}
qh->is_ready = 0;
musb_giveback(musb, urb, status);
qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
struct dma_controller *dma = musb->dma_controller;
if (is_in) {
ep->rx_reinit = 1;
if (ep->rx_channel) {
dma->channel_release(ep->rx_channel);
ep->rx_channel = NULL;
}
} else {
ep->tx_reinit = 1;
if (ep->tx_channel) {
dma->channel_release(ep->tx_channel);
ep->tx_channel = NULL;
}
}
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
qh->hep->hcpriv = NULL;
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
/* fifo policy for these lists, except that NAKing
* should rotate a qh to the end (for fairness).
*/
if (qh->mux == 1) {
head = qh->ring.prev;
list_del(&qh->ring);
kfree(qh);
qh = first_qh(head);
break;
}
fallthrough;
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* this is where periodic bandwidth should be
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
*/
kfree(qh);
qh = NULL;
break;
}
}
if (qh != NULL && qh->is_ready) {
musb_dbg(musb, "... next ep%d %cX urb %p",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
/* we don't want fifo to fill itself again;
* ignore dma (various models),
* leave toggle alone (may not have been saved yet)
*/
csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
csr &= ~(MUSB_RXCSR_H_REQPKT
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
/* write 2x to allow double buffering */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
/* flush writebuffer */
return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
* PIO RX for a packet (or part of it).
*/
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
u16 rx_count;
u8 *buf;
u16 csr;
bool done = false;
u32 length;
int do_flush = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
int pipe = urb->pipe;
void *buffer = urb->transfer_buffer;
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
/* unload FIFO */
if (usb_pipeisoc(pipe)) {
int status = 0;
struct usb_iso_packet_descriptor *d;
if (iso_err) {
status = -EILSEQ;
urb->error_count++;
}
d = urb->iso_frame_desc + qh->iso_idx;
buf = buffer + d->offset;
length = d->length;
if (rx_count > length) {
if (status == 0) {
status = -EOVERFLOW;
urb->error_count++;
}
musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
urb->actual_length += length;
d->actual_length = length;
d->status = status;
/* see if we are done */
done = (++qh->iso_idx >= urb->number_of_packets);
} else {
/* non-isoch */
buf = buffer + qh->offset;
length = urb->transfer_buffer_length - qh->offset;
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
urb->actual_length += length;
qh->offset += length;
/* see if we are done */
done = (urb->actual_length == urb->transfer_buffer_length)
|| (rx_count < qh->maxpacket)
|| (urb->status != -EINPROGRESS);
if (done
&& (urb->status == -EINPROGRESS)
&& (urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->actual_length
< urb->transfer_buffer_length))
urb->status = -EREMOTEIO;
}
musb_read_fifo(hw_ep, length, buf);
csr = musb_readw(epio, MUSB_RXCSR);
csr |= MUSB_RXCSR_H_WZC_BITS;
if (unlikely(do_flush))
musb_h_flush_rxfifo(hw_ep, csr);
else {
/* REVISIT this assumes AUTOCLEAR is never set */
csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
if (!done)
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, csr);
}
return done;
}
/* we don't always need to reinit a given side of an endpoint...
* when we do, use tx/rx reinit routine and then construct a new CSR
* to address data toggle, NYET, and DMA or PIO.
*
* it's possible that driver bugs (especially for DMA) or aborting a
* transfer might have left the endpoint busier than it should be.
* the busy/not-empty tests are basically paranoia.
*/
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
struct musb_hw_ep *ep = musb->endpoints + epnum;
u16 csr;
/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
* That always uses tx_reinit since ep0 repurposes TX register
* offsets; the initial SETUP packet is also a kind of OUT.
*/
/* if programmed for Tx, put it in RX mode */
if (ep->is_shared_fifo) {
csr = musb_readw(ep->regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_MODE) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(ep->regs, MUSB_TXCSR);
musb_writew(ep->regs, MUSB_TXCSR,
csr | MUSB_TXCSR_FRCDATATOG);
}
/*
* Clear the MODE bit (and everything else) to enable Rx.
* NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
*/
if (csr & MUSB_TXCSR_DMAMODE)
musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
}
csr = musb_readw(ep->regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_RXPKTRDY)
WARNING("rx%d, packet/%d ready?\n", ep->epnum,
musb_readw(ep->regs, MUSB_RXCOUNT));
musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
musb_write_rxhubport(musb, epnum, qh->h_port_reg);
} else
musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint, interval/NAKlimit, i/o size */
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
/* Set RXMAXP with the FIFO size of the endpoint
* to disable double buffer mode.
*/
musb_writew(ep->regs, MUSB_RXMAXP,
qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
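/*
 * Mentor DMA: pick mode 1 for multi-packet transfers (length greater
 * than maxpacket) and mode 0 otherwise, clamping the length to the
 * channel's limit and setting AUTOSET where the table below allows.
 */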
static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
u32 *length, u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
void __iomem *epio = hw_ep->regs;
u16 pkt_size = qh->maxpacket;
u16 csr;
if (*length > channel->max_len)
*length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
if (*length > pkt_size) {
*mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
/*
* Enable Autoset according to table
* below
* bulk_split hb_mult Autoset_Enable
* 0 1 Yes(Normal)
* 0 >1 No(High BW ISO)
* 1 1 Yes(HS bulk)
* 1 >1 Yes(FS bulk)
*/
if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
*mode = 0;
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
}
static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
struct urb *urb,
u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
channel->actual_len = 0;
/*
* TX uses "RNDIS" mode automatically but needs help
* to identify the zero-length-final-packet case.
*/
*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}
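/*
 * Program the TX DMA channel for this transfer. Returns true on
 * success; on failure the channel is released and the caller falls
 * back to PIO.
 */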
static bool musb_tx_dma_program(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset, u32 length)
{
struct dma_channel *channel = hw_ep->tx_channel;
u16 pkt_size = qh->maxpacket;
u8 mode;
if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
musb_tx_dma_set_mode_mentor(hw_ep, qh,
&length, &mode);
else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
else
return false;
qh->segsize = length;
/*
 * Ensure the data reaches main memory before starting the
 * DMA transfer
 */
wmb();
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
void __iomem *epio = hw_ep->regs;
u16 csr;
dma->channel_release(channel);
hw_ep->tx_channel = NULL;
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
return false;
}
return true;
}
/*
* Program an HDRC endpoint as per the given URB
* Context: irqs blocked, controller lock held
*/
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len)
{
struct dma_controller *dma_controller;
struct dma_channel *dma_channel;
u8 dma_ok;
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
u16 packet_sz = qh->maxpacket;
u8 use_dma = 1;
u16 csr;
musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
qh->addr_reg, qh->epnum, is_out ? "out" : "in",
qh->h_addr_reg, qh->h_port_reg,
len);
musb_ep_select(mbase, epnum);
if (is_out && !len) {
use_dma = 0;
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
hw_ep->tx_channel = NULL;
}
/* candidate for DMA? */
dma_controller = musb->dma_controller;
if (use_dma && is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
if (!dma_channel) {
dma_channel = dma_controller->channel_alloc(
dma_controller, hw_ep, is_out);
if (is_out)
hw_ep->tx_channel = dma_channel;
else
hw_ep->rx_channel = dma_channel;
}
} else
dma_channel = NULL;
/* make sure we clear DMAEnab, autoSet bits from previous run */
/* OUT/transmit/EP0 or IN/receive? */
if (is_out) {
u16 csr;
u16 int_txe;
u16 load_count;
csr = musb_readw(epio, MUSB_TXCSR);
/* disable interrupt in case we flush */
int_txe = musb->intrtxe;
musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
/* general endpoint setup */
if (epnum) {
/* flush all old state, set default */
/*
* We could be flushing valid
* packets in double buffering
* case
*/
if (!hw_ep->tx_double_buffered)
musb_h_tx_flush_fifo(hw_ep);
/*
* We must not clear the DMAMODE bit before or in
* the same cycle with the DMAENAB bit, so we clear
* the latter first...
*/
csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_FRCDATATOG
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_TXPKTRDY
);
csr |= MUSB_TXCSR_MODE;
if (!hw_ep->tx_double_buffered)
csr |= musb->io.set_toggle(qh, is_out, urb);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
} else {
/* endpoint 0: just flush */
musb_h_ep0_flush_fifo(hw_ep);
}
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_txfunaddr(musb, epnum, qh->addr_reg);
musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
musb_write_txhubport(musb, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
} else
musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
if (can_bulk_split(musb, qh->type)) {
qh->hb_mult = hw_ep->max_packet_sz_tx
/ packet_sz;
musb_writew(epio, MUSB_TXMAXP, packet_sz
| ((qh->hb_mult) - 1) << 11);
} else {
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
((qh->hb_mult - 1) << 11));
}
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
if (musb->is_multipoint)
musb_writeb(epio, MUSB_TYPE0,
qh->type_reg);
}
if (can_bulk_split(musb, qh->type))
load_count = min((u32) hw_ep->max_packet_sz_tx,
len);
else
load_count = min((u32) packet_sz, len);
if (dma_channel && musb_tx_dma_program(dma_controller,
hw_ep, qh, urb, offset, len))
load_count = 0;
if (load_count) {
/* PIO to load FIFO */
qh->segsize = load_count;
if (!buf) {
sg_miter_start(&qh->sg_miter, urb->sg, 1,
SG_MITER_ATOMIC
| SG_MITER_FROM_SG);
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller,
"error: sg list empty\n");
sg_miter_stop(&qh->sg_miter);
goto finish;
}
buf = qh->sg_miter.addr + urb->sg->offset +
urb->actual_length;
load_count = min_t(u32, load_count,
qh->sg_miter.length);
musb_write_fifo(hw_ep, load_count, buf);
qh->sg_miter.consumed = load_count;
sg_miter_stop(&qh->sg_miter);
} else
musb_write_fifo(hw_ep, load_count, buf);
}
finish:
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
/* IN/receive */
} else {
u16 csr = 0;
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, epnum);
csr |= musb->io.set_toggle(qh, is_out, urb);
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
} else {
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
if (csr & (MUSB_RXCSR_RXPKTRDY
| MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_REQPKT))
ERR("broken !rx_reinit, ep%d csr %04x\n",
hw_ep->epnum, csr);
/* scrub any stale state, leaving toggle alone */
csr &= MUSB_RXCSR_DISNYET;
}
/* kick things off */
if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
/* Candidate for DMA */
dma_channel->actual_len = 0L;
qh->segsize = len;
/* AUTOREQ is in a DMA register */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
/*
* Unless caller treats short RX transfers as
* errors, we dare not queue multiple transfers.
*/
dma_ok = dma_controller->channel_program(dma_channel,
packet_sz, !(urb->transfer_flags &
URB_SHORT_NOT_OK),
urb->transfer_dma + offset,
qh->segsize);
if (!dma_ok) {
dma_controller->channel_release(dma_channel);
hw_ep->rx_channel = dma_channel = NULL;
} else
csr |= MUSB_RXCSR_DMAENAB;
}
csr |= MUSB_RXCSR_H_REQPKT;
musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
}
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
* the end; avoids starvation for other endpoints.
*/
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
int is_in)
{
struct dma_channel *dma;
struct urb *urb;
void __iomem *mbase = musb->mregs;
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr, tx_csr;
u16 toggle;
musb_ep_select(mbase, ep->epnum);
if (is_in) {
dma = is_dma_capable() ? ep->rx_channel : NULL;
/*
* Need to stop the transaction by clearing REQPKT first
* then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
* DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
*/
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, rx_csr);
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
cur_qh = first_qh(&musb->in_bulk);
} else {
dma = is_dma_capable() ? ep->tx_channel : NULL;
/* clear nak timeout bit */
tx_csr = musb_readw(epio, MUSB_TXCSR);
tx_csr |= MUSB_TXCSR_H_WZC_BITS;
tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
musb_writew(epio, MUSB_TXCSR, tx_csr);
cur_qh = first_qh(&musb->out_bulk);
}
if (cur_qh) {
urb = next_urb(cur_qh);
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
toggle = musb->io.get_toggle(cur_qh, !is_in);
usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
if (is_in) {
/* move cur_qh to end of queue */
list_move_tail(&cur_qh->ring, &musb->in_bulk);
/* get the next qh from musb->in_bulk */
next_qh = first_qh(&musb->in_bulk);
/* set rx_reinit and schedule the next qh */
ep->rx_reinit = 1;
} else {
/* move cur_qh to end of queue */
list_move_tail(&cur_qh->ring, &musb->out_bulk);
/* get the next qh from musb->out_bulk */
next_qh = first_qh(&musb->out_bulk);
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
if (next_qh)
musb_start_urb(musb, is_in, next_qh);
}
}
/*
* Service the default endpoint (ep0) as host.
* Return true until it's time to start the status stage.
*/
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
bool more = false;
u8 *fifo_dest = NULL;
u16 fifo_count = 0;
struct musb_hw_ep *hw_ep = musb->control_ep;
struct musb_qh *qh = hw_ep->in_qh;
struct usb_ctrlrequest *request;
switch (musb->ep0_stage) {
case MUSB_EP0_IN:
fifo_dest = urb->transfer_buffer + urb->actual_length;
fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
urb->actual_length);
if (fifo_count < len)
urb->status = -EOVERFLOW;
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
if (len < qh->maxpacket) {
/* always terminate on short read; it's
* rarely reported as an error.
*/
} else if (urb->actual_length <
urb->transfer_buffer_length)
more = true;
break;
case MUSB_EP0_START:
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
musb_dbg(musb, "start no-DATA");
break;
} else if (request->bRequestType & USB_DIR_IN) {
musb_dbg(musb, "start IN-DATA");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
musb_dbg(musb, "start OUT-DATA");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
fallthrough;
case MUSB_EP0_OUT:
fifo_count = min_t(size_t, qh->maxpacket,
urb->transfer_buffer_length -
urb->actual_length);
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
more = true;
}
break;
default:
ERR("bogus ep0 stage %d\n", musb->ep0_stage);
break;
}
return more;
}
/*
* Handle default endpoint interrupt as host. Only called in IRQ time
* from musb_interrupt().
*
* called with controller irqlocked
*/
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
struct urb *urb;
u16 csr, len;
int status = 0;
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->control_ep;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
bool complete = false;
irqreturn_t retval = IRQ_NONE;
/* ep0 only has one queue, "in" */
urb = next_urb(qh);
musb_ep_select(mbase, 0);
csr = musb_readw(epio, MUSB_CSR0);
len = (csr & MUSB_CSR0_RXPKTRDY)
? musb_readb(epio, MUSB_COUNT0)
: 0;
musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
if (MUSB_EP0_STATUS == musb->ep0_stage) {
retval = IRQ_HANDLED;
complete = true;
}
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
musb_dbg(musb, "STALLING ENDPOINT");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
musb_dbg(musb, "no response, csr0 %04x", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
musb_dbg(musb, "control NAK timeout");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
* ep0 is more likely to stay busy. That's already done
* for bulk RX transfers.
*
* if (qh->ring.next != &musb->control), then
* we have a candidate... NAKing is *NOT* an error
*/
musb_writew(epio, MUSB_CSR0, 0);
retval = IRQ_HANDLED;
}
if (status) {
musb_dbg(musb, "aborting");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
complete = true;
/* use the proper sequence to abort the transfer */
if (csr & MUSB_CSR0_H_REQPKT) {
csr &= ~MUSB_CSR0_H_REQPKT;
musb_writew(epio, MUSB_CSR0, csr);
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
musb_writew(epio, MUSB_CSR0, csr);
} else {
musb_h_ep0_flush_fifo(hw_ep);
}
musb_writeb(epio, MUSB_NAKLIMIT0, 0);
/* clear it */
musb_writew(epio, MUSB_CSR0, 0);
}
if (unlikely(!urb)) {
/* stop endpoint since we have no place for its data, this
* SHOULD NEVER HAPPEN! */
ERR("no URB for end 0\n");
musb_h_ep0_flush_fifo(hw_ep);
goto done;
}
if (!complete) {
/* call common logic and prepare response */
if (musb_h_ep0_continue(musb, len, urb)) {
/* more packets required */
csr = (MUSB_EP0_IN == musb->ep0_stage)
? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
} else {
/* data transfer complete; perform status phase */
if (usb_pipeout(urb->pipe)
|| !urb->transfer_buffer_length)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_H_REQPKT;
else
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
/* disable ping token in status phase */
csr |= MUSB_CSR0_H_DIS_PING;
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
retval = IRQ_HANDLED;
} else
musb->ep0_stage = MUSB_EP0_IDLE;
/* call completion handler if done */
if (complete)
musb_advance_schedule(musb, urb, hw_ep, 1);
done:
return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Host side TX (OUT) using Mentor DMA works as follows:
submit_urb ->
- if queue was empty, Program Endpoint
- ... which starts DMA to fifo in mode 1 or 0
DMA Isr (transfer complete) -> TxAvail()
- Stop DMA (~DmaEnab) (<--- Alert ... currently happens
only in musb_cleanup_urb)
- TxPktRdy has to be set in mode 0 or for
short packets in mode 1.
*/
#endif
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
int pipe;
bool done = false;
u16 tx_csr;
size_t length = 0;
size_t offset = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->out_qh;
struct urb *urb = next_urb(qh);
u32 status = 0;
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
trace_musb_urb_tx(musb, urb);
musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
musb_dbg(musb, "TX end %d stall", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
&& !list_is_singular(&musb->out_bulk)) {
musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
musb_bulk_nak_timeout(musb, hw_ep, 0);
} else {
musb_dbg(musb, "TX ep%d device not responding", epnum);
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
* That's already done for bulk RX transfers.
*
* if (bulk && qh->ring.next != &musb->out_bulk), then
* we have a candidate... NAKing is *NOT* an error
*/
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS
| MUSB_TXCSR_TXPKTRDY);
}
return;
}
done:
if (status) {
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
}
/* do the proper sequence to abort the transfer in the
* usb core; the dma engine should already be stopped.
*/
musb_h_tx_flush_fifo(hw_ep);
tx_csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
);
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR, tx_csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, tx_csr);
musb_writeb(epio, MUSB_TXINTERVAL, 0);
done = true;
}
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
if (is_dma_capable() && dma && !status) {
/*
* DMA has completed. But if we're using DMA mode 1 (multi
* packet DMA), we need a terminal TXPKTRDY interrupt before
* we can consider this transfer completed, lest we trash
* its last packet when writing the next URB's data. So we
* switch back to mode 0 to get that interrupt; we'll come
* back here once it happens.
*/
if (tx_csr & MUSB_TXCSR_DMAMODE) {
/*
* We shouldn't clear DMAMODE with DMAENAB set; so
* clear them in a safe order. That should be OK
* once TXPKTRDY has been set (and I've never seen
* it being 0 at this moment -- DMA interrupt latency
* is significant) but if it hasn't been then we have
* no choice but to stop being polite and ignore the
* programmer's guide... :-)
*
* Note that we must write TXCSR with TXPKTRDY cleared
* in order not to re-trigger the packet send (this bit
* can't be cleared by CPU), and there's another caveat:
* TXPKTRDY may be set shortly and then cleared in the
* double-buffered FIFO mode, so we do an extra TXCSR
* read for debouncing...
*/
tx_csr &= musb_readw(epio, MUSB_TXCSR);
if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
tx_csr &= ~(MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
}
tx_csr &= ~(MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
/*
* There is no guarantee that we'll get an interrupt
* after clearing DMAMODE as we might have done this
* too late (after TXPKTRDY was cleared by controller).
* Re-read TXCSR as we have spoiled its previous value.
*/
tx_csr = musb_readw(epio, MUSB_TXCSR);
}
/*
* We may get here from a DMA completion or TXPKTRDY interrupt.
* In any case, we must check the FIFO status here and bail out
* only if the FIFO still has data -- that should prevent the
* "missed" TXPKTRDY interrupts and deal with double-buffered
* FIFO mode too...
*/
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
musb_dbg(musb,
"DMA complete but FIFO not empty, CSR %04x",
tx_csr);
return;
}
}
if (!status || dma || usb_pipeisoc(pipe)) {
if (dma)
length = dma->actual_len;
else
length = qh->segsize;
qh->offset += length;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = length;
d->status = status;
if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
} else {
d++;
offset = d->offset;
length = d->length;
}
} else if (dma && urb->transfer_buffer_length == qh->offset) {
done = true;
} else {
/* see if we need to send more data, or ZLP */
if (qh->segsize < qh->maxpacket)
done = true;
else if (qh->offset == urb->transfer_buffer_length
&& !(urb->transfer_flags
& URB_ZERO_PACKET))
done = true;
if (!done) {
offset = qh->offset;
length = urb->transfer_buffer_length - offset;
transfer_pending = true;
}
}
}
/* urb->status != -EINPROGRESS means request has been faulted,
* so we must abort this transfer after cleanup
*/
if (urb->status != -EINPROGRESS) {
done = true;
if (status == 0)
status = urb->status;
}
if (done) {
/* set status */
urb->status = status;
urb->actual_length = qh->offset;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
return;
} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
offset, length)) {
if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
musb_h_tx_dma_start(hw_ep);
return;
}
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
musb_dbg(musb, "not complete, but DMA enabled?");
return;
}
/*
* PIO: start next packet in this URB.
*
* REVISIT: some docs say that when hw_ep->tx_double_buffered,
* (and presumably, FIFO is not half-full) we should write *two*
* packets before updating TXCSR; other docs disagree...
*/
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
/*
* We need to map sg if the transfer_buffer is
* NULL.
*/
if (!urb->transfer_buffer) {
/* sg_miter_start is already done in musb_ep_program */
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
sg_miter_stop(&qh->sg_miter);
status = -EINVAL;
goto done;
}
length = min_t(u32, length, qh->sg_miter.length);
musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
qh->sg_miter.consumed = length;
sg_miter_stop(&qh->sg_miter);
} else {
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
}
qh->segsize = length;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
dma_addr_t *buf;
u32 length;
u16 val;
buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
(u32)urb->transfer_dma;
length = urb->iso_frame_desc[qh->iso_idx].length;
val = musb_readw(epio, MUSB_RXCSR);
val |= MUSB_RXCSR_DMAENAB;
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
return dma->channel_program(channel, qh->maxpacket, 0,
(u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
return false;
}
#endif
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
submit_urb ->
- if queue was empty, ProgramEndpoint
- first IN token is sent out (by setting ReqPkt)
LinuxIsr -> RxReady()
/\ => first packet is received
| - Set in mode 0 (DmaEnab, ~ReqPkt)
| -> DMA Isr (transfer complete) -> RxReady()
| - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
| - if urb not complete, send next IN token (ReqPkt)
| | else complete urb.
| |
---------------------------
*
* Nuances of mode 1:
* For short packets, no ack (+RxPktRdy) is sent automatically
* (even if AutoClear is ON)
 * For full packets, the ack (~RxPktRdy) and next IN token (+ReqPkt) are
 * sent automatically => major problem, as collecting the next packet
 * becomes difficult. Hence mode 1 is not used.
*
* REVISIT
* All we care about at this driver level is that
* (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
* (b) termination conditions are: short RX, or buffer full;
* (c) fault modes include
* - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
* (and that endpoint's dma queue stops immediately)
* - overflow (full, PLUS more bytes in the terminal packet)
*
* So for example, usb-storage sets URB_SHORT_NOT_OK, and would
* thus be a great candidate for using mode 1 ... for all but the
* last packet of one URB's transfer.
*/
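/*
 * Returns true once the current URB is complete; otherwise sends the
 * IN token for the next packet (without AUTOREQ) and returns false.
 */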
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
u16 val;
int pipe;
bool done;
pipe = urb->pipe;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = len;
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
} else {
/* REVISIT: Why ignore return value here? */
if (musb_dma_cppi41(hw_ep->musb))
done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
urb, len);
done = false;
}
} else {
/* done if urb buffer is full or short packet is recd */
done = (urb->actual_length + len >=
urb->transfer_buffer_length
|| channel->actual_len < qh->maxpacket
|| channel->rx_packet_done);
}
/* send IN token for next packet, without AUTOREQ */
if (!done) {
val = musb_readw(epio, MUSB_RXCSR);
val |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
}
return done;
}
/* Disadvantage of using mode 1:
* It's basically usable only for mass storage class; essentially all
* other protocols also terminate transfers on short packets.
*
* Details:
* An extra IN token is sent at the end of the transfer (due to AUTOREQ)
* If you try to use mode 1 for (transfer_buffer_length - 512), and try
* to use the extra IN token to grab the last packet using mode 0, then
 * the problem is that you cannot be sure when the device will send the
 * last packet and set RxPktRdy. Sometimes the packet is received too
 * soon and gets lost when RxCSR is re-set at the end of the mode 1
 * transfer, while sometimes it is received just a little late, so that
 * if you try to configure for mode 0 soon after the mode 1 transfer is
 * completed, you will find rxcount 0. Okay, so you might think: why not
 * wait for an interrupt when the pkt is received? Well, you won't get any!
*/
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len,
u8 iso_err)
{
struct musb *musb = hw_ep->musb;
void __iomem *epio = hw_ep->regs;
struct dma_channel *channel = hw_ep->rx_channel;
u16 rx_count, val;
int length, pipe, done;
dma_addr_t buf;
rx_count = musb_readw(epio, MUSB_RXCOUNT);
pipe = urb->pipe;
if (usb_pipeisoc(pipe)) {
int d_status = 0;
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
if (iso_err) {
d_status = -EILSEQ;
urb->error_count++;
}
if (rx_count > d->length) {
if (d_status == 0) {
d_status = -EOVERFLOW;
urb->error_count++;
}
musb_dbg(musb, "** OVERFLOW %d into %d",
rx_count, d->length);
length = d->length;
} else
length = rx_count;
d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
length = rx_count;
buf = urb->transfer_dma + urb->actual_length;
}
channel->desired_mode = 0;
#ifdef USE_MODE1
/* because of the issue below, mode 1 will
* only rarely behave with correct semantics.
*/
if ((urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->transfer_buffer_length - urb->actual_length)
> qh->maxpacket)
channel->desired_mode = 1;
if (rx_count < hw_ep->max_packet_sz_rx) {
length = rx_count;
channel->desired_mode = 0;
} else {
length = urb->transfer_buffer_length;
}
#endif
/* See comments above on disadvantages of using mode 1 */
val = musb_readw(epio, MUSB_RXCSR);
val &= ~MUSB_RXCSR_H_REQPKT;
if (channel->desired_mode == 0)
val &= ~MUSB_RXCSR_H_AUTOREQ;
else
val |= MUSB_RXCSR_H_AUTOREQ;
val |= MUSB_RXCSR_DMAENAB;
/* autoclear shouldn't be set in high bandwidth */
if (qh->hb_mult == 1)
val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
/* REVISIT if when actual_length != 0,
* transfer_buffer_length needs to be
* adjusted first...
*/
done = dma->channel_program(channel, qh->maxpacket,
channel->desired_mode,
buf, length);
if (!done) {
dma->channel_release(channel);
hw_ep->rx_channel = NULL;
channel = NULL;
val = musb_readw(epio, MUSB_RXCSR);
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, val);
}
return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
return false;
}
static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len,
u8 iso_err)
{
return false;
}
#endif
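/* When none of the Inventra, UX500 or CPPI 4.1 DMA backends is built
* in, the stubs above simply return false, so the DMA branches in
* musb_host_rx() below are never taken and transfers fall back to PIO.
*/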
/*
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
*/
void musb_host_rx(struct musb *musb, u8 epnum)
{
struct urb *urb;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
struct dma_controller *c = musb->dma_controller;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
size_t xfer_len;
void __iomem *mbase = musb->mregs;
u16 rx_csr, val;
bool iso_err = false;
bool done = false;
u32 status;
struct dma_channel *dma;
unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
urb = next_urb(qh);
dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
status = 0;
xfer_len = 0;
rx_csr = musb_readw(epio, MUSB_RXCSR);
val = rx_csr;
if (unlikely(!urb)) {
/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
epnum, val, musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
}
trace_musb_urb_rx(musb, urb);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
musb_dbg(musb, "RX end %d STALL", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
dev_err(musb->controller, "ep%d RX three-strikes error", epnum);
/*
* The three-strikes error could only happen when the USB
* device is not accessible, for example detached or powered
* off. So return the fatal error -ESHUTDOWN so hopefully the
* USB device drivers won't immediately resubmit the same URB.
*/
status = -ESHUTDOWN;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
rx_csr &= ~MUSB_RXCSR_H_ERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
musb_dbg(musb, "RX end %d NAK timeout", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
* another QH, use that instead of starving it.
*
* Devices like Ethernet and serial adapters keep
* reads posted at all times, which will starve
* other devices without this logic.
*/
if (usb_pipebulk(urb->pipe)
&& qh->mux == 1
&& !list_is_singular(&musb->in_bulk)) {
musb_bulk_nak_timeout(musb, hw_ep, 1);
return;
}
musb_ep_select(mbase, epnum);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
goto finish;
} else {
musb_dbg(musb, "RX end %d ISO data error", epnum);
/* packet error reported later */
iso_err = true;
}
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
epnum);
status = -EPROTO;
}
/* faults abort the transfer */
if (status) {
/* clean up dma and collect transfer count */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
}
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
musb_writeb(epio, MUSB_RXINTERVAL, 0);
done = true;
goto finish;
}
if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
goto finish;
}
/* thorough shutdown for now ... given more precise fault handling
* and better queueing support, we might keep a DMA pipeline going
* while processing this irq for earlier completions.
*/
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
(rx_csr & MUSB_RXCSR_H_REQPKT)) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
* and also duplicates dma cleanup code above ... plus,
* shouldn't this be the "half full" double buffer case?
*/
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
done = true;
}
musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
}
if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
xfer_len = dma->actual_len;
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) {
done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
musb_dbg(hw_ep->musb,
"ep %d dma %s, rxcsr %04x, rxcount %d",
epnum, done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
} else {
done = true;
}
} else if (urb->status == -EINPROGRESS) {
/* if no errors, be sure a packet is ready for unloading */
if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
status = -EPROTO;
ERR("Rx interrupt with no errors or packet!\n");
/* FIXME this is another "SHOULD NEVER HAPPEN" */
/* SCRUB (RX) */
/* do the proper sequence to abort the transfer */
musb_ep_select(mbase, epnum);
val &= ~MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, val);
goto finish;
}
/* we are expecting IN packets */
if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) && dma) {
musb_dbg(hw_ep->musb,
"RX%d count %d, buffer 0x%llx len %d/%d",
epnum, musb_readw(epio, MUSB_RXCOUNT),
(unsigned long long) urb->transfer_dma
+ urb->actual_length,
qh->offset,
urb->transfer_buffer_length);
if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
xfer_len, iso_err))
goto finish;
else
dev_err(musb->controller, "error: rx_dma failed\n");
}
if (!dma) {
unsigned int received_len;
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
/*
* We need to map sg if the transfer_buffer is
* NULL.
*/
if (!urb->transfer_buffer) {
qh->use_sg = true;
sg_miter_start(&qh->sg_miter, urb->sg, 1,
sg_flags);
}
if (qh->use_sg) {
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
sg_miter_stop(&qh->sg_miter);
status = -EINVAL;
done = true;
goto finish;
}
urb->transfer_buffer = qh->sg_miter.addr;
received_len = urb->actual_length;
qh->offset = 0x0;
done = musb_host_packet_rx(musb, urb, epnum,
iso_err);
/* Calculate the number of bytes received */
received_len = urb->actual_length -
received_len;
qh->sg_miter.consumed = received_len;
sg_miter_stop(&qh->sg_miter);
} else {
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
}
musb_dbg(musb, "read %spacket", done ? "last " : "");
}
}
finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
if (qh->use_sg) {
qh->use_sg = false;
urb->transfer_buffer = NULL;
}
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
* the software schedule associates multiple such nodes with a given
* host side hardware endpoint + direction; scheduling may activate
* that hardware endpoint.
*/
static int musb_schedule(
struct musb *musb,
struct musb_qh *qh,
int is_in)
{
int idle = 0;
int best_diff;
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
u8 toggle;
u8 txtype;
struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
head = &musb->control;
hw_ep = musb->control_ep;
goto success;
}
/* else, periodic transfers get muxed to other endpoints */
/*
* We know this qh hasn't been scheduled, so all we need to do
* is choose which hardware endpoint to put it on ...
*
* REVISIT what we really want here is a regular schedule tree
* like e.g. OHCI uses.
*/
best_diff = 4096;
best_end = -1;
for (epnum = 1, hw_ep = musb->endpoints + 1;
epnum < musb->nr_endpoints;
epnum++, hw_ep++) {
int diff;
if (musb_ep_get_qh(hw_ep, is_in) != NULL)
continue;
if (hw_ep == musb->bulk_ep)
continue;
if (is_in)
diff = hw_ep->max_packet_sz_rx;
else
diff = hw_ep->max_packet_sz_tx;
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
/*
* The Mentor controller has a bug: if we schedule a BULK TX
* transfer on an endpoint that had earlier handled ISOC, the
* BULK transfer has to start on a zero toggle. If the BULK
* transfer starts on a 1 toggle, it will fail, as the Mentor
* controller starts the BULK transfer on a 0 toggle
* irrespective of the programming of the toggle bits in the
* TXCSR register. Check for this condition while allocating
* the EP for a TX BULK transfer, and if so, skip this EP.
*/
hw_ep = musb->endpoints + epnum;
toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
>> 4) & 0x3;
if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
continue;
best_diff = diff;
best_end = epnum;
}
}
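/* Illustrative numbers: with qh->maxpacket = 512 and hb_mult = 1, an
* idle endpoint with a 512 byte FIFO yields diff = 0 and beats one
* with 1024 bytes (diff = 512), keeping the larger FIFOs free for
* transfers that actually need them.
*/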
/* use bulk reserved ep1 if no other ep is free */
if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
hw_ep = musb->bulk_ep;
if (is_in)
head = &musb->in_bulk;
else
head = &musb->out_bulk;
/* Enable the bulk RX/TX NAK timeout scheme when bulk requests are
* multiplexed. This scheme does not work in the high speed to full
* speed scenario, since NAK interrupts do not arrive from a full
* speed device connected through a high speed hub.
* The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
* 4 (8 frames or 8 ms) for FS devices.
*/
if (qh->dev)
qh->intv_reg =
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
} else if (best_end < 0) {
dev_err(musb->controller,
"%s hwep alloc failed for %dx%d\n",
musb_ep_xfertype_string(qh->type),
qh->hb_mult, qh->maxpacket);
return -ENOSPC;
}
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
if (head) {
idle = list_empty(head);
list_add_tail(&qh->ring, head);
qh->mux = 1;
}
qh->hw_ep = hw_ep;
qh->hep->hcpriv = qh;
if (idle)
musb_start_urb(musb, is_in, qh);
return 0;
}
static int musb_urb_enqueue(
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct usb_host_endpoint *hep = urb->ep;
struct musb_qh *qh;
struct usb_endpoint_descriptor *epd = &hep->desc;
int ret;
unsigned type_reg;
unsigned interval;
/* host role must be active */
if (!is_host_active(musb) || !musb->is_active)
return -ENODEV;
trace_musb_urb_enq(musb, urb);
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
qh = ret ? NULL : hep->hcpriv;
if (qh)
urb->hcpriv = qh;
spin_unlock_irqrestore(&musb->lock, flags);
/* DMA mapping was already done, if needed, and this urb is on
* hep->urb_list now ... so we're done, unless hep wasn't yet
* scheduled onto a live qh.
*
* REVISIT best to keep hep->hcpriv valid until the endpoint gets
* disabled, testing for empty qh->ring and avoiding qh setup costs
* except for the first urb queued after a config change.
*/
if (qh || ret)
return ret;
/* Allocate and initialize qh, minimizing the work done each time
* hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
*
* REVISIT consider a dedicated qh kmem_cache, so it's harder
* for bugs in other kernel code to break this driver...
*/
qh = kzalloc(sizeof *qh, mem_flags);
if (!qh) {
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
return -ENOMEM;
}
qh->hep = hep;
qh->dev = urb->dev;
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
* Some musb cores don't support high bandwidth ISO transfers; and
* we don't (yet!) support high bandwidth interrupt transfers.
*/
qh->hb_mult = usb_endpoint_maxp_mult(epd);
if (qh->hb_mult > 1) {
int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
if (ok)
ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
if (!ok) {
dev_err(musb->controller,
"high bandwidth %s (%dx%d) not supported\n",
musb_ep_xfertype_string(qh->type),
qh->hb_mult, qh->maxpacket & 0x7ff);
ret = -EMSGSIZE;
goto done;
}
qh->maxpacket &= 0x7ff;
}
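/* e.g. wMaxPacketSize 0x0c00 decodes to maxpacket 1024 with hb_mult 2,
* allowing up to 2048 bytes per microframe on a high bandwidth ISO
* endpoint.
*/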
qh->epnum = usb_endpoint_num(epd);
/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
/* precompute rxtype/txtype/type0 register */
type_reg = (qh->type << 4) | qh->epnum;
switch (urb->dev->speed) {
case USB_SPEED_LOW:
type_reg |= 0xc0;
break;
case USB_SPEED_FULL:
type_reg |= 0x80;
break;
default:
type_reg |= 0x40;
}
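/* Speed thus lands in bits 7:6 of the TYPE register (0xc0 low, 0x80
* full, 0x40 high speed), with the transfer type in bits 5:4 and the
* target endpoint number in bits 3:0.
*/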
qh->type_reg = type_reg;
/* Precompute RXINTERVAL/TXINTERVAL register */
switch (qh->type) {
case USB_ENDPOINT_XFER_INT:
/*
* Full/low speeds use the linear encoding,
* high speed uses the logarithmic encoding.
*/
if (urb->dev->speed <= USB_SPEED_FULL) {
interval = max_t(u8, epd->bInterval, 1);
break;
}
fallthrough;
case USB_ENDPOINT_XFER_ISOC:
/* ISO always uses logarithmic encoding */
interval = min_t(u8, epd->bInterval, 16);
break;
default:
/* REVISIT we actually want to use NAK limits, hinting to the
* transfer scheduling logic to try some other qh, e.g. try
* for 2 msec first:
*
* interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
*
* The downside of disabling this is that transfer scheduling
* gets VERY unfair for nonperiodic transfers; a misbehaving
* peripheral could make that hurt. That's perfectly normal
* for reads from network or serial adapters ... so we have
* partial NAKlimit support for bulk RX.
*
* The upside of disabling it is simpler transfer scheduling.
*/
interval = 0;
}
qh->intv_reg = interval;
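/* Worked example of the logarithmic encoding: a high speed interrupt
* endpoint with bInterval = 4 is polled every 2^(4-1) = 8 microframes,
* i.e. once per millisecond; full/low speed interrupt endpoints use
* bInterval directly, in frames.
*/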
/* precompute addressing for external hub/tt ports */
if (musb->is_multipoint) {
struct usb_device *parent = urb->dev->parent;
if (parent != hcd->self.root_hub) {
qh->h_addr_reg = (u8) parent->devnum;
/* set up tt info if needed */
if (urb->dev->tt) {
qh->h_port_reg = (u8) urb->dev->ttport;
if (urb->dev->tt->hub)
qh->h_addr_reg =
(u8) urb->dev->tt->hub->devnum;
if (urb->dev->tt->multi)
qh->h_addr_reg |= 0x80;
}
}
}
/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
* until we get real dma queues (with an entry for each urb/buffer),
* we only have work to do in the former case.
*/
spin_lock_irqsave(&musb->lock, flags);
if (hep->hcpriv || !next_urb(qh)) {
/* some concurrent activity submitted another urb to hep...
* odd, rare, error prone, but legal.
*/
kfree(qh);
qh = NULL;
ret = 0;
} else
ret = musb_schedule(musb, qh,
epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
if (ret == 0) {
urb->hcpriv = qh;
/* FIXME set urb->start_frame for iso/intr, it's tested in
* musb_start_urb(), but otherwise only konicawc cares ...
*/
}
spin_unlock_irqrestore(&musb->lock, flags);
done:
if (ret != 0) {
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
kfree(qh);
}
return ret;
}
/*
* Abort a transfer that's at the head of a hardware queue.
* Called with the controller locked, irqs blocked.
* That hardware queue then advances to the next transfer, unless prevented.
*/
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
struct musb_hw_ep *ep = qh->hw_ep;
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
int is_in = usb_pipein(urb->pipe);
int status = 0;
u16 csr;
struct dma_channel *dma = NULL;
musb_ep_select(regs, hw_end);
if (is_dma_capable()) {
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
urb->actual_length += dma->actual_len;
}
}
/* turn off DMA requests, discard state, stop polling ... */
if (ep->epnum && is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
/* clear the endpoint's irq status here to avoid bogus irqs */
if (is_dma_capable() && dma)
musb_platform_clear_ep_rxintr(musb, ep->epnum);
} else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, csr);
/* flush cpu writebuffer */
csr = musb_readw(epio, MUSB_TXCSR);
} else {
musb_h_ep0_flush_fifo(ep);
}
if (status == 0)
musb_advance_schedule(ep->musb, urb, ep, is_in);
return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
unsigned long flags;
int is_in = usb_pipein(urb->pipe);
int ret;
trace_musb_urb_deq(musb, urb);
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret)
goto done;
qh = urb->hcpriv;
if (!qh)
goto done;
/*
* Any URB not actively programmed into endpoint hardware can be
* immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
* if it's at the head, it might not be known to the hardware...
*
* Otherwise abort current transfer, pending DMA, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though.
*
* NOTE: qh is invalid unless !list_empty(&hep->urb_list)
*/
if (!qh->is_ready
|| urb->urb_list.prev != &qh->hep->urb_list
|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
qh->is_ready = 0;
musb_giveback(musb, urb, 0);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
* and its URB list has emptied, recycle this qh.
*/
if (ready && list_empty(&qh->hep->urb_list)) {
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
}
} else
ret = musb_cleanup_urb(urb, qh);
done:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
struct urb *urb;
spin_lock_irqsave(&musb->lock, flags);
qh = hep->hcpriv;
if (qh == NULL)
goto exit;
/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
/* Kick the first URB off the hardware, if needed */
qh->is_ready = 0;
if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
urb = next_urb(qh);
/* make software (then hardware) stop ASAP */
if (!urb->unlinked)
urb->status = -ESHUTDOWN;
/* cleanup */
musb_cleanup_urb(urb, qh);
/* Then nuke all the others ... and advance the
* queue on hw_ep (e.g. bulk ring) when we're done.
*/
while (!list_empty(&hep->urb_list)) {
urb = next_urb(qh);
urb->status = -ESHUTDOWN;
musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
}
} else {
/* Just empty the queue; the hardware is busy with
* other transfers, and since !qh->is_ready nothing
* will activate any of these as it advances.
*/
while (!list_empty(&hep->urb_list))
musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
}
exit:
spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
/* NOTE: musb_start() is called when the hub driver turns
* on port power, or when (OTG) peripheral starts.
*/
hcd->state = HC_STATE_RUNNING;
musb->port1_status = 0;
return 0;
}
static void musb_h_stop(struct usb_hcd *hcd)
{
musb_stop(hcd_to_musb(hcd));
hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
int ret;
ret = musb_port_suspend(musb, true);
if (ret)
return ret;
if (!is_host_active(musb))
return 0;
switch (musb_get_state(musb)) {
case OTG_STATE_A_SUSPEND:
return 0;
case OTG_STATE_A_WAIT_VRISE:
/* ID could be grounded even if there's no device
* on the other end of the cable. NOTE that the
* A_WAIT_VRISE timers are messy with MUSB...
*/
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
break;
default:
break;
}
if (musb->is_active) {
WARNING("trying to suspend as %s while active\n",
musb_otg_state_string(musb));
return -EBUSY;
} else
return 0;
}
static int musb_bus_resume(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
if (musb->config &&
musb->config->host_port_deassert_reset_at_resume)
musb_port_reset(musb, false);
return 0;
}
#ifndef CONFIG_MUSB_PIO_ONLY
#define MUSB_USB_DMA_ALIGN 4
struct musb_temp_buffer {
void *kmalloc_ptr;
void *old_xfer_buffer;
u8 data[];
};
static void musb_free_temp_buffer(struct urb *urb)
{
enum dma_data_direction dir;
struct musb_temp_buffer *temp;
size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
data);
if (dir == DMA_FROM_DEVICE) {
if (usb_pipeisoc(urb->pipe))
length = urb->transfer_buffer_length;
else
length = urb->actual_length;
memcpy(temp->old_xfer_buffer, temp->data, length);
}
urb->transfer_buffer = temp->old_xfer_buffer;
kfree(temp->kmalloc_ptr);
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
enum dma_data_direction dir;
struct musb_temp_buffer *temp;
void *kmalloc_ptr;
size_t kmalloc_size;
if (urb->num_sgs || urb->sg ||
urb->transfer_buffer_length == 0 ||
!((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
return 0;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
/* Allocate a buffer with enough padding for alignment */
kmalloc_size = urb->transfer_buffer_length +
sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
if (!kmalloc_ptr)
return -ENOMEM;
/* Position our struct temp_buffer such that data is aligned */
temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
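/* Hypothetical illustration: had kmalloc() returned 0x1002, PTR_ALIGN()
* would move the struct start up to 0x1004; both members before data[]
* are pointers, so data[] (the buffer handed to the hardware) stays
* 4-byte aligned. The MUSB_USB_DMA_ALIGN - 1 padding requested above
* reserves room for this shift.
*/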
temp->kmalloc_ptr = kmalloc_ptr;
temp->old_xfer_buffer = urb->transfer_buffer;
if (dir == DMA_TO_DEVICE)
memcpy(temp->data, urb->transfer_buffer,
urb->transfer_buffer_length);
urb->transfer_buffer = temp->data;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
return 0;
}
static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct musb *musb = hcd_to_musb(hcd);
int ret;
/*
* The DMA engine in RTL 1.8 and above cannot handle DMA addresses
* that are not aligned to a 4 byte boundary. For such engines these
* (un)map_urb_for_dma hooks are implemented.
* Do not use these hooks for RTL < 1.8.
*/
if (musb->hwvers < MUSB_HWVERS_1800)
return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
ret = musb_alloc_temp_buffer(urb, mem_flags);
if (ret)
return ret;
ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
if (ret)
musb_free_temp_buffer(urb);
return ret;
}
static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
struct musb *musb = hcd_to_musb(hcd);
usb_hcd_unmap_urb_for_dma(hcd, urb);
/* Do not use this hook for RTL < 1.8 (see description above) */
if (musb->hwvers < MUSB_HWVERS_1800)
return;
musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */
static const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
.hcd_priv_size = sizeof(struct musb *),
.flags = HCD_USB2 | HCD_DMA | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
* those must be shared with peripheral code for OTG configs
*/
.start = musb_h_start,
.stop = musb_h_stop,
.get_frame_number = musb_h_get_frame_number,
.urb_enqueue = musb_urb_enqueue,
.urb_dequeue = musb_urb_dequeue,
.endpoint_disable = musb_h_disable,
#ifndef CONFIG_MUSB_PIO_ONLY
.map_urb_for_dma = musb_map_urb_for_dma,
.unmap_urb_for_dma = musb_unmap_urb_for_dma,
#endif
.hub_status_data = musb_hub_status_data,
.hub_control = musb_hub_control,
.bus_suspend = musb_bus_suspend,
.bus_resume = musb_bus_resume,
/* .start_port_reset = NULL, */
/* .hub_irq_enable = NULL, */
};
int musb_host_alloc(struct musb *musb)
{
struct device *dev = musb->controller;
/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
if (!musb->hcd)
return -EINVAL;
*musb->hcd->hcd_priv = (unsigned long) musb;
musb->hcd->self.uses_pio_for_control = 1;
musb->hcd->uses_new_polling = 1;
musb->hcd->has_tt = 1;
return 0;
}
void musb_host_cleanup(struct musb *musb)
{
if (musb->port_mode == MUSB_PERIPHERAL)
return;
usb_remove_hcd(musb->hcd);
}
void musb_host_free(struct musb *musb)
{
usb_put_hcd(musb->hcd);
}
int musb_host_setup(struct musb *musb, int power_budget)
{
int ret;
struct usb_hcd *hcd = musb->hcd;
if (musb->port_mode == MUSB_HOST) {
MUSB_HST_MODE(musb);
musb_set_state(musb, OTG_STATE_A_IDLE);
}
if (musb->xceiv) {
otg_set_host(musb->xceiv->otg, &hcd->self);
musb->xceiv->otg->host = &hcd->self;
} else {
phy_set_mode(musb->phy, PHY_MODE_USB_HOST);
}
/* don't support otg protocols */
hcd->self.otg_port = 0;
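/* A zero power_budget means "use the default": the ?: operator
* substitutes 250, so hcd->power_budget works out to 2 * 250 = 500
* in that case.
*/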
hcd->power_budget = 2 * (power_budget ? : 250);
hcd->skip_phy_initialization = 1;
ret = usb_add_hcd(hcd, 0, 0);
if (ret < 0)
return ret;
device_wakeup_enable(hcd->self.controller);
return 0;
}
void musb_host_resume_root_hub(struct musb *musb)
{
usb_hcd_resume_root_hub(musb->hcd);
}
void musb_host_poke_root_hub(struct musb *musb)
{
MUSB_HST_MODE(musb);
if (musb->hcd->status_urb)
usb_hcd_poll_rh_status(musb->hcd);
else
usb_hcd_resume_root_hub(musb->hcd);
}
| linux-master | drivers/usb/musb/musb_host.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sun4i MUSB Glue Layer
*
* Copyright (C) 2015 Hans de Goede <[email protected]>
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>
#include <linux/usb/musb.h>
#include <linux/usb/of.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/workqueue.h>
#include "musb_core.h"
/*
* Register offsets. Note that sunxi musb has a different layout than most
* musb implementations; we translate the layout in musb_readb & friends.
*/
#define SUNXI_MUSB_POWER 0x0040
#define SUNXI_MUSB_DEVCTL 0x0041
#define SUNXI_MUSB_INDEX 0x0042
#define SUNXI_MUSB_VEND0 0x0043
#define SUNXI_MUSB_INTRTX 0x0044
#define SUNXI_MUSB_INTRRX 0x0046
#define SUNXI_MUSB_INTRTXE 0x0048
#define SUNXI_MUSB_INTRRXE 0x004a
#define SUNXI_MUSB_INTRUSB 0x004c
#define SUNXI_MUSB_INTRUSBE 0x0050
#define SUNXI_MUSB_FRAME 0x0054
#define SUNXI_MUSB_TXFIFOSZ 0x0090
#define SUNXI_MUSB_TXFIFOADD 0x0092
#define SUNXI_MUSB_RXFIFOSZ 0x0094
#define SUNXI_MUSB_RXFIFOADD 0x0096
#define SUNXI_MUSB_FADDR 0x0098
#define SUNXI_MUSB_TXFUNCADDR 0x0098
#define SUNXI_MUSB_TXHUBADDR 0x009a
#define SUNXI_MUSB_TXHUBPORT 0x009b
#define SUNXI_MUSB_RXFUNCADDR 0x009c
#define SUNXI_MUSB_RXHUBADDR 0x009e
#define SUNXI_MUSB_RXHUBPORT 0x009f
#define SUNXI_MUSB_CONFIGDATA 0x00c0
/* VEND0 bits */
#define SUNXI_MUSB_VEND0_PIO_MODE 0
/* flags */
#define SUNXI_MUSB_FL_ENABLED 0
#define SUNXI_MUSB_FL_HOSTMODE 1
#define SUNXI_MUSB_FL_HOSTMODE_PEND 2
#define SUNXI_MUSB_FL_VBUS_ON 3
#define SUNXI_MUSB_FL_PHY_ON 4
#define SUNXI_MUSB_FL_HAS_SRAM 5
#define SUNXI_MUSB_FL_HAS_RESET 6
#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
#define SUNXI_MUSB_FL_PHY_MODE_PEND 8
struct sunxi_musb_cfg {
const struct musb_hdrc_config *hdrc_config;
bool has_sram;
bool has_reset;
bool no_configdata;
};
/* Our read/write methods need access and do not get passed in a musb ref :| */
static struct musb *sunxi_musb;
struct sunxi_glue {
struct device *dev;
struct musb *musb;
struct platform_device *musb_pdev;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
struct platform_device *usb_phy;
struct usb_phy *xceiv;
enum phy_mode phy_mode;
unsigned long flags;
struct work_struct work;
struct extcon_dev *extcon;
struct notifier_block host_nb;
};
/* phy_power_on / off may sleep, so we use a workqueue */
static void sunxi_musb_work(struct work_struct *work)
{
struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work);
bool vbus_on, phy_on;
if (!test_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
return;
if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
struct musb *musb = glue->musb;
unsigned long flags;
u8 devctl;
spin_lock_irqsave(&musb->lock, flags);
devctl = readb(musb->mregs + SUNXI_MUSB_DEVCTL);
if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
MUSB_HST_MODE(musb);
devctl |= MUSB_DEVCTL_SESSION;
} else {
clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
devctl &= ~MUSB_DEVCTL_SESSION;
}
writeb(devctl, musb->mregs + SUNXI_MUSB_DEVCTL);
spin_unlock_irqrestore(&musb->lock, flags);
}
vbus_on = test_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
phy_on = test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
if (phy_on != vbus_on) {
if (vbus_on) {
phy_power_on(glue->phy);
set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
} else {
phy_power_off(glue->phy);
clear_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
}
}
if (test_and_clear_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags))
phy_set_mode(glue->phy, glue->phy_mode);
}
static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
if (is_on) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
} else {
clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
}
schedule_work(&glue->work);
}
static void sunxi_musb_pre_root_reset_end(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
sun4i_usb_phy_set_squelch_detect(glue->phy, false);
}
static void sunxi_musb_post_root_reset_end(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
sun4i_usb_phy_set_squelch_detect(glue->phy, true);
}
static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
{
struct musb *musb = __hci;
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
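/* Each latched interrupt value is written straight back to its INTR
* register to acknowledge (clear) it before musb_interrupt() runs.
*/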
musb->int_usb = readb(musb->mregs + SUNXI_MUSB_INTRUSB);
if (musb->int_usb)
writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
/* ep0 FADDR must be 0 when (re)entering peripheral mode */
musb_ep_select(musb->mregs, 0);
musb_writeb(musb->mregs, MUSB_FADDR, 0);
}
musb->int_tx = readw(musb->mregs + SUNXI_MUSB_INTRTX);
if (musb->int_tx)
writew(musb->int_tx, musb->mregs + SUNXI_MUSB_INTRTX);
musb->int_rx = readw(musb->mregs + SUNXI_MUSB_INTRRX);
if (musb->int_rx)
writew(musb->int_rx, musb->mregs + SUNXI_MUSB_INTRRX);
musb_interrupt(musb);
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_HANDLED;
}
static int sunxi_musb_host_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct sunxi_glue *glue = container_of(nb, struct sunxi_glue, host_nb);
if (event)
set_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
else
clear_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
set_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags);
schedule_work(&glue->work);
return NOTIFY_DONE;
}
static int sunxi_musb_init(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
int ret;
sunxi_musb = musb;
musb->phy = glue->phy;
musb->xceiv = glue->xceiv;
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) {
ret = sunxi_sram_claim(musb->controller->parent);
if (ret)
return ret;
}
ret = clk_prepare_enable(glue->clk);
if (ret)
goto error_sram_release;
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
ret = reset_control_deassert(glue->rst);
if (ret)
goto error_clk_disable;
}
writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
/* Register notifier before calling phy_init() */
ret = devm_extcon_register_notifier(glue->dev, glue->extcon,
EXTCON_USB_HOST, &glue->host_nb);
if (ret)
goto error_reset_assert;
ret = phy_init(glue->phy);
if (ret)
goto error_reset_assert;
musb->isr = sunxi_musb_interrupt;
/* Stop the musb-core from doing runtime pm (not supported on sunxi) */
pm_runtime_get(musb->controller);
return 0;
error_reset_assert:
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
error_clk_disable:
clk_disable_unprepare(glue->clk);
error_sram_release:
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
return ret;
}
static int sunxi_musb_exit(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
pm_runtime_put(musb->controller);
cancel_work_sync(&glue->work);
if (test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags))
phy_power_off(glue->phy);
phy_exit(glue->phy);
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
clk_disable_unprepare(glue->clk);
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
devm_usb_put_phy(glue->dev, glue->xceiv);
return 0;
}
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
glue->musb = musb;
/* musb_core does not call us in a balanced manner */
if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
return;
schedule_work(&glue->work);
}
static void sunxi_musb_disable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
}
static struct dma_controller *
sunxi_musb_dma_controller_create(struct musb *musb, void __iomem *base)
{
return NULL;
}
static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
{
}
static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
enum phy_mode new_mode;
switch (mode) {
case MUSB_HOST:
new_mode = PHY_MODE_USB_HOST;
break;
case MUSB_PERIPHERAL:
new_mode = PHY_MODE_USB_DEVICE;
break;
case MUSB_OTG:
new_mode = PHY_MODE_USB_OTG;
break;
default:
dev_err(musb->controller->parent,
"Error requested mode not supported by this kernel\n");
return -EINVAL;
}
if (glue->phy_mode == new_mode)
return 0;
if (musb->port_mode != MUSB_OTG) {
dev_err(musb->controller->parent,
"Error changing modes is only supported in dual role mode\n");
return -EINVAL;
}
if (musb->port1_status & USB_PORT_STAT_ENABLE)
musb_root_disconnect(musb);
/*
* phy_set_mode may sleep, and we're called with a spinlock held,
* so let sunxi_musb_work deal with it.
*/
glue->phy_mode = new_mode;
set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
schedule_work(&glue->work);
return 0;
}
static int sunxi_musb_recover(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
/*
* Schedule a phy_set_mode with the current glue->phy_mode value,
* this will force end the current session.
*/
set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
schedule_work(&glue->work);
return 0;
}
/*
* sunxi musb register layout
* 0x00 - 0x17 fifo regs, 1 long per fifo
* 0x40 - 0x57 generic control regs (power - frame)
* 0x80 - 0x8f ep control regs (addressed through hw_ep->regs, indexed)
* 0x90 - 0x97 fifo control regs (indexed)
* 0x98 - 0x9f multipoint / busctl regs (indexed)
* 0xc0 configdata reg
*/
static u32 sunxi_musb_fifo_offset(u8 epnum)
{
return (epnum * 4);
}
static u32 sunxi_musb_ep_offset(u8 epnum, u16 offset)
{
WARN_ONCE(offset != 0,
"sunxi_musb_ep_offset called with non 0 offset\n");
return 0x80; /* indexed, so ignore epnum */
}
static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
{
return SUNXI_MUSB_TXFUNCADDR + offset;
}
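/* e.g. busctl offset 0 (TXFUNCADDR) maps to 0x98, 4 (RXFUNCADDR) to
* 0x9c and 7 (RXHUBPORT) to 0x9f, matching the SUNXI_MUSB_* defines
* above; epnum is ignored because this register block is indexed.
*/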
static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
{
struct sunxi_glue *glue;
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
switch (offset) {
case MUSB_FADDR:
return readb(addr + SUNXI_MUSB_FADDR);
case MUSB_POWER:
return readb(addr + SUNXI_MUSB_POWER);
case MUSB_INTRUSB:
return readb(addr + SUNXI_MUSB_INTRUSB);
case MUSB_INTRUSBE:
return readb(addr + SUNXI_MUSB_INTRUSBE);
case MUSB_INDEX:
return readb(addr + SUNXI_MUSB_INDEX);
case MUSB_TESTMODE:
return 0; /* No testmode on sunxi */
case MUSB_DEVCTL:
return readb(addr + SUNXI_MUSB_DEVCTL);
case MUSB_TXFIFOSZ:
return readb(addr + SUNXI_MUSB_TXFIFOSZ);
case MUSB_RXFIFOSZ:
return readb(addr + SUNXI_MUSB_RXFIFOSZ);
case MUSB_CONFIGDATA + 0x10: /* See musb_read_configdata() */
glue = dev_get_drvdata(sunxi_musb->controller->parent);
/* A33 saves a reg, and we get to hardcode this */
if (test_bit(SUNXI_MUSB_FL_NO_CONFIGDATA,
&glue->flags))
return 0xde;
return readb(addr + SUNXI_MUSB_CONFIGDATA);
case MUSB_ULPI_BUSCONTROL:
dev_warn(sunxi_musb->controller->parent,
"sunxi-musb does not have ULPI bus control register\n");
return 0;
/* Offset for these is fixed by sunxi_musb_busctl_offset() */
case SUNXI_MUSB_TXFUNCADDR:
case SUNXI_MUSB_TXHUBADDR:
case SUNXI_MUSB_TXHUBPORT:
case SUNXI_MUSB_RXFUNCADDR:
case SUNXI_MUSB_RXHUBADDR:
case SUNXI_MUSB_RXHUBPORT:
/* multipoint / busctl reg access */
return readb(addr + offset);
default:
dev_err(sunxi_musb->controller->parent,
"Error unknown readb offset %u\n", offset);
return 0;
}
} else if (addr == (sunxi_musb->mregs + 0x80)) {
/* ep control reg access */
/* sunxi has a 2 byte hole before the txtype register */
if (offset >= MUSB_TXTYPE)
offset += 2;
return readb(addr + offset);
}
dev_err(sunxi_musb->controller->parent,
"Error unknown readb at 0x%x bytes offset\n",
(int)(addr - sunxi_musb->mregs));
return 0;
}
static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
switch (offset) {
case MUSB_FADDR:
return writeb(data, addr + SUNXI_MUSB_FADDR);
case MUSB_POWER:
return writeb(data, addr + SUNXI_MUSB_POWER);
case MUSB_INTRUSB:
return writeb(data, addr + SUNXI_MUSB_INTRUSB);
case MUSB_INTRUSBE:
return writeb(data, addr + SUNXI_MUSB_INTRUSBE);
case MUSB_INDEX:
return writeb(data, addr + SUNXI_MUSB_INDEX);
case MUSB_TESTMODE:
if (data)
dev_warn(sunxi_musb->controller->parent,
"sunxi-musb does not have testmode\n");
return;
case MUSB_DEVCTL:
return writeb(data, addr + SUNXI_MUSB_DEVCTL);
case MUSB_TXFIFOSZ:
return writeb(data, addr + SUNXI_MUSB_TXFIFOSZ);
case MUSB_RXFIFOSZ:
return writeb(data, addr + SUNXI_MUSB_RXFIFOSZ);
case MUSB_ULPI_BUSCONTROL:
dev_warn(sunxi_musb->controller->parent,
"sunxi-musb does not have ULPI bus control register\n");
return;
/* Offset for these is fixed by sunxi_musb_busctl_offset() */
case SUNXI_MUSB_TXFUNCADDR:
case SUNXI_MUSB_TXHUBADDR:
case SUNXI_MUSB_TXHUBPORT:
case SUNXI_MUSB_RXFUNCADDR:
case SUNXI_MUSB_RXHUBADDR:
case SUNXI_MUSB_RXHUBPORT:
/* multipoint / busctl reg access */
return writeb(data, addr + offset);
default:
dev_err(sunxi_musb->controller->parent,
"Error unknown writeb offset %u\n", offset);
return;
}
} else if (addr == (sunxi_musb->mregs + 0x80)) {
/* ep control reg access */
if (offset >= MUSB_TXTYPE)
offset += 2;
return writeb(data, addr + offset);
}
dev_err(sunxi_musb->controller->parent,
"Error unknown writeb at 0x%x bytes offset\n",
(int)(addr - sunxi_musb->mregs));
}
static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
switch (offset) {
case MUSB_INTRTX:
return readw(addr + SUNXI_MUSB_INTRTX);
case MUSB_INTRRX:
return readw(addr + SUNXI_MUSB_INTRRX);
case MUSB_INTRTXE:
return readw(addr + SUNXI_MUSB_INTRTXE);
case MUSB_INTRRXE:
return readw(addr + SUNXI_MUSB_INTRRXE);
case MUSB_FRAME:
return readw(addr + SUNXI_MUSB_FRAME);
case MUSB_TXFIFOADD:
return readw(addr + SUNXI_MUSB_TXFIFOADD);
case MUSB_RXFIFOADD:
return readw(addr + SUNXI_MUSB_RXFIFOADD);
case MUSB_HWVERS:
return 0; /* sunxi musb version is not known */
default:
dev_err(sunxi_musb->controller->parent,
"Error unknown readw offset %u\n", offset);
return 0;
}
} else if (addr == (sunxi_musb->mregs + 0x80)) {
/* ep control reg access */
return readw(addr + offset);
}
dev_err(sunxi_musb->controller->parent,
"Error unknown readw at 0x%x bytes offset\n",
(int)(addr - sunxi_musb->mregs));
return 0;
}
static void sunxi_musb_writew(void __iomem *addr, unsigned offset, u16 data)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
switch (offset) {
case MUSB_INTRTX:
return writew(data, addr + SUNXI_MUSB_INTRTX);
case MUSB_INTRRX:
return writew(data, addr + SUNXI_MUSB_INTRRX);
case MUSB_INTRTXE:
return writew(data, addr + SUNXI_MUSB_INTRTXE);
case MUSB_INTRRXE:
return writew(data, addr + SUNXI_MUSB_INTRRXE);
case MUSB_FRAME:
return writew(data, addr + SUNXI_MUSB_FRAME);
case MUSB_TXFIFOADD:
return writew(data, addr + SUNXI_MUSB_TXFIFOADD);
case MUSB_RXFIFOADD:
return writew(data, addr + SUNXI_MUSB_RXFIFOADD);
default:
dev_err(sunxi_musb->controller->parent,
"Error unknown writew offset %u\n", offset);
return;
}
} else if (addr == (sunxi_musb->mregs + 0x80)) {
/* ep control reg access */
return writew(data, addr + offset);
}
dev_err(sunxi_musb->controller->parent,
"Error unknown writew at 0x%x bytes offset\n",
(int)(addr - sunxi_musb->mregs));
}
static const struct musb_platform_ops sunxi_musb_ops = {
.quirks = MUSB_INDEXED_EP,
.init = sunxi_musb_init,
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
.readb = sunxi_musb_readb,
.writeb = sunxi_musb_writeb,
.readw = sunxi_musb_readw,
.writew = sunxi_musb_writew,
.dma_init = sunxi_musb_dma_controller_create,
.dma_exit = sunxi_musb_dma_controller_destroy,
.set_mode = sunxi_musb_set_mode,
.recover = sunxi_musb_recover,
.set_vbus = sunxi_musb_set_vbus,
.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
.post_root_reset_end = sunxi_musb_post_root_reset_end,
};
#define SUNXI_MUSB_RAM_BITS 11
/* Allwinner OTG supports up to 5 endpoints */
static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(5, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(5, FIFO_RX, 512),
};
/* H3/V3s OTG supports only 4 endpoints */
static struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
};
static const struct musb_hdrc_config sunxi_musb_hdrc_config_5eps = {
.fifo_cfg = sunxi_musb_mode_cfg_5eps,
.fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_5eps),
.multipoint = true,
.dyn_fifo = true,
/* Two FIFOs per endpoint, plus ep_0. */
.num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_5eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
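/* e.g. the 5-endpoint table above has 10 FIFO entries, so num_eps is
* 10 / 2 + 1 = 6: ep0 plus five TX/RX endpoint pairs.
*/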
static const struct musb_hdrc_config sunxi_musb_hdrc_config_4eps = {
.fifo_cfg = sunxi_musb_mode_cfg_4eps,
.fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_4eps),
.multipoint = true,
.dyn_fifo = true,
/* Two FIFOs per endpoint, plus ep_0. */
.num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_4eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
static int sunxi_musb_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data pdata;
struct platform_device_info pinfo;
struct sunxi_glue *glue;
struct device_node *np = pdev->dev.of_node;
const struct sunxi_musb_cfg *cfg;
int ret;
if (!np) {
dev_err(&pdev->dev, "Error no device tree node found\n");
return -EINVAL;
}
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
memset(&pdata, 0, sizeof(pdata));
switch (usb_get_dr_mode(&pdev->dev)) {
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
case USB_DR_MODE_HOST:
pdata.mode = MUSB_HOST;
glue->phy_mode = PHY_MODE_USB_HOST;
break;
#endif
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
case USB_DR_MODE_PERIPHERAL:
pdata.mode = MUSB_PERIPHERAL;
glue->phy_mode = PHY_MODE_USB_DEVICE;
break;
#endif
#ifdef CONFIG_USB_MUSB_DUAL_ROLE
case USB_DR_MODE_OTG:
pdata.mode = MUSB_OTG;
glue->phy_mode = PHY_MODE_USB_OTG;
break;
#endif
default:
dev_err(&pdev->dev, "Invalid or missing 'dr_mode' property\n");
return -EINVAL;
}
pdata.platform_ops = &sunxi_musb_ops;
cfg = of_device_get_match_data(&pdev->dev);
if (!cfg)
return -EINVAL;
pdata.config = cfg->hdrc_config;
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
if (cfg->has_sram)
set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
if (cfg->has_reset)
set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
if (cfg->no_configdata)
set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
glue->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(glue->clk)) {
dev_err(&pdev->dev, "Error getting clock: %ld\n",
PTR_ERR(glue->clk));
return PTR_ERR(glue->clk);
}
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
glue->rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(glue->rst))
return dev_err_probe(&pdev->dev, PTR_ERR(glue->rst),
"Error getting reset\n");
}
glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
if (IS_ERR(glue->extcon))
return dev_err_probe(&pdev->dev, PTR_ERR(glue->extcon),
"Invalid or missing extcon\n");
glue->phy = devm_phy_get(&pdev->dev, "usb");
if (IS_ERR(glue->phy))
return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
"Error getting phy\n");
glue->usb_phy = usb_phy_generic_register();
if (IS_ERR(glue->usb_phy)) {
dev_err(&pdev->dev, "Error registering usb-phy %ld\n",
PTR_ERR(glue->usb_phy));
return PTR_ERR(glue->usb_phy);
}
glue->xceiv = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
ret = PTR_ERR(glue->xceiv);
dev_err(&pdev->dev, "Error getting usb-phy %d\n", ret);
goto err_unregister_usb_phy;
}
platform_set_drvdata(pdev, glue);
memset(&pinfo, 0, sizeof(pinfo));
pinfo.name = "musb-hdrc";
pinfo.id = PLATFORM_DEVID_AUTO;
pinfo.parent = &pdev->dev;
pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
pinfo.of_node_reused = true;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = &pdata;
pinfo.size_data = sizeof(pdata);
glue->musb_pdev = platform_device_register_full(&pinfo);
if (IS_ERR(glue->musb_pdev)) {
ret = PTR_ERR(glue->musb_pdev);
dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
goto err_unregister_usb_phy;
}
return 0;
err_unregister_usb_phy:
usb_phy_generic_unregister(glue->usb_phy);
return ret;
}
static void sunxi_musb_remove(struct platform_device *pdev)
{
struct sunxi_glue *glue = platform_get_drvdata(pdev);
struct platform_device *usb_phy = glue->usb_phy;
platform_device_unregister(glue->musb_pdev);
usb_phy_generic_unregister(usb_phy);
}
static const struct sunxi_musb_cfg sun4i_a10_musb_cfg = {
.hdrc_config = &sunxi_musb_hdrc_config_5eps,
.has_sram = true,
};
static const struct sunxi_musb_cfg sun6i_a31_musb_cfg = {
.hdrc_config = &sunxi_musb_hdrc_config_5eps,
.has_reset = true,
};
static const struct sunxi_musb_cfg sun8i_a33_musb_cfg = {
.hdrc_config = &sunxi_musb_hdrc_config_5eps,
.has_reset = true,
.no_configdata = true,
};
static const struct sunxi_musb_cfg sun8i_h3_musb_cfg = {
.hdrc_config = &sunxi_musb_hdrc_config_4eps,
.has_reset = true,
.no_configdata = true,
};
static const struct sunxi_musb_cfg suniv_f1c100s_musb_cfg = {
.hdrc_config = &sunxi_musb_hdrc_config_5eps,
.has_sram = true,
.has_reset = true,
.no_configdata = true,
};
static const struct of_device_id sunxi_musb_match[] = {
{ .compatible = "allwinner,sun4i-a10-musb",
.data = &sun4i_a10_musb_cfg, },
{ .compatible = "allwinner,sun6i-a31-musb",
.data = &sun6i_a31_musb_cfg, },
{ .compatible = "allwinner,sun8i-a33-musb",
.data = &sun8i_a33_musb_cfg, },
{ .compatible = "allwinner,sun8i-h3-musb",
.data = &sun8i_h3_musb_cfg, },
{ .compatible = "allwinner,suniv-f1c100s-musb",
.data = &suniv_f1c100s_musb_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, sunxi_musb_match);
static struct platform_driver sunxi_musb_driver = {
.probe = sunxi_musb_probe,
.remove_new = sunxi_musb_remove,
.driver = {
.name = "musb-sunxi",
.of_match_table = sunxi_musb_match,
},
};
module_platform_driver(sunxi_musb_driver);
MODULE_DESCRIPTION("Allwinner sunxi MUSB Glue Layer");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/musb/sunxi.c |